├── teta
│   ├── metrics
│   │   ├── __init__.py
│   │   ├── _base_metric.py
│   │   └── teta.py
│   ├── __init__.py
│   ├── datasets
│   │   ├── __init__.py
│   │   ├── _base_dataset.py
│   │   ├── kitti_mots.py
│   │   ├── coco.py
│   │   ├── coco_mots.py
│   │   ├── bdd.py
│   │   ├── bdd_mots.py
│   │   └── tao.py
│   ├── utils.py
│   ├── _timing.py
│   ├── config.py
│   └── eval.py
├── figures
│   ├── figure_1.png
│   └── teta-teaser.png
├── requirements.txt
├── scripts
│   ├── lint.sh
│   ├── dataset_configs
│   │   ├── box_track.toml
│   │   └── seg_track.toml
│   ├── run_coco.py
│   ├── run_tao.py
│   ├── run_bdd.py
│   └── run_ovmot.py
├── .gitignore
├── docs
│   └── TAO-format.txt
├── setup.py
├── README.md
└── LICENSE
/teta/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .teta import TETA
2 |
--------------------------------------------------------------------------------
/figures/figure_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/siyuanliii/TETA/HEAD/figures/figure_1.png
--------------------------------------------------------------------------------
/figures/teta-teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/siyuanliii/TETA/HEAD/figures/teta-teaser.png
--------------------------------------------------------------------------------
/teta/__init__.py:
--------------------------------------------------------------------------------
1 | from . import config, datasets, metrics, utils
2 | from .eval import Evaluator
3 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | git+https://github.com/siyuanliii/TrackEval.git
2 | git+https://github.com/scalabel/scalabel.git@scalabel-evalAPI
3 | scipy
4 | numpy==1.26.0
--------------------------------------------------------------------------------
/scripts/lint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python3 -m black teta
4 | python3 -m isort teta
5 | python3 -m pylint teta
6 | python3 -m pydocstyle teta
7 | python3 -m mypy --strict teta
--------------------------------------------------------------------------------
/teta/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | """Datasets."""
2 | from .bdd import BDD
3 | from .bdd_mots import BDDMOTS
4 | from .coco import COCO
5 | from .coco_mots import COCOMOTS
6 | from .tao import TAO
--------------------------------------------------------------------------------
/scripts/dataset_configs/box_track.toml:
--------------------------------------------------------------------------------
1 | [imageSize]
2 | height = 720
3 | width = 1280
4 |
5 | [[attributes]]
6 | name = "crowd"
7 | type = "switch"
8 | tag = "c"
9 |
10 | [[categories]]
11 | name = "human"
12 | [[categories.subcategories]]
13 | name = "pedestrian"
14 |
15 | [[categories.subcategories]]
16 | name = "rider"
17 |
18 | [[categories]]
19 | name = "vehicle"
20 | [[categories.subcategories]]
21 | name = "car"
22 |
23 | [[categories.subcategories]]
24 | name = "truck"
25 |
26 | [[categories.subcategories]]
27 | name = "bus"
28 |
29 | [[categories.subcategories]]
30 | name = "train"
31 |
32 | [[categories]]
33 | name = "bike"
34 | [[categories.subcategories]]
35 | name = "motorcycle"
36 |
37 | [[categories.subcategories]]
38 | name = "bicycle"
--------------------------------------------------------------------------------
/scripts/dataset_configs/seg_track.toml:
--------------------------------------------------------------------------------
1 | [imageSize]
2 | height = 720
3 | width = 1280
4 |
5 | [[attributes]]
6 | name = "crowd"
7 | type = "switch"
8 | tag = "c"
9 |
10 | [[categories]]
11 | name = "human"
12 | [[categories.subcategories]]
13 | name = "pedestrian"
14 |
15 | [[categories.subcategories]]
16 | name = "rider"
17 |
18 | [[categories]]
19 | name = "vehicle"
20 | [[categories.subcategories]]
21 | name = "car"
22 |
23 | [[categories.subcategories]]
24 | name = "truck"
25 |
26 | [[categories.subcategories]]
27 | name = "bus"
28 |
29 | [[categories.subcategories]]
30 | name = "train"
31 |
32 | [[categories]]
33 | name = "bike"
34 | [[categories.subcategories]]
35 | name = "motorcycle"
36 |
37 | [[categories.subcategories]]
38 | name = "bicycle"
39 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
--------------------------------------------------------------------------------
/teta/utils.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import os
3 | from collections import OrderedDict
4 |
5 |
6 | def validate_metrics_list(metrics_list):
7 |     """Get the names of the metric classes, check that they are unique, and
8 |     check that the fields within each metric class do not have overlapping names.
9 |     """
10 | metric_names = [metric.get_name() for metric in metrics_list]
11 | # check metric names are unique
12 | if len(metric_names) != len(set(metric_names)):
13 | raise TrackEvalException(
14 | "Code being run with multiple metrics of the same name"
15 | )
16 | fields = []
17 | for m in metrics_list:
18 | fields += m.fields
19 | # check metric fields are unique
20 | if len(fields) != len(set(fields)):
21 | raise TrackEvalException(
22 | "Code being run with multiple metrics with fields of the same name"
23 | )
24 | return metric_names
25 |
26 |
27 | def get_track_id_str(ann):
28 | """Get name of track ID in annotation."""
29 | if "track_id" in ann:
30 | tk_str = "track_id"
31 | elif "instance_id" in ann:
32 | tk_str = "instance_id"
33 | elif "scalabel_id" in ann:
34 | tk_str = "scalabel_id"
35 | else:
36 |         raise TrackEvalException("No track/instance ID in annotation.")
37 | return tk_str
38 |
39 |
40 | class TrackEvalException(Exception):
41 | """Custom exception for catching expected errors."""
42 |
43 | ...
44 |
--------------------------------------------------------------------------------
/docs/TAO-format.txt:
--------------------------------------------------------------------------------
1 | Taken from: https://github.com/TAO-Dataset/tao/blob/master/tao/toolkit/tao/tao.py
2 |
3 | Annotation file format:
4 | {
5 | "info" : info,
6 | "images" : [image],
7 | "videos": [video],
8 | "tracks": [track],
9 | "annotations" : [annotation],
10 | "categories": [category],
11 | "licenses" : [license],
12 | }
13 | info: As in MS COCO
14 | image: {
15 | "id" : int,
16 | "video_id": int,
17 | "file_name" : str,
18 | "license" : int,
19 | # Redundant fields for COCO-compatibility
20 | "width": int,
21 | "height": int,
22 | "frame_index": int
23 | }
24 | video: {
25 | "id": int,
26 | "name": str,
27 | "width" : int,
28 | "height" : int,
29 | "neg_category_ids": [int],
30 | "not_exhaustive_category_ids": [int],
31 | "metadata": dict, # Metadata about the video
32 | }
33 | track: {
34 | "id": int,
35 | "category_id": int,
36 | "video_id": int
37 | }
38 | category: {
39 | "id": int,
40 | "name": str,
41 | "synset": str, # For non-LVIS objects, this is "unknown"
42 | ... [other fields copied from LVIS v0.5 and unused]
43 | }
44 | annotation: {
45 | "image_id": int,
46 | "track_id": int,
47 | "bbox": [x,y,width,height],
48 | "area": float,
49 | # Redundant field for compatibility with COCO scripts
50 | "category_id": int
51 | }
52 | license: {
53 | "id" : int,
54 | "name" : str,
55 | "url" : str,
56 | }
57 |
58 | Prediction format:
59 |
60 | [{
61 | "image_id" : int,
62 | "category_id" : int,
63 | "bbox" : [x,y,width,height],
64 | "score" : float,
65 | "track_id": int,
66 | "video_id": int
67 | }]
--------------------------------------------------------------------------------
/teta/_timing.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | from functools import wraps
3 | from time import perf_counter
4 |
5 | DO_TIMING = False
6 | DISPLAY_LESS_PROGRESS = False
7 | timer_dict = {}
8 | counter = 0
9 |
10 |
11 | def time(f):
12 | @wraps(f)
13 | def wrap(*args, **kw):
14 | if DO_TIMING:
15 | # Run function with timing
16 | ts = perf_counter()
17 | result = f(*args, **kw)
18 | te = perf_counter()
19 | tt = te - ts
20 |
21 | # Get function name
22 | arg_names = inspect.getfullargspec(f)[0]
23 | if arg_names[0] == "self" and DISPLAY_LESS_PROGRESS:
24 | return result
25 | elif arg_names[0] == "self":
26 | method_name = type(args[0]).__name__ + "." + f.__name__
27 | else:
28 | method_name = f.__name__
29 |
30 | # Record accumulative time in each function for analysis
31 | if method_name in timer_dict.keys():
32 | timer_dict[method_name] += tt
33 | else:
34 | timer_dict[method_name] = tt
35 |
36 | # If code is finished, display timing summary
37 | if method_name == "Evaluator.evaluate":
38 | print("")
39 | print("Timing analysis:")
40 | for key, value in timer_dict.items():
41 | print("%-70s %2.4f sec" % (key, value))
42 | else:
43 | # Get function argument values for printing special arguments of interest
44 | arg_titles = ["tracker", "seq", "cls"]
45 | arg_vals = []
46 | for i, a in enumerate(arg_names):
47 | if a in arg_titles:
48 | arg_vals.append(args[i])
49 | arg_text = "(" + ", ".join(arg_vals) + ")"
50 |
51 | # Display methods and functions with different indentation.
52 | if arg_names[0] == "self":
53 | print("%-74s %2.4f sec" % (" " * 4 + method_name + arg_text, tt))
54 | elif arg_names[0] == "test":
55 | pass
56 | else:
57 | global counter
58 | counter += 1
59 | print("%i %-70s %2.4f sec" % (counter, method_name + arg_text, tt))
60 |
61 | return result
62 | else:
63 | # If config["TIME_PROGRESS"] is false, or config["USE_PARALLEL"] is true, run functions normally without timing.
64 | return f(*args, **kw)
65 |
66 | return wrap
67 |
--------------------------------------------------------------------------------
/scripts/run_coco.py:
--------------------------------------------------------------------------------
1 | """ evaluate.py
2 |
3 | Run example:
4 | evaluate.py --USE_PARALLEL False --METRICS TETA --TRACKERS_TO_EVAL qdtrack
5 |
6 | Command Line Arguments: Defaults, # Comments
7 | Eval arguments:
8 | 'USE_PARALLEL': False,
9 | 'NUM_PARALLEL_CORES': 8,
10 | 'BREAK_ON_ERROR': True, # Raises exception and exits with error
11 | 'RETURN_ON_ERROR': False, # if not BREAK_ON_ERROR, then returns from function on error
12 | 'LOG_ON_ERROR': os.path.join(code_path, 'error_log.txt'), # if not None, save any errors into a log file.
13 | 'PRINT_RESULTS': True,
14 | 'PRINT_ONLY_COMBINED': False,
15 | 'PRINT_CONFIG': True,
16 | 'TIME_PROGRESS': True,
17 | 'DISPLAY_LESS_PROGRESS': True,
18 | 'OUTPUT_SUMMARY': True,
19 | 'OUTPUT_EMPTY_CLASSES': True, # If False, summary files are not output for classes with no detections
20 |     'OUTPUT_TEM_RAW_DATA': True, # Output detailed statistics for each class
21 | Dataset arguments:
22 | 'GT_FOLDER': os.path.join(code_path, 'data/gt/tao/tao_training'), # Location of GT data
23 | 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/tao/tao_training'), # Trackers location
24 | 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
25 | 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)
26 | 'CLASSES_TO_EVAL': None, # Classes to eval (if None, all classes)
27 | 'SPLIT_TO_EVAL': 'training', # Valid: 'training', 'val'
28 | 'PRINT_CONFIG': True, # Whether to print current config
29 | 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
30 | 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
31 | 'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL
32 | 'MAX_DETECTIONS': 300, # Number of maximal allowed detections per image (0 for unlimited)
33 | Metric arguments:
34 |     'METRICS': ['TETA']
35 | """
36 |
37 | import sys
38 | import os
39 | import argparse
40 | from multiprocessing import freeze_support
41 |
42 | from teta.config import parse_configs
43 | from teta.datasets import COCO
44 | from teta.eval import Evaluator
45 | from teta.metrics import TETA
46 |
47 |
48 | def evaluate():
49 | """Evaluate with TETA."""
50 | eval_config, dataset_config, metrics_config = parse_configs()
51 | evaluator = Evaluator(eval_config)
52 | dataset_list = [COCO(dataset_config)]
53 | metrics_list = []
54 | metric = TETA(exhaustive=True)
55 | if metric.get_name() in metrics_config["METRICS"]:
56 | metrics_list.append(metric)
57 | if len(metrics_list) == 0:
58 | raise Exception("No metrics selected for evaluation")
59 | evaluator.evaluate(dataset_list, metrics_list)
60 |
61 |
62 | if __name__ == "__main__":
63 | freeze_support()
64 | evaluate()
65 |
--------------------------------------------------------------------------------
/scripts/run_tao.py:
--------------------------------------------------------------------------------
1 | """ evaluate.py
2 |
3 | Run example:
4 | evaluate.py --USE_PARALLEL False --METRICS TETA --TRACKERS_TO_EVAL qdtrack
5 |
6 | Command Line Arguments: Defaults, # Comments
7 | Eval arguments:
8 | 'USE_PARALLEL': False,
9 | 'NUM_PARALLEL_CORES': 8,
10 | 'BREAK_ON_ERROR': True, # Raises exception and exits with error
11 | 'RETURN_ON_ERROR': False, # if not BREAK_ON_ERROR, then returns from function on error
12 | 'LOG_ON_ERROR': os.path.join(code_path, 'error_log.txt'), # if not None, save any errors into a log file.
13 | 'PRINT_RESULTS': True,
14 | 'PRINT_ONLY_COMBINED': False,
15 | 'PRINT_CONFIG': True,
16 | 'TIME_PROGRESS': True,
17 | 'DISPLAY_LESS_PROGRESS': True,
18 | 'OUTPUT_SUMMARY': True,
19 | 'OUTPUT_EMPTY_CLASSES': True, # If False, summary files are not output for classes with no detections
20 | 'OUTPUT_TEM_RAW_DATA': True, # Output detailed statistics for each class
21 | Dataset arguments:
22 | 'GT_FOLDER': os.path.join(code_path, 'data/gt/tao/tao_training'), # Location of GT data
23 | 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/tao/tao_training'), # Trackers location
24 | 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
25 | 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)
26 | 'CLASSES_TO_EVAL': None, # Classes to eval (if None, all classes)
27 | 'SPLIT_TO_EVAL': 'training', # Valid: 'training', 'val'
28 | 'PRINT_CONFIG': True, # Whether to print current config
29 | 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
30 | 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
31 | 'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL
32 | 'MAX_DETECTIONS': 300, # Number of maximal allowed detections per image (0 for unlimited)
33 | Metric arguments:
34 | 'METRICS': ['TETA']
35 | """
36 |
37 | import sys
38 | import os
39 | import argparse
40 | from multiprocessing import freeze_support
41 |
42 | from teta.config import parse_configs
43 | from teta.datasets import TAO
44 | from teta.eval import Evaluator
45 | from teta.metrics import TETA
46 |
47 |
48 | def evaluate():
49 | """Evaluate with TETA."""
50 | eval_config, dataset_config, metrics_config = parse_configs()
51 | evaluator = Evaluator(eval_config)
52 | dataset_list = [TAO(dataset_config)]
53 | metrics_list = []
54 | metric = TETA(exhaustive=False)
55 | if metric.get_name() in metrics_config["METRICS"]:
56 | metrics_list.append(metric)
57 | if len(metrics_list) == 0:
58 | raise Exception("No metrics selected for evaluation")
59 | evaluator.evaluate(dataset_list, metrics_list)
60 |
61 |
62 | if __name__ == "__main__":
63 | freeze_support()
64 | evaluate()
65 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 | import sys
4 | from shutil import rmtree
5 |
6 | from setuptools import find_packages, setup, Command
7 |
8 | # Package meta-data.
9 | NAME = 'teta'
10 | DESCRIPTION = 'Track Every Thing Accuracy (TETA metric)'
11 | EMAIL = 'siyuan.li@vision.ee.ethz.ch'
12 | AUTHOR = 'Siyuan Li'
13 | REQUIRES_PYTHON = '>=3.6.0'
14 | VERSION = '0.1.0'
15 |
16 | # What packages are required for this module to be executed?
17 | REQUIRED = [
18 | 'script_utils @ git+https://github.com/achalddave/python-script-utils.git@v0.0.2#egg=script_utils',
19 | 'numpy', 'scipy'
20 | ]
21 |
22 | # What packages are optional?
23 | EXTRAS = {
24 | }
25 |
26 | here = os.path.abspath(os.path.dirname(__file__))
27 |
28 | # Import the README and use it as the long-description.
29 | # Note: this will only work if 'README.md' is present in your MANIFEST.in file!
30 | try:
31 | with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
32 | long_description = '\n' + f.read()
33 | except FileNotFoundError:
34 | long_description = DESCRIPTION
35 |
36 | # Load the package's __version__.py module as a dictionary.
37 | about = {}
38 | if not VERSION:
39 | project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
40 | with open(os.path.join(here, project_slug, '__version__.py')) as f:
41 | exec(f.read(), about)
42 | else:
43 | about['__version__'] = VERSION
44 |
45 |
46 | class UploadCommand(Command):
47 | """Support setup.py upload."""
48 |
49 | description = 'Build and publish the package.'
50 | user_options = []
51 |
52 | @staticmethod
53 | def status(s):
54 | """Prints things in bold."""
55 | print('\033[1m{0}\033[0m'.format(s))
56 |
57 | def initialize_options(self):
58 | pass
59 |
60 | def finalize_options(self):
61 | pass
62 |
63 | def run(self):
64 | try:
65 | self.status('Removing previous builds…')
66 | rmtree(os.path.join(here, 'dist'))
67 | except OSError:
68 | pass
69 |
70 | self.status('Building Source and Wheel (universal) distribution…')
71 | os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
72 |
73 | self.status('Uploading the package to PyPI via Twine…')
74 | os.system('twine upload dist/*')
75 |
76 | self.status('Pushing git tags…')
77 | os.system('git tag v{0}'.format(about['__version__']))
78 | os.system('git push --tags')
79 |
80 | sys.exit()
81 |
82 |
83 | # Where the magic happens:
84 | setup(
85 | name=NAME,
86 | version=about['__version__'],
87 | description=DESCRIPTION,
88 | long_description=long_description,
89 | long_description_content_type='text/markdown',
90 | author=AUTHOR,
91 | author_email=EMAIL,
92 | python_requires=REQUIRES_PYTHON,
93 | # url=URL,
94 | packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
95 | # If your package is a single module, use this instead of 'packages':
96 | # py_modules=['tao'],
97 |
98 | # entry_points={
99 | # 'console_scripts': ['mycli=mymodule:cli'],
100 | # },
101 | install_requires=REQUIRED,
102 | extras_require=EXTRAS,
103 | include_package_data=True,
104 | license='MIT',
105 | classifiers=[
106 | # Trove classifiers
107 | # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
108 | 'License :: OSI Approved :: MIT License',
109 | 'Programming Language :: Python',
110 | 'Programming Language :: Python :: 3',
111 | 'Programming Language :: Python :: 3.6',
112 | 'Programming Language :: Python :: Implementation :: CPython',
113 | 'Programming Language :: Python :: Implementation :: PyPy'
114 | ],
115 | # $ setup.py publish support.
116 | cmdclass={
117 | 'upload': UploadCommand,
118 | },
119 | )
--------------------------------------------------------------------------------
/scripts/run_bdd.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | import os
4 | import time
5 | from pathlib import Path
6 |
7 | from scalabel.eval.box_track import BoxTrackResult, bdd100k_to_scalabel
8 | from scalabel.eval.hota import HOTAResult, evaluate_track_hota
9 | from scalabel.eval.hotas import evaluate_seg_track_hota
10 | from scalabel.eval.mot import TrackResult, acc_single_video_mot, evaluate_track
11 | from scalabel.eval.mots import acc_single_video_mots, evaluate_seg_track
12 | from scalabel.eval.teta import TETAResult, evaluate_track_teta
13 | from scalabel.eval.tetas import evaluate_seg_track_teta
14 | from scalabel.label.io import group_and_sort, load, load_label_config
15 |
16 | MOT_CFG_FILE = os.path.join(
17 |     str(Path(__file__).parent.absolute()), "dataset_configs/box_track.toml"
18 | )
19 | MOTS_CFG_FILE = os.path.join(
20 |     str(Path(__file__).parent.absolute()), "dataset_configs/seg_track.toml"
21 | )
22 |
23 |
24 | class TETA_BDD100K_Evaluator:
25 | def __init__(self, scalabel_gt, resfile_path, metrics, with_mask, logger, nproc):
26 | self.scalabel_gt = scalabel_gt
27 | self.resfile_path = resfile_path
28 | self.metrics = metrics
29 | self.with_mask = with_mask
30 | self.logger = logger
31 | self.nproc = nproc
32 |
33 | def evaluate(self):
34 | """Evaluate with TETA, HOTA, ClearMOT on BDD100K."""
35 |
36 | eval_results = dict()
37 |
38 |         bdd100k_config = load_label_config(MOTS_CFG_FILE if self.with_mask else MOT_CFG_FILE)
39 |         print("Start loading.")
40 |
41 |         gts = group_and_sort(load(self.scalabel_gt).frames)
42 |         results = group_and_sort(load(self.resfile_path).frames)
43 |         print(f"Number of GT videos: {len(gts)}, number of result videos: {len(results)}")
44 |         print("Finished loading.")
45 |         print("Starting evaluation.")
46 |         print("Unknown categories are ignored.")
47 |
48 | self.logger.info("Tracking evaluation.")
49 | t = time.time()
50 | gts = [bdd100k_to_scalabel(gt, bdd100k_config) for gt in gts]
51 | results = [bdd100k_to_scalabel(result, bdd100k_config) for result in results]
52 |
53 | if "CLEAR" in self.metrics:
54 | if self.with_mask:
55 | mot_result = evaluate_seg_track(
56 | acc_single_video_mots,
57 | gts,
58 | results,
59 | bdd100k_config,
60 | ignore_unknown_cats=True,
61 | nproc=self.nproc,
62 | )
63 | else:
64 | mot_result = evaluate_track(
65 | acc_single_video_mot,
66 | gts,
67 | results,
68 | bdd100k_config,
69 | ignore_unknown_cats=True,
70 | nproc=self.nproc,
71 | )
72 | print("CLEAR and IDF1 results :")
73 | print(mot_result)
74 | print(mot_result.summary())
75 |
76 | if "HOTA" in self.metrics:
77 | if self.with_mask:
78 | hota_result = evaluate_seg_track_hota(
79 | gts, results, bdd100k_config, self.nproc
80 | )
81 | else:
82 | hota_result = evaluate_track_hota(gts, results, bdd100k_config, self.nproc)
83 | print("HOTA results :")
84 | print(hota_result)
85 | print(hota_result.summary())
86 |
87 | if "TETA" in self.metrics:
88 | if self.with_mask:
89 | teta_result = evaluate_seg_track_teta(
90 | gts, results, bdd100k_config, self.nproc
91 | )
92 | else:
93 | teta_result = evaluate_track_teta(gts, results, bdd100k_config, self.nproc)
94 |
95 | print("TETA results :")
96 | print(teta_result)
97 | print(teta_result.summary())
98 |
99 | if (
100 | "CLEAR" in self.metrics
101 | and "HOTA" in self.metrics
102 | and "TETA" in self.metrics
103 | ):
104 | print("Aggregated results: ")
105 | combined_result = BoxTrackResult(
106 | **{**mot_result.dict(), **hota_result.dict(), **teta_result.dict()}
107 | )
108 | print(combined_result)
109 | print(combined_result.summary())
110 |
111 | t = time.time() - t
112 |         self.logger.info("Evaluation finished in %.1f s.", t)
113 |
114 | print("Completed evaluation")
115 | return eval_results
116 |
117 | if __name__ == "__main__":
118 | parser = argparse.ArgumentParser(description="Evaluate tracking performance on BDD100K.")
119 | parser.add_argument('--scalabel_gt', required=True, help='Path to the ground truth file')
120 | parser.add_argument('--resfile_path', required=True, help='Path to the result file')
121 | parser.add_argument('--metrics', nargs='+', default=['TETA', 'HOTA', 'CLEAR'], help='List of metrics to evaluate')
122 | parser.add_argument('--with_mask', action='store_true', help='Whether to evaluate with mask')
123 | parser.add_argument('--nproc', type=int, default=8, help='Number of processes to use')
124 | args = parser.parse_args()
125 |
126 | logger = logging.getLogger(__name__)
127 | logging.basicConfig(level=logging.INFO)
128 |
129 | evaluator = TETA_BDD100K_Evaluator(args.scalabel_gt, args.resfile_path, args.metrics, args.with_mask, logger, args.nproc)
130 | evaluator.evaluate()
131 |
--------------------------------------------------------------------------------
/teta/metrics/_base_metric.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 | import numpy as np
4 |
5 | from .. import _timing
6 | from ..utils import TrackEvalException
7 |
8 |
9 | class _BaseMetric(ABC):
10 | @abstractmethod
11 | def __init__(self):
12 | self.plottable = False
13 | self.integer_fields = []
14 | self.float_fields = []
15 | self.array_labels = []
16 | self.integer_array_fields = []
17 | self.float_array_fields = []
18 | self.fields = []
19 | self.summary_fields = []
20 | self.registered = False
21 |
22 | #####################################################################
23 | # Abstract functions for subclasses to implement
24 |
25 | @_timing.time
26 | @abstractmethod
27 | def eval_sequence(self, data):
28 | ...
29 |
30 | @abstractmethod
31 | def combine_sequences(self, all_res):
32 | ...
33 |
34 | @abstractmethod
35 | def combine_classes_class_averaged(self, all_res, ignore_empty=False):
36 | ...
37 |
38 | @abstractmethod
39 | def combine_classes_det_averaged(self, all_res):
40 | ...
41 |
42 | def plot_single_tracker_results(self, all_res, tracker, output_folder, cls):
43 | """Plot results, only valid for metrics with self.plottable."""
44 | if self.plottable:
45 | raise NotImplementedError(
46 | f"plot_results is not implemented for metric {self.get_name()}"
47 | )
48 | else:
49 | pass
50 |
51 | #####################################################################
52 | # Helper functions which are useful for all metrics:
53 |
54 | @classmethod
55 | def get_name(cls):
56 | return cls.__name__
57 |
58 | @staticmethod
59 | def _combine_sum(all_res, field):
60 |         """Combine sequence results via sum."""
61 | return sum([all_res[k][field] for k in all_res.keys()])
62 |
63 | @staticmethod
64 | def _combine_weighted_av(all_res, field, comb_res, weight_field):
65 | """Combine sequence results via weighted average."""
66 | return sum(
67 | [all_res[k][field] * all_res[k][weight_field] for k in all_res.keys()]
68 | ) / np.maximum(1.0, comb_res[weight_field])
69 |
70 | def print_table(self, table_res, tracker, cls):
71 | """Print table of results for all sequences."""
72 | print("")
73 | metric_name = self.get_name()
74 | self._row_print(
75 | [metric_name + ": " + tracker + "-" + cls] + self.summary_fields
76 | )
77 | for seq, results in sorted(table_res.items()):
78 | if seq == "COMBINED_SEQ":
79 | continue
80 | summary_res = self._summary_row(results)
81 | self._row_print([seq] + summary_res)
82 | summary_res = self._summary_row(table_res["COMBINED_SEQ"])
83 | self._row_print(["COMBINED"] + summary_res)
84 |
85 | def _summary_row(self, results_):
86 | vals = []
87 | for h in self.summary_fields:
88 | if h in self.float_array_fields:
89 | vals.append("{0:1.5g}".format(100 * np.mean(results_[h])))
90 | elif h in self.float_fields:
91 | vals.append("{0:1.5g}".format(100 * float(results_[h])))
92 | elif h in self.integer_fields:
93 | vals.append("{0:d}".format(int(results_[h])))
94 | else:
95 | raise NotImplementedError(
96 | "Summary function not implemented for this field type."
97 | )
98 | return vals
99 |
100 | @staticmethod
101 | def _row_print(*argv):
102 | """Print results in evenly spaced rows, with more space in first row."""
103 | if len(argv) == 1:
104 | argv = argv[0]
105 | to_print = "%-35s" % argv[0]
106 | for v in argv[1:]:
107 | to_print += "%-10s" % str(v)
108 | print(to_print)
109 |
110 | def summary_results(self, table_res):
111 | """Return a simple summary of final results for a tracker."""
112 | return dict(
113 | zip(self.summary_fields, self._summary_row(table_res["COMBINED_SEQ"]),)
114 | )
115 |
116 | def detailed_results(self, table_res):
117 | """Return detailed final results for a tracker."""
118 | # Get detailed field information
119 | detailed_fields = self.float_fields + self.integer_fields
120 | for h in self.float_array_fields + self.integer_array_fields:
121 | for alpha in [int(100 * x) for x in self.array_labels]:
122 | detailed_fields.append(h + "___" + str(alpha))
123 | detailed_fields.append(h + "___AUC")
124 |
125 | # Get detailed results
126 | detailed_results = {}
127 | for seq, res in table_res.items():
128 | detailed_row = self._detailed_row(res)
129 | if len(detailed_row) != len(detailed_fields):
130 | raise TrackEvalException(
131 | f"Field names and data have different sizes "
132 | f"({len(detailed_row)} and {len(detailed_fields)})"
133 | )
134 | detailed_results[seq] = dict(zip(detailed_fields, detailed_row))
135 | return detailed_results
136 |
137 | def _detailed_row(self, res):
138 | detailed_row = []
139 | for h in self.float_fields + self.integer_fields:
140 | detailed_row.append(res[h])
141 | for h in self.float_array_fields + self.integer_array_fields:
142 | for i, _ in enumerate([int(100 * x) for x in self.array_labels]):
143 | detailed_row.append(res[h][i])
144 | detailed_row.append(np.mean(res[h]))
145 | return detailed_row
146 |
--------------------------------------------------------------------------------
/scripts/run_ovmot.py:
--------------------------------------------------------------------------------
1 | """ evaluate.py
2 |
3 | Run example:
4 | evaluate.py --USE_PARALLEL False --METRICS TETA --TRACKERS_TO_EVAL qdtrack
5 |
6 | Command Line Arguments: Defaults, # Comments
7 | Eval arguments:
8 | 'USE_PARALLEL': False,
9 | 'NUM_PARALLEL_CORES': 8,
10 | 'BREAK_ON_ERROR': True, # Raises exception and exits with error
11 | 'RETURN_ON_ERROR': False, # if not BREAK_ON_ERROR, then returns from function on error
12 | 'LOG_ON_ERROR': os.path.join(code_path, 'error_log.txt'), # if not None, save any errors into a log file.
13 | 'PRINT_RESULTS': True,
14 | 'PRINT_ONLY_COMBINED': False,
15 | 'PRINT_CONFIG': True,
16 | 'TIME_PROGRESS': True,
17 | 'DISPLAY_LESS_PROGRESS': True,
18 | 'OUTPUT_SUMMARY': True,
19 | 'OUTPUT_EMPTY_CLASSES': True, # If False, summary files are not output for classes with no detections
20 | 'OUTPUT_TEM_RAW_DATA': True, # Output detailed statistics for each class
21 | Dataset arguments:
22 | 'GT_FOLDER': os.path.join(code_path, 'data/gt/tao/tao_training'), # Location of GT data
23 | 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/tao/tao_training'), # Trackers location
24 | 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
25 | 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)
26 | 'CLASSES_TO_EVAL': None, # Classes to eval (if None, all classes)
27 | 'SPLIT_TO_EVAL': 'training', # Valid: 'training', 'val'
28 | 'PRINT_CONFIG': True, # Whether to print current config
29 | 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
30 | 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
31 | 'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL
32 | 'MAX_DETECTIONS': 300, # Number of maximal allowed detections per image (0 for unlimited)
33 | Metric arguments:
34 | 'METRICS': ['TETA']
35 | """
36 |
37 | import sys
38 | import os
39 | import argparse
40 | import pickle
41 | import numpy as np
42 | import json
43 | from multiprocessing import freeze_support
44 |
45 | from teta.config import parse_configs
46 | from teta.datasets import TAO
47 | from teta.eval import Evaluator
48 | from teta.metrics import TETA
49 |
50 |
51 | def compute_teta_on_ovsetup(teta_res, base_class_names, novel_class_names):
52 | if "COMBINED_SEQ" in teta_res:
53 | teta_res = teta_res["COMBINED_SEQ"]
54 |
55 | frequent_teta = []
56 | rare_teta = []
57 | for key in teta_res:
58 | if key in base_class_names:
59 | frequent_teta.append(np.array(teta_res[key]["TETA"][50]).astype(float))
60 | elif key in novel_class_names:
61 | rare_teta.append(np.array(teta_res[key]["TETA"][50]).astype(float))
62 |
63 | print("Base and Novel classes performance")
64 |
65 | # print the header
66 | print(
67 | "{:<10} {:<10} {:<10} {:<10} {:<10} {:<10} {:<10} {:<10} {:<10} {:<10} {:<10}".format(
68 | "TETA50:",
69 | "TETA",
70 | "LocA",
71 | "AssocA",
72 | "ClsA",
73 | "LocRe",
74 | "LocPr",
75 | "AssocRe",
76 | "AssocPr",
77 | "ClsRe",
78 | "ClsPr",
79 | )
80 | )
81 |
82 | if frequent_teta:
83 | freq_teta_mean = np.mean(np.stack(frequent_teta), axis=0)
84 |
85 | # print the frequent teta mean
86 | print("{:<10} ".format("Base"), end="")
87 | print(*["{:<10.3f}".format(num) for num in freq_teta_mean])
88 |
89 | else:
90 | print("No Base classes to evaluate!")
91 | freq_teta_mean = None
92 | if rare_teta:
93 | rare_teta_mean = np.mean(np.stack(rare_teta), axis=0)
94 |
95 | # print the rare teta mean
96 | print("{:<10} ".format("Novel"), end="")
97 | print(*["{:<10.3f}".format(num) for num in rare_teta_mean])
98 | else:
99 | print("No Novel classes to evaluate!")
100 | rare_teta_mean = None
101 |
102 | return freq_teta_mean, rare_teta_mean
103 |
104 |
105 | def evaluate():
106 | """Evaluate with TETA."""
107 | eval_config, dataset_config, metrics_config = parse_configs()
108 | evaluator = Evaluator(eval_config)
109 | dataset_list = [TAO(dataset_config)]
110 | metrics_list = []
111 | metric = TETA(exhaustive=False)
112 | if metric.get_name() in metrics_config["METRICS"]:
113 | metrics_list.append(metric)
114 | if len(metrics_list) == 0:
115 | raise Exception("No metrics selected for evaluation")
116 |
117 | tracker_name = dataset_config["TRACKERS_TO_EVAL"][0]
118 | resfile_path = dataset_config["TRACKERS_FOLDER"]
119 | dataset_gt = json.load(open(dataset_config["GT_FOLDER"]))
120 | eval_results, _ = evaluator.evaluate(dataset_list, metrics_list)
121 |
122 | eval_results_path = os.path.join(
123 | resfile_path, tracker_name, "teta_summary_results.pth"
124 | )
125 | eval_res = pickle.load(open(eval_results_path, "rb"))
126 |
127 | base_class_synset = set(
128 | [
129 | c["name"]
130 | for c in dataset_gt["categories"]
131 | if c["frequency"] != "r"
132 | ]
133 | )
134 | novel_class_synset = set(
135 | [
136 | c["name"]
137 | for c in dataset_gt["categories"]
138 | if c["frequency"] == "r"
139 | ]
140 | )
141 |
142 | compute_teta_on_ovsetup(
143 | eval_res, base_class_synset, novel_class_synset
144 | )
145 |
146 |
147 | if __name__ == "__main__":
148 | freeze_support()
149 | evaluate()
150 |
--------------------------------------------------------------------------------
/teta/config.py:
--------------------------------------------------------------------------------
1 | """Config."""
2 | import argparse
3 | import os
4 |
5 |
6 | def parse_configs():
7 | """Parse command line."""
8 | default_eval_config = get_default_eval_config()
9 | default_eval_config["DISPLAY_LESS_PROGRESS"] = True
10 | default_dataset_config = get_default_dataset_config()
11 | default_metrics_config = {"METRICS": ["TETA"]}
12 | config = {
13 | **default_eval_config,
14 | **default_dataset_config,
15 | **default_metrics_config,
16 | }
17 | parser = argparse.ArgumentParser()
18 | for setting in config.keys():
19 | if type(config[setting]) == list or type(config[setting]) == type(None):
20 | parser.add_argument("--" + setting, nargs="+")
21 | else:
22 | parser.add_argument("--" + setting)
23 | args = parser.parse_args().__dict__
24 | for setting in args.keys():
25 | if args[setting] is not None:
26 | if type(config[setting]) == type(True):
27 | if args[setting] == "True":
28 | x = True
29 | elif args[setting] == "False":
30 | x = False
31 | else:
32 | raise Exception(
33 | f"Command line parameter {setting} must be True/False"
34 | )
35 | elif type(config[setting]) == type(1):
36 | x = int(args[setting])
37 | elif type(args[setting]) == type(None):
38 | x = None
39 | else:
40 | x = args[setting]
41 | config[setting] = x
42 | eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()}
43 | dataset_config = {
44 | k: v for k, v in config.items() if k in default_dataset_config.keys()
45 | }
46 | metrics_config = {
47 | k: v for k, v in config.items() if k in default_metrics_config.keys()
48 | }
49 |
50 | return eval_config, dataset_config, metrics_config
51 |
52 |
53 | def get_default_eval_config():
54 | """Returns the default config values for evaluation."""
55 | code_path = get_code_path()
56 | default_config = {
57 | "USE_PARALLEL": True,
58 | "NUM_PARALLEL_CORES": 8,
59 | "BREAK_ON_ERROR": True,
60 | "RETURN_ON_ERROR": False,
61 | "LOG_ON_ERROR": os.path.join(code_path, "error_log.txt"),
62 | "PRINT_RESULTS": True,
63 | "PRINT_ONLY_COMBINED": True,
64 | "PRINT_CONFIG": True,
65 | "TIME_PROGRESS": True,
66 | "DISPLAY_LESS_PROGRESS": True,
67 | "OUTPUT_SUMMARY": True,
68 | "OUTPUT_EMPTY_CLASSES": True,
69 | "OUTPUT_TEM_RAW_DATA": True,
70 | "OUTPUT_PER_SEQ_RES": True,
71 | }
72 | return default_config
73 |
74 |
75 | def get_default_dataset_config():
76 |     """Return the default dataset config values."""
77 | code_path = get_code_path()
78 | default_config = {
79 | "GT_FOLDER": os.path.join(
80 | code_path, "data/gt/tao/tao_training"
81 | ), # Location of GT data
82 | "TRACKERS_FOLDER": os.path.join(
83 | code_path, "data/trackers/tao/tao_training"
84 | ), # Trackers location
85 | "OUTPUT_FOLDER": None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
86 | "TRACKERS_TO_EVAL": ['TETer'], # Filenames of trackers to eval (if None, all in folder)
87 | "CLASSES_TO_EVAL": None, # Classes to eval (if None, all classes)
88 | "SPLIT_TO_EVAL": "training", # Valid: 'training', 'val'
89 | "PRINT_CONFIG": True, # Whether to print current config
90 | "TRACKER_SUB_FOLDER": "data", # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
91 | "OUTPUT_SUB_FOLDER": "", # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
92 | "TRACKER_DISPLAY_NAMES": None, # Names of trackers to display, if None: TRACKERS_TO_EVAL
93 | "MAX_DETECTIONS": 0, # Number of maximal allowed detections per image (0 for unlimited)
94 | }
95 | return default_config
96 |
97 |
98 | def init_config(config, default_config, name=None):
99 | """Initialize non-given config values with defaults."""
100 | if config is None:
101 | config = default_config
102 | else:
103 | for k in default_config.keys():
104 | if k not in config.keys():
105 | config[k] = default_config[k]
106 | if name and config["PRINT_CONFIG"]:
107 | print("\n%s Config:" % name)
108 | for c in config.keys():
109 | print("%-20s : %-30s" % (c, config[c]))
110 | return config
111 |
112 |
113 | def update_config(config):
114 | """
115 |     Parse the arguments of a script and update the config values for any setting specified in the arguments.
116 | :param config: the config to update
117 | :return: the updated config
118 | """
119 | parser = argparse.ArgumentParser()
120 | for setting in config.keys():
121 | if type(config[setting]) == list or type(config[setting]) == type(None):
122 | parser.add_argument("--" + setting, nargs="+")
123 | else:
124 | parser.add_argument("--" + setting)
125 | args = parser.parse_args().__dict__
126 | for setting in args.keys():
127 | if args[setting] is not None:
128 | if type(config[setting]) == type(True):
129 | if args[setting] == "True":
130 | x = True
131 | elif args[setting] == "False":
132 | x = False
133 | else:
134 | raise Exception(
135 |                         "Command line parameter " + setting + " must be True or False"
136 | )
137 | elif type(config[setting]) == type(1):
138 | x = int(args[setting])
139 | elif type(args[setting]) == type(None):
140 | x = None
141 | else:
142 | x = args[setting]
143 | config[setting] = x
144 | return config
145 |
146 |
147 | def get_code_path():
148 | """Get base path where code is"""
149 | return os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
150 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Track Every Thing Accuracy
[Track Every Thing in the Wild](https://arxiv.org/abs/2207.12978) [ECCV 2022].

This is the official implementation of the TETA metric described in the paper. This repo is an updated version of the original [TET repo](https://github.com/SysCV/tet) for the TETA metric.

## Introduction

TETA is a new metric for tracking evaluation that breaks tracking measurement into three sub-factors: localization, association, and classification, allowing comprehensive benchmarking of tracking performance even under inaccurate classification.
TETA also deals with the challenging incomplete annotation problem in large-scale tracking datasets.
Instead of using the predicted class labels to group per-class tracking results, we use location with the help of local cluster evaluation.
We treat each ground-truth bounding box of the target class as the anchor of a cluster and group prediction results inside each cluster to evaluate localization and association performance.
Our local clusters enable us to evaluate tracks even when the class prediction is wrong.
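
As a rough sketch of how the three sub-factors relate (this follows the paper's definition of TETA as the mean of the three sub-factor accuracies; it is not this repo's internal API):

```python
# Illustrative sketch only, assuming the paper's definition of TETA as the
# arithmetic mean of the three sub-factor accuracies.
def teta_score(loc_a: float, assoc_a: float, cls_a: float) -> float:
    """Combine localization, association, and classification accuracy."""
    return (loc_a + assoc_a + cls_a) / 3.0
```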

## Why you should use the TETA metric

TETA is designed to evaluate multiple object tracking (MOT) and segmentation (MOTS) in large-scale **multiple classes** and **open-vocabulary** scenarios.
It has been widely used to evaluate tracker performance on the BDD100K and TAO datasets.
Some key features of TETA are:
- **Disentangle classification from tracking**: TETA disentangles classification from tracking, allowing comprehensive benchmarking of tracking performance. Don't worry if your tracker is not the strongest in classification; TETA still gives you credit for good localization and association.
- **Comprehensive evaluation**: TETA consists of three parts: a localization score, an association score, and a classification score, which enable us to evaluate the different aspects of each tracker properly.
- **Dealing with incomplete annotations**: TETA evaluates trackers based on a novel local cluster design, which deals with the challenging incomplete annotation problem. You can even evaluate your MOT tracker on a single object tracking dataset!
- **Easy to use**: TETA supports the COCO-VID style format for most tracking datasets and the Scalabel format for BDD100K. You just need to prepare your results JSON in the right format and run the evaluation script.
- **Support evaluation with both mask and box format**: TETA can evaluate tracking results in both mask and box format. The key difference is whether mask IoU or box IoU is used for localization evaluation (see the sketch below).
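
For intuition, here is a minimal box-IoU helper in the `[x, y, width, height]` convention used by the COCO-VID results format. It is purely illustrative and not the function the evaluator calls internally:

```python
def box_iou(box_a, box_b):
    """IoU of two boxes given as [x, y, width, height] (illustrative only)."""
    ax, ay, aw, ah = box_a
    bx, by, bw, bh = box_b
    # Corners of the intersection rectangle.
    ix1, iy1 = max(ax, bx), max(ay, by)
    ix2, iy2 = min(ax + aw, bx + bw), min(ay + ah, by + bh)
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    union = aw * ah + bw * bh - inter
    return inter / union if union > 0 else 0.0
```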

## Install
Install the TETA dependencies using pip:
```shell
pip install -r requirements.txt
```
Then go to the root of the teta folder and install the package with
```shell
pip install -e .
```
## Supported data formats

### COCO-VID format
The result format follows the COCO-VID format. We describe the format in detail [here](./docs/TAO-format.txt).
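
For example, a results file is a JSON list with one entry per detection. The field names below follow the prediction format in [docs/TAO-format.txt](./docs/TAO-format.txt); the values are made up:

```python
import json

# Hypothetical values; the field names follow the prediction format
# described in docs/TAO-format.txt.
predictions = [
    {
        "image_id": 0,
        "video_id": 0,
        "track_id": 1,
        "category_id": 3,
        "bbox": [100.0, 60.0, 80.0, 120.0],  # [x, y, width, height]
        "score": 0.92,
    },
]

with open("my_tracker_results.json", "w") as f:
    json.dump(predictions, f)
```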

### Scalabel format
For evaluating MOT and MOTS on BDD100K, we support the Scalabel format. We describe the format in detail [here](https://github.com/scalabel/scalabel/blob/master/doc/src/format.rst).
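
As a rough illustration (the linked spec is authoritative), a single annotated frame in Scalabel format looks roughly like this; note that `box2d` uses corner coordinates rather than `[x, y, width, height]`:

```python
# A hypothetical single frame in Scalabel format; see the linked spec
# for the authoritative schema.
frame = {
    "name": "b1c81faa-3df17267-0000001.jpg",
    "videoName": "b1c81faa-3df17267",
    "frameIndex": 0,
    "labels": [
        {
            "id": "1",
            "category": "car",
            "box2d": {"x1": 600.0, "y1": 400.0, "x2": 680.0, "y2": 480.0},
        }
    ],
}
```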

## Evaluate on the TAO TETA benchmark

Overall, you can run the following command to evaluate your tracker on the TAO TETA benchmark, given a ground-truth JSON file and a prediction JSON file in [COCO-VID format](./docs/TAO-format.txt):
```shell
python scripts/run_tao.py --METRICS TETA --TRACKERS_TO_EVAL $NAME_OF_YOUR_MODEL$ --GT_FOLDER ${GT_JSON_PATH}.json --TRACKER_SUB_FOLDER ${RESULT_JSON_PATH}.json
```
### TAO TETA v0.5
Please note that the TAO benchmark initially aligned its class names with LVIS v0.5, which has 1230 classes. For example, the initial TETA benchmark in the [TET paper](https://github.com/SysCV/tet) uses the v0.5 class names.

**Example Run:**

* Download GT: If your model uses the LVIS v0.5 class names, you can evaluate it on the TAO TETA v0.5 benchmark with the corresponding ground-truth JSON file. We provide the TAO val ground truth in v0.5 format: [tao_val_lvis_v05_classes.json](https://huggingface.co/dereksiyuanli/masa/resolve/main/tao_val_lvis_v05_classes.json)

* Download Example Pred: You can download an example prediction file, the [TETer-swinL model](https://github.com/SysCV/tet)'s result JSON: [teter-swinL-tao-val.json](https://drive.google.com/file/d/1ZXXV-hQOxC-luSQcH3ph1k1ETUe6rYKV/view).

* Run the command, assuming you put the downloaded files in `./jsons/` under the root folder:
```shell
python scripts/run_tao.py --METRICS TETA --TRACKERS_TO_EVAL my_tracker --GT_FOLDER ./jsons/tao_val_lvis_v05_classes.json --TRACKER_SUB_FOLDER ./jsons/teter-swinL-tao-val.json
```

### TAO TETA v1.0
Since LVIS updated its class names in v1.0, we also provide the TAO val ground truth in v1.0 format: [tao_val_lvis_v1_classes.json](https://huggingface.co/dereksiyuanli/masa/resolve/main/tao_val_lvis_v1_classes.json).
A conversion script is provided in [scripts](https://github.com/SysCV/ovtrack/blob/main/tools/convert_datasets/create_tao_v1.py) if you want to convert the v0.5 class names to v1.0 yourself.

**Example Run:**

* Download GT: [tao_val_lvis_v1_classes.json](https://huggingface.co/dereksiyuanli/masa/resolve/main/tao_val_lvis_v1_classes.json)
* Download Example Pred: You can download an example prediction file, [MASA-GroundingDINO](https://github.com/siyuanliii/masa/blob/main/docs/model_zoo.md)'s result JSON on TAO val: [masa-gdino-detic-dets-tao-val-preds.json](https://drive.google.com/file/d/195wFJY4uxBGZKtZGb3DyvB6EdDED4Rx2/view?usp=sharing).

* Run the command, assuming you put the downloaded files in `./jsons/` under the root folder:
```shell
python scripts/run_tao.py --METRICS TETA --TRACKERS_TO_EVAL my_tracker --GT_FOLDER ./jsons/tao_val_lvis_v1_classes.json --TRACKER_SUB_FOLDER ./jsons/masa-gdino-detic-dets-tao-val-preds.json
```

## Evaluate on the Open-Vocabulary MOT benchmark

The Open-Vocabulary MOT benchmark was first introduced by [OVTrack](https://openaccess.thecvf.com/content/CVPR2023/papers/Li_OVTrack_Open-Vocabulary_Multiple_Object_Tracking_CVPR_2023_paper.pdf). Here we provide the evaluation script for it.
The benchmark uses the TAO dataset for evaluation and the LVIS v1.0 class names.

Overall, you can use the following command to evaluate your tracker on the Open-Vocabulary MOT benchmark:
```shell
python scripts/run_ovmot.py --METRICS TETA --TRACKERS_TO_EVAL $NAME_OF_YOUR_MODEL$ --GT_FOLDER ${GT_JSON_PATH}.json --TRACKER_SUB_FOLDER ${RESULT_JSON_PATH}.json
```

### Run on the Open-Vocabulary MOT validation set
* Download GT: [tao_val_lvis_v1_classes.json](https://huggingface.co/dereksiyuanli/masa/resolve/main/tao_val_lvis_v1_classes.json)
* Download Example Pred: You can download an example prediction file, [MASA-GroundingDINO](https://github.com/siyuanliii/masa?tab=readme-ov-file)'s result JSON on Open-Vocabulary MOT: [masa-gdino-detic-dets-tao-val-preds.json](https://drive.google.com/file/d/195wFJY4uxBGZKtZGb3DyvB6EdDED4Rx2/view?usp=sharing).
* Run the command, assuming you put the downloaded files in `./jsons/` under the root folder:
```shell
python scripts/run_ovmot.py --METRICS TETA --TRACKERS_TO_EVAL my_tracker --GT_FOLDER ./jsons/tao_val_lvis_v1_classes.json --TRACKER_SUB_FOLDER ./jsons/masa-gdino-detic-dets-tao-val-preds.json
```

### Run on the Open-Vocabulary MOT test set
* Download GT: [tao_test_lvis_v1_classes.json](https://drive.google.com/file/d/19LxOj0w3lNyw4IGeMpwFd5OxC4PzFyrv/view?usp=sharing).
Then evaluate your tracker on the Open-Vocabulary MOT test set with the corresponding ground-truth JSON file, as above.

## Evaluate on the BDD100K MOT TETA benchmark
Run on the BDD100K MOT val set.

* Download GT: Please first download the annotations from the [official website](https://doc.bdd100k.com/download.html). On the download page, the required annotations are the `mot` set annotations: `MOT 2020 Labels`.
* Download Example Pred: You can download an example prediction file, [MASA-SAM-ViT-B](https://github.com/siyuanliii/masa/blob/main/docs/model_zoo.md)'s result JSON on BDD100K MOT val: [masa_sam_vitb_bdd_mot_val.json](https://drive.google.com/file/d/19diA2Zij-lbDUP0CzJa36nYWKrGy_4dn/view?usp=sharing)
* Run the command, assuming you put the downloaded predictions in `./jsons/` under the root folder and the GT in `./data/bdd/annotations/scalabel_gt/box_track_20/val/`:
```shell
python scripts/run_bdd.py --scalabel_gt data/bdd/annotations/scalabel_gt/box_track_20/val/ --resfile_path ./jsons/masa_sam_vitb_bdd_mot_val.json --metrics TETA HOTA CLEAR
```

## Evaluate on the BDD100K MOTS TETA benchmark
Run on the BDD100K MOTS val set.

* Download GT: Please first download the annotations from the [official website](https://doc.bdd100k.com/download.html). On the download page, the required annotations are the `mots` set annotations: `MOTS 2020 Labels`.
* Download Example Pred: You can download an example prediction file, [MASA-SAM-ViT-B](https://github.com/siyuanliii/masa/blob/main/docs/model_zoo.md)'s result JSON on BDD100K MOTS val: [masa_sam_vitb_bdd_mots_val.json](https://drive.google.com/file/d/19pAS0Nt74hptFnFgRBL_wMs3aZcRR6UV/view?usp=sharing)
* Run the command, assuming you put the downloaded predictions in `./jsons/` under the root folder and the GT in `./data/bdd/annotations/scalabel_gt/seg_track_20/val/`:
```shell
python scripts/run_bdd.py --scalabel_gt data/bdd/annotations/scalabel_gt/seg_track_20/val/ --resfile_path ./jsons/masa_sam_vitb_bdd_mots_val.json --metrics TETA HOTA CLEAR --with_mask
```

## Citation

```
@InProceedings{trackeverything,
  title = {Tracking Every Thing in the Wild},
  author = {Li, Siyuan and Danelljan, Martin and Ding, Henghui and Huang, Thomas E. and Yu, Fisher},
  booktitle = {Proceedings of the European Conference on Computer Vision (ECCV)},
  month = {Oct},
  year = {2022}
}
```
--------------------------------------------------------------------------------
/teta/eval.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import os
3 | import pickle
4 | import time
5 | import traceback
6 | from functools import partial
7 | from multiprocessing.pool import Pool
8 |
9 | import numpy as np
10 |
11 | from . import _timing, utils
12 | from .config import get_default_eval_config, init_config
13 | from .utils import TrackEvalException
14 |
15 |
16 | class Evaluator:
17 |     """Evaluator class for evaluating different metrics on each dataset."""
18 |
19 | def __init__(self, config=None):
20 | """Initialize the evaluator with a config file."""
21 | self.config = init_config(config, get_default_eval_config(), "Eval")
22 | # Only run timing analysis if not run in parallel.
23 | if self.config["TIME_PROGRESS"] and not self.config["USE_PARALLEL"]:
24 | _timing.DO_TIMING = True
25 | if self.config["DISPLAY_LESS_PROGRESS"]:
26 | _timing.DISPLAY_LESS_PROGRESS = True
27 |
28 | @_timing.time
29 | def evaluate(self, dataset_list, metrics_list):
30 | """Evaluate a set of metrics on a set of datasets."""
31 | config = self.config
32 |
33 | metric_names = utils.validate_metrics_list(metrics_list)
34 | dataset_names = [dataset.get_name() for dataset in dataset_list]
35 | output_res = {}
36 | output_msg = {}
37 |
38 | for dataset, dname in zip(dataset_list, dataset_names):
39 | # Get dataset info about what to evaluate
40 | output_res[dname] = {}
41 | output_msg[dname] = {}
42 | tracker_list, seq_list, class_list = dataset.get_eval_info()
43 | print(
44 | f"\nEvaluating {len(tracker_list)} tracker(s) on "
45 | f"{len(seq_list)} sequence(s) for {len(class_list)} class(es)"
46 | f" on {dname} dataset using the following "
47 | f'metrics: {", ".join(metric_names)}\n'
48 | )
49 |
50 | # Evaluate each tracker
51 | for tracker in tracker_list:
52 | try:
53 | output_res, output_msg = self.evaluate_tracker(
54 | tracker,
55 | dataset,
56 | dname,
57 | class_list,
58 | metrics_list,
59 | metric_names,
60 | seq_list,
61 | output_res,
62 | output_msg,
63 | )
64 | except Exception as err:
65 | output_res[dname][tracker] = None
66 | if type(err) == TrackEvalException:
67 | output_msg[dname][tracker] = str(err)
68 | else:
69 | output_msg[dname][tracker] = "Unknown error occurred."
70 | print("Tracker %s was unable to be evaluated." % tracker)
71 | print(err)
72 | traceback.print_exc()
73 | if config["LOG_ON_ERROR"] is not None:
74 | with open(config["LOG_ON_ERROR"], "a") as f:
75 | print(dname, file=f)
76 | print(tracker, file=f)
77 | print(traceback.format_exc(), file=f)
78 | print("\n\n\n", file=f)
79 | if config["BREAK_ON_ERROR"]:
80 | raise err
81 | elif config["RETURN_ON_ERROR"]:
82 | return output_res, output_msg
83 |
84 | return output_res, output_msg
85 |
86 | def evaluate_tracker(
87 | self,
88 | tracker,
89 | dataset,
90 | dname,
91 | class_list,
92 | metrics_list,
93 | metric_names,
94 | seq_list,
95 | output_res,
96 | output_msg,
97 | ):
98 | """Evaluate each sequence in parallel or in series."""
99 | print("\nEvaluating %s\n" % tracker)
100 | time_start = time.time()
101 | config = self.config
102 | if config["USE_PARALLEL"]:
103 | with Pool(config["NUM_PARALLEL_CORES"]) as pool:
104 | _eval_sequence = partial(
105 | eval_sequence,
106 | dataset=dataset,
107 | tracker=tracker,
108 | class_list=class_list,
109 | metrics_list=metrics_list,
110 | metric_names=metric_names,
111 | )
112 | results = pool.map(_eval_sequence, seq_list)
113 | res = dict(zip(seq_list, results))
114 | else:
115 | res = {}
116 | for curr_seq in sorted(seq_list):
117 | res[curr_seq] = eval_sequence(
118 | curr_seq, dataset, tracker, class_list, metrics_list, metric_names
119 | )
120 |
121 |
122 | # collecting combined cls keys (cls averaged, det averaged, super classes)
123 | cls_keys = []
124 | res["COMBINED_SEQ"] = {}
125 | # combine sequences for each class
126 | for c_cls in class_list:
127 | res["COMBINED_SEQ"][c_cls] = {}
128 | for metric, mname in zip(metrics_list, metric_names):
129 | curr_res = {
130 | seq_key: seq_value[c_cls][mname]
131 | for seq_key, seq_value in res.items()
132 | if seq_key != "COMBINED_SEQ"
133 | }
134 | # combine results over all sequences and then over all classes
135 | res["COMBINED_SEQ"][c_cls][mname] = metric.combine_sequences(curr_res)
136 |
137 | # combine classes
138 | if dataset.should_classes_combine:
139 | if config["OUTPUT_PER_SEQ_RES"]:
140 | video_keys = res.keys()
141 | else:
142 | video_keys = ["COMBINED_SEQ"]
143 | for v_key in video_keys:
144 | cls_keys += ["average"]
145 | res[v_key]["average"] = {}
146 | for metric, mname in zip(metrics_list, metric_names):
147 | cls_res = {
148 | cls_key: cls_value[mname]
149 | for cls_key, cls_value in res[v_key].items()
150 | if cls_key not in cls_keys
151 | }
152 | res[v_key]["average"][
153 | mname
154 | ] = metric.combine_classes_class_averaged(
155 | cls_res, ignore_empty=True
156 | )
157 |
158 | # combine classes to super classes
159 | if dataset.use_super_categories:
160 | for cat, sub_cats in dataset.super_categories.items():
161 | cls_keys.append(cat)
162 | res["COMBINED_SEQ"][cat] = {}
163 | for metric, mname in zip(metrics_list, metric_names):
164 | cat_res = {
165 | cls_key: cls_value[mname]
166 | for cls_key, cls_value in res["COMBINED_SEQ"].items()
167 | if cls_key in sub_cats
168 | }
169 | res["COMBINED_SEQ"][cat][
170 | mname
171 | ] = metric.combine_classes_det_averaged(cat_res)
172 | # Print and output results in various formats
173 | if config["TIME_PROGRESS"]:
174 | print(
175 | f"\nAll sequences for {tracker} finished in"
176 | f" {time.time() - time_start} seconds"
177 | )
178 | output_fol = dataset.get_output_fol(tracker)
179 | os.makedirs(output_fol, exist_ok=True)
180 |
181 |     # take the mean of each field over all thresholds
182 | if config["OUTPUT_PER_SEQ_RES"]:
183 | all_res = copy.deepcopy(res)
184 | summary_keys = res.keys()
185 | else:
186 | all_res = copy.deepcopy(res["COMBINED_SEQ"])
187 | summary_keys = ["COMBINED_SEQ"]
188 | thr_key_list = [50]
189 | for s_key in summary_keys:
190 | for metric, mname in zip(metrics_list, metric_names):
191 | if mname != "TETA":
192 | if s_key == "COMBINED_SEQ":
193 | metric.print_table(
194 | {"COMBINED_SEQ": res["COMBINED_SEQ"][cls_keys[0]][mname]},
195 | tracker,
196 | cls_keys[0],
197 | )
198 | continue
199 |
200 | for c_cls in res[s_key].keys():
201 | for thr in thr_key_list:
202 | all_res[s_key][c_cls][mname][thr] = metric._summary_row(
203 | res[s_key][c_cls][mname][thr]
204 | )
205 | x = (
206 | np.array(list(all_res[s_key][c_cls]["TETA"].values()))
207 | .astype("float")
208 | .mean(axis=0)
209 | )
210 | all_res_summary = list(x.round(decimals=2).astype("str"))
211 | all_res[s_key][c_cls][mname]["ALL"] = all_res_summary
212 | if config["OUTPUT_SUMMARY"] and s_key == "COMBINED_SEQ":
213 | for t in thr_key_list:
214 | metric.print_summary_table(
215 | all_res[s_key][cls_keys[0]][mname][t],
216 | t,
217 | tracker,
218 | cls_keys[0],
219 | )
220 |
221 | if config["OUTPUT_TEM_RAW_DATA"]:
222 | out_file = os.path.join(output_fol, "teta_summary_results.pth")
223 |             with open(out_file, "wb") as f: pickle.dump(all_res, f)
224 | print("Saved the TETA summary results.")
225 |
226 |         # output (keyed by tracker, matching the error path in evaluate)
227 |         output_res[dname][tracker] = all_res[s_key][cls_keys[0]][mname][t]
228 | output_msg[dname][tracker] = "Success"
229 |
230 | return output_res, output_msg
231 |
232 |
233 | @_timing.time
234 | def eval_sequence(seq, dataset, tracker, class_list, metrics_list, metric_names):
235 | """Function for evaluating a single sequence."""
236 | raw_data = dataset.get_raw_seq_data(tracker, seq)
237 | seq_res = {}
238 |
239 | if "TETA" in metric_names:
240 | thresholds = [50]
241 | data_all_class = dataset.get_preprocessed_seq_data(
242 | raw_data, "all", thresholds=thresholds
243 | )
244 | teta = metrics_list[metric_names.index("TETA")]
245 | assignment = teta.compute_global_assignment(data_all_class)
246 |
247 |     # create a dict to store Cls_FP for each class at each threshold
248 | cls_fp = {
249 | key: {
250 | cls: np.zeros((len(np.arange(0.5, 0.99, 0.05)))) for cls in class_list
251 | }
252 | for key in thresholds
253 | }
254 |
255 | for cls in class_list:
256 | seq_res[cls] = {}
257 | data = dataset.get_preprocessed_seq_data(raw_data, cls, assignment, thresholds)
258 |
259 | for metric, mname in zip(metrics_list, metric_names):
260 | if mname == "TETA":
261 | seq_res[cls][mname], cls_fp, _ = metric.eval_sequence(
262 | data, cls, dataset.clsid2cls_name, cls_fp
263 | )
264 | else:
265 | seq_res[cls][mname] = metric.eval_sequence(data)
266 |
267 | if "TETA" in metric_names:
268 | for thr in thresholds:
269 | for cls in class_list:
270 | seq_res[cls]["TETA"][thr]["Cls_FP"] += cls_fp[thr][cls]
271 |
272 | return seq_res
273 |
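# ---------------------------------------------------------------------------
# Editor's note -- a hedged usage sketch of this module. The `Evaluator`
# constructor and entry-point names below are assumptions (they are not shown
# in this file); only the config keys referenced above come from the source.
#
#   import teta
#
#   config = {"USE_PARALLEL": True, "NUM_PARALLEL_CORES": 8,
#             "BREAK_ON_ERROR": False, "RETURN_ON_ERROR": False,
#             "LOG_ON_ERROR": "error.log", "TIME_PROGRESS": True,
#             "OUTPUT_PER_SEQ_RES": False, "OUTPUT_SUMMARY": True,
#             "OUTPUT_TEM_RAW_DATA": True}
#   evaluator = teta.Evaluator(config)                # assumed signature
#   dataset = teta.datasets.TAO()                     # default dataset config
#   metric = teta.metrics.TETA(exhaustive=False)
#   output_res, output_msg = evaluator.evaluate([dataset], [metric])  # assumed
# ---------------------------------------------------------------------------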
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [2022] [Siyuan Li]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/teta/metrics/teta.py:
--------------------------------------------------------------------------------
1 | """Track Every Thing Accuracy metric."""
2 |
3 | import numpy as np
4 | from scipy.optimize import linear_sum_assignment
5 |
6 | from .. import _timing
7 | from ._base_metric import _BaseMetric
8 |
9 | EPS = np.finfo("float").eps # epsilon
10 |
11 |
12 | class TETA(_BaseMetric):
13 | """TETA metric."""
14 |
15 | def __init__(self, exhaustive=False, config=None):
16 | """Initialize metric."""
17 | super().__init__()
18 | self.plottable = True
19 |         self.array_labels = np.arange(0.0, 0.99, 0.05)  # 20 alphas: 0.00-0.95
20 |         self.cls_array_labels = np.arange(0.5, 0.99, 0.05)  # 10 alphas: 0.50-0.95
21 |
22 | self.integer_array_fields = [
23 | "Loc_TP",
24 | "Loc_FN",
25 | "Loc_FP",
26 | "Cls_TP",
27 | "Cls_FN",
28 | "Cls_FP",
29 | ]
30 | self.float_array_fields = (
31 | ["TETA", "LocA", "AssocA", "ClsA"]
32 | + ["LocRe", "LocPr"]
33 | + ["AssocRe", "AssocPr"]
34 | + ["ClsRe", "ClsPr"]
35 | )
36 | self.fields = self.float_array_fields + self.integer_array_fields
37 | self.summary_fields = self.float_array_fields
38 | self.exhaustive = exhaustive
39 |
40 | def compute_global_assignment(self, data_thr, alpha=0.5):
41 | """Compute global assignment of TP."""
42 | res = {
43 | thr: {t: {} for t in range(data_thr[thr]["num_timesteps"])}
44 | for thr in data_thr
45 | }
46 |
47 | for thr in data_thr:
48 | data = data_thr[thr]
49 | # return empty result if tracker or gt sequence is empty
50 | if data["num_tk_overlap_dets"] == 0 or data["num_gt_dets"] == 0:
51 | return res
52 |
53 | # global alignment score
54 | ga_score, _, _ = self.compute_global_alignment_score(data)
55 |
56 | # calculate scores for each timestep
57 | for t, (gt_ids_t, tk_ids_t) in enumerate(
58 | zip(data["gt_ids"], data["tk_ids"])
59 | ):
60 | # get matches optimizing for TETA
61 | amatch_rows, amatch_cols = self.compute_matches(
62 | data, t, ga_score, gt_ids_t, tk_ids_t, alpha=alpha
63 | )
64 | gt_ids = [data["gt_id_map"][tid] for tid in gt_ids_t[amatch_rows[0]]]
65 | matched_ids = [
66 | data["tk_id_map"][tid] for tid in tk_ids_t[amatch_cols[0]]
67 | ]
68 | res[thr][t] = dict(zip(gt_ids, matched_ids))
69 |
70 | return res
71 |
72 | def eval_sequence_single_thr(self, data, cls, cid2clsname, cls_fp_thr, thr):
73 | """Computes TETA metric for one threshold for one sequence."""
74 | res = {}
75 | class_info_list = []
76 | for field in self.float_array_fields + self.integer_array_fields:
77 | if field.startswith("Cls"):
78 | res[field] = np.zeros(len(self.cls_array_labels), dtype=float)
79 | else:
80 | res[field] = np.zeros((len(self.array_labels)), dtype=float)
81 |
82 | # return empty result if tracker or gt sequence is empty
83 | if data["num_tk_overlap_dets"] == 0:
84 | res["Loc_FN"] = data["num_gt_dets"] * np.ones(
85 | (len(self.array_labels)), dtype=float
86 | )
87 | if self.exhaustive:
88 | cls_fp_thr[cls] = data["num_tk_cls_dets"] * np.ones(
89 | (len(self.cls_array_labels)), dtype=float
90 | )
91 | res = self._compute_final_fields(res)
92 | return res, cls_fp_thr, class_info_list
93 |
94 | if data["num_gt_dets"] == 0:
95 | if self.exhaustive:
96 | cls_fp_thr[cls] = data["num_tk_cls_dets"] * np.ones(
97 | (len(self.cls_array_labels)), dtype=float
98 | )
99 | res = self._compute_final_fields(res)
100 | return res, cls_fp_thr, class_info_list
101 |
102 | # global alignment score
103 | ga_score, gt_id_count, tk_id_count = self.compute_global_alignment_score(data)
104 | matches_counts = [np.zeros_like(ga_score) for _ in self.array_labels]
105 |
106 | # calculate scores for each timestep
107 | for t, (gt_ids_t, tk_ids_t, tk_overlap_ids_t, tk_cls_ids_t) in enumerate(
108 | zip(
109 | data["gt_ids"],
110 | data["tk_ids"],
111 | data["tk_overlap_ids"],
112 | data["tk_class_eval_tk_ids"],
113 | )
114 | ):
115 | # deal with the case that there are no gt_det/tk_det in a timestep
116 | if len(gt_ids_t) == 0:
117 | if self.exhaustive:
118 | cls_fp_thr[cls] += len(tk_cls_ids_t)
119 | continue
120 |
121 | # get matches optimizing for TETA
122 | amatch_rows, amatch_cols = self.compute_matches(
123 | data, t, ga_score, gt_ids_t, tk_ids_t, list(self.array_labels)
124 | )
125 |
126 | # map overlap_ids to original ids.
127 | if len(tk_overlap_ids_t) != 0:
128 | sorter = np.argsort(tk_ids_t)
129 | indexes = sorter[
130 | np.searchsorted(tk_ids_t, tk_overlap_ids_t, sorter=sorter)
131 | ]
132 | sim_t = data["sim_scores"][t][:, indexes]
133 | fpl_candidates = tk_overlap_ids_t[(sim_t >= (thr / 100)).any(axis=0)]
134 | fpl_candidates_ori_ids_t = np.array(
135 | [data["tk_id_map"][tid] for tid in fpl_candidates]
136 | )
137 | else:
138 | fpl_candidates_ori_ids_t = []
139 |
140 | if self.exhaustive:
141 | cls_fp_thr[cls] += len(tk_cls_ids_t) - len(tk_overlap_ids_t)
142 |
143 | # calculate and accumulate basic statistics
144 | for a, alpha in enumerate(self.array_labels):
145 | match_row, match_col = amatch_rows[a], amatch_cols[a]
146 | num_matches = len(match_row)
147 | matched_ori_ids = set(
148 | [data["tk_id_map"][tid] for tid in tk_ids_t[match_col]]
149 | )
150 | match_tk_cls = data["tk_classes"][t][match_col]
151 | wrong_tk_cls = match_tk_cls[match_tk_cls != data["gt_classes"][t]]
152 |
153 | num_class_and_det_matches = np.sum(
154 | match_tk_cls == data["gt_classes"][t]
155 | )
156 |
157 |                 if alpha >= 0.5:  # cls labels start at alpha=0.5, hence the index offset of 10
158 | for cid in wrong_tk_cls:
159 | if cid in cid2clsname:
160 | cname = cid2clsname[cid]
161 | cls_fp_thr[cname][a - 10] += 1
162 | res["Cls_TP"][a - 10] += num_class_and_det_matches
163 | res["Cls_FN"][a - 10] += num_matches - num_class_and_det_matches
164 |
165 | res["Loc_TP"][a] += num_matches
166 | res["Loc_FN"][a] += len(gt_ids_t) - num_matches
167 | res["Loc_FP"][a] += len(set(fpl_candidates_ori_ids_t) - matched_ori_ids)
168 |
169 | if num_matches > 0:
170 | matches_counts[a][gt_ids_t[match_row], tk_ids_t[match_col]] += 1
171 |
172 | # calculate AssocA, AssocRe, AssocPr
173 | self.compute_association_scores(res, matches_counts, gt_id_count, tk_id_count)
174 |
175 | # calculate final scores
176 | res = self._compute_final_fields(res)
177 | return res, cls_fp_thr, class_info_list
178 |
179 | def compute_global_alignment_score(self, data):
180 | """Computes global alignment score."""
181 | num_matches = np.zeros((data["num_gt_ids"], data["num_tk_ids"]))
182 | gt_id_count = np.zeros((data["num_gt_ids"], 1))
183 | tk_id_count = np.zeros((1, data["num_tk_ids"]))
184 |
185 | # loop through each timestep and accumulate global track info.
186 | for t, (gt_ids_t, tk_ids_t) in enumerate(zip(data["gt_ids"], data["tk_ids"])):
187 | # count potential matches between ids in each time step
188 | # these are normalized, weighted by match similarity
189 | sim = data["sim_scores"][t]
190 | sim_iou_denom = sim.sum(0, keepdims=True) + sim.sum(1, keepdims=True) - sim
191 | sim_iou = np.zeros_like(sim)
192 | mask = sim_iou_denom > (0 + EPS)
193 | sim_iou[mask] = sim[mask] / sim_iou_denom[mask]
194 | num_matches[gt_ids_t[:, None], tk_ids_t[None, :]] += sim_iou
195 |
196 | # calculate total number of dets for each gt_id and tk_id.
197 | gt_id_count[gt_ids_t] += 1
198 | tk_id_count[0, tk_ids_t] += 1
199 |
200 | # Calculate overall Jaccard alignment score between IDs
201 | ga_score = num_matches / (gt_id_count + tk_id_count - num_matches)
202 | return ga_score, gt_id_count, tk_id_count
203 |
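    # Editor's note -- hand-checked toy case for the alignment score above: one
    # gt track (5 dets) and one tracker track (4 dets) co-occur in 3 frames.
    # With a single candidate pair per frame, sim / (sim.sum(0) + sim.sum(1) -
    # sim) is 1.0 whenever sim > 0, so num_matches accumulates to 3 and
    #
    #   >>> 3 / (5 + 4 - 3)        # Jaccard over the two track lengths
    #   0.5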
204 | def compute_matches(self, data, t, ga_score, gt_ids, tk_ids, alpha):
205 | """Compute matches based on alignment score."""
206 | sim = data["sim_scores"][t]
207 | score_mat = ga_score[gt_ids[:, None], tk_ids[None, :]] * sim
208 | # Hungarian algorithm to find best matches
209 | match_rows, match_cols = linear_sum_assignment(-score_mat)
210 |
211 | if not isinstance(alpha, list):
212 | alpha = [alpha]
213 | alpha_match_rows, alpha_match_cols = [], []
214 | for a in alpha:
215 | matched_mask = sim[match_rows, match_cols] >= a - EPS
216 | alpha_match_rows.append(match_rows[matched_mask])
217 | alpha_match_cols.append(match_cols[matched_mask])
218 | return alpha_match_rows, alpha_match_cols
219 |
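    # Editor's note -- doctest-style sketch of the matching step (Hungarian
    # assignment on the negated scores, then filtering by raw similarity):
    #
    #   >>> import numpy as np
    #   >>> from scipy.optimize import linear_sum_assignment
    #   >>> sim = np.array([[0.9, 0.2], [0.1, 0.6]])
    #   >>> rows, cols = linear_sum_assignment(-sim)
    #   >>> keep = sim[rows, cols] >= 0.5
    #   >>> rows[keep], cols[keep]
    #   (array([0, 1]), array([0, 1]))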
220 | def compute_association_scores(self, res, matches_counts, gt_id_count, tk_id_count):
221 | """Calculate association scores for each alpha.
222 |
223 | First calculate scores per gt_id/tk_id combo,
224 | and then average over the number of detections.
225 | """
226 | for a, _ in enumerate(self.array_labels):
227 | matches_count = matches_counts[a]
228 | ass_a = matches_count / np.maximum(
229 | 1, gt_id_count + tk_id_count - matches_count
230 | )
231 | res["AssocA"][a] = np.sum(matches_count * ass_a) / np.maximum(
232 | 1, res["Loc_TP"][a]
233 | )
234 | ass_re = matches_count / np.maximum(1, gt_id_count)
235 | res["AssocRe"][a] = np.sum(matches_count * ass_re) / np.maximum(
236 | 1, res["Loc_TP"][a]
237 | )
238 | ass_pr = matches_count / np.maximum(1, tk_id_count)
239 | res["AssocPr"][a] = np.sum(matches_count * ass_pr) / np.maximum(
240 | 1, res["Loc_TP"][a]
241 | )
242 |
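    # Editor's note -- hand-checked toy case for AssocA: one gt/tracker pair
    # matched in 4 frames, with 5 gt and 6 tracker detections overall.
    #
    #   >>> ass_a = 4 / (5 + 6 - 4)       # track-level Jaccard = 4/7
    #   >>> round((4 * ass_a) / 4, 3)     # weight by matches, divide by Loc_TP
    #   0.571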
243 | @_timing.time
244 | def eval_sequence(self, data, cls, cls_id_name_mapping, cls_fp):
245 | """Evaluate a single sequence across all thresholds."""
246 | res = {}
247 | class_info_dict = {}
248 |
249 | for thr in data:
250 | res[thr], cls_fp[thr], cls_info = self.eval_sequence_single_thr(
251 | data[thr], cls, cls_id_name_mapping, cls_fp[thr], thr
252 | )
253 | class_info_dict[thr] = cls_info
254 |
255 | return res, cls_fp, class_info_dict
256 |
257 | def combine_sequences(self, all_res):
258 | """Combines metrics across all sequences."""
259 | data = {}
260 | res = {}
261 |
262 | if all_res:
263 | thresholds = list(list(all_res.values())[0].keys())
264 | else:
265 | thresholds = [50]
266 | for thr in thresholds:
267 | data[thr] = {}
268 | for seq_key in all_res:
269 | data[thr][seq_key] = all_res[seq_key][thr]
270 | for thr in thresholds:
271 | res[thr] = self._combine_sequences_thr(data[thr])
272 |
273 | return res
274 |
275 | def _combine_sequences_thr(self, all_res):
276 | """Combines sequences over each threshold."""
277 | res = {}
278 | for field in self.integer_array_fields:
279 | res[field] = self._combine_sum(all_res, field)
280 | for field in ["AssocRe", "AssocPr", "AssocA"]:
281 | res[field] = self._combine_weighted_av(
282 | all_res, field, res, weight_field="Loc_TP"
283 | )
284 | res = self._compute_final_fields(res)
285 | return res
286 |
287 | def combine_classes_class_averaged(self, all_res, ignore_empty=False):
288 | """Combines metrics across all classes by averaging over classes.
289 |
290 | If 'ignore_empty' is True, then it only sums over classes
291 | with at least one gt or predicted detection.
292 | """
293 | data = {}
294 | res = {}
295 | if all_res:
296 | thresholds = list(list(all_res.values())[0].keys())
297 | else:
298 | thresholds = [50]
299 | for thr in thresholds:
300 | data[thr] = {}
301 | for cls_key in all_res:
302 | data[thr][cls_key] = all_res[cls_key][thr]
303 | for thr in data:
304 | res[thr] = self._combine_classes_class_averaged_thr(
305 | data[thr], ignore_empty=ignore_empty
306 | )
307 | return res
308 |
309 | def _combine_classes_class_averaged_thr(self, all_res, ignore_empty=False):
310 | """Combines classes over each threshold."""
311 | res = {}
312 |
313 | def check_empty(val):
314 | """Returns True if empty."""
315 | return not (val["Loc_TP"] + val["Loc_FN"] + val["Loc_FP"] > 0 + EPS).any()
316 |
317 | for field in self.integer_array_fields:
318 | if ignore_empty:
319 | res_field = {k: v for k, v in all_res.items() if not check_empty(v)}
320 | else:
321 | res_field = {k: v for k, v in all_res.items()}
322 | res[field] = self._combine_sum(res_field, field)
323 |
324 | for field in self.float_array_fields:
325 | if ignore_empty:
326 | res_field = [v[field] for v in all_res.values() if not check_empty(v)]
327 | else:
328 | res_field = [v[field] for v in all_res.values()]
329 | res[field] = np.mean(res_field, axis=0)
330 | return res
331 |
332 | def combine_classes_det_averaged(self, all_res):
333 | """Combines metrics across all classes by averaging over detections."""
334 | data = {}
335 | res = {}
336 | if all_res:
337 | thresholds = list(list(all_res.values())[0].keys())
338 | else:
339 | thresholds = [50]
340 | for thr in thresholds:
341 | data[thr] = {}
342 | for cls_key in all_res:
343 | data[thr][cls_key] = all_res[cls_key][thr]
344 | for thr in data:
345 | res[thr] = self._combine_classes_det_averaged_thr(data[thr])
346 | return res
347 |
348 | def _combine_classes_det_averaged_thr(self, all_res):
349 | """Combines detections over each threshold."""
350 | res = {}
351 | for field in self.integer_array_fields:
352 | res[field] = self._combine_sum(all_res, field)
353 | for field in ["AssocRe", "AssocPr", "AssocA"]:
354 | res[field] = self._combine_weighted_av(
355 | all_res, field, res, weight_field="Loc_TP"
356 | )
357 | res = self._compute_final_fields(res)
358 | return res
359 |
360 | @staticmethod
361 | def _compute_final_fields(res):
362 | """Calculate final metric values.
363 |
364 |         This function is used both for per-sequence calculation
365 |         and for combining values across sequences.
366 | """
367 | # LocA
368 | res["LocRe"] = res["Loc_TP"] / np.maximum(1, res["Loc_TP"] + res["Loc_FN"])
369 | res["LocPr"] = res["Loc_TP"] / np.maximum(1, res["Loc_TP"] + res["Loc_FP"])
370 | res["LocA"] = res["Loc_TP"] / np.maximum(
371 | 1, res["Loc_TP"] + res["Loc_FN"] + res["Loc_FP"]
372 | )
373 |
374 | # ClsA
375 | res["ClsRe"] = res["Cls_TP"] / np.maximum(1, res["Cls_TP"] + res["Cls_FN"])
376 | res["ClsPr"] = res["Cls_TP"] / np.maximum(1, res["Cls_TP"] + res["Cls_FP"])
377 | res["ClsA"] = res["Cls_TP"] / np.maximum(
378 | 1, res["Cls_TP"] + res["Cls_FN"] + res["Cls_FP"]
379 | )
380 |
381 | res["ClsRe"] = np.mean(res["ClsRe"])
382 | res["ClsPr"] = np.mean(res["ClsPr"])
383 | res["ClsA"] = np.mean(res["ClsA"])
384 |
385 | res["TETA"] = (res["LocA"] + res["AssocA"] + res["ClsA"]) / 3
386 |
387 | return res
388 |
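    # Editor's note -- worked example of the final formulas for one alpha bin:
    #
    #   >>> loc_a = 8 / (8 + 2 + 2)              # Loc_TP=8, Loc_FN=2, Loc_FP=2
    #   >>> cls_a = 6 / (6 + 2 + 4)              # Cls_TP=6, Cls_FN=2, Cls_FP=4
    #   >>> round((loc_a + 0.55 + cls_a) / 3, 3) # with AssocA = 0.55
    #   0.572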
389 | def print_summary_table(self, thr_res, thr, tracker, cls):
390 | """Prints summary table of results."""
391 | print("")
392 | metric_name = self.get_name()
393 | self._row_print(
394 | [f"{metric_name}{str(thr)}: {tracker}-{cls}"] + self.summary_fields
395 | )
396 | self._row_print(["COMBINED"] + thr_res)
397 |
--------------------------------------------------------------------------------
/teta/datasets/_base_dataset.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import io
3 | import os
4 | import traceback
5 | import zipfile
6 | from abc import ABC, abstractmethod
7 | from copy import deepcopy
8 |
9 | import numpy as np
10 |
11 | from .. import _timing
12 | from ..utils import TrackEvalException
13 |
14 |
15 | class _BaseDataset(ABC):
16 | @abstractmethod
17 | def __init__(self):
18 | self.tracker_list = None
19 | self.seq_list = None
20 | self.class_list = None
21 | self.output_fol = None
22 | self.output_sub_fol = None
23 | self.should_classes_combine = True
24 | self.use_super_categories = False
25 |
26 | # Functions to implement:
27 |
28 | @abstractmethod
29 | def _load_raw_file(self, tracker, seq, is_gt):
30 | ...
31 |
32 | @_timing.time
33 | @abstractmethod
34 | def get_preprocessed_seq_data(self, raw_data, cls):
35 | ...
36 |
37 | @abstractmethod
38 | def _calculate_similarities(self, gt_dets_t, tracker_dets_t):
39 | ...
40 |
41 | # Helper functions for all datasets:
42 |
43 | @classmethod
44 | def get_class_name(cls):
45 | return cls.__name__
46 |
47 | def get_name(self):
48 | return self.get_class_name()
49 |
50 | def get_output_fol(self, tracker):
51 | return os.path.join(self.output_fol, tracker, self.output_sub_fol)
52 |
53 | def get_display_name(self, tracker):
54 |         """Can be overridden if the tracker's name (in files) differs from how it should be displayed.
55 |         By default this method just returns the tracker's name as is.
56 | """
57 | return tracker
58 |
59 | def get_eval_info(self):
60 | """Return info about the dataset needed for the Evaluator"""
61 | return self.tracker_list, self.seq_list, self.class_list
62 |
63 | @_timing.time
64 | def get_raw_seq_data(self, tracker, seq):
65 | """Loads raw data (tracker and ground-truth) for a single tracker on a single sequence.
66 | Raw data includes all of the information needed for both preprocessing and evaluation, for all classes.
67 | A later function (get_processed_seq_data) will perform such preprocessing and extract relevant information for
68 | the evaluation of each class.
69 |
70 | This returns a dict which contains the fields:
71 | [num_timesteps]: integer
72 | [gt_ids, tracker_ids, gt_classes, tracker_classes, tracker_confidences]:
73 | list (for each timestep) of 1D NDArrays (for each det).
74 | [gt_dets, tracker_dets, gt_crowd_ignore_regions]: list (for each timestep) of lists of detections.
75 | [similarity_scores]: list (for each timestep) of 2D NDArrays.
76 | [gt_extras]: dict (for each extra) of lists (for each timestep) of 1D NDArrays (for each det).
77 |
78 | gt_extras contains dataset specific information used for preprocessing such as occlusion and truncation levels.
79 |
80 | Note that similarities are extracted as part of the dataset and not the metric, because almost all metrics are
81 | independent of the exact method of calculating the similarity. However datasets are not (e.g. segmentation
82 | masks vs 2D boxes vs 3D boxes).
83 | We calculate the similarity before preprocessing because often both preprocessing and evaluation require it and
84 | we don't wish to calculate this twice.
85 | We calculate similarity between all gt and tracker classes (not just each class individually) to allow for
86 | calculation of metrics such as class confusion matrices. Typically the impact of this on performance is low.
87 | """
88 | # Load raw data.
89 | raw_gt_data = self._load_raw_file(tracker, seq, is_gt=True)
90 | raw_tracker_data = self._load_raw_file(tracker, seq, is_gt=False)
91 | raw_data = {**raw_tracker_data, **raw_gt_data} # Merges dictionaries
92 |
93 | # Calculate similarities for each timestep.
94 | similarity_scores = []
95 |         for gt_dets_t, tracker_dets_t in zip(
96 |             raw_data["gt_dets"], raw_data["tk_dets"]
97 |         ):
98 | ious = self._calculate_similarities(gt_dets_t, tracker_dets_t)
99 | similarity_scores.append(ious)
100 | raw_data["similarity_scores"] = similarity_scores
101 | return raw_data
102 |
103 | @staticmethod
104 | def _load_simple_text_file(
105 | file,
106 | time_col=0,
107 | id_col=None,
108 | remove_negative_ids=False,
109 | valid_filter=None,
110 | crowd_ignore_filter=None,
111 | convert_filter=None,
112 | is_zipped=False,
113 | zip_file=None,
114 | force_delimiters=None,
115 | ):
116 | """Function that loads data which is in a commonly used text file format.
117 | Assumes each det is given by one row of a text file.
118 |         There is no limit to the number or meaning of the columns;
119 |         however, one column must give the timestep of each det (time_col), which defaults to column 0.
120 |
121 |         The file dialect (delimiter, number of columns, etc.) is determined automatically.
122 |         This function automatically separates dets by timestep,
123 |         and is much faster than alternatives such as np.loadtxt or pandas.
124 |
125 | If remove_negative_ids is True and id_col is not None, dets with negative values in id_col are excluded.
126 | These are not excluded from ignore data.
127 |
128 | valid_filter can be used to only include certain classes.
129 | It is a dict with ints as keys, and lists as values,
130 |         such that a row is included if "row[key].lower() in value" holds for all key/value pairs in the dict.
131 | If None, all classes are included.
132 |
133 |         crowd_ignore_filter can be used to read crowd_ignore regions separately. It has the same format as valid_filter.
134 |
135 | convert_filter can be used to convert value read to another format.
136 | This is used most commonly to convert classes given as string to a class id.
137 | This is a dict such that the key is the column to convert, and the value is another dict giving the mapping.
138 |
139 | Optionally, input files could be a zip of multiple text files for storage efficiency.
140 |
141 | Returns read_data and ignore_data.
142 |         Each is a dict (with timesteps as string keys) of lists (over dets) of lists (over column values).
143 |         Note that all data is returned as strings, and must be converted to float/int later if needed.
144 |         Note that timesteps will not be present in the returned dict keys if there are no dets for them.
145 | """
146 |
147 | if remove_negative_ids and id_col is None:
148 | raise TrackEvalException(
149 | "remove_negative_ids is True, but id_col is not given."
150 | )
151 | if crowd_ignore_filter is None:
152 | crowd_ignore_filter = {}
153 | if convert_filter is None:
154 | convert_filter = {}
155 | try:
156 | if is_zipped: # Either open file directly or within a zip.
157 | if zip_file is None:
158 | raise TrackEvalException(
159 | "is_zipped set to True, but no zip_file is given."
160 | )
161 | archive = zipfile.ZipFile(os.path.join(zip_file), "r")
162 | fp = io.TextIOWrapper(archive.open(file, "r"))
163 | else:
164 | fp = open(file)
165 | read_data = {}
166 | crowd_ignore_data = {}
167 | fp.seek(0, os.SEEK_END)
168 | # check if file is empty
169 | if fp.tell():
170 | fp.seek(0)
171 | dialect = csv.Sniffer().sniff(
172 | fp.readline(), delimiters=force_delimiters
173 | ) # Auto determine structure.
174 | dialect.skipinitialspace = (
175 | True # Deal with extra spaces between columns
176 | )
177 | fp.seek(0)
178 | reader = csv.reader(fp, dialect)
179 | for row in reader:
180 | try:
181 | # Deal with extra trailing spaces at the end of rows
182 |                         if row[-1] == "":
183 | row = row[:-1]
184 | timestep = str(int(float(row[time_col])))
185 | # Read ignore regions separately.
186 | is_ignored = False
187 | for ignore_key, ignore_value in crowd_ignore_filter.items():
188 | if row[ignore_key].lower() in ignore_value:
189 | # Convert values in one column (e.g. string to id)
190 | for (
191 | convert_key,
192 | convert_value,
193 | ) in convert_filter.items():
194 | row[convert_key] = convert_value[
195 | row[convert_key].lower()
196 | ]
197 | # Save data separated by timestep.
198 | if timestep in crowd_ignore_data.keys():
199 | crowd_ignore_data[timestep].append(row)
200 | else:
201 | crowd_ignore_data[timestep] = [row]
202 | is_ignored = True
203 |                         # if det is an ignore region, it cannot be a normal det
204 |                         if is_ignored:
205 |                             continue
206 |
207 | # Exclude some dets if not valid.
208 |                         if valid_filter is not None:
209 |                             # skip the row unless every filter column matches
210 |                             if any(row[k].lower() not in v for k, v in valid_filter.items()):
211 |                                 continue
212 | if remove_negative_ids:
213 | if int(float(row[id_col])) < 0:
214 | continue
215 | # Convert values in one column (e.g. string to id)
216 | for convert_key, convert_value in convert_filter.items():
217 | row[convert_key] = convert_value[row[convert_key].lower()]
218 | # Save data separated by timestep.
219 | if timestep in read_data.keys():
220 | read_data[timestep].append(row)
221 | else:
222 | read_data[timestep] = [row]
223 | except Exception:
224 | exc_str_init = (
225 | "In file %s the following line cannot be read correctly: \n"
226 | % os.path.basename(file)
227 | )
228 | exc_str = " ".join([exc_str_init] + row)
229 | raise TrackEvalException(exc_str)
230 | fp.close()
231 | except Exception:
232 | print("Error loading file: %s, printing traceback." % file)
233 | traceback.print_exc()
234 | raise TrackEvalException(
235 | "File %s cannot be read because it is either not present or invalidly formatted"
236 | % os.path.basename(file)
237 | )
238 | return read_data, crowd_ignore_data
239 |
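    # Editor's note -- illustrative input for the loader above (the columns
    # here are hypothetical: timestep, id, class, x, y, w, h):
    #
    #   1, 3, car, 10.0, 20.0, 50.0, 30.0
    #   1, 7, pedestrian, 40.0, 15.0, 12.0, 30.0
    #
    # A matching valid_filter/convert_filter pair keeps only the listed
    # classes and then maps each class string to an id:
    #
    #   >>> valid_filter = {2: ["car", "pedestrian"]}
    #   >>> convert_filter = {2: {"car": "1", "pedestrian": "2"}}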
240 | @staticmethod
241 | def _calculate_mask_ious(masks1, masks2, is_encoded=False, do_ioa=False):
242 | """Calculates the IOU (intersection over union) between two arrays of segmentation masks.
243 | If is_encoded a run length encoding with pycocotools is assumed as input format, otherwise an input of numpy
244 | arrays of the shape (num_masks, height, width) is assumed and the encoding is performed.
245 |         If do_ioa (intersection over area), then calculates the intersection over the area of masks1 - this is commonly
246 |         used to determine if detections are within a crowd ignore region.
247 | :param masks1: first set of masks (numpy array of shape (num_masks, height, width) if not encoded,
248 | else pycocotools rle encoded format)
249 | :param masks2: second set of masks (numpy array of shape (num_masks, height, width) if not encoded,
250 | else pycocotools rle encoded format)
251 | :param is_encoded: whether the input is in pycocotools rle encoded format
252 | :param do_ioa: whether to perform IoA computation
253 | :return: the IoU/IoA scores
254 | """
255 |
256 | # Only loaded when run to reduce minimum requirements
257 | from pycocotools import mask as mask_utils
258 |
259 | # use pycocotools for run length encoding of masks
260 | if not is_encoded:
261 | masks1 = mask_utils.encode(
262 | np.array(np.transpose(masks1, (1, 2, 0)), order="F")
263 | )
264 | masks2 = mask_utils.encode(
265 | np.array(np.transpose(masks2, (1, 2, 0)), order="F")
266 | )
267 |
268 | # use pycocotools for iou computation of rle encoded masks
269 | ious = mask_utils.iou(masks1, masks2, [do_ioa] * len(masks2))
270 | if len(masks1) == 0 or len(masks2) == 0:
271 | ious = np.asarray(ious).reshape(len(masks1), len(masks2))
272 | assert (ious >= 0 - np.finfo("float").eps).all()
273 | assert (ious <= 1 + np.finfo("float").eps).all()
274 |
275 | return ious
276 |
277 | @staticmethod
278 | def _calculate_box_ious(bboxes1, bboxes2, box_format="xywh", do_ioa=False):
279 | """Calculates the IOU (intersection over union) between two arrays of boxes.
280 | Allows variable box formats ('xywh' and 'x0y0x1y1').
281 |         If do_ioa (intersection over area), then calculates the intersection over the area of boxes1 - this is commonly
282 |         used to determine if detections are within a crowd ignore region.
283 | """
284 |         if box_format == "xywh":
285 | # layout: (x0, y0, w, h)
286 | bboxes1 = deepcopy(bboxes1)
287 | bboxes2 = deepcopy(bboxes2)
288 |
289 | bboxes1[:, 2] = bboxes1[:, 0] + bboxes1[:, 2]
290 | bboxes1[:, 3] = bboxes1[:, 1] + bboxes1[:, 3]
291 | bboxes2[:, 2] = bboxes2[:, 0] + bboxes2[:, 2]
292 | bboxes2[:, 3] = bboxes2[:, 1] + bboxes2[:, 3]
293 |         elif box_format != "x0y0x1y1":
294 | raise (TrackEvalException("box_format %s is not implemented" % box_format))
295 |
296 | # layout: (x0, y0, x1, y1)
297 | min_ = np.minimum(bboxes1[:, np.newaxis, :], bboxes2[np.newaxis, :, :])
298 | max_ = np.maximum(bboxes1[:, np.newaxis, :], bboxes2[np.newaxis, :, :])
299 | intersection = np.maximum(min_[..., 2] - max_[..., 0], 0) * np.maximum(
300 | min_[..., 3] - max_[..., 1], 0
301 | )
302 | area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
303 | bboxes1[..., 3] - bboxes1[..., 1]
304 | )
305 |
306 | if do_ioa:
307 | ioas = np.zeros_like(intersection)
308 | valid_mask = area1 > 0 + np.finfo("float").eps
309 | ioas[valid_mask, :] = (
310 | intersection[valid_mask, :] / area1[valid_mask][:, np.newaxis]
311 | )
312 |
313 | return ioas
314 | else:
315 | area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
316 | bboxes2[..., 3] - bboxes2[..., 1]
317 | )
318 | union = area1[:, np.newaxis] + area2[np.newaxis, :] - intersection
319 | intersection[area1 <= 0 + np.finfo("float").eps, :] = 0
320 | intersection[:, area2 <= 0 + np.finfo("float").eps] = 0
321 | intersection[union <= 0 + np.finfo("float").eps] = 0
322 | union[union <= 0 + np.finfo("float").eps] = 1
323 | ious = intersection / union
324 | return ious
325 |
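    # Editor's note -- hand-checked toy case for the 'xywh' branch: two 1x1
    # boxes offset horizontally by half a box have intersection 0.5 and union
    # 1.5, so IoU = 1/3.
    #
    #   >>> import numpy as np
    #   >>> b1 = np.array([[0.0, 0.0, 1.0, 1.0]])   # x0, y0, w, h
    #   >>> b2 = np.array([[0.5, 0.0, 1.0, 1.0]])
    #   >>> _BaseDataset._calculate_box_ious(b1, b2)
    #   array([[0.33333333]])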
326 | @staticmethod
327 | def _calculate_euclidean_similarity(dets1, dets2, zero_distance=2.0):
328 | """Calculates the euclidean distance between two sets of detections, and then converts this into a similarity
329 | measure with values between 0 and 1 using the following formula: sim = max(0, 1 - dist/zero_distance).
330 | The default zero_distance of 2.0, corresponds to the default used in MOT15_3D, such that a 0.5 similarity
331 | threshold corresponds to a 1m distance threshold for TPs.
332 | """
333 | dist = np.linalg.norm(dets1[:, np.newaxis] - dets2[np.newaxis, :], axis=2)
334 | sim = np.maximum(0, 1 - dist / zero_distance)
335 | return sim
336 |
337 | @staticmethod
338 | def _check_unique_ids(data, after_preproc=False):
339 | """Check the requirement that the tracker_ids and gt_ids are unique per timestep"""
340 | gt_ids = data["gt_ids"]
341 | tracker_ids = data["tk_ids"]
342 | for t, (gt_ids_t, tracker_ids_t) in enumerate(zip(gt_ids, tracker_ids)):
343 | if len(tracker_ids_t) > 0:
344 | unique_ids, counts = np.unique(tracker_ids_t, return_counts=True)
345 | if np.max(counts) != 1:
346 | duplicate_ids = unique_ids[counts > 1]
347 | exc_str_init = (
348 | "Tracker predicts the same ID more than once in a single timestep "
349 | "(seq: %s, frame: %i, ids:" % (data["seq"], t + 1)
350 | )
351 |                     if after_preproc:
352 |                         exc_str_init += (
353 |                             "\n Note that this error occurred after preprocessing (but not before), "
354 |                             "so ids may not be as in file, and something seems wrong with preproc."
355 |                         )
356 |                     exc_str = (
357 |                         " ".join([exc_str_init] + [str(d) for d in duplicate_ids]) + ")"
358 |                     )
359 | raise TrackEvalException(exc_str)
360 | if len(gt_ids_t) > 0:
361 | unique_ids, counts = np.unique(gt_ids_t, return_counts=True)
362 | if np.max(counts) != 1:
363 | duplicate_ids = unique_ids[counts > 1]
364 | exc_str_init = (
365 | "Ground-truth has the same ID more than once in a single timestep "
366 | "(seq: %s, frame: %i, ids:" % (data["seq"], t + 1)
367 | )
368 |                     if after_preproc:
369 |                         exc_str_init += (
370 |                             "\n Note that this error occurred after preprocessing (but not before), "
371 |                             "so ids may not be as in file, and something seems wrong with preproc."
372 |                         )
373 |                     exc_str = (
374 |                         " ".join([exc_str_init] + [str(d) for d in duplicate_ids]) + ")"
375 |                     )
376 | raise TrackEvalException(exc_str)
377 |
--------------------------------------------------------------------------------
/teta/datasets/kitti_mots.py:
--------------------------------------------------------------------------------
1 | import os
2 | import csv
3 | import numpy as np
4 | from scipy.optimize import linear_sum_assignment
5 | from ._base_dataset import _BaseDataset
6 | from .. import utils
7 | from .. import _timing
8 | from ..utils import TrackEvalException
9 |
10 |
11 | class KittiMOTS(_BaseDataset):
12 | """Dataset class for KITTI MOTS tracking"""
13 |
14 | @staticmethod
15 | def get_default_dataset_config():
16 | """Default class config values"""
17 | code_path = utils.get_code_path()
18 | default_config = {
19 | 'GT_FOLDER': os.path.join(code_path, 'data/gt/kitti/kitti_mots_val'), # Location of GT data
20 | 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/kitti/kitti_mots_val'), # Trackers location
21 | 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
22 | 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)
23 | 'CLASSES_TO_EVAL': ['car', 'pedestrian'], # Valid: ['car', 'pedestrian']
24 | 'SPLIT_TO_EVAL': 'val', # Valid: 'training', 'val'
25 | 'INPUT_AS_ZIP': False, # Whether tracker input files are zipped
26 | 'PRINT_CONFIG': True, # Whether to print current config
27 | 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
28 | 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
29 | 'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL
30 | 'SEQMAP_FOLDER': None, # Where seqmaps are found (if None, GT_FOLDER)
31 | 'SEQMAP_FILE': None, # Directly specify seqmap file (if none use seqmap_folder/split_to_eval.seqmap)
32 | 'SEQ_INFO': None, # If not None, directly specify sequences to eval and their number of timesteps
33 | 'GT_LOC_FORMAT': '{gt_folder}/label_02/{seq}.txt', # format of gt localization
34 | }
35 | return default_config
36 |
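    # Editor's note -- hedged sketch: only the keys you pass need to differ
    # from the defaults above; utils.init_config fills in the rest.
    #
    #   >>> dataset = KittiMOTS({"SPLIT_TO_EVAL": "training",
    #   ...                      "CLASSES_TO_EVAL": ["car"],
    #   ...                      "PRINT_CONFIG": False})
    #
    # (Constructing the dataset requires the GT and tracker folders above to
    # exist on disk.)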
37 | def __init__(self, config=None):
38 | """Initialise dataset, checking that all required files are present"""
39 | super().__init__()
40 | # Fill non-given config values with defaults
41 | self.config = utils.init_config(config, self.get_default_dataset_config(), self.get_name())
42 | self.gt_fol = self.config['GT_FOLDER']
43 | self.tracker_fol = self.config['TRACKERS_FOLDER']
44 | self.split_to_eval = self.config['SPLIT_TO_EVAL']
45 | self.should_classes_combine = False
46 | self.use_super_categories = False
47 | self.data_is_zipped = self.config['INPUT_AS_ZIP']
48 |
49 | self.output_fol = self.config['OUTPUT_FOLDER']
50 | if self.output_fol is None:
51 | self.output_fol = self.tracker_fol
52 |
53 | self.tracker_sub_fol = self.config['TRACKER_SUB_FOLDER']
54 | self.output_sub_fol = self.config['OUTPUT_SUB_FOLDER']
55 |
56 | # Get classes to eval
57 | self.valid_classes = ['car', 'pedestrian']
58 | self.class_list = [cls.lower() if cls.lower() in self.valid_classes else None
59 | for cls in self.config['CLASSES_TO_EVAL']]
60 | if not all(self.class_list):
61 | raise TrackEvalException('Attempted to evaluate an invalid class. '
62 | 'Only classes [car, pedestrian] are valid.')
63 | self.class_name_to_class_id = {'car': '1', 'pedestrian': '2', 'ignore': '10'}
64 |
65 | # Get sequences to eval and check gt files exist
66 | self.seq_list, self.seq_lengths = self._get_seq_info()
67 | if len(self.seq_list) < 1:
68 | raise TrackEvalException('No sequences are selected to be evaluated.')
69 |
70 | # Check gt files exist
71 | for seq in self.seq_list:
72 | if not self.data_is_zipped:
73 | curr_file = self.config["GT_LOC_FORMAT"].format(gt_folder=self.gt_fol, seq=seq)
74 | if not os.path.isfile(curr_file):
75 | print('GT file not found ' + curr_file)
76 | raise TrackEvalException('GT file not found for sequence: ' + seq)
77 | if self.data_is_zipped:
78 | curr_file = os.path.join(self.gt_fol, 'data.zip')
79 | if not os.path.isfile(curr_file):
80 | raise TrackEvalException('GT file not found: ' + os.path.basename(curr_file))
81 |
82 | # Get trackers to eval
83 | if self.config['TRACKERS_TO_EVAL'] is None:
84 | self.tracker_list = os.listdir(self.tracker_fol)
85 | else:
86 | self.tracker_list = self.config['TRACKERS_TO_EVAL']
87 |
88 | if self.config['TRACKER_DISPLAY_NAMES'] is None:
89 | self.tracker_to_disp = dict(zip(self.tracker_list, self.tracker_list))
90 | elif (self.config['TRACKERS_TO_EVAL'] is not None) and (
91 | len(self.config['TRACKER_DISPLAY_NAMES']) == len(self.tracker_list)):
92 | self.tracker_to_disp = dict(zip(self.tracker_list, self.config['TRACKER_DISPLAY_NAMES']))
93 | else:
94 | raise TrackEvalException('List of tracker files and tracker display names do not match.')
95 |
96 | for tracker in self.tracker_list:
97 | if self.data_is_zipped:
98 | curr_file = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol + '.zip')
99 | if not os.path.isfile(curr_file):
100 | print('Tracker file not found: ' + curr_file)
101 | raise TrackEvalException('Tracker file not found: ' + tracker + '/' + os.path.basename(curr_file))
102 | else:
103 | for seq in self.seq_list:
104 | curr_file = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol, seq + '.txt')
105 | if not os.path.isfile(curr_file):
106 | print('Tracker file not found: ' + curr_file)
107 | raise TrackEvalException(
108 | 'Tracker file not found: ' + tracker + '/' + self.tracker_sub_fol + '/' + os.path.basename(
109 | curr_file))
110 |
111 | def get_display_name(self, tracker):
112 | return self.tracker_to_disp[tracker]
113 |
114 | def _get_seq_info(self):
115 | seq_list = []
116 | seq_lengths = {}
117 | seqmap_name = 'evaluate_mots.seqmap.' + self.config['SPLIT_TO_EVAL']
118 |
119 | if self.config["SEQ_INFO"]:
120 | seq_list = list(self.config["SEQ_INFO"].keys())
121 | seq_lengths = self.config["SEQ_INFO"]
122 | else:
123 | if self.config["SEQMAP_FILE"]:
124 | seqmap_file = self.config["SEQMAP_FILE"]
125 | else:
126 | if self.config["SEQMAP_FOLDER"] is None:
127 | seqmap_file = os.path.join(self.config['GT_FOLDER'], seqmap_name)
128 | else:
129 | seqmap_file = os.path.join(self.config["SEQMAP_FOLDER"], seqmap_name)
130 | if not os.path.isfile(seqmap_file):
131 | print('no seqmap found: ' + seqmap_file)
132 | raise TrackEvalException('no seqmap found: ' + os.path.basename(seqmap_file))
133 | with open(seqmap_file) as fp:
134 |                 # sniff the delimiter once, then parse the whole file
135 |                 dialect = csv.Sniffer().sniff(fp.read(1024))
136 |                 fp.seek(0)
137 |                 reader = csv.reader(fp, dialect)
138 |                 # each valid row gives the sequence number and its last frame index
139 |                 for row in reader:
140 | if len(row) >= 4:
141 | seq = "%04d" % int(row[0])
142 | seq_list.append(seq)
143 | seq_lengths[seq] = int(row[3]) + 1
144 | return seq_list, seq_lengths
145 |
146 | def _load_raw_file(self, tracker, seq, is_gt):
147 | """Load a file (gt or tracker) in the KITTI MOTS format
148 |
149 | If is_gt, this returns a dict which contains the fields:
150 | [gt_ids, gt_classes] : list (for each timestep) of 1D NDArrays (for each det).
151 | [gt_dets]: list (for each timestep) of lists of detections.
152 | [gt_ignore_region]: list (for each timestep) of masks for the ignore regions
153 |
154 | if not is_gt, this returns a dict which contains the fields:
155 | [tracker_ids, tracker_classes] : list (for each timestep) of 1D NDArrays (for each det).
156 | [tracker_dets]: list (for each timestep) of lists of detections.
157 | """
158 |
159 | # Only loaded when run to reduce minimum requirements
160 | from pycocotools import mask as mask_utils
161 |
162 | # File location
163 | if self.data_is_zipped:
164 | if is_gt:
165 | zip_file = os.path.join(self.gt_fol, 'data.zip')
166 | else:
167 | zip_file = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol + '.zip')
168 | file = seq + '.txt'
169 | else:
170 | zip_file = None
171 | if is_gt:
172 | file = self.config["GT_LOC_FORMAT"].format(gt_folder=self.gt_fol, seq=seq)
173 | else:
174 | file = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol, seq + '.txt')
175 |
176 | # Ignore regions
177 | if is_gt:
178 | crowd_ignore_filter = {2: ['10']}
179 | else:
180 | crowd_ignore_filter = None
181 |
182 | # Load raw data from text file
183 | read_data, ignore_data = self._load_simple_text_file(file, crowd_ignore_filter=crowd_ignore_filter,
184 | is_zipped=self.data_is_zipped, zip_file=zip_file,
185 | force_delimiters=' ')
186 |
187 | # Convert data to required format
188 | num_timesteps = self.seq_lengths[seq]
189 | data_keys = ['ids', 'classes', 'dets']
190 | if is_gt:
191 | data_keys += ['gt_ignore_region']
192 | raw_data = {key: [None] * num_timesteps for key in data_keys}
193 |
194 | # Check for any extra time keys
195 | current_time_keys = [str(t) for t in range(num_timesteps)]
196 | extra_time_keys = [x for x in read_data.keys() if x not in current_time_keys]
197 | if len(extra_time_keys) > 0:
198 | if is_gt:
199 | text = 'Ground-truth'
200 | else:
201 | text = 'Tracking'
202 | raise TrackEvalException(
203 | text + ' data contains the following invalid timesteps in seq %s: ' % seq + ', '.join(
204 |                     [str(x) for x in extra_time_keys]))
205 |
206 | for t in range(num_timesteps):
207 | time_key = str(t)
208 | # list to collect all masks of a timestep to check for overlapping areas
209 | all_masks = []
210 | if time_key in read_data.keys():
211 | try:
212 | raw_data['dets'][t] = [{'size': [int(region[3]), int(region[4])],
213 | 'counts': region[5].encode(encoding='UTF-8')}
214 | for region in read_data[time_key]]
215 | raw_data['ids'][t] = np.atleast_1d([region[1] for region in read_data[time_key]]).astype(int)
216 | raw_data['classes'][t] = np.atleast_1d([region[2] for region in read_data[time_key]]).astype(int)
217 | all_masks += raw_data['dets'][t]
218 | except IndexError:
219 | self._raise_index_error(is_gt, tracker, seq)
220 | except ValueError:
221 | self._raise_value_error(is_gt, tracker, seq)
222 | else:
223 | raw_data['dets'][t] = []
224 | raw_data['ids'][t] = np.empty(0).astype(int)
225 | raw_data['classes'][t] = np.empty(0).astype(int)
226 | if is_gt:
227 | if time_key in ignore_data.keys():
228 | try:
229 | time_ignore = [{'size': [int(region[3]), int(region[4])],
230 | 'counts': region[5].encode(encoding='UTF-8')}
231 | for region in ignore_data[time_key]]
232 | raw_data['gt_ignore_region'][t] = mask_utils.merge([mask for mask in time_ignore],
233 | intersect=False)
234 | all_masks += [raw_data['gt_ignore_region'][t]]
235 | except IndexError:
236 | self._raise_index_error(is_gt, tracker, seq)
237 | except ValueError:
238 | self._raise_value_error(is_gt, tracker, seq)
239 | else:
240 | raw_data['gt_ignore_region'][t] = mask_utils.merge([], intersect=False)
241 |
242 | # check for overlapping masks
243 | if all_masks:
244 | masks_merged = all_masks[0]
245 | for mask in all_masks[1:]:
246 | if mask_utils.area(mask_utils.merge([masks_merged, mask], intersect=True)) != 0.0:
247 | raise TrackEvalException(
248 | 'Tracker has overlapping masks. Tracker: ' + tracker + ' Seq: ' + seq + ' Timestep: ' + str(
249 | t))
250 | masks_merged = mask_utils.merge([masks_merged, mask], intersect=False)
251 |
252 | if is_gt:
253 | key_map = {'ids': 'gt_ids',
254 | 'classes': 'gt_classes',
255 | 'dets': 'gt_dets'}
256 | else:
257 | key_map = {'ids': 'tracker_ids',
258 | 'classes': 'tracker_classes',
259 | 'dets': 'tracker_dets'}
260 | for k, v in key_map.items():
261 | raw_data[v] = raw_data.pop(k)
262 | raw_data["num_timesteps"] = num_timesteps
263 | raw_data['seq'] = seq
264 | return raw_data
265 |
266 | @_timing.time
267 | def get_preprocessed_seq_data(self, raw_data, cls):
268 | """ Preprocess data for a single sequence for a single class ready for evaluation.
269 | Inputs:
270 | - raw_data is a dict containing the data for the sequence already read in by get_raw_seq_data().
271 | - cls is the class to be evaluated.
272 | Outputs:
273 | - data is a dict containing all of the information that metrics need to perform evaluation.
274 | It contains the following fields:
275 | [num_timesteps, num_gt_ids, num_tracker_ids, num_gt_dets, num_tracker_dets] : integers.
276 | [gt_ids, tracker_ids]: list (for each timestep) of 1D NDArrays (for each det).
277 | [gt_dets, tracker_dets]: list (for each timestep) of lists of detection masks.
278 | [similarity_scores]: list (for each timestep) of 2D NDArrays.
279 | Notes:
280 | General preprocessing (preproc) occurs in 4 steps. Some datasets may not use all of these steps.
281 | 1) Extract only detections relevant for the class to be evaluated (including distractor detections).
282 | 2) Match gt dets and tracker dets. Remove tracker dets that are matched to a gt det that is of a
283 | distractor class, or otherwise marked as to be removed.
284 | 3) Remove unmatched tracker dets if they fall within a crowd ignore region or don't meet a certain
285 | other criteria (e.g. are too small).
286 | 4) Remove gt dets that were only useful for preprocessing and not for actual evaluation.
287 | After the above preprocessing steps, this function also calculates the number of gt and tracker detections
288 | and unique track ids. It also relabels gt and tracker ids to be contiguous and checks that ids are
289 | unique within each timestep.
290 |
291 | KITTI MOTS:
292 |         In KITTI MOTS, the 4 preproc steps are as follows:
293 |             1) There are two classes (car and pedestrian) which are evaluated separately.
294 |             2) There are no ground truth detections marked as distractors or otherwise to be removed,
295 |                so no matched tracker detections are removed either.
296 | 3) Ignore regions are used to remove unmatched detections (at least 50% overlap with ignore region).
297 | 4) There are no ground truth detections (e.g. those of distractor classes) to be removed.
298 | """
299 | # Check that input data has unique ids
300 | self._check_unique_ids(raw_data)
301 |
302 | cls_id = int(self.class_name_to_class_id[cls])
303 |
304 | data_keys = ['gt_ids', 'tracker_ids', 'gt_dets', 'tracker_dets', 'similarity_scores']
305 | data = {key: [None] * raw_data['num_timesteps'] for key in data_keys}
306 | unique_gt_ids = []
307 | unique_tracker_ids = []
308 | num_gt_dets = 0
309 | num_tracker_dets = 0
310 | for t in range(raw_data['num_timesteps']):
311 |
312 | # Only extract relevant dets for this class for preproc and eval (cls)
313 | gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id)
314 |             gt_class_mask = gt_class_mask.astype(bool)  # np.bool was removed in NumPy >= 1.24
315 | gt_ids = raw_data['gt_ids'][t][gt_class_mask]
316 | gt_dets = [raw_data['gt_dets'][t][ind] for ind in range(len(gt_class_mask)) if gt_class_mask[ind]]
317 |
318 | tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id)
319 |             tracker_class_mask = tracker_class_mask.astype(bool)
320 | tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask]
321 | tracker_dets = [raw_data['tracker_dets'][t][ind] for ind in range(len(tracker_class_mask)) if
322 | tracker_class_mask[ind]]
323 | similarity_scores = raw_data['similarity_scores'][t][gt_class_mask, :][:, tracker_class_mask]
324 |
325 | # Match tracker and gt dets (with hungarian algorithm)
326 | unmatched_indices = np.arange(tracker_ids.shape[0])
327 | if gt_ids.shape[0] > 0 and tracker_ids.shape[0] > 0:
328 | matching_scores = similarity_scores.copy()
329 | matching_scores[matching_scores < 0.5 - np.finfo('float').eps] = -10000
330 | match_rows, match_cols = linear_sum_assignment(-matching_scores)
331 | actually_matched_mask = matching_scores[match_rows, match_cols] > 0 + np.finfo('float').eps
332 | match_cols = match_cols[actually_matched_mask]
333 |
334 | unmatched_indices = np.delete(unmatched_indices, match_cols, axis=0)
335 |
336 | # For unmatched tracker dets, remove those that are greater than 50% within a crowd ignore region.
337 | unmatched_tracker_dets = [tracker_dets[i] for i in range(len(tracker_dets)) if i in unmatched_indices]
338 | ignore_region = raw_data['gt_ignore_region'][t]
339 | intersection_with_ignore_region = self._calculate_mask_ious(unmatched_tracker_dets, [ignore_region],
340 | is_encoded=True, do_ioa=True)
341 | is_within_ignore_region = np.any(intersection_with_ignore_region > 0.5 + np.finfo('float').eps, axis=1)
342 |
343 | # Apply preprocessing to remove unwanted tracker dets.
344 | to_remove_tracker = unmatched_indices[is_within_ignore_region]
345 | data['tracker_ids'][t] = np.delete(tracker_ids, to_remove_tracker, axis=0)
346 | data['tracker_dets'][t] = np.delete(tracker_dets, to_remove_tracker, axis=0)
347 | similarity_scores = np.delete(similarity_scores, to_remove_tracker, axis=1)
348 |
349 | # Keep all ground truth detections
350 | data['gt_ids'][t] = gt_ids
351 | data['gt_dets'][t] = gt_dets
352 | data['similarity_scores'][t] = similarity_scores
353 |
354 | unique_gt_ids += list(np.unique(data['gt_ids'][t]))
355 | unique_tracker_ids += list(np.unique(data['tracker_ids'][t]))
356 | num_tracker_dets += len(data['tracker_ids'][t])
357 | num_gt_dets += len(data['gt_ids'][t])
358 |
359 | # Re-label IDs such that there are no empty IDs
360 | if len(unique_gt_ids) > 0:
361 | unique_gt_ids = np.unique(unique_gt_ids)
362 | gt_id_map = np.nan * np.ones((np.max(unique_gt_ids) + 1))
363 | gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
364 | for t in range(raw_data['num_timesteps']):
365 | if len(data['gt_ids'][t]) > 0:
366 |                     data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(int)
367 | if len(unique_tracker_ids) > 0:
368 | unique_tracker_ids = np.unique(unique_tracker_ids)
369 | tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))
370 | tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))
371 | for t in range(raw_data['num_timesteps']):
372 | if len(data['tracker_ids'][t]) > 0:
373 |                     data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(int)
374 |
375 | # Record overview statistics.
376 | data['num_tracker_dets'] = num_tracker_dets
377 | data['num_gt_dets'] = num_gt_dets
378 | data['num_tracker_ids'] = len(unique_tracker_ids)
379 | data['num_gt_ids'] = len(unique_gt_ids)
380 | data['num_timesteps'] = raw_data['num_timesteps']
381 | data['seq'] = raw_data['seq']
382 | data['cls'] = cls
383 |
384 | # Ensure again that ids are unique per timestep after preproc.
385 | self._check_unique_ids(data, after_preproc=True)
386 |
387 | return data
388 |
389 | def _calculate_similarities(self, gt_dets_t, tracker_dets_t):
390 | similarity_scores = self._calculate_mask_ious(gt_dets_t, tracker_dets_t, is_encoded=True, do_ioa=False)
391 | return similarity_scores
392 |
393 | @staticmethod
394 | def _raise_index_error(is_gt, tracker, seq):
395 | """
396 | Auxiliary method to raise an evaluation error in case of an index error while reading files.
397 | :param is_gt: whether gt or tracker data is read
398 | :param tracker: the name of the tracker
399 | :param seq: the name of the seq
400 | :return: None
401 | """
402 | if is_gt:
403 | err = 'Cannot load gt data from sequence %s, because there are not enough ' \
404 | 'columns in the data.' % seq
405 | raise TrackEvalException(err)
406 | else:
407 | err = 'Cannot load tracker data from tracker %s, sequence %s, because there are not enough ' \
408 | 'columns in the data.' % (tracker, seq)
409 | raise TrackEvalException(err)
410 |
411 | @staticmethod
412 | def _raise_value_error(is_gt, tracker, seq):
413 | """
414 |         Auxiliary method to raise an evaluation error in case of a value error while reading files.
415 | :param is_gt: whether gt or tracker data is read
416 | :param tracker: the name of the tracker
417 | :param seq: the name of the seq
418 | :return: None
419 | """
420 | if is_gt:
421 | raise TrackEvalException(
422 | 'GT data for sequence %s cannot be converted to the right format. Is data corrupted?' % seq)
423 | else:
424 | raise TrackEvalException(
425 | 'Tracking data from tracker %s, sequence %s cannot be converted to the right format. '
426 | 'Is data corrupted?' % (tracker, seq))
427 |
--------------------------------------------------------------------------------
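The 0.5-IoU Hungarian matching in step 2 of the preprocessing above is easy to miss in
the dense code. Below is a minimal, self-contained sketch of the same rule on an
invented similarity matrix: sub-threshold scores are pushed to a large negative value
so the solver can never prefer them, and the eps-guarded mask then drops any pairing
the solver was forced into anyway.

    import numpy as np
    from scipy.optimize import linear_sum_assignment

    # Toy 2 GT x 3 tracker IoU matrix (values invented for illustration).
    similarity = np.array([[0.9, 0.2, 0.0],
                           [0.1, 0.6, 0.4]])

    matching_scores = similarity.copy()
    matching_scores[matching_scores < 0.5 - np.finfo('float').eps] = -10000

    # linear_sum_assignment minimizes cost, so negate to maximize IoU.
    match_rows, match_cols = linear_sum_assignment(-matching_scores)
    matched = matching_scores[match_rows, match_cols] > 0 + np.finfo('float').eps
    print(list(zip(match_rows[matched], match_cols[matched])))  # [(0, 0), (1, 1)]

--------------------------------------------------------------------------------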
/teta/datasets/coco.py:
--------------------------------------------------------------------------------
1 | """COCO Dataset."""
2 | import copy
3 | import itertools
4 | import json
5 | import os
6 | from collections import defaultdict
7 |
8 | import numpy as np
9 | from scipy.optimize import linear_sum_assignment
10 |
11 | from .. import _timing, utils
12 | from ..config import get_default_dataset_config, init_config
13 | from ..utils import TrackEvalException
14 | from ._base_dataset import _BaseDataset
15 |
16 |
17 | class COCO(_BaseDataset):
18 | """Tracking datasets in COCO format."""
19 |
20 | def __init__(self, config=None):
21 | """Initialize dataset, checking that all required files are present."""
22 | super().__init__()
23 | # Fill non-given config values with defaults
24 | self.config = init_config(config, get_default_dataset_config(), self.get_name())
25 | self.gt_fol = self.config["GT_FOLDER"]
26 | self.tracker_fol = self.config["TRACKERS_FOLDER"]
27 | self.should_classes_combine = True
28 | self.use_super_categories = False
29 |
30 | self.tracker_sub_fol = self.config["TRACKER_SUB_FOLDER"]
31 | self.output_fol = self.config["OUTPUT_FOLDER"]
32 | if self.output_fol is None:
33 | self.output_fol = self.tracker_fol
34 | self.output_sub_fol = self.config["OUTPUT_SUB_FOLDER"]
35 |
36 | if self.gt_fol.endswith(".json"):
37 |             with open(self.gt_fol) as f:
38 |                 self.gt_data = json.load(f)
38 | else:
39 | gt_dir_files = [
40 | file for file in os.listdir(self.gt_fol) if file.endswith(".json")
41 | ]
42 | if len(gt_dir_files) != 1:
43 | raise TrackEvalException(
44 | f"{self.gt_fol} does not contain exactly one json file."
45 | )
46 |
47 | with open(os.path.join(self.gt_fol, gt_dir_files[0])) as f:
48 | self.gt_data = json.load(f)
49 |
50 | # fill missing video ids
51 | self._fill_video_ids_inplace(self.gt_data["annotations"])
52 |
53 | # get sequences to eval and sequence information
54 | self.seq_list = [
55 | vid["name"].replace("/", "-") for vid in self.gt_data["videos"]
56 | ]
57 | self.seq_name2seqid = {
58 | vid["name"].replace("/", "-"): vid["id"] for vid in self.gt_data["videos"]
59 | }
60 | # compute mappings from videos to annotation data
61 | self.video2gt_track, self.video2gt_image = self._compute_vid_mappings(
62 | self.gt_data["annotations"]
63 | )
64 | # compute sequence lengths
65 | self.seq_lengths = {vid["id"]: 0 for vid in self.gt_data["videos"]}
66 | for img in self.gt_data["images"]:
67 | self.seq_lengths[img["video_id"]] += 1
68 | self.seq2images2timestep = self._compute_image_to_timestep_mappings()
69 | self.seq2cls = {
70 | vid["id"]: {
71 | "pos_cat_ids": list(
72 | {track["category_id"] for track in self.video2gt_track[vid["id"]]}
73 | ),
74 | }
75 | for vid in self.gt_data["videos"]
76 | }
77 |
78 | # Get classes to eval
79 | considered_vid_ids = [self.seq_name2seqid[vid] for vid in self.seq_list]
80 | seen_cats = set(
81 | [
82 | cat_id
83 | for vid_id in considered_vid_ids
84 | for cat_id in self.seq2cls[vid_id]["pos_cat_ids"]
85 | ]
86 | )
87 |         # only classes present in the ground truth are evaluated
88 | self.valid_classes = [
89 | cls["name"] for cls in self.gt_data["categories"] if cls["id"] in seen_cats
90 | ]
91 | cls_name2clsid_map = {
92 | cls["name"]: cls["id"] for cls in self.gt_data["categories"]
93 | }
94 |
95 | if self.config["CLASSES_TO_EVAL"]:
96 | self.class_list = [
97 | cls.lower() if cls.lower() in self.valid_classes else None
98 | for cls in self.config["CLASSES_TO_EVAL"]
99 | ]
100 | if not all(self.class_list):
101 | valid_cls = ", ".join(self.valid_classes)
102 | raise TrackEvalException(
103 | "Attempted to evaluate an invalid class. Only classes "
104 | f"{valid_cls} are valid (classes present in ground truth"
105 | " data)."
106 | )
107 | else:
108 | self.class_list = [cls for cls in self.valid_classes]
109 | self.cls_name2clsid = {
110 | k: v for k, v in cls_name2clsid_map.items() if k in self.class_list
111 | }
112 | self.clsid2cls_name = {
113 | v: k for k, v in cls_name2clsid_map.items() if k in self.class_list
114 | }
115 | # get trackers to eval
116 | if self.config["TRACKERS_TO_EVAL"] is None:
117 | self.tracker_list = os.listdir(self.tracker_fol)
118 | else:
119 | self.tracker_list = self.config["TRACKERS_TO_EVAL"]
120 |
121 | if self.config["TRACKER_DISPLAY_NAMES"] is None:
122 | self.tracker_to_disp = dict(zip(self.tracker_list, self.tracker_list))
123 | elif (self.config["TRACKERS_TO_EVAL"] is not None) and (
124 |             len(self.config["TRACKER_DISPLAY_NAMES"]) == len(self.tracker_list)
125 | ):
126 | self.tracker_to_disp = dict(
127 |                 zip(self.tracker_list, self.config["TRACKER_DISPLAY_NAMES"])
128 | )
129 | else:
130 | raise TrackEvalException(
131 | "List of tracker files and tracker display names do not match."
132 | )
133 |
134 | self.tracker_data = {tracker: dict() for tracker in self.tracker_list}
135 |
136 | for tracker in self.tracker_list:
137 | if self.tracker_sub_fol.endswith(".json"):
138 |                 with open(self.tracker_sub_fol) as f:
139 | curr_data = json.load(f)
140 | else:
141 | tr_dir = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol)
142 | tr_dir_files = [
143 | file for file in os.listdir(tr_dir) if file.endswith(".json")
144 | ]
145 | if len(tr_dir_files) != 1:
146 | raise TrackEvalException(
147 | f"{tr_dir} does not contain exactly one json file."
148 | )
149 | with open(os.path.join(tr_dir, tr_dir_files[0])) as f:
150 | curr_data = json.load(f)
151 |
152 | # limit detections if MAX_DETECTIONS > 0
153 | if self.config["MAX_DETECTIONS"]:
154 | curr_data = self._limit_dets_per_image(curr_data)
155 |
156 | # fill missing video ids
157 | self._fill_video_ids_inplace(curr_data)
158 |
159 | # make track ids unique over whole evaluation set
160 | self._make_tk_ids_unique(curr_data)
161 |
162 | # get tracker sequence information
163 | curr_vids2tracks, curr_vids2images = self._compute_vid_mappings(curr_data)
164 | self.tracker_data[tracker]["vids_to_tracks"] = curr_vids2tracks
165 | self.tracker_data[tracker]["vids_to_images"] = curr_vids2images
166 |
167 | def get_display_name(self, tracker):
168 | return self.tracker_to_disp[tracker]
169 |
170 | def _load_raw_file(self, tracker, seq, is_gt):
171 |         """Load a file (gt or tracker) in the COCO format.
172 |
173 | If is_gt, this returns a dict which contains the fields:
174 | [gt_ids, gt_classes]:
175 | list (for each timestep) of 1D NDArrays (for each det).
176 | [gt_dets]: list (for each timestep) of lists of detections.
177 |
178 | if not is_gt, this returns a dict which contains the fields:
179 | [tk_ids, tk_classes]:
180 | list (for each timestep) of 1D NDArrays (for each det).
181 | [tk_dets]: list (for each timestep) of lists of detections.
182 | """
183 | seq_id = self.seq_name2seqid[seq]
184 | # file location
185 | if is_gt:
186 | imgs = self.video2gt_image[seq_id]
187 | else:
188 | imgs = self.tracker_data[tracker]["vids_to_images"][seq_id]
189 |
190 | # convert data to required format
191 | num_timesteps = self.seq_lengths[seq_id]
192 | img_to_timestep = self.seq2images2timestep[seq_id]
193 | data_keys = ["ids", "classes", "dets"]
194 | # if not is_gt:
195 | # data_keys += ["tk_confidences"]
196 | raw_data = {key: [None] * num_timesteps for key in data_keys}
197 | for img in imgs:
198 | # some tracker data contains images without any ground truth info,
199 | # these are ignored
200 | if img["id"] not in img_to_timestep:
201 | continue
202 | t = img_to_timestep[img["id"]]
203 | anns = img["annotations"]
204 | tk_str = utils.get_track_id_str(anns[0])
205 | raw_data["dets"][t] = np.atleast_2d([ann["bbox"] for ann in anns]).astype(
206 | float
207 | )
208 | raw_data["ids"][t] = np.atleast_1d([ann[tk_str] for ann in anns]).astype(
209 | int
210 | )
211 | raw_data["classes"][t] = np.atleast_1d(
212 | [ann["category_id"] for ann in anns]
213 | ).astype(int)
214 | # if not is_gt:
215 | # raw_data["tk_confidences"][t] = np.atleast_1d(
216 | # [ann["score"] for ann in anns]
217 | # ).astype(float)
218 |
219 | for t, d in enumerate(raw_data["dets"]):
220 | if d is None:
221 | raw_data["dets"][t] = np.empty((0, 4)).astype(float)
222 | raw_data["ids"][t] = np.empty(0).astype(int)
223 | raw_data["classes"][t] = np.empty(0).astype(int)
224 | # if not is_gt:
225 | # raw_data["tk_confidences"][t] = np.empty(0)
226 |
227 | if is_gt:
228 | key_map = {"ids": "gt_ids", "classes": "gt_classes", "dets": "gt_dets"}
229 | else:
230 | key_map = {"ids": "tk_ids", "classes": "tk_classes", "dets": "tk_dets"}
231 | for k, v in key_map.items():
232 | raw_data[v] = raw_data.pop(k)
233 |
234 | raw_data["num_timesteps"] = num_timesteps
235 | raw_data["seq"] = seq
236 | return raw_data
237 |
238 | def get_preprocessed_seq_data_thr(self, raw_data, cls, assignment=None):
239 | """Preprocess data for a single sequence for a single class.
240 |
241 | Inputs:
242 | raw_data: dict containing the data for the sequence already
243 | read in by get_raw_seq_data().
244 | cls: class to be evaluated.
245 | Outputs:
246 | gt_ids:
247 | list (for each timestep) of ids of GT tracks
248 | tk_ids:
249 |                 list (for each timestep) of ids of predicted tracks (all
250 |                 candidates for TP matching: Det + AssocA)
251 |             tk_overlap_ids:
252 |                 list (for each timestep) of ids of predicted tracks that overlap
253 |                 with GTs
254 |             tk_dets:
255 |                 list (for each timestep) of lists of detections corresponding
256 |                 to the tk_ids
257 |             tk_classes:
258 |                 list (for each timestep) of lists of classes corresponding
259 |                 to the tk_ids
260 |             tk_confidences:
261 |                 list (for each timestep) of lists of confidences corresponding
262 |                 to the tk_ids
263 |             sim_scores:
264 |                 similarity scores between gt_dets and tk_dets.
265 | """
266 |         cls_id = self.cls_name2clsid[cls] if cls != "all" else None  # "all" = class-agnostic
268 |
269 | data_keys = [
270 | "gt_ids",
271 | "tk_ids",
272 | "gt_id_map",
273 | "tk_id_map",
274 | "gt_dets",
275 | "gt_classes",
276 | "gt_class_name",
277 | "tk_overlap_classes",
278 | "tk_overlap_ids",
279 | "tk_class_eval_tk_ids",
280 | "tk_dets",
281 | "tk_classes",
282 | # "tk_confidences",
283 | "tk_exh_ids",
284 | "sim_scores",
285 | ]
286 | data = {key: [None] * raw_data["num_timesteps"] for key in data_keys}
287 | unique_gt_ids = []
288 | unique_tk_ids = []
289 | num_gt_dets = 0
290 | num_tk_cls_dets = 0
291 | num_tk_overlap_dets = 0
292 | overlap_ious_thr = 0.5
293 | loc_and_asso_tk_ids = []
294 | exh_class_tk_ids = []
295 |
296 | for t in range(raw_data["num_timesteps"]):
297 | # only extract relevant dets for this class for preproc and eval
298 | if cls == "all":
299 | gt_class_mask = np.ones_like(raw_data["gt_classes"][t]).astype(bool)
300 | else:
301 | gt_class_mask = np.atleast_1d(
302 | raw_data["gt_classes"][t] == cls_id
303 | ).astype(bool)
304 |
305 |             # collect GT outside the evaluated class and the tracker ids assigned to them
306 |             if assignment:
307 | all_gt_ids = list(assignment[t].keys())
308 | gt_ids_in = raw_data["gt_ids"][t][gt_class_mask]
309 | gt_ids_out = set(all_gt_ids) - set(gt_ids_in)
310 | tk_ids_out = set([assignment[t][key] for key in list(gt_ids_out)])
311 |
312 | # compute overlapped tracks and add their ids to overlap_tk_ids
313 | sim_scores = raw_data["similarity_scores"]
314 | overlap_ids_masks = (sim_scores[t][gt_class_mask] >= overlap_ious_thr).any(
315 | axis=0
316 | )
317 | overlap_tk_ids_t = raw_data["tk_ids"][t][overlap_ids_masks]
318 |             if assignment:
319 | data["tk_overlap_ids"][t] = list(set(overlap_tk_ids_t) - tk_ids_out)
320 | else:
321 | data["tk_overlap_ids"][t] = list(set(overlap_tk_ids_t))
322 |
323 | loc_and_asso_tk_ids += data["tk_overlap_ids"][t]
324 |
325 | data["tk_exh_ids"][t] = []
326 | if cls == "all":
327 | continue
328 |
329 | # add the track ids of exclusive annotated class to exh_class_tk_ids
330 | tk_exh_mask = np.atleast_1d(raw_data["tk_classes"][t] == cls_id)
331 | tk_exh_mask = tk_exh_mask.astype(bool)
332 | exh_class_tk_ids_t = raw_data["tk_ids"][t][tk_exh_mask]
333 | exh_class_tk_ids.append(exh_class_tk_ids_t)
334 | data["tk_exh_ids"][t] = exh_class_tk_ids_t
335 |
336 |         # deduplicate; tk_ids assigned to GT of other classes were already excluded above.
337 | loc_and_asso_tk_ids = list(set(loc_and_asso_tk_ids))
338 |
339 | # remove all unwanted unmatched tracker detections
340 | for t in range(raw_data["num_timesteps"]):
341 | # add gt to the data
342 | if cls == "all":
343 | gt_class_mask = np.ones_like(raw_data["gt_classes"][t]).astype(bool)
344 | else:
345 | gt_class_mask = np.atleast_1d(
346 | raw_data["gt_classes"][t] == cls_id
347 | ).astype(bool)
348 | data["gt_classes"][t] = cls_id
349 | data["gt_class_name"][t] = cls
350 |
351 | gt_ids = raw_data["gt_ids"][t][gt_class_mask]
352 | gt_dets = raw_data["gt_dets"][t][gt_class_mask]
353 | data["gt_ids"][t] = gt_ids
354 | data["gt_dets"][t] = gt_dets
355 |
356 | # filter pred and only keep those that highly overlap with GTs
357 | tk_mask = np.isin(
358 | raw_data["tk_ids"][t], np.array(loc_and_asso_tk_ids), assume_unique=True
359 | )
360 | tk_overlap_mask = np.isin(
361 | raw_data["tk_ids"][t],
362 | np.array(data["tk_overlap_ids"][t]),
363 | assume_unique=True,
364 | )
365 |
366 | tk_ids = raw_data["tk_ids"][t][tk_mask]
367 | tk_dets = raw_data["tk_dets"][t][tk_mask]
368 | tracker_classes = raw_data["tk_classes"][t][tk_mask]
369 |
370 | # add overlap classes for computing the FP for Cls term
371 | tracker_overlap_classes = raw_data["tk_classes"][t][tk_overlap_mask]
372 | # tracker_confidences = raw_data["tk_confidences"][t][tk_mask]
373 | sim_scores_masked = sim_scores[t][gt_class_mask, :][:, tk_mask]
374 |
375 | # add filtered prediction to the data
376 | data["tk_classes"][t] = tracker_classes
377 | data["tk_overlap_classes"][t] = tracker_overlap_classes
378 | data["tk_ids"][t] = tk_ids
379 | data["tk_dets"][t] = tk_dets
380 | # data["tk_confidences"][t] = tracker_confidences
381 | data["sim_scores"][t] = sim_scores_masked
382 | data["tk_class_eval_tk_ids"][t] = set(
383 | list(data["tk_overlap_ids"][t]) + list(data["tk_exh_ids"][t])
384 | )
385 |
386 | # count total number of detections
387 | unique_gt_ids += list(np.unique(data["gt_ids"][t]))
388 | # the unique track ids are for association.
389 | unique_tk_ids += list(np.unique(data["tk_ids"][t]))
390 |
391 | num_tk_overlap_dets += len(data["tk_overlap_ids"][t])
392 | num_tk_cls_dets += len(data["tk_class_eval_tk_ids"][t])
393 | num_gt_dets += len(data["gt_ids"][t])
394 |
395 | # re-label IDs such that there are no empty IDs
396 | if len(unique_gt_ids) > 0:
397 | unique_gt_ids = np.unique(unique_gt_ids)
398 | gt_id_map = np.nan * np.ones((np.max(unique_gt_ids) + 1))
399 | gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
400 | data["gt_id_map"] = {}
401 | for gt_id in unique_gt_ids:
402 | new_gt_id = gt_id_map[gt_id].astype(int)
403 | data["gt_id_map"][new_gt_id] = gt_id
404 |
405 | for t in range(raw_data["num_timesteps"]):
406 | if len(data["gt_ids"][t]) > 0:
407 | data["gt_ids"][t] = gt_id_map[data["gt_ids"][t]].astype(int)
408 |
409 | if len(unique_tk_ids) > 0:
410 | unique_tk_ids = np.unique(unique_tk_ids)
411 | tk_id_map = np.nan * np.ones((np.max(unique_tk_ids) + 1))
412 | tk_id_map[unique_tk_ids] = np.arange(len(unique_tk_ids))
413 |
414 | data["tk_id_map"] = {}
415 | for track_id in unique_tk_ids:
416 | new_track_id = tk_id_map[track_id].astype(int)
417 | data["tk_id_map"][new_track_id] = track_id
418 |
419 | for t in range(raw_data["num_timesteps"]):
420 | if len(data["tk_ids"][t]) > 0:
421 | data["tk_ids"][t] = tk_id_map[data["tk_ids"][t]].astype(int)
422 | if len(data["tk_overlap_ids"][t]) > 0:
423 | data["tk_overlap_ids"][t] = tk_id_map[
424 | data["tk_overlap_ids"][t]
425 | ].astype(int)
426 |
427 | # record overview statistics.
428 | data["num_tk_cls_dets"] = num_tk_cls_dets
429 | data["num_tk_overlap_dets"] = num_tk_overlap_dets
430 | data["num_gt_dets"] = num_gt_dets
431 | data["num_tk_ids"] = len(unique_tk_ids)
432 | data["num_gt_ids"] = len(unique_gt_ids)
433 | data["num_timesteps"] = raw_data["num_timesteps"]
434 | data["seq"] = raw_data["seq"]
435 |
436 | self._check_unique_ids(data)
437 |
438 | return data
439 |
440 | @_timing.time
441 | def get_preprocessed_seq_data(
442 |         self, raw_data, cls, assignment=None, thresholds=None
443 | ):
444 | """Preprocess data for a single sequence for a single class."""
445 | data = {}
446 | if thresholds is None:
447 | thresholds = [50, 75]
448 | elif isinstance(thresholds, int):
449 | thresholds = [thresholds]
450 |
451 | for thr in thresholds:
452 | assignment_thr = None
453 | if assignment is not None:
454 | assignment_thr = assignment[thr]
455 | data[thr] = self.get_preprocessed_seq_data_thr(
456 | raw_data, cls, assignment_thr
457 | )
458 |
459 | return data
460 |
461 | def _calculate_similarities(self, gt_dets_t, tk_dets_t):
462 | """Compute similarity scores."""
463 | sim_scores = self._calculate_box_ious(gt_dets_t, tk_dets_t)
464 | return sim_scores
465 |
466 | def _compute_vid_mappings(self, annotations):
467 | """Computes mappings from videos to corresponding tracks and images."""
468 | vids_to_tracks = {}
469 | vids_to_imgs = {}
470 | vid_ids = [vid["id"] for vid in self.gt_data["videos"]]
471 |
472 |         # compute a mapping from image IDs to images
473 | images = {}
474 | for image in self.gt_data["images"]:
475 | images[image["id"]] = image
476 |
477 | tk_str = utils.get_track_id_str(annotations[0])
478 | for ann in annotations:
479 | ann["area"] = ann["bbox"][2] * ann["bbox"][3]
480 |
481 | vid = ann["video_id"]
482 | if ann["video_id"] not in vids_to_tracks.keys():
483 | vids_to_tracks[ann["video_id"]] = list()
484 | if ann["video_id"] not in vids_to_imgs.keys():
485 | vids_to_imgs[ann["video_id"]] = list()
486 |
487 | # fill in vids_to_tracks
488 | tid = ann[tk_str]
489 | exist_tids = [track["id"] for track in vids_to_tracks[vid]]
490 | try:
491 | index1 = exist_tids.index(tid)
492 | except ValueError:
493 | index1 = -1
494 |             if index1 == -1:
495 | curr_track = {
496 | "id": tid,
497 | "category_id": ann["category_id"],
498 | "video_id": vid,
499 | "annotations": [ann],
500 | }
501 | vids_to_tracks[vid].append(curr_track)
502 | else:
503 | vids_to_tracks[vid][index1]["annotations"].append(ann)
504 |
505 | # fill in vids_to_imgs
506 | img_id = ann["image_id"]
507 | exist_img_ids = [img["id"] for img in vids_to_imgs[vid]]
508 | try:
509 | index2 = exist_img_ids.index(img_id)
510 | except ValueError:
511 | index2 = -1
512 | if index2 == -1:
513 | curr_img = {"id": img_id, "annotations": [ann]}
514 | vids_to_imgs[vid].append(curr_img)
515 | else:
516 | vids_to_imgs[vid][index2]["annotations"].append(ann)
517 |
518 | # sort annotations by frame index and compute track area
519 | for vid, tracks in vids_to_tracks.items():
520 | for track in tracks:
521 | track["annotations"] = sorted(
522 | track["annotations"],
523 | key=lambda x: images[x["image_id"]]["frame_id"],
524 | )
525 | # compute average area
526 | track["area"] = sum(x["area"] for x in track["annotations"]) / len(
527 | track["annotations"]
528 | )
529 |
530 | # ensure all videos are present
531 | for vid_id in vid_ids:
532 | if vid_id not in vids_to_tracks.keys():
533 | vids_to_tracks[vid_id] = []
534 | if vid_id not in vids_to_imgs.keys():
535 | vids_to_imgs[vid_id] = []
536 |
537 | return vids_to_tracks, vids_to_imgs
538 |
539 | def _compute_image_to_timestep_mappings(self):
540 | """Computes a mapping from images to timestep in sequence."""
541 | images = {}
542 | for image in self.gt_data["images"]:
543 | images[image["id"]] = image
544 |
545 | seq_to_imgs_to_timestep = {vid["id"]: dict() for vid in self.gt_data["videos"]}
546 | for vid in seq_to_imgs_to_timestep:
547 | curr_imgs = [img["id"] for img in self.video2gt_image[vid]]
548 | curr_imgs = sorted(curr_imgs, key=lambda x: images[x]["frame_id"])
549 | seq_to_imgs_to_timestep[vid] = {
550 | curr_imgs[i]: i for i in range(len(curr_imgs))
551 | }
552 |
553 | return seq_to_imgs_to_timestep
554 |
555 | def _limit_dets_per_image(self, annotations):
556 | """Limits the number of detections for each image.
557 |
558 | Adapted from https://github.com/TAO-Dataset/.
559 | """
560 | max_dets = self.config["MAX_DETECTIONS"]
561 | img_ann = defaultdict(list)
562 | for ann in annotations:
563 | img_ann[ann["image_id"]].append(ann)
564 |
565 | for img_id, _anns in img_ann.items():
566 | if len(_anns) <= max_dets:
567 | continue
568 | _anns = sorted(_anns, key=lambda x: x["score"], reverse=True)
569 | img_ann[img_id] = _anns[:max_dets]
570 |
571 | return [ann for anns in img_ann.values() for ann in anns]
572 |
573 | def _fill_video_ids_inplace(self, annotations):
574 | """Fills in missing video IDs inplace.
575 |
576 | Adapted from https://github.com/TAO-Dataset/.
577 | """
578 | missing_video_id = [x for x in annotations if "video_id" not in x]
579 | if missing_video_id:
580 | image_id_to_video_id = {
581 | x["id"]: x["video_id"] for x in self.gt_data["images"]
582 | }
583 | for x in missing_video_id:
584 | x["video_id"] = image_id_to_video_id[x["image_id"]]
585 |
586 | @staticmethod
587 | def _make_tk_ids_unique(annotations):
588 |         """Makes track IDs unique over the whole annotation set.
589 |
590 | Adapted from https://github.com/TAO-Dataset/.
591 | """
592 | track_id_videos = {}
593 | track_ids_to_update = set()
594 | max_track_id = 0
595 |
596 | tk_str = utils.get_track_id_str(annotations[0])
597 | for ann in annotations:
598 | t = int(ann[tk_str])
599 | if t not in track_id_videos:
600 | track_id_videos[t] = ann["video_id"]
601 |
602 | if ann["video_id"] != track_id_videos[t]:
603 | # track id is assigned to multiple videos
604 | track_ids_to_update.add(t)
605 | max_track_id = max(max_track_id, t)
606 |
607 | if track_ids_to_update:
608 |             print(f"Reassigning {len(track_ids_to_update)} track ids that were reused across videos.")
609 | next_id = itertools.count(max_track_id + 1)
610 | new_tk_ids = defaultdict(lambda: next(next_id))
611 | for ann in annotations:
612 | t = ann[tk_str]
613 | v = ann["video_id"]
614 | if t in track_ids_to_update:
615 | ann[tk_str] = new_tk_ids[t, v]
616 | return len(track_ids_to_update)
617 |
--------------------------------------------------------------------------------
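All dataset classes in this package (COCO above, KITTI MOTS before it, and COCOMOTS
below) relabel GT and tracker track ids into a contiguous 0..N-1 range using a
NaN-initialized lookup array, keeping the original ids recoverable via gt_id_map and
tk_id_map. A minimal sketch of the idiom, with invented ids:

    import numpy as np

    unique_ids = np.unique([7, 42, 42, 7, 100])      # sparse ids -> [7, 42, 100]

    # Lookup table indexed by original id; unused slots stay NaN, so a lookup
    # of an id that was never seen stands out instead of silently aliasing
    # another track.
    id_map = np.nan * np.ones(np.max(unique_ids) + 1)
    id_map[unique_ids] = np.arange(len(unique_ids))  # 7->0, 42->1, 100->2

    frame_ids = np.array([42, 7, 100])
    print(id_map[frame_ids].astype(int))             # [1 0 2]

--------------------------------------------------------------------------------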
/teta/datasets/coco_mots.py:
--------------------------------------------------------------------------------
1 | """COCO MOTS Dataset."""
2 | import copy
3 | import itertools
4 | import json
5 | import os
6 | from collections import defaultdict
7 |
8 | import numpy as np
9 | from scipy.optimize import linear_sum_assignment
10 |
11 | from .. import _timing, utils
12 | from ..config import get_default_dataset_config, init_config
13 | from ..utils import TrackEvalException
14 | from ._base_dataset import _BaseDataset
15 |
16 |
17 | class COCOMOTS(_BaseDataset):
18 |     """Tracking datasets in COCO format with RLE segmentation masks."""
19 |
20 | def __init__(self, config=None):
21 | """Initialize dataset, checking that all required files are present."""
22 | super().__init__()
23 | # Fill non-given config values with defaults
24 | self.config = init_config(config, get_default_dataset_config(), self.get_name())
25 | self.gt_fol = self.config["GT_FOLDER"]
26 | self.tracker_fol = self.config["TRACKERS_FOLDER"]
27 | self.should_classes_combine = True
28 | self.use_super_categories = False
29 |
30 | self.tracker_sub_fol = self.config["TRACKER_SUB_FOLDER"]
31 | self.output_fol = self.config["OUTPUT_FOLDER"]
32 | if self.output_fol is None:
33 | self.output_fol = self.tracker_fol
34 | self.output_sub_fol = self.config["OUTPUT_SUB_FOLDER"]
35 |
36 | if self.gt_fol.endswith(".json"):
37 |             with open(self.gt_fol) as f:
38 |                 self.gt_data = json.load(f)
38 | else:
39 | gt_dir_files = [
40 | file for file in os.listdir(self.gt_fol) if file.endswith(".json")
41 | ]
42 | if len(gt_dir_files) != 1:
43 | raise TrackEvalException(
44 | f"{self.gt_fol} does not contain exactly one json file."
45 | )
46 |
47 | with open(os.path.join(self.gt_fol, gt_dir_files[0])) as f:
48 | self.gt_data = json.load(f)
49 |
50 | # fill missing video ids
51 | self._fill_video_ids_inplace(self.gt_data["annotations"])
52 |
53 | # get sequences to eval and sequence information
54 | self.seq_list = [
55 | vid["name"].replace("/", "-") for vid in self.gt_data["videos"]
56 | ]
57 | self.seq_name2seqid = {
58 | vid["name"].replace("/", "-"): vid["id"] for vid in self.gt_data["videos"]
59 | }
60 | # compute mappings from videos to annotation data
61 | self.video2gt_track, self.video2gt_image = self._compute_vid_mappings(
62 | self.gt_data["annotations"]
63 | )
64 | # compute sequence lengths
65 | self.seq_lengths = {vid["id"]: 0 for vid in self.gt_data["videos"]}
66 | for img in self.gt_data["images"]:
67 | self.seq_lengths[img["video_id"]] += 1
68 | self.seq2images2timestep = self._compute_image_to_timestep_mappings()
69 | self.seq2cls = {
70 | vid["id"]: {
71 | "pos_cat_ids": list(
72 | {track["category_id"] for track in self.video2gt_track[vid["id"]]}
73 | ),
74 | }
75 | for vid in self.gt_data["videos"]
76 | }
77 |
78 | # Get classes to eval
79 | considered_vid_ids = [self.seq_name2seqid[vid] for vid in self.seq_list]
80 | seen_cats = set(
81 | [
82 | cat_id
83 | for vid_id in considered_vid_ids
84 | for cat_id in self.seq2cls[vid_id]["pos_cat_ids"]
85 | ]
86 | )
87 |         # only classes present in the ground truth are evaluated
88 | self.valid_classes = [
89 | cls["name"] for cls in self.gt_data["categories"] if cls["id"] in seen_cats
90 | ]
91 | cls_name2clsid_map = {
92 | cls["name"]: cls["id"] for cls in self.gt_data["categories"]
93 | }
94 |
95 | if self.config["CLASSES_TO_EVAL"]:
96 | self.class_list = [
97 | cls.lower() if cls.lower() in self.valid_classes else None
98 | for cls in self.config["CLASSES_TO_EVAL"]
99 | ]
100 | if not all(self.class_list):
101 | valid_cls = ", ".join(self.valid_classes)
102 | raise TrackEvalException(
103 | "Attempted to evaluate an invalid class. Only classes "
104 | f"{valid_cls} are valid (classes present in ground truth"
105 | " data)."
106 | )
107 | else:
108 | self.class_list = [cls for cls in self.valid_classes]
109 | self.cls_name2clsid = {
110 | k: v for k, v in cls_name2clsid_map.items() if k in self.class_list
111 | }
112 | self.clsid2cls_name = {
113 | v: k for k, v in cls_name2clsid_map.items() if k in self.class_list
114 | }
115 | # get trackers to eval
116 | if self.config["TRACKERS_TO_EVAL"] is None:
117 | self.tracker_list = os.listdir(self.tracker_fol)
118 | else:
119 | self.tracker_list = self.config["TRACKERS_TO_EVAL"]
120 |
121 | if self.config["TRACKER_DISPLAY_NAMES"] is None:
122 | self.tracker_to_disp = dict(zip(self.tracker_list, self.tracker_list))
123 | elif (self.config["TRACKERS_TO_EVAL"] is not None) and (
124 |             len(self.config["TRACKER_DISPLAY_NAMES"]) == len(self.tracker_list)
125 | ):
126 | self.tracker_to_disp = dict(
127 |                 zip(self.tracker_list, self.config["TRACKER_DISPLAY_NAMES"])
128 | )
129 | else:
130 | raise TrackEvalException(
131 | "List of tracker files and tracker display names do not match."
132 | )
133 |
134 | self.tracker_data = {tracker: dict() for tracker in self.tracker_list}
135 |
136 | for tracker in self.tracker_list:
137 | if self.tracker_sub_fol.endswith(".json"):
138 |                 with open(self.tracker_sub_fol) as f:
139 | curr_data = json.load(f)
140 | else:
141 | tr_dir = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol)
142 | tr_dir_files = [
143 | file for file in os.listdir(tr_dir) if file.endswith(".json")
144 | ]
145 | if len(tr_dir_files) != 1:
146 | raise TrackEvalException(
147 | f"{tr_dir} does not contain exactly one json file."
148 | )
149 | with open(os.path.join(tr_dir, tr_dir_files[0])) as f:
150 | curr_data = json.load(f)
151 |
152 | # limit detections if MAX_DETECTIONS > 0
153 | if self.config["MAX_DETECTIONS"]:
154 | curr_data = self._limit_dets_per_image(curr_data)
155 |
156 | # fill missing video ids
157 | self._fill_video_ids_inplace(curr_data)
158 |
159 | # make track ids unique over whole evaluation set
160 | self._make_tk_ids_unique(curr_data)
161 |
162 | # get tracker sequence information
163 | curr_vids2tracks, curr_vids2images = self._compute_vid_mappings(curr_data)
164 | self.tracker_data[tracker]["vids_to_tracks"] = curr_vids2tracks
165 | self.tracker_data[tracker]["vids_to_images"] = curr_vids2images
166 |
167 | def get_display_name(self, tracker):
168 | return self.tracker_to_disp[tracker]
169 |
170 | def _load_raw_file(self, tracker, seq, is_gt):
171 |         """Load a file (gt or tracker) in the COCO format.
172 |
173 | If is_gt, this returns a dict which contains the fields:
174 | [gt_ids, gt_classes]:
175 | list (for each timestep) of 1D NDArrays (for each det).
176 | [gt_dets]: list (for each timestep) of lists of detections.
177 |
178 | if not is_gt, this returns a dict which contains the fields:
179 | [tk_ids, tk_classes]:
180 | list (for each timestep) of 1D NDArrays (for each det).
181 | [tk_dets]: list (for each timestep) of lists of detections.
182 | """
183 | seq_id = self.seq_name2seqid[seq]
184 | # file location
185 | if is_gt:
186 | imgs = self.video2gt_image[seq_id]
187 | else:
188 | imgs = self.tracker_data[tracker]["vids_to_images"][seq_id]
189 |
190 | # convert data to required format
191 | num_timesteps = self.seq_lengths[seq_id]
192 | img_to_timestep = self.seq2images2timestep[seq_id]
193 | data_keys = ["ids", "classes", "dets"]
194 | # if not is_gt:
195 | # data_keys += ["tk_confidences"]
196 | raw_data = {key: [None] * num_timesteps for key in data_keys}
197 | for img in imgs:
198 | # some tracker data contains images without any ground truth info,
199 | # these are ignored
200 | if img["id"] not in img_to_timestep:
201 | continue
202 | t = img_to_timestep[img["id"]]
203 | anns = img["annotations"]
204 | tk_str = utils.get_track_id_str(anns[0])
205 |             # RLE masks under the standard COCO "segmentation" key (assumed present),
206 |             # matching the encoded-mask IoU used in _calculate_similarities below.
207 |             raw_data["dets"][t] = [ann["segmentation"] for ann in anns]
208 | raw_data["ids"][t] = np.atleast_1d([ann[tk_str] for ann in anns]).astype(
209 | int
210 | )
211 | raw_data["classes"][t] = np.atleast_1d(
212 | [ann["category_id"] for ann in anns]
213 | ).astype(int)
214 | # if not is_gt:
215 | # raw_data["tk_confidences"][t] = np.atleast_1d(
216 | # [ann["score"] for ann in anns]
217 | # ).astype(float)
218 |
219 | for t, d in enumerate(raw_data["dets"]):
220 | if d is None:
221 |                 raw_data["dets"][t] = []  # no mask detections at this timestep
222 | raw_data["ids"][t] = np.empty(0).astype(int)
223 | raw_data["classes"][t] = np.empty(0).astype(int)
224 | # if not is_gt:
225 | # raw_data["tk_confidences"][t] = np.empty(0)
226 |
227 | if is_gt:
228 | key_map = {"ids": "gt_ids", "classes": "gt_classes", "dets": "gt_dets"}
229 | else:
230 | key_map = {"ids": "tk_ids", "classes": "tk_classes", "dets": "tk_dets"}
231 | for k, v in key_map.items():
232 | raw_data[v] = raw_data.pop(k)
233 |
234 | raw_data["num_timesteps"] = num_timesteps
235 | raw_data["seq"] = seq
236 | return raw_data
237 |
238 | def get_preprocessed_seq_data_thr(self, raw_data, cls, assignment=None):
239 | """Preprocess data for a single sequence for a single class.
240 |
241 | Inputs:
242 | raw_data: dict containing the data for the sequence already
243 | read in by get_raw_seq_data().
244 | cls: class to be evaluated.
245 | Outputs:
246 | gt_ids:
247 | list (for each timestep) of ids of GT tracks
248 | tk_ids:
249 |                 list (for each timestep) of ids of predicted tracks (all
250 |                 candidates for TP matching: Det + AssocA)
251 |             tk_overlap_ids:
252 |                 list (for each timestep) of ids of predicted tracks that overlap
253 |                 with GTs
254 |             tk_dets:
255 |                 list (for each timestep) of lists of detections corresponding
256 |                 to the tk_ids
257 |             tk_classes:
258 |                 list (for each timestep) of lists of classes corresponding
259 |                 to the tk_ids
260 |             tk_confidences:
261 |                 list (for each timestep) of lists of confidences corresponding
262 |                 to the tk_ids
263 |             sim_scores:
264 |                 similarity scores between gt_dets and tk_dets.
265 | """
266 |         cls_id = self.cls_name2clsid[cls] if cls != "all" else None  # "all" = class-agnostic
268 |
269 | data_keys = [
270 | "gt_ids",
271 | "tk_ids",
272 | "gt_id_map",
273 | "tk_id_map",
274 | "gt_dets",
275 | "gt_classes",
276 | "gt_class_name",
277 | "tk_overlap_classes",
278 | "tk_overlap_ids",
279 | "tk_class_eval_tk_ids",
280 | "tk_dets",
281 | "tk_classes",
282 | # "tk_confidences",
283 | "tk_exh_ids",
284 | "sim_scores",
285 | ]
286 | data = {key: [None] * raw_data["num_timesteps"] for key in data_keys}
287 | unique_gt_ids = []
288 | unique_tk_ids = []
289 | num_gt_dets = 0
290 | num_tk_cls_dets = 0
291 | num_tk_overlap_dets = 0
292 | overlap_ious_thr = 0.5
293 | loc_and_asso_tk_ids = []
294 | exh_class_tk_ids = []
295 |
296 | for t in range(raw_data["num_timesteps"]):
297 | # only extract relevant dets for this class for preproc and eval
298 | if cls == "all":
299 | gt_class_mask = np.ones_like(raw_data["gt_classes"][t]).astype(bool)
300 | else:
301 | gt_class_mask = np.atleast_1d(
302 | raw_data["gt_classes"][t] == cls_id
303 | ).astype(bool)
304 |
305 |             # collect GT outside the evaluated class and the tracker ids assigned to them
306 |             if assignment:
307 | all_gt_ids = list(assignment[t].keys())
308 | gt_ids_in = raw_data["gt_ids"][t][gt_class_mask]
309 | gt_ids_out = set(all_gt_ids) - set(gt_ids_in)
310 | tk_ids_out = set([assignment[t][key] for key in list(gt_ids_out)])
311 |
312 | # compute overlapped tracks and add their ids to overlap_tk_ids
313 | sim_scores = raw_data["similarity_scores"]
314 | overlap_ids_masks = (sim_scores[t][gt_class_mask] >= overlap_ious_thr).any(
315 | axis=0
316 | )
317 | overlap_tk_ids_t = raw_data["tk_ids"][t][overlap_ids_masks]
318 |             if assignment:
319 | data["tk_overlap_ids"][t] = list(set(overlap_tk_ids_t) - tk_ids_out)
320 | else:
321 | data["tk_overlap_ids"][t] = list(set(overlap_tk_ids_t))
322 |
323 | loc_and_asso_tk_ids += data["tk_overlap_ids"][t]
324 |
325 | data["tk_exh_ids"][t] = []
326 | if cls == "all":
327 | continue
328 |
329 | # add the track ids of exclusive annotated class to exh_class_tk_ids
330 | tk_exh_mask = np.atleast_1d(raw_data["tk_classes"][t] == cls_id)
331 | tk_exh_mask = tk_exh_mask.astype(bool)
332 | exh_class_tk_ids_t = raw_data["tk_ids"][t][tk_exh_mask]
333 | exh_class_tk_ids.append(exh_class_tk_ids_t)
334 | data["tk_exh_ids"][t] = exh_class_tk_ids_t
335 |
336 |         # deduplicate; tk_ids assigned to GT of other classes were already excluded above.
337 | loc_and_asso_tk_ids = list(set(loc_and_asso_tk_ids))
338 |
339 | # remove all unwanted unmatched tracker detections
340 | for t in range(raw_data["num_timesteps"]):
341 | # add gt to the data
342 | if cls == "all":
343 | gt_class_mask = np.ones_like(raw_data["gt_classes"][t]).astype(bool)
344 | else:
345 | gt_class_mask = np.atleast_1d(
346 | raw_data["gt_classes"][t] == cls_id
347 | ).astype(bool)
348 | data["gt_classes"][t] = cls_id
349 | data["gt_class_name"][t] = cls
350 |
351 | gt_ids = raw_data["gt_ids"][t][gt_class_mask]
352 | gt_dets = [raw_data['gt_dets'][t][ind] for ind in range(len(gt_class_mask)) if gt_class_mask[ind]]
353 | data["gt_ids"][t] = gt_ids
354 | data["gt_dets"][t] = gt_dets
355 |
356 | # filter pred and only keep those that highly overlap with GTs
357 | tk_mask = np.isin(
358 | raw_data["tk_ids"][t], np.array(loc_and_asso_tk_ids), assume_unique=True
359 | )
360 | tk_overlap_mask = np.isin(
361 | raw_data["tk_ids"][t],
362 | np.array(data["tk_overlap_ids"][t]),
363 | assume_unique=True,
364 | )
365 |
366 | tk_ids = raw_data["tk_ids"][t][tk_mask]
367 | tk_dets = [raw_data['tk_dets'][t][ind] for ind in range(len(tk_mask)) if
368 | tk_mask[ind]]
369 | tracker_classes = raw_data["tk_classes"][t][tk_mask]
370 |
371 | # add overlap classes for computing the FP for Cls term
372 | tracker_overlap_classes = raw_data["tk_classes"][t][tk_overlap_mask]
373 | # tracker_confidences = raw_data["tk_confidences"][t][tk_mask]
374 | sim_scores_masked = sim_scores[t][gt_class_mask, :][:, tk_mask]
375 |
376 | # add filtered prediction to the data
377 | data["tk_classes"][t] = tracker_classes
378 | data["tk_overlap_classes"][t] = tracker_overlap_classes
379 | data["tk_ids"][t] = tk_ids
380 | data["tk_dets"][t] = tk_dets
381 | # data["tk_confidences"][t] = tracker_confidences
382 | data["sim_scores"][t] = sim_scores_masked
383 | data["tk_class_eval_tk_ids"][t] = set(
384 | list(data["tk_overlap_ids"][t]) + list(data["tk_exh_ids"][t])
385 | )
386 |
387 | # count total number of detections
388 | unique_gt_ids += list(np.unique(data["gt_ids"][t]))
389 | # the unique track ids are for association.
390 | unique_tk_ids += list(np.unique(data["tk_ids"][t]))
391 |
392 | num_tk_overlap_dets += len(data["tk_overlap_ids"][t])
393 | num_tk_cls_dets += len(data["tk_class_eval_tk_ids"][t])
394 | num_gt_dets += len(data["gt_ids"][t])
395 |
396 | # re-label IDs such that there are no empty IDs
397 | if len(unique_gt_ids) > 0:
398 | unique_gt_ids = np.unique(unique_gt_ids)
399 | gt_id_map = np.nan * np.ones((np.max(unique_gt_ids) + 1))
400 | gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
401 | data["gt_id_map"] = {}
402 | for gt_id in unique_gt_ids:
403 | new_gt_id = gt_id_map[gt_id].astype(int)
404 | data["gt_id_map"][new_gt_id] = gt_id
405 |
406 | for t in range(raw_data["num_timesteps"]):
407 | if len(data["gt_ids"][t]) > 0:
408 | data["gt_ids"][t] = gt_id_map[data["gt_ids"][t]].astype(int)
409 |
410 | if len(unique_tk_ids) > 0:
411 | unique_tk_ids = np.unique(unique_tk_ids)
412 | tk_id_map = np.nan * np.ones((np.max(unique_tk_ids) + 1))
413 | tk_id_map[unique_tk_ids] = np.arange(len(unique_tk_ids))
414 |
415 | data["tk_id_map"] = {}
416 | for track_id in unique_tk_ids:
417 | new_track_id = tk_id_map[track_id].astype(int)
418 | data["tk_id_map"][new_track_id] = track_id
419 |
420 | for t in range(raw_data["num_timesteps"]):
421 | if len(data["tk_ids"][t]) > 0:
422 | data["tk_ids"][t] = tk_id_map[data["tk_ids"][t]].astype(int)
423 | if len(data["tk_overlap_ids"][t]) > 0:
424 | data["tk_overlap_ids"][t] = tk_id_map[
425 | data["tk_overlap_ids"][t]
426 | ].astype(int)
427 |
428 | # record overview statistics.
429 | data["num_tk_cls_dets"] = num_tk_cls_dets
430 | data["num_tk_overlap_dets"] = num_tk_overlap_dets
431 | data["num_gt_dets"] = num_gt_dets
432 | data["num_tk_ids"] = len(unique_tk_ids)
433 | data["num_gt_ids"] = len(unique_gt_ids)
434 | data["num_timesteps"] = raw_data["num_timesteps"]
435 | data["seq"] = raw_data["seq"]
436 |
437 | self._check_unique_ids(data)
438 |
439 | return data
440 |
441 | @_timing.time
442 | def get_preprocessed_seq_data(
443 |         self, raw_data, cls, assignment=None, thresholds=None
444 | ):
445 | """Preprocess data for a single sequence for a single class."""
446 | data = {}
447 | if thresholds is None:
448 | thresholds = [50, 75]
449 | elif isinstance(thresholds, int):
450 | thresholds = [thresholds]
451 |
452 | for thr in thresholds:
453 | assignment_thr = None
454 | if assignment is not None:
455 | assignment_thr = assignment[thr]
456 | data[thr] = self.get_preprocessed_seq_data_thr(
457 | raw_data, cls, assignment_thr
458 | )
459 |
460 | return data
461 |
462 | def _calculate_similarities(self, gt_dets_t, tracker_dets_t):
463 | similarity_scores = self._calculate_mask_ious(gt_dets_t, tracker_dets_t, is_encoded=True, do_ioa=False)
464 | return similarity_scores
465 |
466 | def _compute_vid_mappings(self, annotations):
467 | """Computes mappings from videos to corresponding tracks and images."""
468 | vids_to_tracks = {}
469 | vids_to_imgs = {}
470 | vid_ids = [vid["id"] for vid in self.gt_data["videos"]]
471 |
472 |         # compute a mapping from image IDs to images
473 | images = {}
474 | for image in self.gt_data["images"]:
475 | images[image["id"]] = image
476 |
477 | tk_str = utils.get_track_id_str(annotations[0])
478 | for ann in annotations:
479 | ann["area"] = ann["bbox"][2] * ann["bbox"][3]
480 |
481 | vid = ann["video_id"]
482 | if ann["video_id"] not in vids_to_tracks.keys():
483 | vids_to_tracks[ann["video_id"]] = list()
484 | if ann["video_id"] not in vids_to_imgs.keys():
485 | vids_to_imgs[ann["video_id"]] = list()
486 |
487 | # fill in vids_to_tracks
488 | tid = ann[tk_str]
489 | exist_tids = [track["id"] for track in vids_to_tracks[vid]]
490 | try:
491 | index1 = exist_tids.index(tid)
492 | except ValueError:
493 | index1 = -1
494 |             if index1 == -1:
495 | curr_track = {
496 | "id": tid,
497 | "category_id": ann["category_id"],
498 | "video_id": vid,
499 | "annotations": [ann],
500 | }
501 | vids_to_tracks[vid].append(curr_track)
502 | else:
503 | vids_to_tracks[vid][index1]["annotations"].append(ann)
504 |
505 | # fill in vids_to_imgs
506 | img_id = ann["image_id"]
507 | exist_img_ids = [img["id"] for img in vids_to_imgs[vid]]
508 | try:
509 | index2 = exist_img_ids.index(img_id)
510 | except ValueError:
511 | index2 = -1
512 | if index2 == -1:
513 | curr_img = {"id": img_id, "annotations": [ann]}
514 | vids_to_imgs[vid].append(curr_img)
515 | else:
516 | vids_to_imgs[vid][index2]["annotations"].append(ann)
517 |
518 | # sort annotations by frame index and compute track area
519 | for vid, tracks in vids_to_tracks.items():
520 | for track in tracks:
521 | track["annotations"] = sorted(
522 | track["annotations"],
523 | key=lambda x: images[x["image_id"]]["frame_id"],
524 | )
525 | # compute average area
526 | track["area"] = sum(x["area"] for x in track["annotations"]) / len(
527 | track["annotations"]
528 | )
529 |
530 | # ensure all videos are present
531 | for vid_id in vid_ids:
532 | if vid_id not in vids_to_tracks.keys():
533 | vids_to_tracks[vid_id] = []
534 | if vid_id not in vids_to_imgs.keys():
535 | vids_to_imgs[vid_id] = []
536 |
537 | return vids_to_tracks, vids_to_imgs
538 |
539 | def _compute_image_to_timestep_mappings(self):
540 | """Computes a mapping from images to timestep in sequence."""
541 | images = {}
542 | for image in self.gt_data["images"]:
543 | images[image["id"]] = image
544 |
545 | seq_to_imgs_to_timestep = {vid["id"]: dict() for vid in self.gt_data["videos"]}
546 | for vid in seq_to_imgs_to_timestep:
547 | curr_imgs = [img["id"] for img in self.video2gt_image[vid]]
548 | curr_imgs = sorted(curr_imgs, key=lambda x: images[x]["frame_id"])
549 | seq_to_imgs_to_timestep[vid] = {
550 | curr_imgs[i]: i for i in range(len(curr_imgs))
551 | }
552 |
553 | return seq_to_imgs_to_timestep
554 |
555 | def _limit_dets_per_image(self, annotations):
556 | """Limits the number of detections for each image.
557 |
558 | Adapted from https://github.com/TAO-Dataset/.
559 | """
560 | max_dets = self.config["MAX_DETECTIONS"]
561 | img_ann = defaultdict(list)
562 | for ann in annotations:
563 | img_ann[ann["image_id"]].append(ann)
564 |
565 | for img_id, _anns in img_ann.items():
566 | if len(_anns) <= max_dets:
567 | continue
568 | _anns = sorted(_anns, key=lambda x: x["score"], reverse=True)
569 | img_ann[img_id] = _anns[:max_dets]
570 |
571 | return [ann for anns in img_ann.values() for ann in anns]
572 |
573 | def _fill_video_ids_inplace(self, annotations):
574 | """Fills in missing video IDs inplace.
575 |
576 | Adapted from https://github.com/TAO-Dataset/.
577 | """
578 | missing_video_id = [x for x in annotations if "video_id" not in x]
579 | if missing_video_id:
580 | image_id_to_video_id = {
581 | x["id"]: x["video_id"] for x in self.gt_data["images"]
582 | }
583 | for x in missing_video_id:
584 | x["video_id"] = image_id_to_video_id[x["image_id"]]
585 |
586 | @staticmethod
587 | def _make_tk_ids_unique(annotations):
588 |         """Makes track IDs unique over the whole annotation set.
589 |
590 | Adapted from https://github.com/TAO-Dataset/.
591 | """
592 | track_id_videos = {}
593 | track_ids_to_update = set()
594 | max_track_id = 0
595 |
596 | tk_str = utils.get_track_id_str(annotations[0])
597 | for ann in annotations:
598 | t = int(ann[tk_str])
599 | if t not in track_id_videos:
600 | track_id_videos[t] = ann["video_id"]
601 |
602 | if ann["video_id"] != track_id_videos[t]:
603 | # track id is assigned to multiple videos
604 | track_ids_to_update.add(t)
605 | max_track_id = max(max_track_id, t)
606 |
607 | if track_ids_to_update:
608 |             print(f"Reassigning {len(track_ids_to_update)} track ids that were reused across videos.")
609 | next_id = itertools.count(max_track_id + 1)
610 | new_tk_ids = defaultdict(lambda: next(next_id))
611 | for ann in annotations:
612 | t = ann[tk_str]
613 | v = ann["video_id"]
614 | if t in track_ids_to_update:
615 | ann[tk_str] = new_tk_ids[t, v]
616 | return len(track_ids_to_update)
617 |
--------------------------------------------------------------------------------
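COCOMOTS overrides _calculate_similarities to score segmentation masks rather than
boxes, delegating to _calculate_mask_ious with is_encoded=True, i.e. run-length-encoded
masks in the COCO style. A small sketch of that computation on two toy masks, assuming
the standard pycocotools API that conventionally backs this kind of RLE handling:

    import numpy as np
    from pycocotools import mask as mask_utils

    # Two tiny overlapping binary masks, RLE-encoded as in COCO/MOTS data.
    a = np.zeros((4, 4), dtype=np.uint8); a[:2, :2] = 1
    b = np.zeros((4, 4), dtype=np.uint8); b[1:3, 1:3] = 1
    rle_a = mask_utils.encode(np.asfortranarray(a))
    rle_b = mask_utils.encode(np.asfortranarray(b))

    # iou(dts, gts, iscrowd) works directly on the encoded masks.
    print(mask_utils.iou([rle_a], [rle_b], [False]))  # [[0.14285714]] = 1/7

--------------------------------------------------------------------------------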
/teta/datasets/tao.py:
--------------------------------------------------------------------------------
1 | """TAO Dataset."""
2 | import copy
3 | import itertools
4 | import json
5 | import os
6 | from collections import defaultdict
7 |
8 | import numpy as np
9 |
10 | from .. import _timing
11 | from ..config import get_default_dataset_config, init_config
12 | from ..utils import TrackEvalException
13 | from ._base_dataset import _BaseDataset
14 |
15 |
16 | class TAO(_BaseDataset):
17 |     """Dataset class for TAO tracking."""
18 |
19 | def __init__(self, config=None):
20 | """Initialize dataset, checking that all required files are present."""
21 | super().__init__()
22 | # Fill non-given config values with defaults
23 | self.config = init_config(config, get_default_dataset_config(), self.get_name())
24 | self.gt_fol = self.config["GT_FOLDER"]
25 | self.tracker_fol = self.config["TRACKERS_FOLDER"]
26 | self.should_classes_combine = True
27 | self.use_super_categories = False
28 |
29 | self.tracker_sub_fol = self.config["TRACKER_SUB_FOLDER"]
30 | self.output_fol = self.config["OUTPUT_FOLDER"]
31 | if self.output_fol is None:
32 | self.output_fol = self.tracker_fol
33 | self.output_sub_fol = self.config["OUTPUT_SUB_FOLDER"]
34 |
35 | if self.gt_fol.endswith(".json"):
36 |             with open(self.gt_fol) as f:
37 |                 self.gt_data = json.load(f)
37 | else:
38 | gt_dir_files = [
39 | file for file in os.listdir(self.gt_fol) if file.endswith(".json")
40 | ]
41 | if len(gt_dir_files) != 1:
42 | raise TrackEvalException(
43 | f"{self.gt_fol} does not contain exactly one json file."
44 | )
45 |
46 | with open(os.path.join(self.gt_fol, gt_dir_files[0])) as f:
47 | self.gt_data = json.load(f)
48 |
49 | # merge categories marked with a merged tag in TAO dataset
50 | self._merge_categories(self.gt_data["annotations"] + self.gt_data["tracks"])
51 |
52 | # get sequences to eval and sequence information
53 | self.seq_list = [
54 | vid["name"].replace("/", "-") for vid in self.gt_data["videos"]
55 | ]
56 | self.seq_name2seqid = {
57 | vid["name"].replace("/", "-"): vid["id"] for vid in self.gt_data["videos"]
58 | }
59 | # compute mappings from videos to annotation data
60 | self.video2gt_track, self.video2gt_image = self._compute_vid_mappings(
61 | self.gt_data["annotations"]
62 | )
63 | # compute sequence lengths
64 | self.seq_lengths = {vid["id"]: 0 for vid in self.gt_data["videos"]}
65 | for img in self.gt_data["images"]:
66 | self.seq_lengths[img["video_id"]] += 1
67 | self.seq2images2timestep = self._compute_image_to_timestep_mappings()
68 | self.seq2cls = {
69 | vid["id"]: {
70 | "pos_cat_ids": list(
71 | {track["category_id"] for track in self.video2gt_track[vid["id"]]}
72 | ),
73 | "neg_cat_ids": vid["neg_category_ids"],
74 | "not_exh_labeled_cat_ids": vid["not_exhaustive_category_ids"],
75 | }
76 | for vid in self.gt_data["videos"]
77 | }
78 |
79 | # Get classes to eval
80 | considered_vid_ids = [self.seq_name2seqid[vid] for vid in self.seq_list]
81 | seen_cats = set(
82 | [
83 | cat_id
84 | for vid_id in considered_vid_ids
85 | for cat_id in self.seq2cls[vid_id]["pos_cat_ids"]
86 | ]
87 | )
88 | # only classes with ground truth are evaluated in TAO
89 | self.valid_classes = [
90 | cls["name"] for cls in self.gt_data["categories"] if cls["id"] in seen_cats
91 | ]
92 | cls_name2clsid_map = {
93 | cls["name"]: cls["id"] for cls in self.gt_data["categories"]
94 | }
95 |
96 | if self.config["CLASSES_TO_EVAL"]:
97 | self.class_list = [
98 | cls.lower() if cls.lower() in self.valid_classes else None
99 | for cls in self.config["CLASSES_TO_EVAL"]
100 | ]
101 | if not all(self.class_list):
102 | valid_cls = ", ".join(self.valid_classes)
103 | raise TrackEvalException(
104 | "Attempted to evaluate an invalid class. Only classes "
105 | f"{valid_cls} are valid (classes present in ground truth"
106 | " data)."
107 | )
108 | else:
109 | self.class_list = [cls for cls in self.valid_classes]
110 | self.cls_name2clsid = {
111 | k: v for k, v in cls_name2clsid_map.items() if k in self.class_list
112 | }
113 | self.clsid2cls_name = {
114 | v: k for k, v in cls_name2clsid_map.items() if k in self.class_list
115 | }
116 | # get trackers to eval
118 | if self.config["TRACKERS_TO_EVAL"] is None:
119 | self.tracker_list = os.listdir(self.tracker_fol)
120 | else:
121 | self.tracker_list = self.config["TRACKERS_TO_EVAL"]
122 |
123 | if self.config["TRACKER_DISPLAY_NAMES"] is None:
124 | self.tracker_to_disp = dict(zip(self.tracker_list, self.tracker_list))
125 | elif (self.config["TRACKERS_TO_EVAL"] is not None) and (
126 |             len(self.config["TRACKER_DISPLAY_NAMES"]) == len(self.tracker_list)
127 | ):
128 | self.tracker_to_disp = dict(
129 |                 zip(self.tracker_list, self.config["TRACKER_DISPLAY_NAMES"])
130 | )
131 | else:
132 | raise TrackEvalException(
133 | "List of tracker files and tracker display names do not match."
134 | )
135 |
136 | self.tracker_data = {tracker: dict() for tracker in self.tracker_list}
137 |
138 | for tracker in self.tracker_list:
139 | if self.tracker_sub_fol.endswith(".json"):
140 |                 with open(self.tracker_sub_fol) as f:
141 | curr_data = json.load(f)
142 | else:
143 | tr_dir = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol)
144 | tr_dir_files = [
145 | file for file in os.listdir(tr_dir) if file.endswith(".json")
146 | ]
147 | if len(tr_dir_files) != 1:
148 | raise TrackEvalException(
149 | f"{tr_dir} does not contain exactly one json file."
150 | )
151 | with open(os.path.join(tr_dir, tr_dir_files[0])) as f:
152 | curr_data = json.load(f)
153 |
154 | # limit detections if MAX_DETECTIONS > 0
155 | if self.config["MAX_DETECTIONS"]:
156 | curr_data = self._limit_dets_per_image(curr_data)
157 |
158 | # fill missing video ids
159 | self._fill_video_ids_inplace(curr_data)
160 |
161 | # make track ids unique over whole evaluation set
162 | self._make_tk_ids_unique(curr_data)
163 |
164 | # merge categories marked with a merged tag in TAO dataset
165 | self._merge_categories(curr_data)
166 |
167 | # get tracker sequence information
168 | curr_vids2tracks, curr_vids2images = self._compute_vid_mappings(curr_data)
169 | self.tracker_data[tracker]["vids_to_tracks"] = curr_vids2tracks
170 | self.tracker_data[tracker]["vids_to_images"] = curr_vids2images
171 |
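    # Illustrative config sketch (not part of the original file): a minimal
    # dict exercising the keys read in __init__ above. "GT_FOLDER" and the
    # paths/tracker names here are assumptions for illustration only.
    #
    #   config = {
    #       "GT_FOLDER": "data/tao/annotations/validation.json",
    #       "TRACKERS_FOLDER": "data/tao/trackers",
    #       "TRACKER_SUB_FOLDER": "data",
    #       "OUTPUT_FOLDER": None,  # falls back to TRACKERS_FOLDER
    #       "OUTPUT_SUB_FOLDER": "",
    #       "CLASSES_TO_EVAL": None,  # None = every class seen in the GT
    #       "TRACKERS_TO_EVAL": ["my_tracker"],
    #       "TRACKER_DISPLAY_NAMES": None,  # defaults to the tracker names
    #       "MAX_DETECTIONS": 300,  # 0 disables the per-image cap
    #   }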
172 | def get_display_name(self, tracker):
173 | return self.tracker_to_disp[tracker]
174 |
175 | def _load_raw_file(self, tracker, seq, is_gt):
176 |         """Load a file (gt or tracker) in the TAO format.
177 |
178 | If is_gt, this returns a dict which contains the fields:
179 | [gt_ids, gt_classes]:
180 | list (for each timestep) of 1D NDArrays (for each det).
181 | [gt_dets]: list (for each timestep) of lists of detections.
182 |
183 |         If not is_gt, this returns a dict which contains the fields:
184 | [tk_ids, tk_classes, tk_confidences]:
185 | list (for each timestep) of 1D NDArrays (for each det).
186 | [tk_dets]: list (for each timestep) of lists of detections.
187 | """
188 | seq_id = self.seq_name2seqid[seq]
189 |         # select gt or tracker images for this sequence
190 | if is_gt:
191 | imgs = self.video2gt_image[seq_id]
192 | else:
193 | imgs = self.tracker_data[tracker]["vids_to_images"][seq_id]
194 |
195 | # convert data to required format
196 | num_timesteps = self.seq_lengths[seq_id]
197 | img_to_timestep = self.seq2images2timestep[seq_id]
198 | data_keys = ["ids", "classes", "dets"]
199 | if not is_gt:
200 | data_keys += ["tk_confidences"]
201 | raw_data = {key: [None] * num_timesteps for key in data_keys}
202 | for img in imgs:
203 | # some tracker data contains images without any ground truth info,
204 | # these are ignored
205 | if img["id"] not in img_to_timestep:
206 | continue
207 | t = img_to_timestep[img["id"]]
208 | anns = img["annotations"]
209 | raw_data["dets"][t] = np.atleast_2d([ann["bbox"] for ann in anns]).astype(
210 | float
211 | )
212 | raw_data["ids"][t] = np.atleast_1d(
213 | [ann["track_id"] for ann in anns]
214 | ).astype(int)
215 | raw_data["classes"][t] = np.atleast_1d(
216 | [ann["category_id"] for ann in anns]
217 | ).astype(int)
218 | if not is_gt:
219 | raw_data["tk_confidences"][t] = np.atleast_1d(
220 | [ann["score"] for ann in anns]
221 | ).astype(float)
222 |
223 | for t, d in enumerate(raw_data["dets"]):
224 | if d is None:
225 | raw_data["dets"][t] = np.empty((0, 4)).astype(float)
226 | raw_data["ids"][t] = np.empty(0).astype(int)
227 | raw_data["classes"][t] = np.empty(0).astype(int)
228 | if not is_gt:
229 | raw_data["tk_confidences"][t] = np.empty(0)
230 |
231 | if is_gt:
232 | key_map = {"ids": "gt_ids", "classes": "gt_classes", "dets": "gt_dets"}
233 | else:
234 | key_map = {"ids": "tk_ids", "classes": "tk_classes", "dets": "tk_dets"}
235 | for k, v in key_map.items():
236 | raw_data[v] = raw_data.pop(k)
237 |
238 | raw_data["num_timesteps"] = num_timesteps
239 | raw_data["neg_cat_ids"] = self.seq2cls[seq_id]["neg_cat_ids"]
240 | raw_data["not_exh_labeled_cls"] = self.seq2cls[seq_id][
241 | "not_exh_labeled_cat_ids"
242 | ]
243 | raw_data["seq"] = seq
244 | return raw_data
245 |
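    # Illustrative sketch (values made up) of the per-timestep layout this
    # returns for a toy 2-frame tracker sequence; boxes are TAO-style
    # [x, y, w, h] floats:
    #
    #   raw_data["tk_ids"]     == [np.array([1, 2]), np.array([2])]
    #   raw_data["tk_classes"] == [np.array([4, 4]), np.array([4])]
    #   raw_data["tk_dets"][0] == np.array([[10., 20., 30., 40.],
    #                                       [50., 60., 30., 40.]])
    #   raw_data["tk_confidences"][0] == np.array([0.9, 0.4])
    #   raw_data["num_timesteps"] == 2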
246 | def get_preprocessed_seq_data_thr(self, raw_data, cls, assignment=None):
247 | """Preprocess data for a single sequence for a single class.
248 |
249 | Inputs:
250 | raw_data: dict containing the data for the sequence already
251 | read in by get_raw_seq_data().
252 | cls: class to be evaluated.
253 | Outputs:
254 | gt_ids:
255 | list (for each timestep) of ids of GT tracks
256 | tk_ids:
257 |                 list (for each timestep) of ids of predicted tracks (all
258 |                 candidates used for TP matching (Det + AssocA))
259 | tk_overlap_ids:
260 | list (for each timestep) of ids of predicted tracks that overlap
261 | with GTs
262 | tk_neg_ids:
263 |                 list (for each timestep) of ids of predicted tracks whose
264 |                 class id is on the negative list for the current sequence.
265 | tk_exh_ids:
266 | list (for each timestep) of ids of predicted tracks that do not
267 | overlap with existing GTs but have the class id on the
268 |                 exhaustively annotated class list for the current sequence.
269 | tk_dets:
270 |                 list (for each timestep) of lists of detections
271 |                 corresponding to the tk_ids
272 | tk_classes:
273 |                 list (for each timestep) of lists of classes corresponding
274 |                 to the tk_ids
275 | tk_confidences:
276 |                 list (for each timestep) of lists of confidence scores
277 |                 corresponding to the tk_ids
278 | sim_scores:
279 |                 similarity scores between gt_ids and tk_ids.
280 | """
281 | if cls != "all":
282 | cls_id = self.cls_name2clsid[cls]
283 |
284 | data_keys = [
285 | "gt_ids",
286 | "tk_ids",
287 | "gt_id_map",
288 | "tk_id_map",
289 | "gt_dets",
290 | "gt_classes",
291 | "gt_class_name",
292 | "tk_overlap_classes",
293 | "tk_overlap_ids",
294 | "tk_neg_ids",
295 | "tk_exh_ids",
296 | "tk_class_eval_tk_ids",
297 | "tk_dets",
298 | "tk_classes",
299 | "tk_confidences",
300 | "sim_scores",
301 | ]
302 | data = {key: [None] * raw_data["num_timesteps"] for key in data_keys}
303 | unique_gt_ids = []
304 | unique_tk_ids = []
305 | num_gt_dets = 0
306 | num_tk_cls_dets = 0
307 | num_tk_overlap_dets = 0
308 | overlap_ious_thr = 0.5
309 | loc_and_asso_tk_ids = []
310 |
311 | for t in range(raw_data["num_timesteps"]):
312 | # only extract relevant dets for this class for preproc and eval
313 | if cls == "all":
314 | gt_class_mask = np.ones_like(raw_data["gt_classes"][t]).astype(bool)
315 | else:
316 | gt_class_mask = np.atleast_1d(
317 | raw_data["gt_classes"][t] == cls_id
318 | ).astype(bool)
319 |
320 | # select GT that is not in the evaluating classes
321 |             if assignment:
322 | all_gt_ids = list(assignment[t].keys())
323 | gt_ids_in = raw_data["gt_ids"][t][gt_class_mask]
324 | gt_ids_out = set(all_gt_ids) - set(gt_ids_in)
325 |                 tk_ids_out = {assignment[t][key] for key in gt_ids_out}
326 |
327 | # compute overlapped tracks and add their ids to overlap_tk_ids
328 | sim_scores = raw_data["similarity_scores"]
329 | overlap_ids_masks = (sim_scores[t][gt_class_mask] >= overlap_ious_thr).any(
330 | axis=0
331 | )
332 | overlap_tk_ids_t = raw_data["tk_ids"][t][overlap_ids_masks]
333 |             if assignment:
334 | data["tk_overlap_ids"][t] = list(set(overlap_tk_ids_t) - tk_ids_out)
335 | else:
336 | data["tk_overlap_ids"][t] = list(set(overlap_tk_ids_t))
337 |
338 | loc_and_asso_tk_ids += data["tk_overlap_ids"][t]
339 |
340 | data["tk_exh_ids"][t] = []
341 | data["tk_neg_ids"][t] = []
342 |
343 | if cls == "all":
344 | continue
345 |
346 |         # remove tk_ids that have been assigned to GTs of other classes.
347 | loc_and_asso_tk_ids = list(set(loc_and_asso_tk_ids))
348 |
349 | # remove all unwanted unmatched tracker detections
350 | for t in range(raw_data["num_timesteps"]):
351 | # add gt to the data
352 | if cls == "all":
353 | gt_class_mask = np.ones_like(raw_data["gt_classes"][t]).astype(bool)
354 | else:
355 | gt_class_mask = np.atleast_1d(
356 | raw_data["gt_classes"][t] == cls_id
357 | ).astype(bool)
358 | data["gt_classes"][t] = cls_id
359 | data["gt_class_name"][t] = cls
360 |
361 | gt_ids = raw_data["gt_ids"][t][gt_class_mask]
362 | gt_dets = raw_data["gt_dets"][t][gt_class_mask]
363 | data["gt_ids"][t] = gt_ids
364 | data["gt_dets"][t] = gt_dets
365 |
366 | # filter pred and only keep those that highly overlap with GTs
367 | tk_mask = np.isin(
368 | raw_data["tk_ids"][t], np.array(loc_and_asso_tk_ids), assume_unique=True
369 | )
370 | tk_overlap_mask = np.isin(
371 | raw_data["tk_ids"][t],
372 | np.array(data["tk_overlap_ids"][t]),
373 | assume_unique=True,
374 | )
375 |
376 | tk_ids = raw_data["tk_ids"][t][tk_mask]
377 | tk_dets = raw_data["tk_dets"][t][tk_mask]
378 | tracker_classes = raw_data["tk_classes"][t][tk_mask]
379 |
380 | # add overlap classes for computing the FP for Cls term
381 | tracker_overlap_classes = raw_data["tk_classes"][t][tk_overlap_mask]
382 | tracker_confidences = raw_data["tk_confidences"][t][tk_mask]
383 | sim_scores_masked = sim_scores[t][gt_class_mask, :][:, tk_mask]
384 |
385 | # add filtered prediction to the data
386 | data["tk_classes"][t] = tracker_classes
387 | data["tk_overlap_classes"][t] = tracker_overlap_classes
388 | data["tk_ids"][t] = tk_ids
389 | data["tk_dets"][t] = tk_dets
390 | data["tk_confidences"][t] = tracker_confidences
391 | data["sim_scores"][t] = sim_scores_masked
392 | data["tk_class_eval_tk_ids"][t] = set(
393 | list(data["tk_overlap_ids"][t])
394 | + list(data["tk_neg_ids"][t])
395 | + list(data["tk_exh_ids"][t])
396 | )
397 |
398 | # count total number of detections
399 | unique_gt_ids += list(np.unique(data["gt_ids"][t]))
400 | # the unique track ids are for association.
401 | unique_tk_ids += list(np.unique(data["tk_ids"][t]))
402 |
403 | num_tk_overlap_dets += len(data["tk_overlap_ids"][t])
404 | num_tk_cls_dets += len(data["tk_class_eval_tk_ids"][t])
405 | num_gt_dets += len(data["gt_ids"][t])
406 |
407 | # re-label IDs such that there are no empty IDs
408 | if len(unique_gt_ids) > 0:
409 | unique_gt_ids = np.unique(unique_gt_ids)
410 | gt_id_map = np.nan * np.ones((np.max(unique_gt_ids) + 1))
411 | gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
412 | data["gt_id_map"] = {}
413 | for gt_id in unique_gt_ids:
414 | new_gt_id = gt_id_map[gt_id].astype(int)
415 | data["gt_id_map"][new_gt_id] = gt_id
416 |
417 | for t in range(raw_data["num_timesteps"]):
418 | if len(data["gt_ids"][t]) > 0:
419 | data["gt_ids"][t] = gt_id_map[data["gt_ids"][t]].astype(int)
420 |
421 | if len(unique_tk_ids) > 0:
422 | unique_tk_ids = np.unique(unique_tk_ids)
423 | tk_id_map = np.nan * np.ones((np.max(unique_tk_ids) + 1))
424 | tk_id_map[unique_tk_ids] = np.arange(len(unique_tk_ids))
425 |
426 | data["tk_id_map"] = {}
427 | for track_id in unique_tk_ids:
428 | new_track_id = tk_id_map[track_id].astype(int)
429 | data["tk_id_map"][new_track_id] = track_id
430 |
431 | for t in range(raw_data["num_timesteps"]):
432 | if len(data["tk_ids"][t]) > 0:
433 | data["tk_ids"][t] = tk_id_map[data["tk_ids"][t]].astype(int)
434 | if len(data["tk_overlap_ids"][t]) > 0:
435 | data["tk_overlap_ids"][t] = tk_id_map[
436 | data["tk_overlap_ids"][t]
437 | ].astype(int)
438 |
439 | # record overview statistics.
440 | data["num_tk_cls_dets"] = num_tk_cls_dets
441 | data["num_tk_overlap_dets"] = num_tk_overlap_dets
442 | data["num_gt_dets"] = num_gt_dets
443 | data["num_tk_ids"] = len(unique_tk_ids)
444 | data["num_gt_ids"] = len(unique_gt_ids)
445 | data["num_timesteps"] = raw_data["num_timesteps"]
446 | data["seq"] = raw_data["seq"]
447 |
448 | self._check_unique_ids(data)
449 |
450 | return data
451 |
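    # Illustrative example (assumed ids) of the re-labeling step above: raw
    # GT ids [3, 7, 42] become contiguous indices, and data["gt_id_map"]
    # keeps the inverse mapping so original ids can be recovered later.
    #
    #   unique_gt_ids == np.array([3, 7, 42])
    #   gt_id_map[3], gt_id_map[7], gt_id_map[42] -> 0.0, 1.0, 2.0
    #   data["gt_id_map"] == {0: 3, 1: 7, 2: 42}  # new id -> original id
    #
    # Downstream metrics can then use the ids directly as dense array indices.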
452 | @_timing.time
453 | def get_preprocessed_seq_data(
454 |         self, raw_data, cls, assignment=None, thresholds=(50, 75)
455 | ):
456 | """Preprocess data for a single sequence for a single class."""
457 | data = {}
458 | if thresholds is None:
459 | thresholds = [50]
460 | elif isinstance(thresholds, int):
461 | thresholds = [thresholds]
462 |
463 | for thr in thresholds:
464 | assignment_thr = None
465 | if assignment is not None:
466 | assignment_thr = assignment[thr]
467 | data[thr] = self.get_preprocessed_seq_data_thr(
468 | raw_data, cls, assignment_thr
469 | )
470 |
471 | return data
472 |
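    # Illustrative usage (hypothetical variable names): preprocess one
    # sequence and read out the per-threshold splits; the returned dict is
    # keyed by the integer thresholds passed in.
    #
    #   raw = dataset.get_raw_seq_data(tracker, seq)
    #   data = dataset.get_preprocessed_seq_data(raw, cls="car")
    #   data_50, data_75 = data[50], data[75]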
473 | def _calculate_similarities(self, gt_dets_t, tk_dets_t):
474 | """Compute similarity scores."""
475 | sim_scores = self._calculate_box_ious(gt_dets_t, tk_dets_t)
476 | return sim_scores
477 |
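    # Minimal sketch (illustrative, not the library implementation) of a
    # pairwise box IoU in the spirit of _calculate_box_ious, assuming
    # TAO-style [x, y, w, h] boxes; the real helper is inherited from the
    # base dataset class.
    #
    #   import numpy as np
    #
    #   def toy_box_iou(gts, dts):
    #       """IoU matrix between (M, 4) gt boxes and (N, 4) pred boxes."""
    #       # convert [x, y, w, h] -> [x1, y1, x2, y2]
    #       g = np.concatenate([gts[:, :2], gts[:, :2] + gts[:, 2:]], axis=1)
    #       d = np.concatenate([dts[:, :2], dts[:, :2] + dts[:, 2:]], axis=1)
    #       # pairwise intersection corners and area
    #       lt = np.maximum(g[:, None, :2], d[None, :, :2])
    #       rb = np.minimum(g[:, None, 2:], d[None, :, 2:])
    #       inter = np.prod(np.clip(rb - lt, 0, None), axis=2)
    #       union = (np.prod(gts[:, 2:], axis=1)[:, None]
    #                + np.prod(dts[:, 2:], axis=1)[None, :] - inter)
    #       return inter / np.maximum(union, 1e-10)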
478 | def _merge_categories(self, annotations):
479 | """Merges categories with a merged tag.
480 |
481 | Adapted from https://github.com/TAO-Dataset.
482 | """
483 | merge_map = {}
484 | for category in self.gt_data["categories"]:
485 | if "merged" in category:
486 | for to_merge in category["merged"]:
487 | merge_map[to_merge["id"]] = category["id"]
488 |
489 | for ann in annotations:
490 | ann["category_id"] = merge_map.get(ann["category_id"], ann["category_id"])
491 |
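    # Illustrative example (made-up category entries): a category carrying a
    # "merged" list redirects those ids onto its own id, so
    #
    #   categories = [{"id": 1, "name": "car",
    #                  "merged": [{"id": 7, "name": "sedan"}]}]
    #
    # yields merge_map == {7: 1}; an annotation with category_id == 7 is
    # rewritten to 1, and ids without an entry are left untouched.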
492 | def _compute_vid_mappings(self, annotations):
493 | """Computes mappings from videos to corresponding tracks and images."""
494 | vids_to_tracks = {}
495 | vids_to_imgs = {}
496 | vid_ids = [vid["id"] for vid in self.gt_data["videos"]]
497 |
498 |         # compute a mapping from image IDs to images
499 | images = {}
500 | for image in self.gt_data["images"]:
501 | images[image["id"]] = image
502 |
503 | for ann in annotations:
504 | ann["area"] = ann["bbox"][2] * ann["bbox"][3]
505 |
506 | vid = ann["video_id"]
507 |             if vid not in vids_to_tracks:
508 |                 vids_to_tracks[vid] = list()
509 |             if vid not in vids_to_imgs:
510 |                 vids_to_imgs[vid] = list()
511 |
512 | # fill in vids_to_tracks
513 | tid = ann["track_id"]
514 | exist_tids = [track["id"] for track in vids_to_tracks[vid]]
515 | try:
516 | index1 = exist_tids.index(tid)
517 | except ValueError:
518 | index1 = -1
519 |             if index1 == -1:
520 | curr_track = {
521 | "id": tid,
522 | "category_id": ann["category_id"],
523 | "video_id": vid,
524 | "annotations": [ann],
525 | }
526 | vids_to_tracks[vid].append(curr_track)
527 | else:
528 | vids_to_tracks[vid][index1]["annotations"].append(ann)
529 |
530 | # fill in vids_to_imgs
531 | img_id = ann["image_id"]
532 | exist_img_ids = [img["id"] for img in vids_to_imgs[vid]]
533 | try:
534 | index2 = exist_img_ids.index(img_id)
535 | except ValueError:
536 | index2 = -1
537 | if index2 == -1:
538 | curr_img = {"id": img_id, "annotations": [ann]}
539 | vids_to_imgs[vid].append(curr_img)
540 | else:
541 | vids_to_imgs[vid][index2]["annotations"].append(ann)
542 |
543 | # sort annotations by frame index and compute track area
544 | for vid, tracks in vids_to_tracks.items():
545 | for track in tracks:
546 | track["annotations"] = sorted(
547 | track["annotations"],
548 | key=lambda x: images[x["image_id"]]["frame_index"],
549 | )
550 | # compute average area
551 | track["area"] = sum(x["area"] for x in track["annotations"]) / len(
552 | track["annotations"]
553 | )
554 |
555 | # ensure all videos are present
556 | for vid_id in vid_ids:
557 |             if vid_id not in vids_to_tracks:
558 |                 vids_to_tracks[vid_id] = []
559 |             if vid_id not in vids_to_imgs:
560 |                 vids_to_imgs[vid_id] = []
561 |
562 | return vids_to_tracks, vids_to_imgs
563 |
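    # Illustrative output (toy data): one video (id 1) with a single track
    # (id 5) spanning two images (ids 10, 11) produces
    #
    #   vids_to_tracks == {1: [{"id": 5, "category_id": 3, "video_id": 1,
    #                           "annotations": [ann_f0, ann_f1],
    #                           "area": <mean of the two box areas>}]}
    #   vids_to_imgs   == {1: [{"id": 10, "annotations": [ann_f0]},
    #                          {"id": 11, "annotations": [ann_f1]}]}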
564 | def _compute_image_to_timestep_mappings(self):
565 | """Computes a mapping from images to timestep in sequence."""
566 | images = {}
567 | for image in self.gt_data["images"]:
568 | images[image["id"]] = image
569 |
570 | seq_to_imgs_to_timestep = {vid["id"]: dict() for vid in self.gt_data["videos"]}
571 | for vid in seq_to_imgs_to_timestep:
572 | curr_imgs = [img["id"] for img in self.video2gt_image[vid]]
573 | curr_imgs = sorted(curr_imgs, key=lambda x: images[x]["frame_index"])
574 | seq_to_imgs_to_timestep[vid] = {
575 |                 img_id: i for i, img_id in enumerate(curr_imgs)
576 | }
577 |
578 | return seq_to_imgs_to_timestep
579 |
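    # Illustrative example: ordering follows "frame_index", not image id.
    # With images {"id": 12, "frame_index": 0} and {"id": 10, "frame_index": 1}
    # in one video, the resulting mapping is {12: 0, 10: 1}.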
580 | def _limit_dets_per_image(self, annotations):
581 | """Limits the number of detections for each image.
582 |
583 | Adapted from https://github.com/TAO-Dataset/.
584 | """
585 | max_dets = self.config["MAX_DETECTIONS"]
586 | img_ann = defaultdict(list)
587 | for ann in annotations:
588 | img_ann[ann["image_id"]].append(ann)
589 |
590 | for img_id, _anns in img_ann.items():
591 | if len(_anns) <= max_dets:
592 | continue
593 | _anns = sorted(_anns, key=lambda x: x["score"], reverse=True)
594 | img_ann[img_id] = _anns[:max_dets]
595 |
596 | return [ann for anns in img_ann.values() for ann in anns]
597 |
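    # Illustrative example with MAX_DETECTIONS == 2 and made-up scores: three
    # detections on one image with scores [0.9, 0.2, 0.7] are sorted by score
    # descending and only the 0.9 and 0.7 detections are kept.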
598 | def _fill_video_ids_inplace(self, annotations):
599 | """Fills in missing video IDs inplace.
600 |
601 | Adapted from https://github.com/TAO-Dataset/.
602 | """
603 | missing_video_id = [x for x in annotations if "video_id" not in x]
604 | if missing_video_id:
605 | image_id_to_video_id = {
606 | x["id"]: x["video_id"] for x in self.gt_data["images"]
607 | }
608 | for x in missing_video_id:
609 | x["video_id"] = image_id_to_video_id[x["image_id"]]
610 |
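    # Illustrative example: an annotation {"image_id": 10, ...} lacking a
    # "video_id" gets it copied from the ground-truth image record with
    # id 10, so later video-based grouping never hits a missing key.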
611 | @staticmethod
612 | def _make_tk_ids_unique(annotations):
613 |         """Makes track IDs unique over the whole annotation set.
614 |
615 | Adapted from https://github.com/TAO-Dataset/.
616 | """
617 | track_id_videos = {}
618 | track_ids_to_update = set()
619 | max_track_id = 0
620 | for ann in annotations:
621 | t = ann["track_id"]
622 | if t not in track_id_videos:
623 | track_id_videos[t] = ann["video_id"]
624 |
625 | if ann["video_id"] != track_id_videos[t]:
626 | # track id is assigned to multiple videos
627 | track_ids_to_update.add(t)
628 | max_track_id = max(max_track_id, t)
629 |
630 | if track_ids_to_update:
631 |             print("Making track IDs unique over the evaluation set.")
632 | next_id = itertools.count(max_track_id + 1)
633 | new_tk_ids = defaultdict(lambda: next(next_id))
634 | for ann in annotations:
635 | t = ann["track_id"]
636 | v = ann["video_id"]
637 | if t in track_ids_to_update:
638 | ann["track_id"] = new_tk_ids[t, v]
639 | return len(track_ids_to_update)
640 |
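    # Illustrative example (toy ids): track id 3 appears in videos 1 and 2,
    # so it lands in track_ids_to_update and next_id counts from 4. Every
    # (track_id, video_id) pair then gets its own fresh id, e.g.
    #
    #   annotations == [{"track_id": 3, "video_id": 1},
    #                   {"track_id": 3, "video_id": 2}]
    #   after the call: pair (3, 1) -> 4 and pair (3, 2) -> 5,
    #
    # which keeps track ids unique across the whole evaluation set.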
--------------------------------------------------------------------------------