├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── MANIFEST.in
├── README.md
├── data
└── aliases
│ ├── analysis.json
│ ├── downloader.json
│ ├── experiment.json
│ ├── indexer.json
│ ├── loader.json
│ ├── report.json
│ └── transformer.json
├── docs
├── Makefile
├── api.rst
├── api
│ ├── analysis.rst
│ ├── dataset.rst
│ ├── experiment.rst
│ ├── region.rst
│ ├── report.rst
│ ├── stack.rst
│ ├── tracker.rst
│ └── utilities.rst
├── conf.py
├── index.rst
├── overview.rst
├── requirements.txt
├── tutorials.rst
└── tutorials
│ ├── cli.rst
│ ├── dataset.rst
│ ├── integration.rst
│ ├── jupyter.rst
│ ├── report.rst
│ ├── stack.rst
│ └── workspace.rst
├── requirements.txt
├── setup.py
└── vot
├── __init__.py
├── __main__.py
├── analysis
├── __init__.py
├── accuracy.py
├── failures.py
├── longterm.py
├── multistart.py
├── processor.py
├── supervised.py
└── tests.py
├── dataset
├── __init__.py
├── common.py
├── cow.png
├── dummy.py
├── got10k.py
├── otb.py
├── proxy.py
└── trackingnet.py
├── experiment
├── __init__.py
├── helpers.py
├── multirun.py
├── multistart.py
└── transformer.py
├── region
├── __init__.py
├── io.py
├── raster.py
├── shapes.py
└── tests.py
├── report
├── __init__.py
├── commands.tex
├── common.py
├── html.py
├── jquery.js
├── latex.py
├── pure.css
├── report.css
├── report.js
├── table.js
├── tests.py
└── video.py
├── stack
├── __init__.py
├── otb100.yaml
├── otb50.yaml
├── tests.py
├── tests
│ ├── basic.yaml
│ ├── multiobject.yaml
│ └── segmentation.yaml
├── vot2013.yaml
├── vot2014.yaml
├── vot2015
│ ├── rgb.yaml
│ └── tir.yaml
├── vot2016
│ ├── rgb.yaml
│ └── tir.yaml
├── vot2017.yaml
├── vot2018
│ ├── longterm.yaml
│ └── shortterm.yaml
├── vot2019
│ ├── longterm.yaml
│ ├── rgbd.yaml
│ ├── rgbtir.yaml
│ └── shortterm.yaml
├── vot2020
│ ├── longterm.yaml
│ ├── rgbd.yaml
│ ├── rgbtir.yaml
│ └── shortterm.yaml
├── vot2021
│ ├── longterm.yaml
│ ├── rgbd.yaml
│ └── shortterm.yaml
├── vot2022
│ ├── depth.yaml
│ ├── longterm.yaml
│ ├── rgbd.yaml
│ ├── shortterm.yaml
│ └── shorttermbox.yaml
├── vots2023.yaml
├── vots2024
│ ├── main.yaml
│ ├── votst.yaml
│ └── votstval.yaml
└── vots2025
│ ├── main.yaml
│ ├── realtime.yaml
│ └── votst.yaml
├── tracker
├── __init__.py
├── dummy.py
├── results.py
├── tests.py
└── trax.py
├── utilities
├── __init__.py
├── cli.py
├── data.py
├── draw.py
├── io.py
├── migration.py
├── net.py
└── notebook.py
├── version.py
└── workspace
├── __init__.py
├── storage.py
└── tests.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode
2 | __pycache__
3 | venv/
4 | .idea/
5 | MANIFEST
6 | *.pyc
7 | dist/
8 | *.egg-info
9 | _build
10 | build
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | # Set the version of Python and other tools you might need
9 | build:
10 | os: ubuntu-22.04
11 | tools:
12 | python: "3.11"
13 |
14 | # Build documentation in the docs/ directory with Sphinx
15 | sphinx:
16 | configuration: docs/conf.py
17 |
18 | # We recommend specifying your dependencies to enable reproducible builds:
19 | # https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
20 | python:
21 | install:
22 | - requirements: docs/requirements.txt
23 | - requirements: requirements.txt
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include requirements.txt
2 | include README.md
3 | recursive-include vot/stack/ *.yaml
4 | include vot/dataset/*.png
5 | include vot/dataset/*.jpg
6 | include vot/report/*.css
7 | include vot/report/*.js
8 | include vot/report/*.tex
9 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | The VOT evaluation toolkit
3 | ==========================
4 |
5 | [![Documentation Status](https://readthedocs.org/projects/vot-toolkit/badge/?version=latest)](https://vot-toolkit.readthedocs.io/en/latest/?badge=latest)
6 | [![PyPI version](https://badge.fury.io/py/vot-toolkit.svg)](https://badge.fury.io/py/vot-toolkit)
7 |
8 | This repository contains the official evaluation toolkit for the [Visual Object Tracking (VOT) challenge](http://votchallenge.net/). This is the official version of the toolkit, implemented in Python 3 language. If you are looking for the old Matlab version, you can find an archived repository [here](https://github.com/votchallenge/toolkit-legacy).
9 |
10 | For more detailed information consult the documentation available [here](http://vot-toolkit.readthedocs.io/). You can also subscribe to the VOT [mailing list](https://liste.arnes.si/mailman3/lists/votchallenge.lists.arnes.si/) to receive news about challenges and important software updates or join our [support forum](https://groups.google.com/forum/?hl=en#!forum/votchallenge-help) to ask questions.
11 |
12 | Developers
13 | ----------
14 |
15 | The VOT toolkit is developed and maintained by [Luka Čehovin Zajc](https://vicos.si/lukacu) with the help of the VOT initiative members and the VOT community.
16 |
17 | Contributors:
18 |
19 | * [Luka Čehovin Zajc](https://vicos.si/lukacu), University of Ljubljana
20 | * [Alan Lukežič](https://vicos.si/people/alan_lukezic/), University of Ljubljana
21 | * Yan Song, Tampere University
22 |
23 | Acknowledgements
24 | ----------------
25 |
26 | The development of this package was supported by Slovenian research agency (ARRS) projects Z2-1866 and J2-316.
27 |
28 | License
29 | -------
30 |
31 | Copyright (C) 2024 Luka Čehovin Zajc and the [VOT Challenge initiative](http://votchallenge.net/).
32 |
33 | This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
34 |
35 | This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
36 |
37 | You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
38 |
39 | Enquiries, Question and Comments
40 | --------------------------------
41 |
42 | If you have any further enquiries, question, or comments, please refer to the contact information link on the [VOT homepage](http://votchallenge.net/). If you would like to file a bug report or a feature request, use the [Github issue tracker](https://github.com/votchallenge/toolkit/issues). **The issue tracker is for toolkit issues only**, if you have a problem with tracker integration or any other questions, please use our [support forum](https://groups.google.com/forum/?hl=en#!forum/votchallenge-help).
43 |
--------------------------------------------------------------------------------
/data/aliases/analysis.json:
--------------------------------------------------------------------------------
1 | {
2 | "accuracy": "vot.analysis.accuracy.SequenceAccuracy",
3 | "average_accuracy": "vot.analysis.accuracy.AverageAccuracy",
4 | "success_plot": "vot.analysis.accuracy.SuccessPlot",
5 | "average_success_plot": "vot.analysis.accuracy.AverageSuccessPlot",
6 |
7 | "failures": "vot.analysis.failures.FailureCount",
8 | "cumulative_failures": "vot.analysis.failures.CumulativeFailureCount",
9 |
10 | "pr_curves": "vot.analysis.longterm.PrecisionRecallCurves",
11 | "pr_curve": "vot.analysis.longterm.PrecisionRecallCurve",
12 | "f_curve": "vot.analysis.longterm.FScoreCurve",
13 | "average_tpr": "vot.analysis.longterm.PrecisionRecall",
14 | "quality_auxiliary": "vot.analysis.longterm.QualityAuxiliary",
15 | "average_quality_auxiliary": "vot.analysis.longterm.AverageQualityAuxiliary",
16 | "longterm_ar": "vot.analysis.longterm.AccuracyRobustness",
17 |
18 | "multistart_ar": "vot.analysis.multistart.AccuracyRobustness",
19 | "multistart_average_ar": "vot.analysis.multistart.AverageAccuracyRobustness",
20 | "multistart_fragments": "vot.analysis.multistart.MultiStartFragments",
21 | "multistart_eao_curves": "vot.analysis.multistart.EAOCurves",
22 | "multistart_eao_curve": "vot.analysis.multistart.EAOCurve",
23 | "multistart_eao_score": "vot.analysis.multistart.EAOScore",
24 |
25 | "supervised_ar": "vot.analysis.supervised.AccuracyRobustness",
26 | "supervised_average_ar": "vot.analysis.supervised.AverageAccuracyRobustness",
27 | "supervised_eao_curve": "vot.analysis.supervised.EAOCurve",
28 | "supervised_eao_score": "vot.analysis.supervised.EAOScore"
29 | }
--------------------------------------------------------------------------------
/data/aliases/downloader.json:
--------------------------------------------------------------------------------
1 | {
2 |
3 | }
--------------------------------------------------------------------------------
/data/aliases/experiment.json:
--------------------------------------------------------------------------------
1 | {
2 | "unsupervised": "vot.experiment.multirun.UnsupervisedExperiment",
3 | "supervised": "vot.experiment.multirun.SupervisedExperiment",
4 |
5 | "multistart": "vot.experiment.multistart.MultiStartExperiment"
6 | }
--------------------------------------------------------------------------------
/data/aliases/indexer.json:
--------------------------------------------------------------------------------
1 | {
2 | "default": "vot.dataset.common.list_sequences",
3 | "tackingnet": "vot.dataset.trackingnet.list_sequences"
4 | }
--------------------------------------------------------------------------------
/data/aliases/loader.json:
--------------------------------------------------------------------------------
1 | {
2 | "default": "vot.dataset.common.read_sequence",
3 |
4 |
5 | "legacy": "vot.dataset.read_legacy_sequence"
6 | }
--------------------------------------------------------------------------------
/data/aliases/report.json:
--------------------------------------------------------------------------------
1 | {
2 | "table" : "vot.report.common.StackAnalysesTable",
3 | "plots" : "vot.report.common.StackAnalysesPlots"
4 | }
--------------------------------------------------------------------------------
/data/aliases/transformer.json:
--------------------------------------------------------------------------------
1 | {
2 | "singleobject" : "vot.experiment.transformer.SingleObject",
3 | "redetection": "vot.experiment.transformer.Redetection",
4 | "ignore": "vot.experiment.transformer.IgnoreObjects",
5 | "downsample": "vot.experiment.transformer.Downsample"
6 | }
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # Internal variables.
11 | PAPEROPT_a4 = -D latex_paper_size=a4
12 | PAPEROPT_letter = -D latex_paper_size=letter
13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
14 | # the i18n builder cannot share the environment and doctrees with the others
15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
16 |
17 | .PHONY: help
18 | help:
19 | @echo "Please use \`make ' where is one of"
20 | @echo " html to make standalone HTML files"
21 | @echo " dirhtml to make HTML files named index.html in directories"
22 | @echo " singlehtml to make a single large HTML file"
23 | @echo " pickle to make pickle files"
24 | @echo " json to make JSON files"
25 | @echo " htmlhelp to make HTML files and a HTML help project"
26 | @echo " qthelp to make HTML files and a qthelp project"
27 | @echo " applehelp to make an Apple Help Book"
28 | @echo " devhelp to make HTML files and a Devhelp project"
29 | @echo " epub to make an epub"
30 | @echo " epub3 to make an epub3"
31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
32 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
33 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
34 | @echo " text to make text files"
35 | @echo " man to make manual pages"
36 | @echo " texinfo to make Texinfo files"
37 | @echo " info to make Texinfo files and run them through makeinfo"
38 | @echo " gettext to make PO message catalogs"
39 | @echo " changes to make an overview of all changed/added/deprecated items"
40 | @echo " xml to make Docutils-native XML files"
41 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
42 | @echo " linkcheck to check all external links for integrity"
43 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
44 | @echo " coverage to run coverage check of the documentation (if enabled)"
45 | @echo " dummy to check syntax errors of document sources"
46 |
47 | .PHONY: clean
48 | clean:
49 | rm -rf $(BUILDDIR)/*
50 |
51 | .PHONY: html
52 | html:
53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
54 | @echo
55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
56 |
57 | .PHONY: dirhtml
58 | dirhtml:
59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
60 | @echo
61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
62 |
63 | .PHONY: singlehtml
64 | singlehtml:
65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
66 | @echo
67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
68 |
69 | .PHONY: pickle
70 | pickle:
71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
72 | @echo
73 | @echo "Build finished; now you can process the pickle files."
74 |
75 | .PHONY: json
76 | json:
77 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
78 | @echo
79 | @echo "Build finished; now you can process the JSON files."
80 |
81 | .PHONY: htmlhelp
82 | htmlhelp:
83 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
84 | @echo
85 | @echo "Build finished; now you can run HTML Help Workshop with the" \
86 | ".hhp project file in $(BUILDDIR)/htmlhelp."
87 |
88 | .PHONY: epub
89 | epub:
90 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
91 | @echo
92 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
93 |
94 | .PHONY: epub3
95 | epub3:
96 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3
97 | @echo
98 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."
99 |
100 | .PHONY: latex
101 | latex:
102 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
103 | @echo
104 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
105 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
106 | "(use \`make latexpdf' here to do that automatically)."
107 |
108 | .PHONY: latexpdf
109 | latexpdf:
110 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
111 | @echo "Running LaTeX files through pdflatex..."
112 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
113 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
114 |
115 | .PHONY: latexpdfja
116 | latexpdfja:
117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
118 | @echo "Running LaTeX files through platex and dvipdfmx..."
119 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
120 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
121 |
122 | .PHONY: text
123 | text:
124 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
125 | @echo
126 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
127 |
128 | .PHONY: man
129 | man:
130 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
131 | @echo
132 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
133 |
134 | .PHONY: texinfo
135 | texinfo:
136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
137 | @echo
138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
139 | @echo "Run \`make' in that directory to run these through makeinfo" \
140 | "(use \`make info' here to do that automatically)."
141 |
142 | .PHONY: info
143 | info:
144 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
145 | @echo "Running Texinfo files through makeinfo..."
146 | make -C $(BUILDDIR)/texinfo info
147 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
148 |
149 | .PHONY: gettext
150 | gettext:
151 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
152 | @echo
153 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
154 |
155 | .PHONY: changes
156 | changes:
157 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
158 | @echo
159 | @echo "The overview file is in $(BUILDDIR)/changes."
160 |
161 | .PHONY: linkcheck
162 | linkcheck:
163 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
164 | @echo
165 | @echo "Link check complete; look for any errors in the above output " \
166 | "or in $(BUILDDIR)/linkcheck/output.txt."
167 |
168 | .PHONY: doctest
169 | doctest:
170 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
171 | @echo "Testing of doctests in the sources finished, look at the " \
172 | "results in $(BUILDDIR)/doctest/output.txt."
173 |
174 | .PHONY: coverage
175 | coverage:
176 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
177 | @echo "Testing of coverage in the sources finished, look at the " \
178 | "results in $(BUILDDIR)/coverage/python.txt."
179 |
180 | .PHONY: xml
181 | xml:
182 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
183 | @echo
184 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
185 |
186 | .PHONY: pseudoxml
187 | pseudoxml:
188 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
189 | @echo
190 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
191 |
192 | .PHONY: dummy
193 | dummy:
194 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy
195 | @echo
196 | @echo "Build finished. Dummy builder generates no files."
197 |
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
1 | Documentation
2 | =============
3 |
4 | The API section contains the generated documentation of individual structures and functions from source code docstrings.
5 |
6 | .. toctree::
7 | :maxdepth: 2
8 |
9 | api/analysis
10 | api/dataset
11 | api/report
12 | api/experiment
13 | api/region
14 | api/stack
15 | api/tracker
16 | api/utilities
17 |
18 | Configuration
19 | -------------
20 |
21 | .. automodule:: vot
22 | :members:
--------------------------------------------------------------------------------
/docs/api/analysis.rst:
--------------------------------------------------------------------------------
1 | Analysis module
2 | ===============
3 |
4 | The analysis module contains classes that implement various performance analysis methodologies. It also contains a parallel runtime with caching capabilities that
5 | enables efficient execution of large-scale evaluations.
6 |
7 | .. automodule:: vot.analysis
8 | :members:
9 |
10 | .. automodule:: vot.analysis.processor
11 | :members:
12 |
13 | Accuracy analysis
14 | -----------------
15 |
16 | .. automodule:: vot.analysis.accuracy
17 | :members:
18 |
19 | Failure analysis
20 | ----------------
21 |
22 | .. automodule:: vot.analysis.failures
23 | :members:
24 |
25 | Long-term measures
26 | ------------------
27 |
28 | .. automodule:: vot.analysis.longterm
29 | :members:
30 |
31 | Multi-start measures
32 | --------------------
33 |
34 | .. automodule:: vot.analysis.multistart
35 | :members:
36 |
37 | Supervision analysis
38 | --------------------
39 |
40 | .. automodule:: vot.analysis.supervised
41 | :members:
--------------------------------------------------------------------------------
/docs/api/dataset.rst:
--------------------------------------------------------------------------------
1 | Dataset module
2 | ==============
3 |
4 | .. automodule:: vot.dataset
5 | :members:
6 |
7 | .. automodule:: vot.dataset.common
8 | :members:
9 |
10 | Extended dataset support
11 | ------------------------
12 |
13 | Many datasets are supported by the toolkit using special adapters.
14 |
15 | OTB
16 | ~~~
17 | .. automodule:: vot.dataset.otb
18 | :members:
19 |
20 | GOT10k
21 | ~~~~~~
22 | .. automodule:: vot.dataset.got10k
23 | :members:
24 |
25 | TrackingNet
26 | ~~~~~~~~~~~
27 | .. automodule:: vot.dataset.trackingnet
28 | :members:
29 |
--------------------------------------------------------------------------------
/docs/api/experiment.rst:
--------------------------------------------------------------------------------
1 | Experiment module
2 | =================
3 |
4 | .. automodule:: vot.experiment
5 | :members:
--------------------------------------------------------------------------------
/docs/api/region.rst:
--------------------------------------------------------------------------------
1 | Region module
2 | =============
3 |
4 | .. automodule:: vot.region
5 | :members:
6 |
7 | Shapes
8 | ------
9 |
10 | .. automodule:: vot.region.shapes
11 | :members:
12 |
13 | Raster utilities
14 | ----------------
15 |
16 | .. automodule:: vot.region.raster
17 | :members:
18 |
19 | IO functions
20 | ------------
21 |
22 | .. automodule:: vot.region.io
23 | :members:
--------------------------------------------------------------------------------
/docs/api/report.rst:
--------------------------------------------------------------------------------
1 | Report module
2 | =============
3 |
4 | .. automodule:: vot.report
5 | :members:
6 |
7 | .. automodule:: vot.report.common
8 | :members:
9 |
10 | HTML report generation
11 | ----------------------
12 |
13 | .. automodule:: vot.report.html
14 | :members:
15 |
16 | LaTeX report generation
17 | -----------------------
18 |
19 | .. automodule:: vot.report.latex
20 | :members:
21 |
22 |
--------------------------------------------------------------------------------
/docs/api/stack.rst:
--------------------------------------------------------------------------------
1 | Stack module
2 | ============
3 |
4 | .. automodule:: vot.stack
5 | :members:
--------------------------------------------------------------------------------
/docs/api/tracker.rst:
--------------------------------------------------------------------------------
1 | Tracker module
2 | ==============
3 |
4 | .. automodule:: vot.tracker
5 | :members:
6 |
7 | TraX protocol module
8 | --------------------
9 |
10 | .. automodule:: vot.tracker.trax
11 | :members:
12 |
13 | Results module
14 | --------------
15 |
16 | .. automodule:: vot.tracker.results
17 | :members:
18 |
19 |
--------------------------------------------------------------------------------
/docs/api/utilities.rst:
--------------------------------------------------------------------------------
1 | Utilities module
2 | ================
3 |
4 | .. automodule:: vot.utilities
5 | :members:
6 |
7 | CLI
8 | ---
9 |
10 | .. automodule:: vot.utilities.cli
11 | :members:
12 |
13 | Data
14 | ----
15 |
16 | .. automodule:: vot.utilities.data
17 | :members:
18 |
19 | Drawing
20 | -------
21 |
22 | .. automodule:: vot.utilities.draw
23 | :members:
24 |
25 | Input/Output
26 | ------------
27 |
28 | .. automodule:: vot.utilities.io
29 | :members:
30 |
31 |
32 | Migration
33 | ---------
34 | .. automodule:: vot.utilities.migration
35 | :members:
36 |
37 | Network
38 | --------
39 |
40 | .. automodule:: vot.utilities.net
41 | :members:
42 |
43 | Notebook
44 | --------
45 |
46 | .. automodule:: vot.utilities.notebook
47 | :members:
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
#
# Sphinx configuration for the VOT toolkit documentation.
import os
import sys

# Make the repository root importable so autodoc can resolve `vot.*` modules.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

from recommonmark.parser import CommonMarkParser

# Parse Markdown sources (e.g. the project README) with recommonmark.
source_parsers = {
    '.md': CommonMarkParser,
}

source_suffix = ['.rst', '.md']

master_doc = 'index'

# General information about the project.
project = u'VOT Toolkit'
copyright = u'2024, Luka Cehovin Zajc'
author = u'Luka Cehovin Zajc'

# Read the toolkit version from vot/version.py without importing the package
# itself (importing `vot` could fail on the documentation build host when its
# runtime dependencies are not installed). Fall back to a placeholder if the
# file is missing or unreadable.
try:
    __version__ = "0.0.0"

    with open(os.path.join(os.path.dirname(__file__), '..', 'vot', 'version.py')) as handle:
        exec(handle.read())

    version = __version__
except Exception:
    version = 'unknown'

# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

html_static_path = ['_static']

htmlhelp_basename = 'vottoolkitdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_documents = [
    (master_doc, 'vot-toolkit.tex', u'VOT Toolkit Documentation',
     u'Luka Cehovin Zajc', 'manual'),
]

man_pages = [
    (master_doc, 'vot-toolkit', u'VOT Toolkit Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#
# man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'VOT Toolkit', u'VOT Toolkit Documentation',
     author, 'VOT Toolkit', 'The official VOT Challenge evaluation toolkit',
     'Miscellaneous'),
]
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to the VOT Toolkit documentation
2 | ========================================
3 |
4 | The VOT toolkit is the official evaluation tool for the `Visual Object Tracking (VOT) challenge <http://votchallenge.net/>`_.
5 | It is written in Python 3 language. The toolkit is designed to be easy to use and to have broad support for various trackers,
6 | datasets and evaluation measures.
7 |
8 | Contributions and development
9 | -----------------------------
10 |
11 | The VOT toolkit is developed by the VOT Committee, primarily by `Luka Čehovin Zajc <https://vicos.si/lukacu>`_ and the tracking community as an open-source project (GPLv3 license).
12 |
13 | Contributions to the VOT toolkit are welcome, the preferred way to do it is by submitting an issue or a pull request on `GitHub <https://github.com/votchallenge/toolkit>`_.
14 |
15 | Index
16 | -----
17 |
18 | .. toctree::
19 | :maxdepth: 1
20 |
21 | overview
22 | tutorials
23 | api
24 |
25 |
--------------------------------------------------------------------------------
/docs/overview.rst:
--------------------------------------------------------------------------------
1 | Overview
2 | ========
3 |
4 | The toolkit is organized as a modular collection of classes and methods with several modules that address different aspects of the performance evaluation problem.
5 |
6 | Key concepts
7 | ------------
8 |
9 | Key concepts that are used throughout the toolkit are:
10 |
11 | * **Dataset** - a collection of sequences that is used for performance evaluation. A dataset is a collection of **sequences**.
12 | * **Sequence** - a sequence of frames with corresponding ground truth annotations for one or more objects. A sequence is a collection of **frames**.
13 | * **Tracker** - a tracker is an algorithm that takes frames from a sequence as input (one by one) and produces a set of **trajectories** as output.
14 | * **Experiment** - an experiment is a method that applies a tracker to a given sequence in a specific way.
15 | * **Analysis** - an analysis is a set of **measures** that are used to evaluate the performance of a tracker (compare predicted trajectories to groundtruth).
16 | * **Stack** - a stack is a collection of **experiments** and **analyses** that are performed on a given dataset.
17 | * **Workspace** - a workspace is a collection of experiments and analyses that are performed on a given dataset.
18 | * **Report** - a report is a representation of a list of analyses for a given experiment stack.
19 |
20 | Tracker support
21 | ---------------
22 |
23 | The toolkit supports various ways of interacting with tracking methods. The primary manner (and the only one supported at the moment) is using the TraX protocol.
24 | The toolkit provides a wrapper for the TraX protocol that allows to use any tracker that supports the protocol. Other ways of interacting with a tracker can be added in the future.
25 |
26 | Dataset support
27 | ---------------
28 |
29 | The toolkit is capable of using any dataset that is provided in the official format or by registering custom loaders.
30 | The toolkit format is a simple directory structure that contains a set of sequences. Each sequence is a directory that contains a set of frames and a groundtruth file.
31 | The groundtruth file is a text file that contains one line per frame. Each line contains the bounding box of the object in the frame in the format `x,y,w,h`. The toolkit format is used by the toolkit itself and by the VOT challenges.
32 |
33 | Performance methodology support
34 | -------------------------------
35 |
36 | Various performance measures and visualizations are implemented, most of them were used in VOT challenges.
37 |
38 | * **Accuracy** - the accuracy measure is the overlap between the predicted and groundtruth bounding boxes. The overlap is measured using the intersection over union (IoU) measure.
39 | * **Robustness** - the robustness measure is the number of failures of the tracker. A failure is defined as the overlap between the predicted and groundtruth bounding boxes being less than a certain threshold.
40 | * **Expected Average Overlap** - the expected average overlap (EAO) is a measure that combines accuracy and robustness into a single measure. The EAO is computed as the area under the accuracy-robustness curve.
41 | * **Expected Overlap** - the expected overlap (EO) is a measure that combines accuracy and robustness into a single measure. The EO is computed as the area under the accuracy-robustness curve.
42 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx==5.3.0
2 | sphinx_rtd_theme==1.1.1
3 | readthedocs-sphinx-search==0.3.2
4 | recommonmark==0.7.1
--------------------------------------------------------------------------------
/docs/tutorials.rst:
--------------------------------------------------------------------------------
1 | Tutorials
2 | =========
3 |
4 | The main purpose of the toolkit is to facilitate tracker evaluation for VOT challenges and benchmarks. But there are many other
5 | ways that the toolkit can be used and extended. The following tutorials are provided to help you get started with the toolkit.
6 |
7 | .. toctree::
8 | :maxdepth: 1
9 |
10 | tutorials/cli
11 | tutorials/workspace
12 | tutorials/integration
13 | tutorials/evaluation
14 | tutorials/dataset
15 | tutorials/stack
16 | tutorials/report
17 | tutorials/jupyter
18 |
--------------------------------------------------------------------------------
/docs/tutorials/cli.rst:
--------------------------------------------------------------------------------
1 | Using the CLI
2 | ===============
3 |
4 | The CLI is a simple command line interface that allows you to interact with the toolkit for the most common tasks. It is a good starting point for new users. The CLI supports the following commands:
5 |
6 | - `initialize` - Initialize a new workspace
7 | - `test` - Run integration tests for a tracker
8 | - `evaluate` - Run the evaluation stack
9 | - `analyze` - Analyze the results
10 | - `pack` - Package results for submission to the evaluation server (used for competition submissions)
11 |
12 | To access the CLI, run `vot` or `python -m vot` in the terminal. The CLI supports the `--help` option to display help for each command.
13 |
14 | Workspace initialization
15 | ------------------------
16 |
17 | To initialize a new workspace, run the following command:
18 |
19 | .. code-block:: bash
20 | 
21 |     vot initialize
22 |
23 | This will create a new workspace in the specified directory.
24 |
25 | Integration tests
26 | -----------------
27 |
28 | To run integration tests for a tracker, run the following command:
29 |
30 | .. code-block:: bash
31 | 
32 |     vot test
--------------------------------------------------------------------------------
/docs/tutorials/dataset.rst:
--------------------------------------------------------------------------------
1 | Datasets
2 | ========
3 |
4 | The easiest way to use the VOT toolkit is by using one of the integrated stacks that also provides a dataset. Everything is provided, and you just select a stack.
5 | However, you can also use the toolkit with your own dataset. This document shows how to do this.
6 |
7 | Background
8 | ----------
9 |
10 | When loading an existing workspace, the toolkit will attempt to load anything that is available in the sequences subdirectory (by default this is `sequences`). The loading process is divided into two
11 | steps, first an indexer will scan the directory for sequences and return the list of available sequences. Then, the sequence loader will load the sequence metadata into the appropriate structure.
12 |
13 | This allows you some flexibility, you can organize your sequences in the format that the toolkit uses by default, or you can provide your own indexer and/or loader that will load the sequences from your custom format.
14 | The toolkit comes with several loaders integrated, e.g. it can load sequences in formats for OTB, GoT10k, LaSOT, and TrackingNet. You can also provide your own loader, if you have a custom format and do not want to change it.
15 |
16 | Default dataset format
17 | ----------------------
18 |
19 | The default dataset format is a directory with subdirectories for each sequence, accompanied by a `list.txt` file that contains the list of sequences.
20 |
21 | Each sequence directory contains the following units:
22 |
23 | - Metadata (`sequence`): A file with sequence metadata in INI (key-value) format. The metadata also defines which channels are available for the sequence.
24 | - Channels (usually `color`, `depth`, `ir`): Directories with images for each channel. The images are enumerated with a frame number and an extension that indicates the image format.
25 | - Annotations (either `groundtruth.txt` or `groundtruth_.json`): A file with ground truth annotations for the sequence.
26 | - Tags (denoted as `.tag`): Per-frame tags that can be used to specify the binary state of each frame.
27 | - Values (denoted as `.value`): Per-frame values that can be used to specify numeric values for each frame.
28 |
29 | The `list.txt` file contains the list of sequences in the dataset. Each line contains the name of a sequence directory.
30 |
31 | Preparing a dataset
32 | -------------------
33 |
34 | To use a custom dataset, you have to prepare an empty workspace directory that already contains the `sequences` subdirectory that will be recognized by one of the integrated loaders. Then initialize the workspace with the `vot initialize` command with a desired stack of experiments.
35 | Since the sequences are present, the command will not attempt to download them. If you would prefer to specify your own experiment stack, check out the tutorial on :doc:`creating a custom stack <stack>`.
36 |
37 |
38 | Creating a custom loader
39 | ------------------------
40 |
41 | As explained above, the toolkit uses an indexer and a loader to load sequences. Both are callable objects that accept a single string argument, the path to the directory. The indexer returns a list of sequence names, and the loader loads the Sequence object.
42 | These callables are registered using the `class-registry <https://class-registry.readthedocs.io/>`_ package, and can be added to the registry using the setuptools entry points mechanism.
43 |
44 | To create a custom indexer, you have to create a callable that returns a list of directories that contain sequences. An example of a custom indexer is shown below:
45 |
46 | .. code-block:: python
47 | 
48 |     def my_indexer(path):
49 |         return ['sequence1', 'sequence2', 'sequence3']
50 | 
--------------------------------------------------------------------------------
/docs/tutorials/integration.rst:
--------------------------------------------------------------------------------
1 | Tracker integration
2 | ===================
3 |
4 |
--------------------------------------------------------------------------------
/docs/tutorials/jupyter.rst:
--------------------------------------------------------------------------------
1 | Interactive analysis in Jupyter notebooks
2 | =========================================
3 |
4 |
--------------------------------------------------------------------------------
/docs/tutorials/report.rst:
--------------------------------------------------------------------------------
1 | Customizing reports
2 | ===================
--------------------------------------------------------------------------------
/docs/tutorials/stack.rst:
--------------------------------------------------------------------------------
1 | Creating a new experiment stack
2 | ===============================
3 |
4 | The experiment stack is a collection of experiments that are run on the dataset. It is described by a YAML file that may be provided in the toolkit installation or created by the user manually in the workspace.
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/docs/tutorials/workspace.rst:
--------------------------------------------------------------------------------
1 | Workspace details
2 | =================
3 |
4 | A workspace is the top-level container for all of your data, experiments, and artifacts. The CLI can only manipulate a limited subset of the workspace's properties, more can be achieved by looking at its structure and files.
5 |
6 | Structure
7 | ---------
8 |
9 | The workspace is a directory with the following structure:
10 |
11 | * sequences/ - a directory containing all the sequences in the workspace
12 | * results/ - a directory containing all the results in the workspace
13 | * cache/ - a directory containing all the cache in the workspace
14 | * analysis/ - a directory containing all the analysis in the workspace
15 |
16 | Configuration
17 | -------------
18 |
19 | The workspace has a configuration file called `config.yaml` that contains all the configuration for the workspace. This file is usually automatically generated by the CLI, but can also be edited manually or even created from scratch.
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | vot-trax>=4.0.1
2 | tqdm>=4.37
3 | numpy>=1.16
4 | opencv-python>=4.0
5 | six
6 | pylatex>=1.3
7 | jsonschema>=3.2
8 | pyYAML>=5.3
9 | matplotlib>=3.0
10 | Pillow>=7.0
11 | numba>=0.47
12 | requests>=2.22
13 | colorama>=0.4.3
14 | packaging>=20
15 | dominate>=2.5
16 | cachetools>=4.1
17 | bidict>=0.19
18 | phx-class-registry>=5.0
19 | attributee>=0.1.8
20 | lazy-object-proxy>=1.9
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Package setup for the vot-toolkit package.

Collects the long description from the README, the dependency list from
requirements.txt, the canonical version from vot/version.py, and registers
the alias tables under data/aliases as setuptools entry points.
"""

from os.path import join, dirname, abspath, isfile
from setuptools import find_packages, setup
import json

this_directory = abspath(dirname(__file__))

# The PyPI long description is taken verbatim from the README.
with open(join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

install_requires = []
if isfile(join(this_directory, "requirements.txt")):
    with open(join(this_directory, "requirements.txt"), encoding='utf-8') as f:
        # Strip whitespace and drop empty lines so stray newlines do not end
        # up as bogus requirement entries.
        install_requires = [line.strip() for line in f if line.strip()]

__version__ = "0.0.0"  # Placeholder; overwritten by the exec below.

# Read the canonical version from vot/version.py without importing the package
# (importing would require the dependencies to already be installed).
exec(open(join(dirname(__file__), 'vot', 'version.py')).read())

entrypoints = {
    'console_scripts': ['vot=vot.utilities.cli:main'],
}

# Expose the alias tables (data/aliases/<group>.json) as setuptools entry
# points in groups named "vot_<group>" so they can be discovered via the
# entry-point registry mechanism. Fix: "report" was missing from this list
# even though data/aliases/report.json exists.
for r in ["analysis", "downloader", "experiment", "indexer", "loader", "report", "transformer"]:
    registry = join(this_directory, "data", "aliases", r + ".json")
    if isfile(registry):
        try:
            with open(registry, encoding='utf-8') as f:
                data = json.load(f)

            entrypoints['vot_' + r] = ["%s=%s" % (k, ":".join(v.rsplit(".", 1))) for k, v in data.items()]
        except Exception:
            # Best effort: a malformed alias file should not break installation.
            pass

setup(name='vot-toolkit',
    version=__version__,
    description='Perform visual object tracking experiments and analyze results',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Luka Cehovin Zajc',
    author_email='luka.cehovin@gmail.com',
    url='https://github.com/votchallenge/toolkit',
    packages=find_packages(),
    install_requires=install_requires,
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: OS Independent",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
    ],
    python_requires='>=3.7',
    entry_points=entrypoints,
    extras_require = {
        'jupyter': ["ipywidgets", "jupyter", "itables"],
    },
)
59 |
60 |
--------------------------------------------------------------------------------
/vot/__init__.py:
--------------------------------------------------------------------------------
1 | """ Some basic functions and classes used by the toolkit. """
2 |
3 | import os
4 | import logging
5 |
6 | from .version import __version__
7 |
8 | from lazy_object_proxy import Proxy
9 |
class ToolkitException(Exception):
    """Common ancestor of all exceptions raised by the toolkit."""
14 |
15 |
def toolkit_version() -> str:
    """Return the version string of the installed toolkit.

    Returns:
        str: Version of the toolkit as declared in ``vot.version``.
    """
    return __version__
23 |
def check_updates() -> tuple:
    """Checks for toolkit updates on GitHub, requires internet access, fails silently on errors.

    Returns:
        tuple: A pair ``(available, version)`` where ``available`` is True if a
        newer version was found and ``version`` is the remote version string,
        or ``(False, None)`` when no update is available or the check failed.
        (The original annotation claimed ``bool``, but every path returns a
        two-element tuple.)
    """

    import re
    import packaging.version as packaging
    import requests

    # The remote version is scraped directly from the raw version.py file, so
    # no dedicated release-metadata endpoint is needed.
    pattern = r"^__version__ = ['\"]([^'\"]*)['\"]"

    version_url = "https://github.com/votchallenge/vot-toolkit-python/raw/master/vot/version.py"

    try:
        get_logger().debug("Checking for new version")
        response = requests.get(version_url, timeout=5, allow_redirects=True)
    except Exception as e:
        # The check is best-effort; network problems are only logged.
        get_logger().debug("Unable to retrieve version information %s", e)
        return False, None

    if not response:
        # A falsy Response object indicates a non-success HTTP status.
        return False, None

    groups = re.search(pattern, response.content.decode("utf-8"), re.M)
    if groups:
        remote_version = packaging.parse(groups.group(1))
        local_version = packaging.parse(__version__)

        return remote_version > local_version, groups.group(1)

    return False, None
57 |
58 | from attributee import Attributee, Integer, Boolean, List, String
59 |
class GlobalConfiguration(Attributee):
    """Global configuration of the toolkit.

    Every attribute can be overridden through an environment variable named
    ``VOT_<ATTRIBUTE_NAME>`` (upper-case). Recognized variables:

    - ``VOT_DEBUG_MODE``: Enables debug mode for the toolkit.
    - ``VOT_SEQUENCE_CACHE_SIZE``: Maximum number of sequences to keep in cache.
    - ``VOT_RESULTS_BINARY``: Enables binary results format.
    - ``VOT_MASK_OPTIMIZE_READ``: Enables mask optimization when reading masks.
    - ``VOT_WORKER_POOL_SIZE``: Number of workers to use for parallel processing.
    - ``VOT_PERSISTENT_CACHE``: Enables persistent cache for analysis results in workspace.

    """

    debug_mode = Boolean(default=False, description="Enables debug mode for the toolkit.")
    sequence_cache_size = Integer(default=100, description="Maximum number of sequences to keep in cache.")
    results_binary = Boolean(default=True, description="Enables binary results format.")
    mask_optimize_read = Boolean(default=True, description="Enables mask optimization when reading masks.")
    worker_pool_size = Integer(default=1, description="Number of workers to use for parallel processing.")
    persistent_cache = Boolean(default=True, description="Enables persistent cache for analysis results in workspace.")
    registry = List(String(), default="", separator=os.pathsep, description="List of directories to search for tracker metadata.")

    def __init__(self):
        """Collects attribute overrides from the process environment.

        Raises:
            ValueError: When an invalid value is provided for an attribute.
        """
        overrides = {}
        for name in self.attributes():
            variable = "VOT_" + name.upper()
            if variable in os.environ:
                overrides[name] = os.environ[variable]
        super().__init__(**overrides)

    def __repr__(self):
        """Space-separated ``key=value`` listing of all configuration attributes."""
        return " ".join(f"{name}={getattr(self, name)}" for name in self.attributes())
98 |
99 | #_logger = None
100 |
101 | from vot.utilities import singleton
102 |
@singleton
def get_logger() -> logging.Logger:
    """Returns the default logger object used to log different messages.

    Returns:
        logging.Logger: Logger handle
    """

    def _build():
        from .utilities import ColoredFormatter
        handle = logging.getLogger("vot")
        handler = logging.StreamHandler()
        handler.setFormatter(ColoredFormatter())
        handle.addHandler(handler)
        if check_debug():
            handle.setLevel(logging.DEBUG)
        return handle

    # Construction is deferred through a proxy so that the configuration
    # (e.g. debug mode) is only consulted on first use of the logger.
    return Proxy(_build)
122 |
# Lazily constructed global configuration singleton; the factory is only
# invoked on first attribute access through the proxy.
config = Proxy(GlobalConfiguration)
124 |
def check_debug() -> bool:
    """Tells whether debug mode is enabled in the global configuration
    (settable through the ``VOT_DEBUG_MODE`` environment variable).

    Returns:
        bool: True if debug is enabled, False otherwise
    """
    return config.debug_mode
132 |
def print_config():
    """Logs the global configuration object, but only when debug mode is on."""
    if not check_debug():
        return
    get_logger().debug("Configuration: %s", config)
137 |
--------------------------------------------------------------------------------
/vot/__main__.py:
--------------------------------------------------------------------------------
"""Runs the toolkit CLI when the package is executed as ``python -m vot``."""

from vot.utilities.cli import main

if __name__ == '__main__':
    main()
9 |
--------------------------------------------------------------------------------
/vot/analysis/failures.py:
--------------------------------------------------------------------------------
1 | """This module contains the implementation of the FailureCount analysis. The analysis counts the number of failures in one or more sequences."""
2 |
3 | from typing import List, Tuple, Any
4 |
5 | from attributee import Include
6 |
7 | from vot.analysis import (Measure,
8 | MissingResultsException,
9 | SequenceAggregator, Sorting,
10 | is_special, SeparableAnalysis)
11 | from vot.dataset import Sequence
12 | from vot.experiment import Experiment
13 | from vot.experiment.multirun import (SupervisedExperiment)
14 | from vot.region import Region
15 | from vot.tracker import Tracker
16 | from vot.utilities.data import Grid
17 |
18 |
def count_failures(trajectory: List[Region]) -> Tuple[int, int]:
    """Counts failure markers in a trajectory. A failure is a region annotated
    as Special.FAILURE by the experiment. Returns (failures, trajectory length)."""
    failed = sum(1 for region in trajectory if is_special(region, SupervisedExperiment.FAILURE))
    return failed, len(trajectory)
22 |
23 |
class FailureCount(SeparableAnalysis):
    """Per-sequence failure count. A failure is a region annotated as
    Special.FAILURE by the experiment."""

    def compatible(self, experiment: Experiment):
        """The analysis only applies to supervised experiments."""
        return isinstance(experiment, SupervisedExperiment)

    @property
    def _title_default(self):
        """Default title for the analysis."""
        return "Number of failures"

    def describe(self):
        """Describes the output: a single "Failures" measure, sorted ascending."""
        return Measure("Failures", "F", 0, None, Sorting.ASCENDING),

    def subcompute(self, experiment: Experiment, tracker: Tracker, sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:
        """Computes the failure count for one sequence, averaged over objects
        and over repeated runs of each object."""

        assert isinstance(experiment, SupervisedExperiment)

        object_ids = sequence.objects()
        accumulated = 0

        for object_id in object_ids:
            trajectories = experiment.gather(tracker, sequence, objects=[object_id])
            if not trajectories:
                raise MissingResultsException()

            # Average failures over the repetitions available for this object.
            run_failures = sum(count_failures(trajectory.regions())[0] for trajectory in trajectories)
            accumulated += run_failures / len(trajectories)

        return accumulated / len(object_ids), len(sequence)
59 |
class CumulativeFailureCount(SequenceAggregator):
    """Total failure count over all sequences. A failure is a region annotated
    as Special.FAILURE by the experiment."""

    analysis = Include(FailureCount)

    def compatible(self, experiment: Experiment):
        """The aggregation only applies to supervised experiments."""
        return isinstance(experiment, SupervisedExperiment)

    def dependencies(self):
        """The per-sequence FailureCount analysis is the single dependency."""
        return self.analysis,

    @property
    def _title_default(self):
        """Default title for the analysis."""
        return "Number of failures"

    def describe(self):
        """Describes the output: a single "Failures" measure, sorted ascending."""
        return Measure("Failures", "F", 0, None, Sorting.ASCENDING),

    def aggregate(self, _: Tracker, sequences: List[Sequence], results: Grid):
        """Sums per-sequence failure counts into one total.

        Args:
            sequences (List[Sequence]): The list of sequences to aggregate.
            results (Grid): Per-sequence results; the first element of each
                cell is that sequence's failure count.

        Returns:
            Tuple[Any]: Single-element tuple with the total failure count.
        """
        return sum(cell[0] for cell in results),
99 |
--------------------------------------------------------------------------------
/vot/analysis/tests.py:
--------------------------------------------------------------------------------
1 | """ Unit tests for analysis module. """
2 |
3 |
4 | import unittest
5 |
class Tests(unittest.TestCase):
    """ Unit tests for analysis module. """

    def test_perfect_accuracy(self):
        """A trajectory identical to the groundtruth is expected to yield a
        mean overlap of 1, with the Special-marked frame not dragging the
        average down."""
        import numpy as np

        from vot.region import Rectangle, Special
        from vot.analysis.accuracy import gather_overlaps

        length = 30
        predicted = [Rectangle(0, 0, 100, 100)] * length
        reference = [Rectangle(0, 0, 100, 100)] * length

        # Replace the first prediction with a special (non-region) marker.
        predicted[0] = Special(1)

        overlaps, _ = gather_overlaps(predicted, reference)

        self.assertEqual(np.mean(overlaps), 1)
--------------------------------------------------------------------------------
/vot/dataset/cow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/votchallenge/toolkit/372a6fb167484c3ef121b0b640250deb1fdba0eb/vot/dataset/cow.png
--------------------------------------------------------------------------------
/vot/dataset/dummy.py:
--------------------------------------------------------------------------------
1 | """ Dummy sequences for testing purposes."""
2 |
3 | import os
4 | import math
5 | import tempfile
6 |
7 | from vot.dataset import BasedSequence
8 | from vot.region import Rectangle
9 | from vot.region.io import write_trajectory
10 | from vot.utilities import write_properties
11 |
12 | from PIL import Image
13 | import numpy as np
14 |
def _generate(base, length, size, objects):
    """Render a synthetic multi-channel sequence to disk.

    Objects orbit the frame center on a circle, one full revolution over the
    whole sequence, evenly phase-shifted. Color, depth and ir channels plus
    per-object groundtruth files and a metadata file are written under *base*.

    Args:
        base (str): The base directory for the sequence.
        length (int): The number of frames to generate.
        size (tuple): Frame size as (width, height).
        objects (int): The number of objects in the sequence.
    """

    # Static backgrounds: noisy dark color image, flat depth, black ir.
    background_color = Image.fromarray(np.random.normal(15, 5, (size[1], size[0], 3)).astype(np.uint8))
    background_depth = Image.fromarray(np.ones((size[1], size[0]), dtype=np.uint8) * 200)
    background_ir = Image.fromarray(np.zeros((size[1], size[0]), dtype=np.uint8))

    sprite = Image.open(os.path.join(os.path.dirname(__file__), "cow.png"))

    channel_dirs = {name: os.path.join(base, name) for name in ("color", "depth", "ir")}
    for directory in channel_dirs.values():
        os.makedirs(directory, exist_ok=True)

    path_color = os.path.join(channel_dirs["color"], "%08d.jpg")
    path_depth = os.path.join(channel_dirs["depth"], "%08d.png")
    path_ir = os.path.join(channel_dirs["ir"], "%08d.png")

    trajectories = {obj: [] for obj in range(objects)}

    center_x = size[0] / 2
    center_y = size[1] / 2

    # Orbit radius chosen so the sprite stays inside the frame.
    radius = min(center_x - sprite.size[0], center_y - sprite.size[1])

    angular_step = (math.pi * 2) / length
    phase_shift = (math.pi * 2) / objects

    for frame in range(length):
        canvas_color = background_color.copy()
        canvas_depth = background_depth.copy()
        canvas_ir = background_ir.copy()

        for obj in range(objects):

            angle = frame * angular_step + phase_shift * obj
            x = int(center_x + math.cos(angle) * radius - sprite.size[0] / 2)
            y = int(center_y + math.sin(angle) * radius - sprite.size[1] / 2)

            # The sprite itself serves as the paste mask for all channels.
            canvas_color.paste(sprite, (x, y), sprite)
            canvas_depth.paste(10, (x, y), sprite)
            canvas_ir.paste(240, (x, y), sprite)

            trajectories[obj].append(Rectangle(x, y, sprite.size[0], sprite.size[1]))

        # Frames are numbered starting from 1.
        canvas_color.save(path_color % (frame + 1))
        canvas_depth.save(path_depth % (frame + 1))
        canvas_ir.save(path_ir % (frame + 1))

    if objects == 1:
        write_trajectory(os.path.join(base, "groundtruth.txt"), trajectories[0])
    else:
        for obj, regions in trajectories.items():
            write_trajectory(os.path.join(base, "groundtruth_%03d.txt" % obj), regions)

    metadata = {"name": "dummy", "fps" : 30, "format" : "dummy",
                "channel.default": "color"}
    write_properties(os.path.join(base, "sequence"), metadata)
82 |
def generate_dummy(length=100, size=(640, 480), objects=1):
    """Create (or reuse a cached copy of) a dummy sequence and load it.

    Args:
        length (int, optional): The length of the sequence. Defaults to 100.
        size (tuple, optional): The size of the sequence. Defaults to (640, 480).
        objects (int, optional): The number of objects in the sequence. Defaults to 1.
    """
    from vot.dataset import load_sequence

    base = os.path.join(tempfile.gettempdir(), "vot_dummy_%d_%d_%d_%d" % (length, size[0], size[1], objects))

    # Only regenerate when the cached sequence is missing or incomplete
    # (directory present but the metadata file was never written).
    cached = os.path.isdir(base) and os.path.isfile(os.path.join(base, "sequence"))
    if not cached:
        _generate(base, length, size, objects)

    return load_sequence(base)
98 |
--------------------------------------------------------------------------------
/vot/dataset/got10k.py:
--------------------------------------------------------------------------------
1 | """ GOT-10k dataset adapter module. The format of GOT-10k dataset is very similar to a subset of VOT, so there
2 | is a lot of code duplication."""
3 |
4 | import os
5 | import glob
6 | import configparser
7 |
8 | import six
9 |
10 | from vot import get_logger
11 | from vot.dataset import DatasetException, BasedSequence, \
12 | PatternFileListChannel, SequenceData, Sequence
13 | from vot.region import Special
14 | from vot.region.io import read_trajectory
15 |
16 | logger = get_logger()
17 |
def load_channel(source):
    """Create an image channel from a path.

    Args:
        source (str): Path to the channel data. A path without an extension is
            treated as a directory of ``%08d.jpg`` numbered frames.

    Returns:
        PatternFileListChannel: Channel object enumerating the frames.
    """
    extension = os.path.splitext(source)[1]

    if not extension:
        source = os.path.join(source, '%08d.jpg')
    return PatternFileListChannel(source)
34 |
35 |
def _read_data(metadata):
    """Load channel images, annotations, tags and values for a GOT-10k sequence.

    Args:
        metadata (dict): Metadata dictionary; must contain "root" and may
            contain "groundtruth" (annotation file name, defaults to
            "groundtruth.txt"). Updated in place with channel/size/length info.

    Returns:
        SequenceData: The assembled sequence data with a single object.

    Raises:
        DatasetException: When a channel or per-frame value list does not
            match the groundtruth length.
    """
    channels = {}
    tags = {}
    values = {}

    base = metadata["root"]

    channels["color"] = load_channel(os.path.join(base, "%08d.jpg"))
    metadata["channel.default"] = "color"
    metadata["width"], metadata["height"] = six.next(six.itervalues(channels)).size

    groundtruth_file = os.path.join(base, metadata.get("groundtruth", "groundtruth.txt"))
    groundtruth = read_trajectory(groundtruth_file)

    if len(groundtruth) == 1 and channels["color"].length > 1:
        # We are dealing with testing dataset, only first frame is available, so we pad the
        # groundtruth with unknowns. Only unsupervised experiment will work, but it is ok
        groundtruth.extend([Special(Sequence.UNKNOWN)] * (channels["color"].length - 1))

    metadata["length"] = len(groundtruth)

    tagfiles = glob.glob(os.path.join(base, '*.label'))

    for tagfile in tagfiles:
        with open(tagfile, 'r') as filehandle:
            tagname = os.path.splitext(os.path.basename(tagfile))[0]
            tag = [line.strip() == "1" for line in filehandle.readlines()]
            # Pad with False so the tag covers the whole sequence.
            while len(tag) < len(groundtruth):
                tag.append(False)
            tags[tagname] = tag

    valuefiles = glob.glob(os.path.join(base, '*.value'))

    for valuefile in valuefiles:
        with open(valuefile, 'r') as filehandle:
            valuename = os.path.splitext(os.path.basename(valuefile))[0]
            value = [float(line.strip()) for line in filehandle.readlines()]
            # Pad with zeros so the value channel covers the whole sequence.
            while len(value) < len(groundtruth):
                value.append(0.0)
            values[valuename] = value

    for name, channel in channels.items():
        if not channel.length == len(groundtruth):
            raise DatasetException("Length mismatch for channel %s" % name)

    # Fix: the original loop rebound the loop variable ("tag = tag_tmp") and
    # discarded the padded list; the result must be stored back into the dict.
    for name, tag in tags.items():
        if not len(tag) == len(groundtruth):
            padded = len(groundtruth) * [False]
            padded[:len(tag)] = tag
            tags[name] = padded

    for name, value in values.items():
        if not len(value) == len(groundtruth):
            raise DatasetException("Length mismatch for value %s" % name)

    objects = {"object" : groundtruth}

    return SequenceData(channels, objects, tags, values, len(groundtruth))
100 |
101 | from vot.dataset import sequence_reader
102 |
@sequence_reader.register("GOT-10k")
def read_sequence(path):
    """Build a GOT-10k sequence from a directory.

    Args:
        path (str): Path to the sequence directory.

    Returns:
        BasedSequence: Lazily-loaded sequence object, or None when the
        directory does not look like a GOT-10k sequence.
    """

    meta_file = os.path.join(path, 'meta_info.ini')

    # A GOT-10k sequence must provide both annotations and meta information.
    if not os.path.isfile(os.path.join(path, 'groundtruth.txt')) or not os.path.isfile(meta_file):
        return None

    metadata = dict(fps=30, format="default")

    parser = configparser.ConfigParser()
    parser.read(meta_file)
    metadata.update(parser["METAINFO"])
    # "anno_fps" carries a three-character unit suffix (e.g. "30fps") that is
    # stripped before conversion.
    metadata["fps"] = int(metadata["anno_fps"][:-3])

    metadata["root"] = path
    metadata["name"] = os.path.basename(path)
    metadata["channel.default"] = "color"

    return BasedSequence(metadata["name"], _read_data, metadata)
127 |
128 |
129 |
--------------------------------------------------------------------------------
/vot/dataset/trackingnet.py:
--------------------------------------------------------------------------------
1 | """ Dataset adapter for the TrackingNet dataset. Note that the dataset is organized a different way than the VOT datasets,
2 | annotated frames are stored in a separate directory. The dataset also contains train and test splits. The loader
3 | assumes that only one of the splits is used at a time and that the path is given to this part of the dataset. """
4 |
5 | import os
6 | import glob
7 |
8 | import six
9 |
10 | from vot import get_logger
11 | from vot.region import Special
12 | from vot.region.io import read_trajectory
13 |
14 | logger = get_logger()
15 |
def load_channel(source):
    """Create an image channel from a path.

    Args:
        source (str): Path to the channel data. A path without an extension is
            treated as a directory of ``%d.jpg`` numbered frames.

    Returns:
        PatternFileListChannel: Channel object enumerating the frames.
    """
    from vot.dataset import PatternFileListChannel

    if not os.path.splitext(source)[1]:
        source = os.path.join(source, '%d.jpg')
    return PatternFileListChannel(source)
34 |
35 |
def _read_data(metadata):
    """Internal function for reading data from the given metadata for a TrackingNet sequence.

    Args:
        metadata (dict): Metadata dictionary, must contain "name" and "root" keys.

    Returns:
        SequenceData: Sequence data object.
    """
    from vot.dataset import SequenceData, Sequence

    channels = {}
    tags = {}
    values = {}

    name = metadata["name"]
    root = metadata["root"]

    channels["color"] = load_channel(os.path.join(root, 'frames', name))
    metadata["channel.default"] = "color"
    metadata["width"], metadata["height"] = six.next(six.itervalues(channels)).size

    # TrackingNet stores annotations in a per-sequence file inside the "anno"
    # directory, not in the sequence root itself (the original code passed the
    # dataset root directory to read_trajectory, which expects a file).
    groundtruth = read_trajectory(os.path.join(root, 'anno', name + '.txt'))

    if len(groundtruth) == 1 and channels["color"].length > 1:
        # We are dealing with testing dataset, only first frame is available, so we pad the
        # groundtruth with unknowns. Only unsupervised experiment will work, but it is ok
        groundtruth.extend([Special(Sequence.UNKNOWN)] * (channels["color"].length - 1))

    metadata["length"] = len(groundtruth)

    objects = {"object": groundtruth}

    return SequenceData(channels, objects, tags, values, len(groundtruth))
71 |
def read_sequence(path):
    """ Read sequence from the given path. Different to VOT datasets, the sequence is not
    a directory, but a file. From the file name the sequence name is extracted and the
    path to image frames is inferred based on standard TrackingNet directory structure.

    Args:
        path (str): Path to the sequence groundtruth file (<root>/anno/<name>.txt).

    Returns:
        Sequence: Sequence object, or None if the path does not denote a valid sequence.
    """
    if not os.path.isfile(path):
        return None

    name, ext = os.path.splitext(os.path.basename(path))

    if ext != '.txt':
        return None

    # Annotation files produced by list_sequences live in <root>/anno, so the
    # split root is two directory levels above the file (the original code went
    # up three levels, which does not match the anno/frames layout).
    root = os.path.dirname(os.path.dirname(path))

    # Reject annotation files without a matching frames directory. The original
    # guard combined the conditions incorrectly ("not isfile(path) and isdir(...)")
    # and could never trigger since isfile(path) was already verified above.
    if not os.path.isdir(os.path.join(root, 'frames', name)):
        return None

    # Import deferred until a sequence is actually constructed.
    from vot.dataset import BasedSequence

    metadata = dict(fps=30)
    metadata["channel.default"] = "color"
    metadata["name"] = name
    metadata["root"] = root

    return BasedSequence(name, _read_data, metadata)
104 |
def list_sequences(path):
    """ List sequences in the given path. The path is expected to be the root of the TrackingNet dataset split.

    Args:
        path (str): Path to the dataset split root, containing "anno" and "frames" directories.

    Returns:
        list: List of paths to sequence annotation files, or None when the
            directory layout does not match a TrackingNet split.
    """
    required = (os.path.join(path, "anno"), os.path.join(path, "frames"))

    if not all(os.path.isdir(directory) for directory in required):
        return None

    return list(glob.glob(os.path.join(path, "anno", "*.txt")))
121 |
122 |
123 |
--------------------------------------------------------------------------------
/vot/experiment/helpers.py:
--------------------------------------------------------------------------------
1 | """ Helper classes for experiments."""
2 |
3 | from vot.dataset import Sequence
4 | from vot.region import RegionType
5 |
def _objectstart(sequence: Sequence, id: str):
    """Return the index of the first frame where the object has a real (non-empty, non-special) annotation."""
    missing = [region is None or region.type == RegionType.SPECIAL for region in sequence.object(id)]
    # index(False) raises ValueError when the object never appears, same as the original contract.
    return missing.index(False)
10 |
class MultiObjectHelper(object):
    """Helper for multi-object sequences: answers queries about which objects are active at a given frame."""

    def __init__(self, sequence: Sequence):
        """Initialize the helper class.

        Args:
            sequence (Sequence): The sequence to be used.
        """
        self._sequence = sequence
        identifiers = list(sequence.objects())
        starts = [_objectstart(sequence, identifier) for identifier in identifiers]
        # Store (first frame, id) pairs ordered by the frame of first appearance.
        self._ids = sorted(zip(starts, identifiers), key=lambda entry: entry[0])

    def new(self, position: int):
        """Returns identifiers of objects that first appear exactly at the given frame.

        Args:
            position (int): The frame number.

        Returns:
            [list]: A list of object ids.
        """
        return [identifier for start, identifier in self._ids if start == position]

    def objects(self, position: int):
        """Returns identifiers of all objects that have appeared at or before the given frame.

        Args:
            position (int): The frame number.

        Returns:
            [list]: A list of object ids.
        """
        return [identifier for start, identifier in self._ids if start <= position]

    def all(self):
        """Returns identifiers of all objects in the sequence.

        Returns:
            [list]: A list of object ids.
        """
        return [identifier for _, identifier in self._ids]
--------------------------------------------------------------------------------
/vot/experiment/multistart.py:
--------------------------------------------------------------------------------
1 |
2 | """ This module implements the multistart experiment. """
3 |
4 | from typing import Callable
5 |
6 | from vot.dataset import Sequence
7 | from vot.dataset.proxy import FrameMapSequence
8 | from vot.region import Special
9 |
10 | from attributee import String
11 |
12 | from vot.experiment import Experiment
13 | from vot.tracker import Tracker, Trajectory
14 |
def find_anchors(sequence: Sequence, anchor="anchor"):
    """Locate anchor frames in the sequence. An anchor frame marks a point where the object is
    visible and tracking can be initialized there, running forward or backward.

    Args:
        sequence (Sequence): The sequence to be scanned.
        anchor (str, optional): The name of the per-frame value marking anchors. Defaults to "anchor".

    Returns:
        [tuple]: Two lists of frame indices: (forward anchors, backward anchors).
    """
    forward, backward = [], []
    for position in range(len(sequence)):
        # Positive values mark forward anchors, negative ones backward anchors;
        # a missing value (or zero) means the frame is not an anchor.
        direction = sequence.values(position).get(anchor, 0)
        if direction > 0:
            forward.append(position)
        elif direction < 0:
            backward.append(position)
    return forward, backward
35 |
class MultiStartExperiment(Experiment):
    """The multistart experiment. The experiment works by utilizing anchor frames in the sequence.
    Anchor frames are frames where the given object is visible and can be used for initialization.
    The tracker is then initialized in each anchor frame and run until the end of the sequence either forward or backward.
    """

    # Name of the per-frame value channel that marks anchor frames (see find_anchors).
    anchor = String(default="anchor")

    def scan(self, tracker: Tracker, sequence: Sequence) -> tuple:
        """Scan the results of the experiment for the given tracker and sequence.

        Args:
            tracker (Tracker): The tracker to be scanned.
            sequence (Sequence): The sequence to be scanned.

        Raises:
            RuntimeError: If the sequence does not contain any anchors.

        Returns:
            [tuple]: A tuple containing three elements. The first element is a boolean indicating whether the experiment is complete. The second element is a list of files that are present. The third element is the results object."""

        files = []
        complete = True

        results = self.results(tracker, sequence)

        forward, backward = find_anchors(sequence, self.anchor)

        if len(forward) == 0 and len(backward) == 0:
            raise RuntimeError("Sequence does not contain any anchors")

        # One trajectory is expected per anchor; its name encodes the sequence
        # name and the anchor frame index (same scheme as in execute()).
        for i in forward + backward:
            name = "%s_%08d" % (sequence.name, i)
            if Trajectory.exists(results, name):
                files.extend(Trajectory.gather(results, name))
            else:
                # A missing trajectory means this anchor was not run yet.
                complete = False

        return complete, files, results

    def execute(self, tracker: Tracker, sequence: Sequence, force: bool = False, callback: Callable = None) -> None:
        """Execute the experiment for the given tracker and sequence.

        Args:
            tracker (Tracker): The tracker to be executed.
            sequence (Sequence): The sequence to be executed.
            force (bool, optional): Force re-execution of the experiment. Defaults to False.
            callback (Callable, optional): A callback function that is called after each anchor run
                with the fraction of completed runs. Defaults to None.

        Raises:
            RuntimeError: If the sequence does not contain any anchors.
        """

        results = self.results(tracker, sequence)

        forward, backward = find_anchors(sequence, self.anchor)

        if len(forward) == 0 and len(backward) == 0:
            raise RuntimeError("Sequence does not contain any anchors")

        # Progress is reported per anchor run, not per frame.
        total = len(forward) + len(backward)
        current = 0

        with self._get_runtime(tracker, sequence) as runtime:

            # Forward anchors run towards the end of the sequence, backward
            # anchors replay frames in reverse order down to frame 0.
            for i, reverse in [(f, False) for f in forward] + [(f, True) for f in backward]:
                name = "%s_%08d" % (sequence.name, i)

                if Trajectory.exists(results, name) and not force:
                    continue

                # The proxy sequence re-maps frame indices so that the anchor
                # frame becomes frame 0 of the run.
                if reverse:
                    proxy = FrameMapSequence(sequence, list(reversed(range(0, i + 1))))
                else:
                    proxy = FrameMapSequence(sequence, list(range(i, len(sequence))))

                trajectory = Trajectory(len(proxy))

                _, elapsed = runtime.initialize(proxy.frame(0), self._get_initialization(proxy, 0))

                # Frame 0 stores the initialization marker rather than a tracker output.
                trajectory.set(0, Special(Trajectory.INITIALIZATION), {"time": elapsed})

                for frame in range(1, len(proxy)):
                    object, elapsed = runtime.update(proxy.frame(frame))

                    object.properties["time"] = elapsed

                    trajectory.set(frame, object.region, object.properties)

                trajectory.write(results, name)

                current = current + 1
                if callback:
                    callback(current / total)
--------------------------------------------------------------------------------
/vot/experiment/transformer.py:
--------------------------------------------------------------------------------
1 | """ Transformer module for experiments."""
2 |
3 | import os
4 | from abc import abstractmethod
5 | import typing
6 |
7 | from PIL import Image
8 |
9 | from attributee import Attributee, Integer, Float, Boolean, String, List
10 |
11 | from vot.dataset import Sequence, InMemorySequence
12 | from vot.dataset.proxy import FrameMapSequence
13 | from vot.dataset.common import write_sequence, read_sequence
14 | from vot.region import RegionType
15 | from vot.utilities import arg_hash
16 |
class Transformer(Attributee):
    """Base class for transformers. A transformer derives one or more modified sequences from an input sequence."""

    def __init__(self, cache: "LocalStorage", **kwargs):
        """Initialize the transformer.

        Args:
            cache (LocalStorage): The cache to be used for storing generated sequences.
        """
        super().__init__(**kwargs)
        # Cache handle; subclasses that materialize sequences write them here.
        self._cache = cache

    @abstractmethod
    def __call__(self, sequence: Sequence) -> typing.List[Sequence]:
        """Produce the transformed sequences for the given input sequence; generated
        sequences are stored in the cache when materialization is needed.

        Args:
            sequence (Sequence): The sequence to be transformed.

        Returns:
            [list]: A list of generated sequences.
        """
        raise NotImplementedError
40 |
class SingleObject(Transformer):
    """Transformer that splits a multi-object sequence into one sequence per annotated object."""

    trim = Boolean(default=False, description="Trim each generated sequence to a visible subsection for the selected object")

    def __call__(self, sequence: Sequence) -> typing.List[Sequence]:
        """Generate a list of sequences from the given sequence.

        Args:
            sequence (Sequence): The sequence to be transformed.
        """
        from vot.dataset.proxy import ObjectFilterSequence

        identifiers = sequence.objects()

        # A single-object sequence needs no filtering, return it unchanged.
        if len(identifiers) == 1:
            return [sequence]

        return [ObjectFilterSequence(sequence, identifier, self.trim) for identifier in identifiers]
58 |
class Redetection(Transformer):
    """Transformer that tests redetection of the object in the sequence. The object is shown in
    several initialization frames and is then displaced to the far corner of an enlarged frame.

    This transformer can only be used with single-object sequences."""

    # Total length of the generated sequence (initialization + redetection frames).
    length = Integer(default=100, val_min=1)
    # Number of frames showing the object at its original position.
    initialization = Integer(default=5, val_min=1)
    padding = Float(default=2, val_min=0)
    scaling = Float(default=1, val_min=0.1, val_max=10)

    def __call__(self, sequence: Sequence) -> typing.List[Sequence]:
        """Generate a list of sequences from the given sequence.

        Args:
            sequence (Sequence): The sequence to be transformed.
        """

        assert self._cache is not None, "Local cache is required for redetection transformer."

        assert len(sequence.objects()) == 1, "Redetection transformer can only be used with single-object sequences."

        cache_dir = self._cache.directory(self, arg_hash(sequence.name, **self.dump()))

        # Generate and cache the two-frame source sequence only once.
        if not os.path.isfile(os.path.join(cache_dir, "sequence")):
            generated = InMemorySequence(sequence.name, sequence.channels())
            size = (int(sequence.size[0] * self.scaling), int(sequence.size[1] * self.scaling))

            initial_images = dict()
            redetect_images = dict()
            offset = (0, 0)
            for channel in sequence.channels():
                rect = sequence.frame(0).groundtruth().convert(RegionType.RECTANGLE)

                halfsize = int(max(rect.width, rect.height) * self.scaling / 2)
                x, y = rect.center()

                image = Image.fromarray(sequence.frame(0).image())
                box = (x - halfsize, y - halfsize, x + halfsize, y + halfsize)
                template = image.crop(box)
                # Target position of the displaced object: bottom-right corner.
                # Computed inside the loop so it no longer leaks the loop variable
                # (the original referenced `template` after the loop ended).
                offset = (size[0] - template.width, size[1] - template.height)
                initial = Image.new(image.mode, size)
                initial.paste(image, (0, 0))
                redetect = Image.new(image.mode, size)
                redetect.paste(template, offset)
                initial_images[channel] = initial
                redetect_images[channel] = redetect

            generated.append(initial_images, sequence.frame(0).groundtruth())
            generated.append(redetect_images, sequence.frame(0).groundtruth().move(*offset))

            write_sequence(cache_dir, generated)

        source = read_sequence(cache_dir)
        # Show the initial frame for `initialization` frames, then the redetection
        # frame for the remainder of `length` frames. The original used len(source)
        # (always 2) instead of self.length, which made the redetection segment
        # empty whenever initialization >= 2 and left `length` unused.
        mapping = [0] * self.initialization + [1] * (self.length - self.initialization)
        return [FrameMapSequence(source, mapping)]
112 |
class IgnoreObjects(Transformer):
    """Transformer that removes the annotations of objects with the listed ids from the sequence."""

    ids = List(String(), default=[], description="List of ids to be ignored")

    def __call__(self, sequence: Sequence) -> typing.List[Sequence]:
        """Generate a list of sequences from the given sequence.

        Args:
            sequence (Sequence): The sequence to be transformed.
        """
        from vot.dataset.proxy import ObjectsHideFilterSequence

        # A proxy hides the configured ids; the underlying data is untouched.
        hidden = ObjectsHideFilterSequence(sequence, self.ids)
        return [hidden]
127 |
class Downsample(Transformer):
    """Transformer that temporally downsamples the sequence, keeping every N-th frame."""

    factor = Integer(default=2, val_min=1, description="Downsampling factor")
    offset = Integer(default=0, val_min=0, description="Offset for the downsampling")

    def __call__(self, sequence: Sequence) -> typing.List[Sequence]:
        """Generate a list of sequences from the given sequence.

        Args:
            sequence (Sequence): The sequence to be transformed.
        """
        from vot.dataset.proxy import FrameMapSequence

        # Keep frames offset, offset + factor, offset + 2 * factor, ...
        frames = list(range(self.offset, len(sequence), self.factor))

        return [FrameMapSequence(sequence, frames)]
--------------------------------------------------------------------------------
/vot/region/__init__.py:
--------------------------------------------------------------------------------
1 | """ This module contains classes for region representation and manipulation. Regions are also used to represent results
2 | of trackers as well as groundtruth trajectories. The module also contains functions for calculating overlaps between
3 | regions and for converting between different region types."""
4 |
5 | from abc import abstractmethod, ABC
6 | from enum import Enum
7 |
8 | from vot import ToolkitException
9 | from vot.utilities.draw import DrawHandle
10 |
class RegionException(ToolkitException):
    """General region exception, base class for all region-related errors."""
13 |
class ConversionException(RegionException):
    """Region conversion exception, raised when a conversion between region types cannot be performed.
    """
    def __init__(self, *args, source=None):
        """Constructor

        Args:
            *args: Arguments for the base exception

        Keyword Arguments:
            source (Region): Source region that could not be converted (default: {None})

        """
        super().__init__(*args)
        self._source = source

    @property
    def source(self):
        """The region that could not be converted, or None when not provided.
        The original class stored this value but offered no way to read it back.

        Returns:
            Region -- Source region of the failed conversion
        """
        return self._source
29 |
class RegionType(Enum):
    """Enumeration of region types used to tag and convert region objects.
    """
    SPECIAL = 0    # code-valued region; meaning depends on context (see Special)
    RECTANGLE = 1  # axis-aligned rectangle
    POLYGON = 2    # polygon given by its vertices
    MASK = 3       # per-pixel binary mask
37 |
class Region(ABC):
    """
    Base class for all region containers. Concrete subclasses (Rectangle, Polygon,
    Mask, Special) implement the abstract accessors below.
    """
    def __init__(self):
        """Base constructor, does nothing."""
        pass

    @property
    @abstractmethod
    def type(self):
        """Return type of the region

        Returns:
            RegionType -- Type of the region
        """
        pass

    @abstractmethod
    def copy(self):
        """Copy region to another object

        Returns:
            Region -- Copy of the region
        """

    @abstractmethod
    def convert(self, rtype: RegionType):
        """Convert region to another type. Note that some conversions
        degrade information. May raise ConversionException when the
        conversion is not possible.

        Args:
            rtype (RegionType): Target region type to convert to.
        """

    @abstractmethod
    def is_empty(self):
        """Check if region is empty (not annotated or not reported)
        """
77 |
class Special(Region):
    """
    Special region carrying an integer code whose meaning depends on the context
    (e.g. initialization or failure markers in trajectories).

    :var code: Code value
    """

    def __init__(self, code):
        """ Constructor

        Args:
            code (int): Code value
        """
        super().__init__()
        self._code = int(code)

    def __str__(self):
        """Return the code rendered as a string."""
        return '{}'.format(self._code)

    @property
    def type(self):
        """Type of the region, always RegionType.SPECIAL."""
        return RegionType.SPECIAL

    def copy(self):
        """Return a new Special region carrying the same code."""
        return Special(self._code)

    def convert(self, rtype: RegionType):
        """Convert region to another type. A special region can only be "converted" to itself.

        Args:
            rtype (RegionType): Target region type to convert to.

        Raises:
            ConversionException: When the target type is anything other than SPECIAL

        Returns:
            Region -- Converted region
        """
        if rtype != RegionType.SPECIAL:
            raise ConversionException("Unable to convert special region to {}".format(rtype))
        return self.copy()

    @property
    def code(self):
        """Returns the special code for this region.

        Returns:
            int -- Type code
        """
        return self._code

    def draw(self, handle: DrawHandle):
        """Drawing a special region is a no-op.

        Args:
            handle (DrawHandle): Draw handle
        """
        pass

    def is_empty(self):
        """ Check if region is empty. Special regions are always empty by definition."""
        return True
144 |
145 | from .raster import calculate_overlap, calculate_overlaps
146 | from .shapes import Rectangle, Polygon, Mask
--------------------------------------------------------------------------------
/vot/region/tests.py:
--------------------------------------------------------------------------------
1 |
2 | """Tests for the region module. """
3 |
4 | import unittest
5 |
6 | import numpy as np
7 |
8 | from vot.region.raster import rasterize_polygon, rasterize_rectangle, copy_mask, calculate_overlap
9 |
class TestRasterMethods(unittest.TestCase):
    """Unit tests for rasterization, overlap computation and region serialization."""

    def test_rasterize_polygon(self):
        """A square polygon covering the full raster bounds yields an all-ones mask."""
        vertices = np.array([[0, 0], [0, 100], [100, 100], [100, 0]], dtype=np.float32)
        expected = np.ones((100, 100), dtype=np.uint8)
        np.testing.assert_array_equal(rasterize_polygon(vertices, (0, 0, 99, 99)), expected)

    def test_rasterize_rectangle(self):
        """A rectangle covering the full raster bounds yields an all-ones mask."""
        rectangle = np.array([[0], [0], [100], [100]], dtype=np.float32)
        expected = np.ones((100, 100), dtype=np.uint8)
        np.testing.assert_array_equal(rasterize_rectangle(rectangle, (0, 0, 99, 99)), expected)

    def test_copy_mask(self):
        """Copying a mask with zero offset into identical bounds is an identity operation."""
        source = np.ones((100, 100), dtype=np.uint8)
        expected = np.ones((100, 100), dtype=np.uint8)
        np.testing.assert_array_equal(copy_mask(source, (0, 0), (0, 0, 99, 99)), expected)

    def test_calculate_overlap(self):
        """A region always overlaps itself perfectly, even when degenerate."""
        from vot.region import Rectangle

        full = Rectangle(0, 0, 100, 100)
        self.assertEqual(calculate_overlap(full, full), 1)

        degenerate = Rectangle(0, 0, 0, 0)
        self.assertEqual(calculate_overlap(degenerate, degenerate), 1)

    def test_ignore_mask(self):
        """Ignore masks exclude the marked pixels from the overlap computation."""
        from vot.region import Mask

        first = Mask(np.ones((100, 100), dtype=np.uint8))
        second = Mask(np.ones((100, 100), dtype=np.uint8))

        # Nothing ignored: identical masks overlap fully.
        nothing = Mask(np.zeros((100, 100), dtype=np.uint8))
        self.assertEqual(calculate_overlap(first, second, ignore=nothing), 1)

        # Everything ignored: no pixels remain to compare.
        everything = Mask(np.ones((100, 100), dtype=np.uint8))
        self.assertEqual(calculate_overlap(first, second, ignore=everything), 0)

    def test_empty_mask(self):
        """An all-zeros mask is empty; any set pixel makes it non-empty."""
        from vot.region import Mask

        self.assertTrue(Mask(np.zeros((100, 100), dtype=np.uint8)).is_empty())
        self.assertFalse(Mask(np.ones((100, 100), dtype=np.uint8)).is_empty())

    def test_binary_format(self):
        """ Binary and plain-text trajectory serialization round-trip to equivalent regions."""
        import io

        from vot.region import Rectangle, Polygon, Mask
        from vot.region.io import read_trajectory, write_trajectory
        from vot.region.raster import calculate_overlaps

        trajectory = [
            Rectangle(0, 0, 100, 100),
            Rectangle(0, 10, 100, 100),
            Rectangle(0, 0, 200, 100),
            Polygon([[0, 0], [0, 100], [100, 100], [100, 0]]),
            Mask(np.ones((100, 100), dtype=np.uint8)),
            Mask(np.zeros((100, 100), dtype=np.uint8)),
        ]

        binary_stream = io.BytesIO()
        text_stream = io.StringIO()

        write_trajectory(binary_stream, trajectory)
        write_trajectory(text_stream, trajectory)

        binary_stream.seek(0)
        text_stream.seek(0)

        from_binary = read_trajectory(binary_stream)
        from_text = read_trajectory(text_stream)

        # Perfect overlap everywhere means both round-trips preserved the regions.
        self.assertTrue(np.all(np.array(calculate_overlaps(from_binary, from_text, None)) == 1))
        self.assertTrue(np.all(np.array(calculate_overlaps(from_binary, trajectory, None)) == 1))

    def test_rle(self):
        """ Test if RLE encoding works for limited stride representation."""
        from vot.region.io import rle_to_mask, mask_to_rle
        rle = [0, 2, 122103, 9, 260, 19, 256, 21, 256, 22, 254, 24, 252, 26, 251, 27, 250, 28, 249, 28, 250, 28, 249, 28, 249, 29, 249, 30, 247, 33, 245, 33, 244, 34, 244, 37, 241, 39, 239, 41, 237, 41, 236, 43, 235, 45, 234, 47, 233, 47, 231, 48, 230, 48, 230, 11, 7, 29, 231, 9, 9, 29, 230, 8, 11, 28, 230, 7, 12, 28, 230, 7, 13, 27, 231, 5, 14, 27, 233, 2, 16, 26, 253, 23, 255, 22, 256, 20, 258, 19, 259, 17, 3]
        original = rle_to_mask(np.array(rle, dtype=np.int32), 277, 478)

        reencoded = mask_to_rle(original, maxstride=255)
        restored = rle_to_mask(np.array(reencoded, dtype=np.int32), 277, 478)

        np.testing.assert_array_equal(original, restored)
--------------------------------------------------------------------------------
/vot/report/commands.tex:
--------------------------------------------------------------------------------
% Store value #3 under a control sequence named trk-<#1>-<#2> (detokenized),
% acting as a simple key-value store for per-tracker metadata.
\newcommand{\DefineTracker}[3]{%
\expandafter\newcommand\csname trk-\detokenize{#1}-\detokenize{#2}\endcsname{#3}%
}%
% Retrieve a value previously registered with \DefineTracker.
\newcommand{\Tracker}[2]{\csname trk-\detokenize{#1}-\detokenize{#2}\endcsname}%
\makeatletter%
% Typeset #1 with every underscore replaced by \textunderscore, so raw
% identifiers containing underscores can appear in text mode.
\newcommand{\replunderscores}[1]{\expandafter\@repl@underscores#1_\relax}
% Recursive helper: split the argument at the first underscore; when the
% remainder is empty emit the fragment, otherwise emit the fragment plus
% \textunderscore and recurse on the remainder.
\def\@repl@underscores#1_#2\relax{%
\ifx \relax #2\relax
#1%
\else
#1%
\textunderscore
\@repl@underscores#2\relax
\fi
}
\makeatother%
--------------------------------------------------------------------------------
/vot/report/common.py:
--------------------------------------------------------------------------------
1 | """Common functions for document generation."""
2 | import os
3 | import math
4 | from typing import List
5 |
6 | from attributee import String
7 |
8 | from vot.tracker import Tracker
9 | from vot.report import ScatterPlot, LinePlot, Table, SeparableReport, Report
10 | from vot.analysis import Measure, Point, Plot, Curve, Sorting, Axes
11 |
def read_resource(name):
    """Reads a resource file from the package directory and returns its content as a string.

    Args:
        name (str): Name of the resource file, relative to this module's directory.

    Returns:
        str: Content of the resource file.
    """
    path = os.path.join(os.path.dirname(__file__), name)
    # Text resources shipped with the package (CSS/JS/TeX) are UTF-8; declare the
    # encoding explicitly so reading does not depend on the platform locale.
    with open(path, "r", encoding="utf-8") as filehandle:
        return filehandle.read()
17 |
def per_tracker(a):
    """Returns true when the analysis produces per-tracker results, i.e. its axes are trackers."""
    return Axes.TRACKERS == a.axes
21 |
def extract_measures_table(trackers: List[Tracker], results) -> Table:
    """Extracts a table of measures from the results. The table is a list of lists, where each list is a column.
    The first column is the tracker name, the second column is the measure name, and the rest of the columns are the values for each tracker.

    Args:
        trackers (list): List of trackers.
        results (dict): Dictionary of results. It is a dictionary of dictionaries, where the first key is the experiment, and the second key is the analysis. The value is a list of results for each tracker.

    Returns:
        Table: Table with a three-row header (experiment, analysis, measure),
        per-tracker rows of measure values, and per-column rank orderings.
    """
    # Header rows: experiment, analysis and measure description for each column.
    table_header = [[], [], []]
    table_data = dict()
    # Sorting direction for each registered column, used for ranking below.
    column_order = []

    def safe(value, default):
        # Substitute a sentinel for None so sort keys never compare None.
        return value if not value is None else default

    for experiment, eresults in results.items():
        for analysis, aresults in eresults.items():
            descriptions = analysis.describe()

            # Ignore all non per-tracker results
            if not per_tracker(analysis):
                continue

            # Register one column for every scalar Measure the analysis produces.
            for i, description in enumerate(descriptions):
                if description is None:
                    continue
                if isinstance(description, Measure):
                    table_header[0].append(experiment)
                    table_header[1].append(analysis)
                    table_header[2].append(description)
                    column_order.append(description.direction)

            if aresults is None:
                continue

            # Append the measure values of each tracker in column registration order.
            for tracker, values in zip(trackers, aresults):
                if not tracker in table_data:
                    table_data[tracker] = list()

                for i, description in enumerate(descriptions):
                    if description is None:
                        continue
                    if isinstance(description, Measure):
                        table_data[tracker].append(values[i] if not values is None else None)

    table_order = []

    # Compute a per-column ranking of trackers following the measure's sorting direction.
    for i, order in enumerate(column_order):
        values = [(v[i], k) for k, v in table_data.items()]
        if order == Sorting.ASCENDING:
            # None values become -/+inf sentinels so comparisons never fail.
            values = sorted(values, key=lambda x: safe(x[0], -math.inf), reverse=False)
        elif order == Sorting.DESCENDING:
            values = sorted(values, key=lambda x: safe(x[0], math.inf), reverse=True)
        else:
            # Unsortable column: no ranking is produced for it.
            table_order.append(None)
            continue

        order = dict()
        j = 0
        value = None

        # Take into account that some values are the same
        # Ranks are 1-based and trackers with equal values share a rank.
        # NOTE(review): if the first sorted value is None the initial comparison
        # (None == None) keeps j at 0, producing rank 0 - confirm this is intended.
        for k, v in enumerate(values):
            j = j if value == v[0] else k + 1
            value = v[0]
            order[v[1]] = j
        table_order.append(order)

    return Table(table_header, table_data, table_order)
91 |
def extract_plots(trackers: List[Tracker], results, order=None):
    """Extracts a list of plots from the results. The list is a list of tuples, where each tuple is a pair of strings and a plot.

    Args:
        trackers (list): List of trackers.
        results (dict): Dictionary of results. It is a dictionary of dictionaries, where the first key is the experiment, and the second key is the analysis. The value is a list of results for each tracker.
        order (list, optional): Indices determining in which order trackers are
            added to each plot (later additions are drawn on top). Defaults to
            the natural order of `trackers`.

    Returns:
        dict: Mapping from experiment to a list of (title, plot) pairs.
    """
    plots = dict()
    # Running counter used to keep generated plot identifiers unique.
    j = 0

    for experiment, eresults in results.items():
        experiment_plots = list()
        for analysis, aresults in eresults.items():
            descriptions = analysis.describe()

            # Ignore all non per-tracker results
            if not per_tracker(analysis):
                continue

            for i, description in enumerate(descriptions):
                if description is None:
                    continue

                plot_identifier = "%s_%s_%d" % (experiment.identifier, analysis.name, j)
                j += 1

                # Choose the plot type from the kind of result the analysis declares:
                # 2D points become scatter plots, plots/2D curves become line plots.
                if isinstance(description, Point) and description.dimensions == 2:
                    xlim = (description.minimal(0), description.maximal(0))
                    ylim = (description.minimal(1), description.maximal(1))
                    xlabel = description.label(0)
                    ylabel = description.label(1)
                    plot = ScatterPlot(plot_identifier, xlabel, ylabel, xlim, ylim, description.trait)
                elif isinstance(description, Plot):
                    ylim = (description.minimal, description.maximal)
                    plot = LinePlot(plot_identifier, description.wrt, description.name, None, ylim, description.trait)
                elif isinstance(description, Curve) and description.dimensions == 2:
                    xlim = (description.minimal(0), description.maximal(0))
                    ylim = (description.minimal(1), description.maximal(1))
                    xlabel = description.label(0)
                    ylabel = description.label(1)
                    plot = LinePlot(plot_identifier, xlabel, ylabel, xlim, ylim, description.trait)
                else:
                    # Other result kinds (e.g. scalar measures) are not plottable here.
                    continue

                for t in order if order is not None else range(len(trackers)):
                    tracker = trackers[t]
                    # NOTE(review): indexing with (t, 0) assumes the remaining result
                    # axis has a single cell per tracker - confirm against the analysis grid.
                    values = aresults[t, 0]
                    data = values[i] if not values is None else None
                    plot(tracker, data)

                experiment_plots.append((analysis.title + " - " + description.name, plot))

        plots[experiment] = experiment_plots

    return plots
150 |
def format_value(data):
    """Render a single value as a display string.

    Strings pass through unchanged, integers are rendered without decimals,
    floats with three decimal places, None as "N/A", and anything else via str().

    Args:
        data: Value to format.

    Returns:
        str: Formatted value.

    """
    if data is None:
        return "N/A"
    for kind, pattern in ((str, None), (int, "%d"), (float, "%.3f")):
        if isinstance(data, kind):
            return data if pattern is None else pattern % data
    return str(data)
171 |
def merge_repeats(objects):
    """Collapse consecutive duplicates in a list into (object, count) tuples."""

    merged = []

    for item in objects:
        if merged and merged[-1][0] == item:
            # Same as the previous entry: extend its run length.
            merged[-1] = (item, merged[-1][1] + 1)
        else:
            merged.append((item, 1))

    return merged
193 |
class StackAnalysesPlots(SeparableReport):
    """ A document that produces plots for all analyses configured in stack experiments. """

    async def perexperiment(self, experiment, trackers, sequences):
        """Generate plots for every analysis compatible with a single experiment.

        Args:
            experiment: The experiment to process.
            trackers: Trackers to include in the plots.
            sequences: Sequences the analyses are computed on.

        Returns:
            list: Plot objects produced for this experiment.
        """

        from vot.report.common import extract_plots

        analyses = [analysis for analysis in experiment.analyses if analysis.compatible(experiment)]

        results = {a: r for a, r in zip(analyses, await self.process(analyses, experiment, trackers, sequences))}

        # Plot in reverse order, with best trackers on top
        z_order = list(reversed(range(len(trackers))))

        return [p for _, p in extract_plots(trackers, {experiment: results}, z_order)[experiment]]

    def compatible(self, experiment):
        """This report supports every experiment type."""
        return True
212 |
class StackAnalysesTable(Report):
    """ A document that produces an overview table of measures from all analyses configured in stack experiments. """

    async def generate(self, experiments, trackers, sequences):
        """Run all compatible analyses and collect their measures into one table.

        Args:
            experiments: Experiments to process.
            trackers: Trackers to include as table rows.
            sequences: Sequences the analyses are computed on.

        Returns:
            dict: Single "Overview" section containing the measures table.
        """

        from vot.report.common import extract_measures_table

        results = dict()

        for experiment in experiments:
            analyses = [analysis for analysis in experiment.analyses if analysis.compatible(experiment)]
            results[experiment] = {a: r for a, r in zip(analyses, await self.process(analyses, experiment, trackers, sequences))}

        table = extract_measures_table(trackers, results)

        return {"Overview": [table]}
229 |
class SequenceOverlapPlots(SeparableReport):
    """Produces a per-sequence line plot of per-frame overlap for each tracker."""

    # Reserved object ID under which ignore masks are stored in sequences.
    ignore_masks = String(default="_ignore", description="Object ID used to get ignore masks.")

    async def perexperiment(self, experiment, trackers, sequences):
        """Builds one overlap-over-time plot per sequence for the given experiment.

        Args:
            experiment: Experiment whose results are plotted.
            trackers: Trackers included in every plot.
            sequences: Sequences for which plots are produced.

        Returns:
            list: One LinePlot per sequence.
        """

        from vot.analysis.accuracy import Overlaps
        from vot.report import LinePlot

        # A single analysis is submitted, so next(...) retrieves its one
        # result; the result is indexable by (tracker, sequence).
        results = next(await self.process(Overlaps(ignore_masks=self.ignore_masks), experiment, trackers, sequences))

        plots = []

        for s, sequence in enumerate(sequences):
            # X axis spans the sequence frames, Y axis is overlap in [0, 1].
            plot = LinePlot("overlap_%s_%s" % (experiment.identifier, sequence.name), "Frame", "Overlap", (0, len(sequence)), (0, 1), None)

            for t, tracker in enumerate(trackers):
                measurements = results[t, s][0]
                for m in measurements:
                    # m[2] appears to hold frame indices and m[1] the overlap
                    # values -- TODO confirm against the Overlaps analysis output.
                    data = [(i, v) for i, v in zip(m[2], m[1])]
                    plot(tracker, data)

            plots.append(plot)

        return plots

    def compatible(self, experiment):
        """Only multi-run experiments provide the trajectories this report needs."""
        from vot.experiment.multirun import MultiRunExperiment
        return isinstance(experiment, MultiRunExperiment)
--------------------------------------------------------------------------------
/vot/report/html.py:
--------------------------------------------------------------------------------
1 | """HTML report generation. This module is used to generate HTML reports from the results of the experiments."""
2 | import os
3 | import io
4 | import datetime
5 | from typing import List
6 |
7 | import dominate
8 | from dominate.tags import h1, h2, table, thead, tbody, tr, th, td, div, p, li, ol, ul, span, style, link, script, video, a
9 | from dominate.util import raw, text
10 |
11 | from vot import toolkit_version, check_debug, get_logger
12 | from vot.tracker import Tracker
13 | from vot.dataset import Sequence
14 | from vot.workspace import Storage
15 | from vot.report.common import format_value, read_resource, merge_repeats
16 | from vot.report import StyleManager, Table, Plot, Video
17 | from vot.utilities.data import Grid
18 |
# CSS classes used to highlight the top-three ranked values in result tables.
ORDER_CLASSES = {1: "first", 2: "second", 3: "third"}
20 |
def insert_cell(value, order):
    """Appends a formatted td cell to the current table row.

    The raw value and its rank are attached as data attributes; the top-three
    ranks additionally receive a highlight CSS class.
    """
    attributes = {"data_sort_value": order, "data_value": value}
    highlight = ORDER_CLASSES.get(order)
    if highlight is not None:
        attributes["cls"] = highlight
    td(format_value(value), **attributes)
27 |
def table_cell(value):
    """Converts a value into the display text shown in a table cell."""
    if isinstance(value, str):
        return value
    if isinstance(value, Tracker):
        return value.label
    if isinstance(value, Sequence):
        return value.name
    return format_value(value)
37 |
def grid_table(data: Grid, rows: List[str], columns: List[str]):
    """Renders a two-dimensional Grid as an HTML table with row and column headers."""

    assert data.dimensions == 2
    assert data.size(0) == len(rows) and data.size(1) == len(columns)

    with table() as element:
        with thead():
            with tr():
                th()
                for column in columns:
                    th(table_cell(column))
        with tbody():
            for index, row in enumerate(rows):
                with tr():
                    th(table_cell(row))
                    for value in data.row(index):
                        # Unwrap single-element tuples before rendering.
                        if isinstance(value, tuple) and len(value) == 1:
                            value = value[0]
                        insert_cell(value, None)

    return element
60 |
def generate_html_document(trackers: List[Tracker], sequences: List[Sequence], reports, storage: Storage, metadata: dict = None):
    """Generates an HTML document from the results of the experiments.

    Args:
        trackers (list): List of trackers.
        sequences (list): List of sequences.
        reports (dict): Mapping from section name to a list of report items
            (Table, Plot or Video objects).
        storage (Storage): Storage the document and auxiliary files are written to.
        metadata (dict): Optional metadata dictionary; version, creation date,
            tracker and sequence entries are added to it.
    """

    def insert_video(data: Video):
        """Saves the video to storage and embeds a player element referencing it."""
        name = data.identifier + ".mp4"

        with storage.write(name, binary=True) as handle:
            data.save(handle, "mp4")

        with video(src=name, controls=True, preload="auto", autoplay=False, loop=False, width="100%", height="100%"):
            raw("Your browser does not support the video tag.")

    def insert_figure(figure):
        """Inserts a report figure into the document as inline SVG."""
        buffer = io.StringIO()
        figure.save(buffer, "SVG")
        raw(buffer.getvalue())

    def insert_mplfigure(figure):
        """Inserts a matplotlib figure into the document as inline SVG."""
        buffer = io.StringIO()
        figure.savefig(buffer, format="SVG", bbox_inches='tight', pad_inches=0.01, dpi=200)
        raw(buffer.getvalue())

    def add_style(name, linked=False):
        """Adds a stylesheet to the head: linked in debug mode, embedded otherwise."""
        if linked:
            link(rel='stylesheet', href='file://' + os.path.join(os.path.dirname(__file__), name))
        else:
            style(read_resource(name))

    def add_script(name, linked=False):
        """Adds a script to the head: linked in debug mode, embedded otherwise."""
        if linked:
            script(type='text/javascript', src='file://' + os.path.join(os.path.dirname(__file__), name))
        else:
            with script(type='text/javascript'):
                # Embed the actual script source. Previously only a bare "//"
                # comment was written here, so non-debug reports contained no
                # JavaScript at all (table sorting and tracker selection were
                # broken). The CDATA wrapper keeps the embedded source valid.
                raw("//<![CDATA[\n" + read_resource(name) + "//]]>")

    logger = get_logger()

    legend = StyleManager.default().legend(Tracker)

    doc = dominate.document(title='VOT report')

    # In debug mode resources are linked from the package directory so they
    # can be edited without regenerating the report.
    linked = check_debug()

    with doc.head:
        add_style("pure.css", linked)
        add_style("report.css", linked)
        add_script("jquery.js", linked)
        add_script("table.js", linked)
        add_script("report.js", linked)

    # TODO: make table more general (now it assumes a tracker per row)
    def make_table(data: Table):
        """Generates an HTML table from a Table object, one tracker per row."""
        if len(data.header[2]) == 0:
            logger.debug("No measures found, skipping table")
        else:
            with table(cls="overview-table pure-table pure-table-horizontal pure-table-striped"):
                with thead():
                    # Two grouped header rows (experiment, analysis) followed
                    # by one row with the individual measure abbreviations.
                    with tr():
                        th()
                        [th(c[0].identifier, colspan=c[1]) for c in merge_repeats(data.header[0])]
                    with tr():
                        th()
                        [th(c[0].title, colspan=c[1]) for c in merge_repeats(data.header[1])]
                    with tr():
                        th("Trackers")
                        [th(c.abbreviation, data_sort="int" if order else "") for c, order in zip(data.header[2], data.order)]
                with tbody():
                    for tracker, row in data.data.items():
                        with tr(data_tracker=tracker.reference):
                            with td():
                                insert_mplfigure(legend.figure(tracker))
                                span(tracker.label)
                            for value, order in zip(row, data.order):
                                insert_cell(value, order[tracker] if not order is None else None)

    metadata = metadata or dict()
    metadata["Version"] = toolkit_version()
    metadata["Created"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    metadata["Trackers"] = ", ".join([tracker.label for tracker in trackers])
    metadata["Sequences"] = ", ".join([sequence.name for sequence in sequences])

    with doc:

        with div(id="wrapper"):

            h1("Analysis report document")

            with ul(id="metadata"):
                for key, value in metadata.items():
                    with li():
                        span(key)
                        text(": " + value)

            with div(id="index"):
                h2("Index")
                with ol():
                    for key, _ in reports.items():
                        li(a(key, href="#"+key))

            for key, section in reports.items():

                a(name=key)
                h2(key, cls="section")

                for item in section:
                    if isinstance(item, Table):
                        make_table(item)
                    elif isinstance(item, Plot):
                        with div(cls="plot"):
                            p(item.identifier)
                            insert_figure(item)
                    elif isinstance(item, Video):
                        with div(cls="video"):
                            p(item.identifier)
                            insert_video(item)
                    else:
                        logger.warning("Unsupported report item type %s", item)

        with div(id="footer"):
            text("Generated by ")
            a("VOT toolkit", href="https://github.com/votchallenge/toolkit")
            text(" version %s" % toolkit_version())

    with storage.write("report.html") as filehandle:
        filehandle.write(doc.render())
200 |
--------------------------------------------------------------------------------
/vot/report/latex.py:
--------------------------------------------------------------------------------
1 | """This module contains functions for generating LaTeX documents with results."""
2 | import io
3 | import tempfile
4 | import datetime
5 | from typing import List
6 |
7 | from pylatex.base_classes import Container
8 | from pylatex.package import Package
9 | from pylatex import Document, Section, Command, LongTable, MultiColumn, Figure, UnsafeCommand
10 | from pylatex.utils import NoEscape
11 |
12 | from vot import toolkit_version, get_logger
13 | from vot.tracker import Tracker
14 | from vot.dataset import Sequence
15 | from vot.workspace import Storage
16 | from vot.report.common import format_value, read_resource, merge_repeats
17 | from vot.report import StyleManager, Plot, Table
18 |
# Symbol group name used by the DefineTracker/Tracker LaTeX commands.
TRACKER_GROUP = "default"
20 |
class Chunk(Container):
    """A pylatex container that renders only its content, with no surrounding environment."""

    def dumps(self):
        """Returns the LaTeX representation of the contained elements only."""
        return self.dumps_content()
27 |
def strip_comments(src, wrapper=True):
    """Removes LaTeX comment lines from source code.

    Args:
        src (str): LaTeX source to filter.
        wrapper (bool): When False, lines starting with a makeat directive
            are removed as well.

    Returns:
        str: Filtered source, lines rejoined with newlines.
    """
    kept = []
    for line in src.split("\n"):
        if line.startswith("%"):
            continue
        if not wrapper and line.startswith(r"\makeat"):
            continue
        kept.append(line)
    return "\n".join(kept)
31 |
def insert_figure(figure):
    """Serializes a report figure to PGF and returns it as LaTeX source.

    Args:
        figure: Report figure exposing a save(buffer, format) method.

    Returns:
        NoEscape: PGF source with comment lines stripped.
    """
    buffer = io.StringIO()
    figure.save(buffer, "PGF")
    return NoEscape(strip_comments(buffer.getvalue()))
37 |
def insert_mplfigure(figure, wrapper=True):
    """Serializes a matplotlib figure to PGF LaTeX source.

    Args:
        figure: Matplotlib figure to serialize.
        wrapper (bool): Forwarded to strip_comments; when False, makeat lines
            are removed as well (used when embedding inside command definitions).

    Returns:
        NoEscape: PGF source with comment lines stripped.
    """
    buffer = io.StringIO()
    figure.savefig(buffer, format="PGF", bbox_inches='tight', pad_inches=0.01)
    return NoEscape(strip_comments(buffer.getvalue(), wrapper))
43 |
44 |
def generate_symbols(container, trackers):
    """Generates a LaTeX command for each tracker. The command is named after the tracker reference and contains the tracker symbol.

    Args:
        container: pylatex container the definitions are appended to.
        trackers: Trackers for which symbol commands are defined.
    """

    legend = StyleManager.default().legend(Tracker)

    # makeatletter/makeatother allow @-containing internals of DefineTracker.
    container.append(Command("makeatletter"))
    for tracker in trackers:
        container.append(UnsafeCommand('DefineTracker', [tracker.reference, TRACKER_GROUP],
            extra_arguments=insert_mplfigure(legend.figure(tracker), False) + r' \replunderscores{%s}' % tracker.label))

    container.append(Command("makeatother"))
56 |
def generate_latex_document(trackers: List[Tracker], sequences: List[Sequence], reports, storage: Storage, multipart=True, metadata: dict = None) -> str:
    """Generates a LaTeX document with the results and writes it to storage as report.tex.

    Args:
        trackers (list): List of trackers.
        sequences (list): List of sequences.
        reports (dict): Mapping from section name to a list of report items (Table or Plot).
        storage (Storage): Storage the document and auxiliary files are written to.
        multipart (bool): If True, tracker symbols and figures are written to
            separate files and referenced from the main document.
        metadata (dict): Metadata dictionary (not referenced in this function).
    """

    # LaTeX command names used to highlight the top-three ranked values.
    order_marks = {1: "first", 2: "second", 3: "third"}

    def format_cell(value, order):
        """Formats a cell, wrapping it in a highlight command for top ranks."""
        cell = format_value(value)
        if order in order_marks:
            cell = Command(order_marks[order], cell)
        return cell

    logger = get_logger()

    doc = Document(page_numbers=True)

    doc.preamble.append(Package('pgf'))
    doc.preamble.append(Package('xcolor'))
    doc.preamble.append(Package('fullpage'))

    # Shared command definitions (DefineTracker, Tracker, replunderscores, ...).
    doc.preamble.append(NoEscape(read_resource("commands.tex")))

    doc.preamble.append(UnsafeCommand('newcommand', r'\first', options=1, extra_arguments=r'{\color{red} #1 }'))
    doc.preamble.append(UnsafeCommand('newcommand', r'\second', options=1, extra_arguments=r'{\color{green} #1 }'))
    doc.preamble.append(UnsafeCommand('newcommand', r'\third', options=1, extra_arguments=r'{\color{blue} #1 }'))

    # TODO: make table more general (now it assumes a tracker per row)
    def make_table(doc, table):
        """Renders a Table object as a LaTeX longtable, one tracker per row."""
        if len(table.header[2]) == 0:
            logger.debug("No measures found, skipping table")
        else:

            # Generate data table
            with doc.create(LongTable("l " * (len(table.header[2]) + 1))) as data_table:
                data_table.add_hline()
                data_table.add_row([" "] + [MultiColumn(c[1], data=c[0].identifier) for c in merge_repeats(table.header[0])])
                data_table.add_hline()
                data_table.add_row([" "] + [MultiColumn(c[1], data=c[0].title) for c in merge_repeats(table.header[1])])
                data_table.add_hline()
                data_table.add_row(["Tracker"] + [" " + c.abbreviation + " " for c in table.header[2]])
                data_table.add_hline()
                data_table.end_table_header()
                data_table.add_hline()

                for tracker, data in table.data.items():
                    data_table.add_row([UnsafeCommand("Tracker", [tracker.reference, TRACKER_GROUP])] +
                        [format_cell(x, order[tracker] if not order is None else None) for x, order in zip(data, table.order)])

    if multipart:
        # Tracker symbol definitions go to a separate file that is \input.
        container = Chunk()
        generate_symbols(container, trackers)
        with storage.write("symbols.tex") as out:
            container.dump(out)
        doc.preamble.append(Command("input", "symbols.tex"))
    else:
        generate_symbols(doc.preamble, trackers)

    doc.preamble.append(Command('title', 'VOT toolkit report'))
    doc.preamble.append(Command('author', 'Toolkit version ' + toolkit_version()))
    doc.preamble.append(Command('date', datetime.datetime.now().isoformat()))
    doc.append(NoEscape(r'\maketitle'))

    for key, section in reports.items():

        doc.append(Section(key))

        for item in section:
            if isinstance(item, Table):
                make_table(doc, item)
            elif isinstance(item, Plot):
                plot = item
                with doc.create(Figure(position='htbp')) as container:
                    if multipart:
                        # Save the plot as a standalone PDF and reference it.
                        plot_name = plot.identifier + ".pdf"
                        with storage.write(plot_name, binary=True) as out:
                            plot.save(out, "PDF")
                        container.add_image(plot_name)
                    else:
                        container.append(insert_figure(plot))
                    container.add_caption(plot.identifier)

                logger.debug("Saving plot %s", item.identifier)
                # NOTE(review): this writes to a working-directory-relative
                # path, not through storage, and duplicates the export done
                # above in the multipart branch -- confirm it is intended.
                item.save(key + "_" + item.identifier + '.pdf', "PDF")
            else:
                logger.warning("Unsupported report item type %s", item)

    # TODO: Move to separate function
    #if build:
    #    temp = tempfile.mktemp()
    #    logger.debug("Generating to temporary output %s", temp)
    #    doc.generate_pdf(temp, clean_tex=True)
    #    storage.copy(temp + ".pdf", "report.pdf")
    #else:
    with storage.write("report.tex") as out:
        doc.dump(out)
163 |
--------------------------------------------------------------------------------
/vot/report/report.css:
--------------------------------------------------------------------------------
/*
 * Stylesheet for the generated HTML analysis report (see html.py).
 * Layout: a fixed navigation index bar at the top and a centered content
 * wrapper; media queries switch to full width below 1024px.
 */

body {
    font-family:'Gill Sans', 'Gill Sans MT', Calibri, 'Trebuchet MS', sans-serif;
    background-color: #f8f8f8;
}

#wrapper {
    padding: 50px 20px 40px 20px;
    background-color: white;
    border-left: #dadada 1px solid;
    border-right: #dadada 1px solid;
}

/* Fixed top navigation bar built from the report section index. */
#index {
    position:fixed;
    top: 0;
    left: 0;
    width: 100%;
    height: 50px;
}

#index h2 {
    display: none;
}

#index ol {
    list-style-type: none;
    padding: 0 20px;
    margin: 0;
    background-color: rgba(230, 230, 230, 0.9);
    border: 1px solid #e1e1e1;
    height: 50px;
}

#index li {
    display: inline-block;
    text-align: center;
    line-height: 50px;
    height: 50px;
    padding: 0 10px;
}

#index li a {
    text-decoration: none;
    color: #428BCA;
    font-weight: bold;
    font-size: larger;
}

#index li:hover {
    background-color: #428BCA;
}

#index li:hover a {
    color: white;
}

/* Wide screens: center index and content at a fixed width. */
@media screen and (min-width: 1024px) {
    #index ol {
        width: 984px;
        margin: 0 auto;
    }

    #wrapper {
        width: 984px;
        margin: 0 auto;
    }
}

/* Narrow screens: full-width layout. */
@media screen and (max-width: 1024px) {

    #index {
        width: 100%;
    }

    #wrapper {
        width: 100%;
        height: auto;
    }
}

#metadata {
    list-style-type: none;
    padding: 0;
}

#metadata li {
    margin-bottom: 10px;
}

#metadata li span {
    font-weight: bold;
}

#footer {
    text-align: center;
    margin-top: 20px;
    color: gray;
    font-size: smaller;
}

#footer a {
    color: #428BCA;
    text-decoration: none;
}

td span {
    vertical-align: super;
}

/* Highlight classes for the top-three ranked table cells (see ORDER_CLASSES). */
.first {
    font-weight: bold;
    color: red;
}

.second {
    font-weight: bold;
    color: green;
}

.third {
    font-weight: bold;
    color: blue;
}

table.pure-table-striped tbody tr:hover td {
    background-color: #ebebeb;
}

/* Rows toggled by clicking a tracker (see report.js). */
table.pure-table-striped tbody tr.selected td {
    background-color: #dadada;
}

table.pure-table-striped tbody tr.selected td span {
    font-weight: bold;
}

table.overview-table td svg {
    width: 1em;
    height: 1em;
}

/* Plot groups of trackers that are currently not selected. */
g.blurred {
    opacity: 0.2;
}

div.graph {
    display: inline-block;
}

div.graph p {
    text-align: center;
}
--------------------------------------------------------------------------------
/vot/report/report.js:
--------------------------------------------------------------------------------
/* Interactive behavior of the generated HTML report: clicking a tracker row
   toggles its selection and de-emphasizes (blurs) the plot graphics of all
   unselected trackers. Requires jQuery and the stupidtable plugin (table.js). */

function get_tracker_id(el) {
    /* Extracts the numeric tracker id from an element id of the form "<prefix>_<number>". */
    var id = $(el).attr("id");
    var index = id.lastIndexOf("_");
    return parseInt(id.substring(index+1));
}

function update_selected(el) {
    /* Syncs plot opacity with the current set of selected table rows.
       NOTE(review): the "el" parameter is never used; the function reads the
       selection state from the whole document. */

    var selected = $("tr.selected");
    if (selected.length == 0) {
        // Nothing selected: show all tracker graphics at full opacity.
        $("g.tracker").removeClass("blurred");
    } else {
        // Blur everything, then unblur the graphics of each selected tracker.
        $("g.tracker").addClass("blurred");

        selected.each(function (i, el) {
            var tracker = $(el).data("tracker");
            $("g.tracker_" + tracker).removeClass("blurred");
        });
    }

}

$(function() {

    var trackerNames={};

    // Wire up the legend cells: clicking a row toggles that tracker's selection.
    $("td[id^='legend_']").each(function (i, el) {
        var tracker = get_tracker_id(el);
        $(el).addClass("tracker");
        trackerNames[tracker] = $(el).children("span").text();
        $(el).parent().click(function () {
            $(this).toggleClass("selected");
            update_selected();
        }).data("tracker", tracker);
    });

    // Tag plot groups with per-tracker classes so they can be (un)blurred.
    $("g[id^='report_']").each(function (i, el) {
        var tracker = get_tracker_id(el);
        $(el).addClass("tracker tracker_" + tracker);
        // NOTE(review): $("") is an empty jQuery set, so this .after() call
        // inserts nothing -- presumably a tooltip/title element was intended.
        $(el).find("path").after($("").text(trackerNames[tracker]));
    });

    // Make overview tables sortable.
    $("table.overview-table").stupidtable();

});
--------------------------------------------------------------------------------
/vot/report/table.js:
--------------------------------------------------------------------------------
// Stupid jQuery table plugin.
// Vendored third-party table-sorting plugin; kept byte-for-byte apart from
// this header. Rows are sorted client-side by the data-sort type declared on
// each th (see default_sort_fns); report.js calls .stupidtable() on the
// overview tables.

(function($) {
  $.fn.stupidtable = function(sortFns) {
    return this.each(function() {
      var $table = $(this);
      sortFns = sortFns || {};
      sortFns = $.extend({}, $.fn.stupidtable.default_sort_fns, sortFns);
      $table.data('sortFns', sortFns);
      $table.stupidtable_build();

      $table.on("click.stupidtable", "thead th", function() {
        $(this).stupidsort();
      });

      // Sort th immediately if data-sort-onload="yes" is specified. Limit to
      // the first one found - only one default sort column makes sense anyway.
      var $th_onload_sort = $table.find("th[data-sort-onload=yes]").eq(0);
      $th_onload_sort.stupidsort();
    });
  };

  // ------------------------------------------------------------------
  // Default settings
  // ------------------------------------------------------------------
  $.fn.stupidtable.default_settings = {
    should_redraw: function(sort_info){
      return true;
    },
    will_manually_build_table: false
  };
  $.fn.stupidtable.dir = {ASC: "asc", DESC: "desc"};
  $.fn.stupidtable.default_sort_fns = {
    "int": function(a, b) {
      return parseInt(a, 10) - parseInt(b, 10);
    },
    "float": function(a, b) {
      return parseFloat(a) - parseFloat(b);
    },
    "string": function(a, b) {
      return a.toString().localeCompare(b.toString());
    },
    "string-ins": function(a, b) {
      a = a.toString().toLocaleLowerCase();
      b = b.toString().toLocaleLowerCase();
      return a.localeCompare(b);
    }
  };

  // Allow specification of settings on a per-table basis. Call on a table
  // jquery object. Call *before* calling .stuidtable();
  $.fn.stupidtable_settings = function(settings) {
    return this.each(function() {
      var $table = $(this);
      var final_settings = $.extend({}, $.fn.stupidtable.default_settings, settings);
      $table.stupidtable.settings = final_settings;
    });
  };


  // Expects $("#mytable").stupidtable() to have already been called.
  // Call on a table header.
  $.fn.stupidsort = function(force_direction){
    var $this_th = $(this);
    var datatype = $this_th.data("sort") || null;

    // No datatype? Nothing to do.
    if (datatype === null) {
      return;
    }

    var $table = $this_th.closest("table");

    var sort_info = {
      $th: $this_th,
      $table: $table,
      datatype: datatype
    };


    // Bring in default settings if none provided
    if(!$table.stupidtable.settings){
      $table.stupidtable.settings = $.extend({}, $.fn.stupidtable.default_settings);
    }

    sort_info.compare_fn = $table.data('sortFns')[datatype];
    sort_info.th_index = calculateTHIndex(sort_info);
    sort_info.sort_dir = calculateSortDir(force_direction, sort_info);

    $this_th.data("sort-dir", sort_info.sort_dir);
    $table.trigger("beforetablesort", {column: sort_info.th_index, direction: sort_info.sort_dir, $th: $this_th});

    // More reliable method of forcing a redraw
    $table.css("display");

    // Run sorting asynchronously on a timout to force browser redraw after
    // `beforetablesort` callback. Also avoids locking up the browser too much.
    setTimeout(function() {
      if(!$table.stupidtable.settings.will_manually_build_table){
        $table.stupidtable_build();
      }
      var table_structure = sortTable(sort_info);
      var trs = getTableRowsFromTableStructure(table_structure, sort_info);

      if(!$table.stupidtable.settings.should_redraw(sort_info)){
        return;
      }
      $table.children("tbody").append(trs);

      updateElementData(sort_info);
      $table.trigger("aftertablesort", {column: sort_info.th_index, direction: sort_info.sort_dir, $th: $this_th});
      $table.css("display");

    }, 10);
    return $this_th;
  };

  // Call on a sortable td to update its value in the sort. This should be the
  // only mechanism used to update a cell's sort value. If your display value is
  // different from your sort value, use jQuery's .text() or .html() to update
  // the td contents, Assumes stupidtable has already been called for the table.
  $.fn.updateSortVal = function(new_sort_val){
    var $this_td = $(this);
    if($this_td.is('[data-sort-value]')){
      // For visual consistency with the .data cache
      $this_td.attr('data-sort-value', new_sort_val);
    }
    $this_td.data("sort-value", new_sort_val);
    return $this_td;
  };


  $.fn.stupidtable_build = function(){
    return this.each(function() {
      var $table = $(this);
      var table_structure = [];
      var trs = $table.children("tbody").children("tr");
      trs.each(function(index,tr) {

        // ====================================================================
        // Transfer to using internal table structure
        // ====================================================================
        var ele = {
          $tr: $(tr),
          columns: [],
          index: index
        };

        $(tr).children('td').each(function(idx, td){
          var sort_val = $(td).data("sort-value");

          // Store and read from the .data cache for display text only sorts
          // instead of looking through the DOM every time
          if(typeof(sort_val) === "undefined"){
            var txt = $(td).text();
            $(td).data('sort-value', txt);
            sort_val = txt;
          }
          ele.columns.push(sort_val);
        });
        table_structure.push(ele);
      });
      $table.data('stupidsort_internaltable', table_structure);
    });
  };

  // ====================================================================
  // Private functions
  // ====================================================================
  var sortTable = function(sort_info){
    var table_structure = sort_info.$table.data('stupidsort_internaltable');
    var th_index = sort_info.th_index;
    var $th = sort_info.$th;

    var multicolumn_target_str = $th.data('sort-multicolumn');
    var multicolumn_targets;
    if(multicolumn_target_str){
      multicolumn_targets = multicolumn_target_str.split(',');
    }
    else{
      multicolumn_targets = [];
    }
    var multicolumn_th_targets = $.map(multicolumn_targets, function(identifier, i){
      return get_th(sort_info.$table, identifier);
    });

    table_structure.sort(function(e1, e2){
      var multicolumns = multicolumn_th_targets.slice(0); // shallow copy
      var diff = sort_info.compare_fn(e1.columns[th_index], e2.columns[th_index]);
      while(diff === 0 && multicolumns.length){
        var multicolumn = multicolumns[0];
        var datatype = multicolumn.$e.data("sort");
        var multiCloumnSortMethod = sort_info.$table.data('sortFns')[datatype];
        diff = multiCloumnSortMethod(e1.columns[multicolumn.index], e2.columns[multicolumn.index]);
        multicolumns.shift();
      }
      // Sort by position in the table if values are the same. This enforces a
      // stable sort across all browsers. See https://bugs.chromium.org/p/v8/issues/detail?id=90
      if (diff === 0)
        return e1.index - e2.index;
      else
        return diff;

    });

    if (sort_info.sort_dir != $.fn.stupidtable.dir.ASC){
      table_structure.reverse();
    }
    return table_structure;
  };

  var get_th = function($table, identifier){
    // identifier can be a th id or a th index number;
    var $table_ths = $table.find('th');
    var index = parseInt(identifier, 10);
    var $th;
    if(!index && index !== 0){
      $th = $table_ths.siblings('#' + identifier);
      index = $table_ths.index($th);
    }
    else{
      $th = $table_ths.eq(index);
    }
    return {index: index, $e: $th};
  };

  var getTableRowsFromTableStructure = function(table_structure, sort_info){
    // Gather individual column for callbacks
    var column = $.map(table_structure, function(ele, i){
      return [[ele.columns[sort_info.th_index], ele.$tr, i]];
    });

    /* Side effect */
    sort_info.column = column;

    // Replace the content of tbody with the sorted rows. Strangely
    // enough, .append accomplishes this for us.
    return $.map(table_structure, function(ele) { return ele.$tr; });

  };

  var updateElementData = function(sort_info){
    var $table = sort_info.$table;
    var $this_th = sort_info.$th;
    var sort_dir = $this_th.data('sort-dir');


    // Reset siblings
    $table.find("th").data("sort-dir", null).removeClass("sorting-desc sorting-asc");
    $this_th.data("sort-dir", sort_dir).addClass("sorting-"+sort_dir);
  };

  var calculateSortDir = function(force_direction, sort_info){
    var sort_dir;
    var $this_th = sort_info.$th;
    var dir = $.fn.stupidtable.dir;

    if(force_direction){
      sort_dir = force_direction;
    }
    else{
      sort_dir = force_direction || $this_th.data("sort-default") || dir.ASC;
      if ($this_th.data("sort-dir"))
        sort_dir = $this_th.data("sort-dir") === dir.ASC ? dir.DESC : dir.ASC;
    }
    return sort_dir;
  };

  var calculateTHIndex = function(sort_info){
    var th_index = 0;
    var base_index = sort_info.$th.index();
    sort_info.$th.parents("tr").find("th").slice(0, base_index).each(function() {
      var cols = $(this).attr("colspan") || 1;
      th_index += parseInt(cols,10);
    });
    return th_index;
  };

})(window.jQuery);
--------------------------------------------------------------------------------
/vot/report/tests.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/votchallenge/toolkit/372a6fb167484c3ef121b0b640250deb1fdba0eb/vot/report/tests.py
--------------------------------------------------------------------------------
/vot/report/video.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | from typing import List
4 |
5 | from attributee import Boolean
6 |
7 | from vot.dataset import Sequence
8 | from vot.tracker import Tracker
9 | from vot.experiment.multirun import MultiRunExperiment, Experiment
10 | from vot.report import ObjectVideo, SeparableReport
11 |
class VideoWriter:
    """Abstract base class for frame-by-frame video writers."""

    def __init__(self, filename: str, fps: int = 30):
        """Stores the output filename and frame rate used by subclasses.

        Args:
            filename (str): Path of the video file to create.
            fps (int): Frame rate of the output video.
        """
        self._filename = filename
        self._fps = fps

    def __call__(self, frame):
        """Appends a single frame to the video. Implemented by subclasses."""
        raise NotImplementedError()

    def close(self):
        """Finalizes and closes the video file. Implemented by subclasses."""
        raise NotImplementedError()
23 |
class VideoWriterScikitH264(VideoWriter):
    """Video writer backed by scikit-video's FFmpeg wrapper, encoding H.264."""

    def _handle(self):
        """Lazily creates and caches the underlying FFmpeg writer.

        Returns:
            The cached FFmpegWriter instance.

        Raises:
            ImportError: If scikit-video is not installed.
        """
        try:
            import skvideo.io
        except ImportError:
            raise ImportError("The scikit-video package is required for video export.")
        # Use getattr so a closed writer (reset to None by close()) is
        # recreated instead of being returned dead; the previous hasattr
        # check returned None after close() and crashed on the next write.
        if getattr(self, "_writer", None) is None:
            # NOTE(review): writing an empty clip first presumably validates
            # that the output path is creatable -- confirm it is required.
            skvideo.io.vwrite(self._filename, [])
            self._writer = skvideo.io.FFmpegWriter(self._filename, inputdict={'-r': str(self._fps), '-vcodec': 'libx264'})
        return self._writer

    def __call__(self, frame):
        """Appends a single frame to the video."""
        self._handle().writeFrame(frame)

    def close(self):
        """Finalizes the video file. Safe to call multiple times.

        The previous implementation used hasattr, which stayed True after
        _writer was reset to None, so a second close() raised AttributeError.
        """
        writer = getattr(self, "_writer", None)
        if writer is not None:
            writer.close()
            self._writer = None
43 |
class VideoWriterOpenCV(VideoWriter):
    """Video writer backed by OpenCV's VideoWriter."""

    def __init__(self, filename: str, fps: int = 30, codec: str = "mp4v"):
        """Creates the writer.

        Args:
            filename (str): Output video path.
            fps (int): Frame rate of the output video.
            codec (str): FourCC codec identifier.
        """
        super().__init__(filename, fps)
        self._codec = codec

    def __call__(self, frame):
        """Appends a single RGB frame, lazily creating the writer from the first frame's size.

        Raises:
            ImportError: If OpenCV is not installed.
        """
        try:
            import cv2
        except ImportError:
            raise ImportError("The OpenCV package is required for video export.")
        # The writer is created lazily because the frame size is only known
        # once the first frame arrives; getattr also recreates a writer that
        # was reset to None by close().
        if getattr(self, "_writer", None) is None:
            self._height, self._width = frame.shape[:2]
            self._writer = cv2.VideoWriter(self._filename, cv2.VideoWriter_fourcc(*self._codec.lower()), self._fps, (self._width, self._height))
        # Frames arrive in RGB; OpenCV expects BGR.
        self._writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

    def close(self):
        """Releases the underlying writer. Safe to call multiple times.

        The previous implementation used hasattr, which stayed True after
        _writer was reset to None, so a second close() raised AttributeError.
        """
        writer = getattr(self, "_writer", None)
        if writer is not None:
            writer.release()
            self._writer = None
65 |
class PreviewVideos(SeparableReport):
    """A report that generates video previews for the tracker results."""

    groundtruth = Boolean(default=False, description="If set, the groundtruth is shown with the tracker output.")
    separate = Boolean(default=False, description="If set, each tracker is shown in a separate video.")

    def _draw_groundtruth(self, video, sequence):
        """Draws the groundtruth annotation of every frame under the reserved "_" key."""
        for position in range(len(sequence)):
            video(position, "_", sequence.groundtruth(position))

    def _draw_object(self, video, experiment, tracker, sequence, obj, key):
        """Draws the first gathered trajectory of one object under the given key, if any exists."""
        trajectories = experiment.gather(tracker, sequence, objects=[obj])
        if not trajectories:
            return
        for position in range(len(sequence)):
            video(position, key, trajectories[0].region(position))

    async def perexperiment(self, experiment: Experiment, trackers: List[Tracker], sequences: List[Sequence]):
        """Generates preview videos for the given trackers and sequences.

        Depending on the `separate` flag, either one video per sequence and
        tracker is produced, or a single video per sequence containing all
        trackers (object keys suffixed with the tracker identifier).
        """
        videos = []

        for sequence in sequences:

            if self.separate:
                for tracker in trackers:
                    video = ObjectVideo(sequence.identifier + "_" + tracker.identifier, sequence)
                    if self.groundtruth:
                        self._draw_groundtruth(video, sequence)
                    for obj in sequence.objects():
                        self._draw_object(video, experiment, tracker, sequence, obj, obj)
                    videos.append(video)
            else:
                video = ObjectVideo(sequence.identifier, sequence)
                if self.groundtruth:
                    self._draw_groundtruth(video, sequence)
                for tracker in trackers:
                    for obj in sequence.objects():
                        self._draw_object(video, experiment, tracker, sequence, obj, obj + "_" + tracker.identifier)
                videos.append(video)

        return videos

    def compatible(self, experiment):
        """Only multi-run experiments are supported by this report."""
        return isinstance(experiment, MultiRunExperiment)
--------------------------------------------------------------------------------
/vot/stack/__init__.py:
--------------------------------------------------------------------------------
1 | """Stacks are collections of experiments that are grouped together for convenience. Stacks are used to organize experiments and to run them in
2 | batch mode.
3 | """
4 | import os
5 | from typing import List, Mapping
6 |
7 | import yaml
8 |
9 | from attributee import Attributee, String, Boolean, Map, Object
10 | from attributee.io import Serializable
11 |
12 | from vot.experiment import Experiment, experiment_registry
13 |
14 |
def experiment_resolver(typename, context, **kwargs):
    """Resolves experiment objects from stack definitions. Used internally by the
    stack module when deserializing stack files; not intended for direct use.

    Args:
        typename (str): Name of the experiment class
        context (Attributee): Context of the experiment
        kwargs (dict): Additional arguments

    Returns:
        Experiment: Experiment object
    """

    from vot.utilities import import_class

    identifier = context.key

    # Storage is taken from the enclosing workspace when one is attached.
    workspace = getattr(context.parent, "workspace", None)
    storage = workspace.storage if workspace is not None else None

    if typename not in experiment_registry:
        # Not a registered alias, treat the name as a fully qualified class path.
        experiment_class = import_class(typename)
        assert issubclass(experiment_class, Experiment)
        return experiment_class(_identifier=identifier, _storage=storage, **kwargs)

    experiment = experiment_registry.get(typename, _identifier=identifier, _storage=storage, **kwargs)
    assert isinstance(experiment, Experiment)
    return experiment
44 |
class Stack(Attributee, Serializable):
    """A named collection of experiments that can be organized and executed in batch mode."""

    title = String(default="Stack")
    dataset = String(default=None)
    url = String(default="")
    deprecated = Boolean(default=False)
    experiments = Map(Object(experiment_resolver))

    @property
    def name(self):
        """Returns the name assigned to the stack, or None when not set."""
        return getattr(self, "_name", None)

    def __iter__(self):
        """Returns an iterator over the experiment objects in the stack."""
        return iter(self.experiments.values())

    def __len__(self):
        """Returns how many experiments the stack contains."""
        return len(self.experiments)

    def __getitem__(self, identifier):
        """Looks up an experiment by its identifier.

        Args:
            identifier (str): Identifier of the experiment

        Returns:
            Experiment: Experiment object
        """
        return self.experiments[identifier]
79 |
80 |
def resolve_stack(name: str, *directories: List[str]) -> str:
    """Searches for a stack file and returns its absolute path. If given an
    absolute path as input it simply returns it (when the file exists).

    Lookup order: the absolute path itself, then each of the given
    directories, then the stacks integrated in the toolkit (the directory of
    this module, with a ".yaml" suffix appended to the name).

    Args:
        name (str): Name of the stack
        directories (List[str]): Directories that will be searched first

    Returns:
        str: Absolute path to stack file, or None when not found
    """
    if os.path.isabs(name):
        return name if os.path.isfile(name) else None
    for directory in directories:
        full = os.path.join(directory, name)
        if os.path.isfile(full):
            return full
    # Fix: this fallback used to live inside the loop above, so integrated
    # stacks were never resolved when no directories were supplied.
    full = os.path.join(os.path.dirname(__file__), name + ".yaml")
    if os.path.isfile(full):
        return full
    return None
102 |
def list_integrated_stacks() -> Mapping[str, str]:
    """List stacks that come with the toolkit

    Returns:
        Map[str, str]: A mapping of stack ids and stack title pairs
    """

    from pathlib import Path

    root = Path(os.path.dirname(__file__))
    stacks = {}

    for path in root.rglob("*.yaml"):
        with open(path, 'r') as handle:
            metadata = yaml.load(handle, Loader=yaml.BaseLoader)
        if metadata is None:
            # Empty definition files carry no stack.
            continue
        # Stack id is the path relative to this module, without the extension.
        identifier = str(path.relative_to(root).with_name(path.stem))
        stacks[identifier] = metadata.get("title", "")

    return stacks
--------------------------------------------------------------------------------
/vot/stack/otb100.yaml:
--------------------------------------------------------------------------------
1 | title: OTB100 dataset experiment stack
2 | url: http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html
3 | dataset: otb100
4 | experiments:
5 | baseline:
6 | type: unsupervised
7 | analyses:
8 | - type: average_accuracy
9 | name: accuracy
10 | burnin: 1
--------------------------------------------------------------------------------
/vot/stack/otb50.yaml:
--------------------------------------------------------------------------------
1 | title: OTB50 dataset experiment stack
2 | url: http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html
3 | dataset: otb50
4 | experiments:
5 | baseline:
6 | type: unsupervised
7 | analyses:
8 | - type: average_accuracy
9 | name: accuracy
10 | burnin: 1
--------------------------------------------------------------------------------
/vot/stack/tests.py:
--------------------------------------------------------------------------------
1 | """Tests for the experiment stack module."""
2 |
3 | import unittest
4 | import yaml
5 |
6 | from vot.workspace import NullStorage
7 | from vot.stack import Stack, list_integrated_stacks, resolve_stack
8 |
class NoWorkspace:
    """Minimal workspace stand-in whose storage discards everything."""

    @property
    def storage(self):
        """Returns a null storage that does not persist any data."""
        return NullStorage()
17 |
class TestStacks(unittest.TestCase):
    """Tests for the experiment stack utilities
    """

    def test_stacks(self):
        """Verifies that every integrated stack definition loads and instantiates cleanly."""

        for name in list_integrated_stacks():
            try:
                with open(resolve_stack(name), 'r') as handle:
                    metadata = yaml.load(handle, Loader=yaml.BaseLoader)
                Stack(name, NoWorkspace(), **metadata)
            except Exception as e:
                self.fail("Stack {}: {}".format(name, e))
--------------------------------------------------------------------------------
/vot/stack/tests/basic.yaml:
--------------------------------------------------------------------------------
1 | title: VOT Basic Test Stack
2 | url: http://www.votchallenge.net/
3 | dataset: https://data.votchallenge.net/toolkit/test.zip
4 | experiments:
5 | baseline:
6 | type: unsupervised
7 | repetitions: 1
--------------------------------------------------------------------------------
/vot/stack/tests/multiobject.yaml:
--------------------------------------------------------------------------------
1 | title: VOTS2023 Test Stack
2 | dataset: https://data.votchallenge.net/vots2023/test/description.json
3 | experiments:
4 | baseline:
5 | type: unsupervised
6 | repetitions: 1
7 | multiobject: True
8 | analyses:
9 | - type: average_accuracy
10 | name: Quality
11 | burnin: 0
12 | ignore_unknown: False
13 | weighted: False
14 | - type: average_success_plot
15 | name: Quality plot
16 | burnin: 0
17 | ignore_unknown: False
18 | - type: longterm_ar
19 | name: AR
20 | - type: average_quality_auxiliary
21 | name: Auxiliary
--------------------------------------------------------------------------------
/vot/stack/tests/segmentation.yaml:
--------------------------------------------------------------------------------
1 | title: VOT Segmentation testing
2 | url: http://www.votchallenge.net/
3 | dataset: http://box.vicos.si/tracking/vot20_test_dataset.zip
4 | experiments:
5 | baseline:
6 | type: multistart
7 | analyses:
8 | - type: multistart_average_ar
9 | - type: multistart_eao_score
10 | low: 100
11 | high: 300
12 | realtime:
13 | type: multistart
14 | realtime:
15 | grace: 3
16 | analyses:
17 | - type: multistart_average_ar
18 | - type: multistart_eao_score
19 | low: 100
20 | high: 300
21 | redetection:
22 | type: multistart
23 | transformers:
24 | - type: redetection
25 | length: 200
26 | initialization: 5
27 | padding: 2
28 | scaling: 3
--------------------------------------------------------------------------------
/vot/stack/vot2013.yaml:
--------------------------------------------------------------------------------
1 | title: VOT2013 challenge
2 | url: http://www.votchallenge.net/vot2013/
3 | dataset: http://data.votchallenge.net/vot2013/dataset/description.json
4 | deprecated: True
5 | experiments:
6 | baseline:
7 | type: supervised
8 | repetitions: 15
9 | skip_initialize: 5
10 | analyses:
11 | - type: supervised_average_ar
12 | sensitivity: 30
13 | # TODO: missing experiments
14 |
15 |
--------------------------------------------------------------------------------
/vot/stack/vot2014.yaml:
--------------------------------------------------------------------------------
1 | title: VOT2014 challenge
2 | dataset: http://data.votchallenge.net/vot2014/dataset/description.json
3 | url: http://www.votchallenge.net/vot2014/
4 | deprecated: True
5 | experiments:
6 | baseline:
7 | type: supervised
8 | repetitions: 15
9 | skip_initialize: 5
10 | analyses:
11 | - type: supervised_average_ar
12 | sensitivity: 30
13 | - type: cumulative_failures
14 | # TODO: region noise
--------------------------------------------------------------------------------
/vot/stack/vot2015/rgb.yaml:
--------------------------------------------------------------------------------
1 | title: VOT2015 challenge
2 | dataset: http://data.votchallenge.net/vot2015/dataset/description.json
3 | url: http://www.votchallenge.net/vot2015/
4 | experiments:
5 | baseline:
6 | type: supervised
7 | repetitions: 15
8 | skip_initialize: 5
9 | analyses:
10 | - type: supervised_average_ar
11 | sensitivity: 30
12 | - type: supervised_eao_score
13 | low: 108
14 | high: 371
15 | - type: supervised_eao_curve
--------------------------------------------------------------------------------
/vot/stack/vot2015/tir.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-TIR2015 challenge
2 | dataset: http://www.cvl.isy.liu.se/research/datasets/ltir/version1.0/ltir_v1_0_8bit.zip
3 | url: http://www.votchallenge.net/vot2015/
4 | experiments:
5 | baseline:
6 | type: supervised
7 | repetitions: 15
8 | skip_initialize: 5
9 | analyses:
10 | - type: supervised_average_ar
11 | sensitivity: 30
12 |
--------------------------------------------------------------------------------
/vot/stack/vot2016/rgb.yaml:
--------------------------------------------------------------------------------
1 | title: VOT2016 challenge
2 | dataset: https://data.votchallenge.net/vot2016/main/description.json
3 | url: https://www.votchallenge.net/vot2016/
4 | experiments:
5 | baseline:
6 | type: supervised
7 | repetitions: 15
8 | skip_initialize: 5
9 | analyses:
10 | - type: supervised_average_ar
11 | sensitivity: 30
12 | - type: cumulative_failures
13 | - type: supervised_eao_score
14 | low: 108
15 | high: 371
16 | - type: supervised_eao_curve
17 | unsupervised:
18 | type: unsupervised
19 | repetitions: 1
20 | analyses:
21 | - type: average_accuracy
22 |
--------------------------------------------------------------------------------
/vot/stack/vot2016/tir.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-TIR2016 challenge
2 | dataset: https://data.votchallenge.net/vot2016/tir/description.json
3 | url: https://www.votchallenge.net/vot2016/
4 | experiments:
5 | baseline:
6 | type: supervised
7 | repetitions: 15
8 | skip_initialize: 5
9 | analyses:
10 | - type: supervised_average_ar
11 | sensitivity: 30
12 |
--------------------------------------------------------------------------------
/vot/stack/vot2017.yaml:
--------------------------------------------------------------------------------
1 | title: VOT2017 challenge
2 | dataset: http://data.votchallenge.net/vot2017/main/description.json
3 | url: http://www.votchallenge.net/vot2017/
4 | experiments:
5 | baseline:
6 | type: supervised
7 | repetitions: 15
8 | skip_initialize: 5
9 | analyses:
10 | - type: supervised_average_ar
11 | sensitivity: 30
12 | - type: cumulative_failures
13 | - type: supervised_eao_score
14 | low: 100
15 | high: 356
16 | - type: supervised_eao_curve
17 | realtime:
18 | type: supervised
19 | realtime:
20 | grace: 3
21 | repetitions: 1
22 | skip_initialize: 5
23 | analyses:
24 | - type: supervised_average_ar
25 | sensitivity: 30
26 | - type: cumulative_failures
27 | - type: supervised_eao_score
28 | low: 100
29 | high: 356
30 | - type: supervised_eao_curve
31 | unsupervised:
32 | type: unsupervised
33 | repetitions: 1
34 | analyses:
35 | - type: average_accuracy
--------------------------------------------------------------------------------
/vot/stack/vot2018/longterm.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-LT2018 challenge
2 | dataset: http://data.votchallenge.net/vot2018/longterm/description.json
3 | url: http://www.votchallenge.net/vot2018/
4 | experiments:
5 | longterm:
6 | type: unsupervised
7 | repetitions: 1
8 | analyses:
9 | - type: average_tpr
10 | name: average_tpr
11 | - type: pr_curve
12 | - type: f_curve
13 | redetection:
14 | type: unsupervised
15 | transformers:
16 | - type: redetection
17 | length: 200
18 | initialization: 5
19 | padding: 2
20 | scaling: 3
21 |
--------------------------------------------------------------------------------
/vot/stack/vot2018/shortterm.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-ST2018 challenge
2 | dataset: http://data.votchallenge.net/vot2018/main/description.json
3 | url: http://www.votchallenge.net/vot2018/
4 | experiments:
5 | baseline:
6 | type: supervised
7 | repetitions: 15
8 | skip_initialize: 5
9 | analyses:
10 | - type: supervised_average_ar
11 | sensitivity: 30
12 | - type: supervised_eao_score
13 | low: 100
14 | high: 356
15 | - type: supervised_eao_curve
16 | realtime:
17 | type: supervised
18 | realtime:
19 | grace: 3
20 | repetitions: 1
21 | skip_initialize: 5
22 | analyses:
23 | - type: supervised_average_ar
24 | sensitivity: 30
25 | - type: supervised_eao_score
26 | low: 100
27 | high: 356
28 | - type: supervised_eao_curve
29 | unsupervised:
30 | type: unsupervised
31 | repetitions: 1
32 | analyses:
33 | - type: average_accuracy
--------------------------------------------------------------------------------
/vot/stack/vot2019/longterm.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-LT2019 challenge
2 | dataset: http://data.votchallenge.net/vot2019/longterm/description.json
3 | url: http://www.votchallenge.net/vot2019/
4 | experiments:
5 | longterm:
6 | type: unsupervised
7 | repetitions: 1
8 | analyses:
9 | - type: average_tpr
10 | name: average_tpr
11 | - type: pr_curve
12 | - type: f_curve
13 | redetection:
14 | type: unsupervised
15 | transformers:
16 | - type: redetection
17 | length: 200
18 | initialization: 5
19 | padding: 2
20 | scaling: 3
21 |
--------------------------------------------------------------------------------
/vot/stack/vot2019/rgbd.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-RGBD2019 challenge
2 | dataset: http://data.votchallenge.net/vot2019/rgbd/description.json
3 | url: http://www.votchallenge.net/vot2019/
4 | experiments:
5 | rgbd-unsupervised:
6 | type: unsupervised
7 | repetitions: 1
8 | analyses:
9 | - type: average_tpr
10 | - type: pr_curve
11 | - type: f_curve
--------------------------------------------------------------------------------
/vot/stack/vot2019/rgbtir.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-RGBTIR2019 challenge
2 | dataset: http://data.votchallenge.net/vot2019/rgbtir/meta/description.json
3 | url: http://www.votchallenge.net/vot2019/
4 | experiments:
5 | baseline:
6 | type: multistart
7 | realtime:
8 | grace: 3
9 | analyses:
10 | - type: multistart_average_ar
11 | - type: multistart_eao_score
12 | low: 115
13 | high: 755
14 | - type: multistart_eao_curve
15 | high: 755
--------------------------------------------------------------------------------
/vot/stack/vot2019/shortterm.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-ST2019 challenge
2 | dataset: http://data.votchallenge.net/vot2019/main/description.json
3 | url: http://www.votchallenge.net/vot2019/
4 | experiments:
5 | baseline:
6 | type: supervised
7 | repetitions: 15
8 | skip_initialize: 5
9 | analyses:
10 | - type: supervised_average_ar
11 | sensitivity: 30
12 | - type: supervised_eao_score
13 | low: 46
14 | high: 291
15 | - type: supervised_eao_curve
16 | realtime:
17 | type: supervised
18 | realtime:
19 | grace: 3
20 | repetitions: 1
21 | skip_initialize: 5
22 | analyses:
23 | - type: supervised_average_ar
24 | sensitivity: 30
25 | - type: supervised_eao_score
26 | low: 46
27 | high: 291
28 | - type: supervised_eao_curve
29 | unsupervised:
30 | type: unsupervised
31 | repetitions: 1
32 | analyses:
33 | - type: average_accuracy
--------------------------------------------------------------------------------
/vot/stack/vot2020/longterm.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-LT2020 challenge
2 | dataset: http://data.votchallenge.net/vot2019/longterm/description.json
3 | url: http://www.votchallenge.net/vot2020/
4 | experiments:
5 | longterm:
6 | type: unsupervised
7 | repetitions: 1
8 | analyses:
9 | - type: average_tpr
10 | name: average_tpr
11 | - type: pr_curve
12 | - type: f_curve
13 | redetection:
14 | type: unsupervised
15 | transformers:
16 | - type: redetection
17 | length: 200
18 | initialization: 5
19 | padding: 2
20 | scaling: 3
21 |
--------------------------------------------------------------------------------
/vot/stack/vot2020/rgbd.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-RGBD2020 challenge
2 | dataset: http://data.votchallenge.net/vot2019/rgbd/description.json
3 | url: http://www.votchallenge.net/vot2020/
4 | experiments:
5 | rgbd-unsupervised:
6 | type: unsupervised
7 | repetitions: 1
8 | analyses:
9 | - type: average_tpr
10 | - type: pr_curve
11 | - type: f_curve
--------------------------------------------------------------------------------
/vot/stack/vot2020/rgbtir.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-RGBTIR2020 challenge
2 | dataset: http://data.votchallenge.net/vot2020/rgbtir/meta/description.json
3 | url: http://www.votchallenge.net/vot2020/
4 | experiments:
5 | baseline:
6 | type: multistart
7 | realtime:
8 | grace: 3
9 | analyses:
10 | - type: multistart_average_ar
11 | - type: multistart_eao_score
12 | low: 115
13 | high: 755
14 | - type: multistart_eao_curve
15 | high: 755
--------------------------------------------------------------------------------
/vot/stack/vot2020/shortterm.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-ST2020 challenge
2 | dataset: https://data.votchallenge.net/vot2020/shortterm/description.json
3 | url: http://www.votchallenge.net/vot2020/
4 | experiments:
5 | baseline:
6 | type: multistart
7 | analyses:
8 | - type: multistart_eao_score
9 | name: eaoscore
10 | low: 115
11 | high: 755
12 | - type: multistart_eao_curve
13 | name: eaocurve
14 | high: 755
15 | - type: multistart_average_ar
16 | name: ar
17 | realtime:
18 | type: multistart
19 | realtime:
20 | grace: 3
21 | analyses:
22 | - type: multistart_eao_score
23 | name: eaoscore
24 | low: 115
25 | high: 755
26 | - type: multistart_eao_curve
27 | name: eaocurve
28 | high: 755
29 | - type: multistart_average_ar
30 | name: ar
31 | unsupervised:
32 | type: unsupervised
33 | repetitions: 1
34 | analyses:
35 | - type: average_accuracy
36 | name: accuracy
37 | burnin: 1
--------------------------------------------------------------------------------
/vot/stack/vot2021/longterm.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-LT2021 challenge
2 | dataset: http://data.votchallenge.net/vot2019/longterm/description.json
3 | url: http://www.votchallenge.net/vot2021/
4 | experiments:
5 | longterm:
6 | type: unsupervised
7 | repetitions: 1
8 | analyses:
9 | - type: average_tpr
10 | name: average_tpr
11 | - type: pr_curve
12 | - type: f_curve
13 | redetection:
14 | type: unsupervised
15 | transformers:
16 | - type: redetection
17 | length: 200
18 | initialization: 5
19 | padding: 2
20 | scaling: 3
21 |
--------------------------------------------------------------------------------
/vot/stack/vot2021/rgbd.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-RGBD2021 challenge
2 | dataset: http://data.votchallenge.net/vot2019/rgbd/description.json
3 | url: http://www.votchallenge.net/vot2021/
4 | experiments:
5 | rgbd-unsupervised:
6 | type: unsupervised
7 | repetitions: 1
8 | analyses:
9 | - type: average_tpr
10 | - type: pr_curve
11 | - type: f_curve
--------------------------------------------------------------------------------
/vot/stack/vot2021/shortterm.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-ST2021 challenge
2 | dataset: https://data.votchallenge.net/vot2021/shortterm/description.json
3 | url: http://www.votchallenge.net/vot2021/
4 | experiments:
5 | baseline:
6 | type: multistart
7 | analyses:
8 | - type: multistart_eao_score
9 | name: eaoscore
10 | low: 115
11 | high: 755
12 | - type: multistart_eao_curve
13 | name: eaocurve
14 | high: 755
15 | - type: multistart_average_ar
16 | name: ar
17 | realtime:
18 | type: multistart
19 | realtime:
20 | grace: 3
21 | analyses:
22 | - type: multistart_eao_score
23 | name: eaoscore
24 | low: 115
25 | high: 755
26 | - type: multistart_eao_curve
27 | name: eaocurve
28 | high: 755
29 | - type: multistart_average_ar
30 | name: ar
31 | unsupervised:
32 | type: unsupervised
33 | repetitions: 1
34 | analyses:
35 | - type: average_accuracy
36 | name: accuracy
37 | burnin: 1
--------------------------------------------------------------------------------
/vot/stack/vot2022/depth.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-D2022 challenge
2 | dataset: https://data.votchallenge.net/vot2022/depth/description.json
3 | url: https://www.votchallenge.net/vot2022/
4 | experiments:
5 | baseline:
6 | type: multistart
7 | analyses:
8 | - type: multistart_eao_score
9 | name: eaoscore
10 | low: 115
11 | high: 755
12 | - type: multistart_eao_curve
13 | name: eaocurve
14 | high: 755
15 | - type: multistart_average_ar
16 | name: ar
17 |
--------------------------------------------------------------------------------
/vot/stack/vot2022/longterm.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-LT2022 challenge
2 | dataset: https://data.votchallenge.net/vot2022/lt/description.json
3 | url: https://www.votchallenge.net/vot2022/
4 | experiments:
5 | longterm:
6 | type: unsupervised
7 | repetitions: 1
8 | analyses:
9 | - type: average_tpr
10 | name: average_tpr
11 | - type: pr_curve
12 | - type: f_curve
13 | redetection:
14 | type: unsupervised
15 | transformers:
16 | - type: redetection
17 | length: 200
18 | initialization: 5
19 | padding: 2
20 | scaling: 3
21 |
--------------------------------------------------------------------------------
/vot/stack/vot2022/rgbd.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-RGBD2022 challenge
2 | dataset: https://data.votchallenge.net/vot2022/rgbd/description.json
3 | url: https://www.votchallenge.net/vot2022/
4 | experiments:
5 | baseline:
6 | type: multistart
7 | analyses:
8 | - type: multistart_eao_score
9 | name: eaoscore
10 | low: 115
11 | high: 755
12 | - type: multistart_eao_curve
13 | name: eaocurve
14 | high: 755
15 | - type: multistart_average_ar
16 | name: ar
17 |
--------------------------------------------------------------------------------
/vot/stack/vot2022/shortterm.yaml:
--------------------------------------------------------------------------------
title: VOT-ST2022 segmentation challenge
2 | dataset: https://data.votchallenge.net/vot2022/sts/description.json
3 | url: https://www.votchallenge.net/vot2022/
4 | experiments:
5 | baseline:
6 | type: multistart
7 | analyses:
8 | - type: multistart_eao_score
9 | name: eaoscore
10 | low: 115
11 | high: 755
12 | - type: multistart_eao_curve
13 | name: eaocurve
14 | high: 755
15 | - type: multistart_average_ar
16 | name: ar
17 | realtime:
18 | type: multistart
19 | realtime:
20 | grace: 3
21 | analyses:
22 | - type: multistart_eao_score
23 | name: eaoscore
24 | low: 115
25 | high: 755
26 | - type: multistart_eao_curve
27 | name: eaocurve
28 | high: 755
29 | - type: multistart_average_ar
30 | name: ar
31 | unsupervised:
32 | type: unsupervised
33 | repetitions: 1
34 | analyses:
35 | - type: average_accuracy
36 | name: accuracy
37 | burnin: 1
--------------------------------------------------------------------------------
/vot/stack/vot2022/shorttermbox.yaml:
--------------------------------------------------------------------------------
1 | title: VOT-ST2022 bounding-box challenge
2 | dataset: https://data.votchallenge.net/vot2022/stb/description.json
3 | url: https://www.votchallenge.net/vot2022/
4 | experiments:
5 | baseline:
6 | type: multistart
7 | analyses:
8 | - type: multistart_eao_score
9 | name: eaoscore
10 | low: 115
11 | high: 755
12 | - type: multistart_eao_curve
13 | name: eaocurve
14 | high: 755
15 | - type: multistart_average_ar
16 | name: ar
17 | realtime:
18 | type: multistart
19 | realtime:
20 | grace: 3
21 | analyses:
22 | - type: multistart_eao_score
23 | name: eaoscore
24 | low: 115
25 | high: 755
26 | - type: multistart_eao_curve
27 | name: eaocurve
28 | high: 755
29 | - type: multistart_average_ar
30 | name: ar
31 | unsupervised:
32 | type: unsupervised
33 | repetitions: 1
34 | analyses:
35 | - type: average_accuracy
36 | name: accuracy
37 | burnin: 1
--------------------------------------------------------------------------------
/vot/stack/vots2023.yaml:
--------------------------------------------------------------------------------
1 | title: VOTS2023 Challenge Stack
2 | dataset: https://data.votchallenge.net/vots2023/dataset/description.json
3 | experiments:
4 | baseline:
5 | type: unsupervised
6 | repetitions: 1
7 | multiobject: True
--------------------------------------------------------------------------------
/vot/stack/vots2024/main.yaml:
--------------------------------------------------------------------------------
1 | title: VOTS2024 Challenge Stack
2 | dataset: https://data.votchallenge.net/vots2023/dataset/description.json
3 | experiments:
4 | baseline:
5 | type: unsupervised
6 | repetitions: 1
7 | multiobject: True
--------------------------------------------------------------------------------
/vot/stack/vots2024/votst.yaml:
--------------------------------------------------------------------------------
1 | title: VOTS2024 ST Challenge Stack
2 | dataset: https://data.votchallenge.net/vots2024/vost/description.json
3 | experiments:
4 | baseline:
5 | type: unsupervised
6 | repetitions: 1
7 | multiobject: True
--------------------------------------------------------------------------------
/vot/stack/vots2024/votstval.yaml:
--------------------------------------------------------------------------------
1 | title: VOTS2024 ST Challenge Validation Stack
2 | dataset: https://data.votchallenge.net/vots2024/vost-val/description.json
3 | experiments:
4 | baseline:
5 | type: unsupervised
6 | repetitions: 1
7 | multiobject: True
8 | analyses:
9 | - type: average_accuracy
10 | name: Quality
11 | burnin: 0
12 | ignore_unknown: False
13 | weighted: False
14 | filter_tag: evaluation
15 | - type: average_success_plot
16 | name: Quality plot
17 | burnin: 0
18 | ignore_unknown: False
19 | filter_tag: evaluation
20 | - type: longterm_ar
21 | name: AR
22 | filter_tag: evaluation
23 | - type: average_quality_auxiliary
24 | name: Auxiliary
25 | filter_tag: evaluation
--------------------------------------------------------------------------------
/vot/stack/vots2025/main.yaml:
--------------------------------------------------------------------------------
1 | title: VOTS2025 Challenge Stack
2 | dataset: https://data.votchallenge.net/vots2023/dataset/description.json
3 | experiments:
4 | baseline:
5 | type: unsupervised
6 | repetitions: 1
7 | multiobject: True
8 |
--------------------------------------------------------------------------------
/vot/stack/vots2025/realtime.yaml:
--------------------------------------------------------------------------------
1 | title: VOTS2025-RT Challenge Stack
2 | dataset: https://data.votchallenge.net/vots2023/dataset/description.json
3 | experiments:
4 | baseline:
5 | type: unsupervised
6 | repetitions: 1
7 | multiobject: True
8 | realtime:
9 | type: unsupervised
10 | repetitions: 1
11 | multiobject: True
12 | realtime:
13 | grace: 3
14 |
--------------------------------------------------------------------------------
/vot/stack/vots2025/votst.yaml:
--------------------------------------------------------------------------------
1 | title: VOTST2025 Challenge Stack
2 | dataset: https://data.votchallenge.net/vots2024/vost-val/description.json
3 | experiments:
4 | baseline:
5 | type: unsupervised
6 | repetitions: 1
7 | multiobject: True
8 |
--------------------------------------------------------------------------------
/vot/tracker/dummy.py:
--------------------------------------------------------------------------------
1 | """ Dummy tracker for testing purposes. """
2 |
3 | from __future__ import absolute_import
4 | import os
5 | from sys import path
6 | import time
7 |
def _main():
    """Dummy tracker main loop for testing purposes.

    Serves rectangle regions over the TraX protocol, echoing back the objects
    received at initialization, until a quit or error message arrives.
    """
    from trax import Image, Region, Server, TraxStatus

    objects = None
    with Server([Region.RECTANGLE], [Image.PATH]) as server:
        while True:
            message = server.wait()
            if message.type in [TraxStatus.QUIT, TraxStatus.ERROR]:
                break
            if message.type == TraxStatus.INITIALIZE:
                objects = message.objects
            # Report the (unchanged) objects for every frame, with a short
            # artificial delay.
            server.status(objects)
            time.sleep(0.1)
22 |
if __name__ == '__main__':
    # Executed as a script: act as the dummy TraX tracker process.
    _main()
else:
    # Imported as a module: expose a ready-made tracker handle for tests.
    from . import Tracker

    # Constructs a Tracker descriptor pointing back at this file via the
    # "traxpython" protocol; presumably used by the toolkit's test suite.
    DummyTracker = Tracker("dummy", __file__, "vot.tracker.dummy", "traxpython", paths=[])
29 |
30 |
--------------------------------------------------------------------------------
/vot/tracker/results.py:
--------------------------------------------------------------------------------
1 |
2 | """Results module for storing and retrieving tracker results."""
3 |
4 | import os
5 | import fnmatch
6 | from typing import List
7 | from copy import copy
8 | from vot.region import Region, RegionType, Special, calculate_overlap
9 | from vot.region.io import write_trajectory, read_trajectory
10 | from vot.utilities import to_string
11 |
class Results(object):
    """Thin abstraction over a storage backend used to persist tracker results."""

    def __init__(self, storage: "Storage"):
        """Creates a results interface backed by the given storage.

        Args:
            storage (Storage): Storage interface
        """
        self._storage = storage

    def exists(self, name):
        """Checks whether a file is present in the results storage.

        Args:
            name (str): File name

        Returns:
            bool: True if the file exists
        """
        return self._storage.isdocument(name)

    def read(self, name):
        """Opens a file from the results storage for reading.

        Files with the ".bin" extension are opened in binary mode.

        Args:
            name (str): File name

        Returns:
            file: File handle
        """
        binary = name.endswith(".bin")
        return self._storage.read(name, binary=True) if binary else self._storage.read(name)

    def write(self, name: str):
        """Opens a file in the results storage for writing.

        Files with the ".bin" extension are opened in binary mode.

        Args:
            name (str): File name

        Returns:
            file: File handle
        """
        binary = name.endswith(".bin")
        return self._storage.write(name, binary=True) if binary else self._storage.write(name)

    def find(self, pattern):
        """Lists files in the results storage matching a glob-style pattern.

        Args:
            pattern (str): Pattern

        Returns:
            list: List of matching file names
        """
        return fnmatch.filter(self._storage.documents(), pattern)
71 |
class Trajectory(object):
    """Trajectory class for storing and retrieving tracker trajectories."""

    # Codes for special (non-shape) regions marking frame states.
    UNKNOWN = 0
    INITIALIZATION = 1
    FAILURE = 2

    @classmethod
    def exists(cls, results: Results, name: str) -> bool:
        """Returns true if the trajectory exists in the results storage.

        Args:
            results (Results): Results storage
            name (str): Trajectory name (without extension)

        Returns:
            bool: True if the trajectory exists
        """
        return results.exists(name + ".bin") or results.exists(name + ".txt")

    @classmethod
    def gather(cls, results: Results, name: str) -> list:
        """Returns a list of files that are part of the trajectory.

        Args:
            results (Results): Results storage
            name (str): Trajectory name (without extension)

        Returns:
            list: List of files, empty if the trajectory does not exist
        """

        if results.exists(name + ".bin"):
            files = [name + ".bin"]
        elif results.exists(name + ".txt"):
            files = [name + ".txt"]
        else:
            return []

        for propertyfile in results.find(name + "_*.value"):
            files.append(propertyfile)

        return files

    @classmethod
    def read(cls, results: Results, name: str) -> 'Trajectory':
        """Reads a trajectory from the results storage.

        Args:
            results (Results): Results storage
            name (str): Trajectory name (without extension)

        Raises:
            FileNotFoundError: If no trajectory data is found

        Returns:
            Trajectory: Trajectory
        """

        def parse_float(line):
            """Parses a float from a line; an empty line maps to None.

            Args:
                line (str): Line

            Returns:
                float: Float value or None
            """
            if not line.strip():
                return None
            return float(line.strip())

        if results.exists(name + ".txt"):
            with results.read(name + ".txt") as fp:
                regions = read_trajectory(fp)
        elif results.exists(name + ".bin"):
            with results.read(name + ".bin") as fp:
                regions = read_trajectory(fp)
        else:
            raise FileNotFoundError("Trajectory data not found: {}".format(name))

        trajectory = Trajectory(len(regions))
        trajectory._regions = regions

        # Bug fix: match only this trajectory's property files using the same
        # "name_" prefix as gather(). The previous pattern (name + "*.value")
        # also matched files of other trajectories whose name shares a prefix
        # (e.g. "seq1" picking up "seq10_time.value") and mangled the parsed
        # property names via the fixed len(name)+1 offset below.
        for propertyfile in results.find(name + "_*.value"):
            with results.read(propertyfile) as filehandle:
                # Property name is everything after "<name>_" in the file stem.
                propertyname = os.path.splitext(os.path.basename(propertyfile))[0][len(name)+1:]
                lines = list(filehandle.readlines())
                try:
                    # Numeric properties are parsed as floats ...
                    trajectory._properties[propertyname] = [parse_float(line) for line in lines]
                except ValueError:
                    # ... non-numeric ones fall back to raw string values.
                    trajectory._properties[propertyname] = [line.strip() for line in lines]

        return trajectory

    def __init__(self, length: int):
        """Creates a new trajectory of the given length, with all frames
        initialized to the special UNKNOWN region.

        Args:
            length (int): Trajectory length
        """
        self._regions = [Special(Trajectory.UNKNOWN)] * length
        self._properties = dict()

    def set(self, frame: int, region: Region, properties: dict = None):
        """Sets the region (and optional per-frame properties) for the given frame.

        Args:
            frame (int): Frame index
            region (Region): Region
            properties (dict, optional): Frame properties. Defaults to None.

        Raises:
            IndexError: Frame index out of bounds
        """
        if frame < 0 or frame >= len(self._regions):
            raise IndexError("Frame index out of bounds")

        self._regions[frame] = region

        if properties is None:
            properties = dict()

        for k, v in properties.items():
            # Property lists are allocated lazily, padded with None for frames
            # where the property was never set.
            if not k in self._properties:
                self._properties[k] = [None] * len(self._regions)
            self._properties[k][frame] = v

    def region(self, frame: int) -> Region:
        """Returns the region for the given frame.

        Args:
            frame (int): Frame index

        Raises:
            IndexError: Frame index out of bounds

        Returns:
            Region: Region
        """
        if frame < 0 or frame >= len(self._regions):
            raise IndexError("Frame index out of bounds")
        return self._regions[frame]

    def regions(self) -> List[Region]:
        """ Returns a shallow copy of the list of regions.

        Returns:
            List[Region]: List of regions
        """
        return copy(self._regions)

    def properties(self, frame: int = None) -> dict:
        """Returns the properties for the given frame, or the property names
        if frame is None.

        Args:
            frame (int, optional): Frame index. Defaults to None.

        Raises:
            IndexError: Frame index out of bounds

        Returns:
            dict: Properties set for the frame, or a tuple of property names
                when frame is None
        """

        if frame is None:
            return tuple(self._properties.keys())

        if frame < 0 or frame >= len(self._regions):
            raise IndexError("Frame index out of bounds")

        return {k : v[frame] for k, v in self._properties.items() if not v[frame] is None}

    def __len__(self):
        """Returns the length of the trajectory.

        Returns:
            int: Length
        """
        return len(self._regions)

    def __iter__(self):
        """Returns an iterator over the regions.

        Returns:
            Iterator: Iterator
        """
        return iter(self._regions)

    def write(self, results: Results, name: str):
        """Writes the trajectory (and its properties) to the results storage.

        The global configuration decides between the binary and the text format.

        Args:
            results (Results): Results storage
            name (str): Trajectory name (without extension)
        """
        from vot import config

        if config.results_binary:
            with results.write(name + ".bin") as fp:
                write_trajectory(fp, self._regions)
        else:
            with results.write(name + ".txt") as fp:
                write_trajectory(fp, self._regions)

        for k, v in self._properties.items():
            with results.write(name + "_" + k + ".value") as fp:
                fp.writelines([to_string(e) + "\n" for e in v])

    def equals(self, trajectory: 'Trajectory', check_properties: bool = False, overlap_threshold: float = 0.99999):
        """Returns true if the trajectories are equal.

        Two trajectories are equal when they have the same length and each
        region pair either overlaps above the threshold or both are special
        regions. Properties are compared only when requested.

        Args:
            trajectory (Trajectory): Trajectory to compare against
            check_properties (bool, optional): Also compare per-frame properties. Defaults to False.
            overlap_threshold (float, optional): Minimum overlap to consider regions equal. Defaults to 0.99999.

        Returns:
            bool: True if the trajectories are equal
        """
        if not len(self) == len(trajectory):
            return False

        for r1, r2 in zip(self.regions(), trajectory.regions()):
            if calculate_overlap(r1, r2) < overlap_threshold and not (r1.type == RegionType.SPECIAL and r2.type == RegionType.SPECIAL):
                return False

        if check_properties:
            if not set(self._properties.keys()) == set(trajectory._properties.keys()):
                return False
            for name, _ in self._properties.items():
                for p1, p2 in zip(self._properties[name], trajectory._properties[name]):
                    if not p1 == p2 and not (p1 is None and p2 is None):
                        return False
        return True
306 |
--------------------------------------------------------------------------------
/vot/tracker/tests.py:
--------------------------------------------------------------------------------
1 | """ Unit tests for the tracker module. """
2 |
3 | import unittest
4 |
5 | from ..dataset.dummy import generate_dummy
6 | from ..tracker.dummy import DummyTracker
7 |
class TestStacks(unittest.TestCase):
    """Tests for the stacks module."""

    def test_tracker_test(self):
        """Run the dummy tracker runtime over a generated dummy sequence."""

        sequence = generate_dummy(10)

        with DummyTracker.runtime(log=False) as runtime:
            runtime.initialize(sequence.frame(0), sequence.groundtruth(0))
            for index in range(1, len(sequence)):
                runtime.update(sequence.frame(index))
21 |
--------------------------------------------------------------------------------
/vot/utilities/data.py:
--------------------------------------------------------------------------------
1 | """ Data structures for storing data in a grid."""
2 |
3 | import functools
4 | import unittest
5 |
class Grid(object):
    """ A multidimensional array addressed by per-dimension indices. """

    @staticmethod
    def scalar(obj):
        """ Wraps a single object into a 1x1 grid.

        Args:
            obj (object): The object to store in the grid.

        Returns:
            Grid: A 1x1 grid containing the object.
        """
        wrapper = Grid(1, 1)
        wrapper[0, 0] = obj
        return wrapper

    def __init__(self, *size):
        """ Creates a grid with the given dimensions.

        Args:
            size (int): The size of each dimension.
        """
        assert len(size) > 0
        self._size = size
        total = functools.reduce(lambda x, y: x * y, size)
        self._data = [None] * total

    def _ravel(self, pos):
        """ Converts a multidimensional index into a flat offset.

        Args:
            pos (tuple): The multidimensional index.

        Raises:
            IndexError: An index is outside its dimension.

        Returns:
            int: The flat offset.
        """
        if not isinstance(pos, tuple):
            pos = (pos, )
        assert len(pos) == len(self._size)

        offset = 0
        stride = 1
        # Row-major layout: the last dimension varies fastest.
        for extent, index in zip(reversed(self._size), reversed(pos)):
            if index < 0 or index >= extent:
                raise IndexError("Index out of bounds")
            offset += index * stride
            stride *= extent
        return offset

    def _unravel(self, index):
        """ Converts a flat offset back into a multidimensional index.

        Args:
            index (int): The flat offset.

        Returns:
            tuple: The multidimensional index.
        """
        coords = []
        for extent in reversed(self._size):
            index, remainder = divmod(index, extent)
            coords.append(remainder)
        coords.reverse()
        return tuple(coords)

    def __str__(self):
        """ Returns a string representation of the grid's flat data."""
        return str(self._data)

    @property
    def dimensions(self):
        """ Returns the number of dimensions of the grid. """
        return len(self._size)

    def size(self, i: int = None):
        """ Returns the full size tuple or the size of one dimension.

        Args:
            i (int): The dimension to query. If None, all sizes are returned.

        Returns:
            int: The size of the grid or of the given dimension.
        """
        if i is None:
            return tuple(self._size)
        assert i >= 0 and i < len(self._size)
        return self._size[i]

    def __len__(self):
        """ Returns the total number of cells in the grid. """
        return len(self._data)

    def __getitem__(self, i):
        """ Returns the element at the given index.

        Args:
            i (tuple): The index; an integer is accepted for one-dimensional grids.

        Returns:
            object: The element at the given index.
        """
        return self._data[self._ravel(i)]

    def __setitem__(self, i, data):
        """ Stores an element at the given index.

        Args:
            i (tuple): The index; an integer is accepted for one-dimensional grids.
            data (object): The data to store.
        """
        self._data[self._ravel(i)] = data

    def __iter__(self):
        """ Iterates the cells in flat (row-major) order. """
        return iter(self._data)

    def cell(self, *i):
        """ Returns the element at the given index wrapped in a 1x1 grid.

        Args:
            i (int): The index of the element.

        Returns:
            Grid: A scalar grid containing the element.
        """
        return Grid.scalar(self[i])

    def column(self, i):
        """ Extracts a column of a two-dimensional grid.

        Args:
            i (int): The index of the column.

        Returns:
            Grid: A 1-by-rows grid holding the column.
        """
        assert self.dimensions == 2
        rows = self.size()[0]
        result = Grid(1, rows)
        for j in range(rows):
            result[0, j] = self[j, i]
        return result

    def row(self, i):
        """ Extracts a row of a two-dimensional grid.

        Args:
            i (int): The index of the row.

        Returns:
            Grid: A columns-by-1 grid holding the row.
        """
        assert self.dimensions == 2
        columns = self.size()[1]
        result = Grid(columns, 1)
        for j in range(columns):
            result[j, 0] = self[i, j]
        return result

    def foreach(self, cb) -> "Grid":
        """ Applies a function to every cell and collects the results.

        Args:
            cb (function): Called as cb(value, *indices) for every cell.

        Returns:
            Grid: A same-shaped grid containing the callback results.
        """
        output = Grid(*self._size)

        for offset, value in enumerate(self._data):
            coords = self._unravel(offset)
            output[coords] = cb(value, *coords)

        return output
176 |
class TestGrid(unittest.TestCase):
    """ Unit tests for the Grid class. """

    def test_foreach1(self):
        """ Foreach should apply the callback to every cell. """

        grid = Grid(5, 3)

        mapped = grid.foreach(lambda value, row, col: 5)

        self.assertTrue(all(cell == 5 for cell in mapped), "Output incorrect")

    def test_foreach2(self):
        """ Foreach should pass the correct per-dimension indices. """

        grid = Grid(5, 6, 3)

        mapped = grid.foreach(lambda value, i, j, k: k)

        expected = [index % 3 for index in range(len(grid))]

        self.assertListEqual(list(mapped), expected)
199 |
--------------------------------------------------------------------------------
/vot/utilities/io.py:
--------------------------------------------------------------------------------
1 |
2 | import json
3 | import yaml
4 | import collections
5 | import datetime
6 | import numpy as np
7 |
8 | from vot.utilities.data import Grid
9 |
class JSONEncoder(json.JSONEncoder):
    """ JSON encoder with support for toolkit internal types. """

    def default(self, o):
        """ Converts otherwise unserializable objects to JSON-compatible values. """
        if isinstance(o, Grid):
            return list(o)
        if isinstance(o, datetime.date):
            return o.strftime('%Y/%m/%d')
        if isinstance(o, np.ndarray):
            return o.tolist()
        return super().default(o)
23 |
class YAMLEncoder(yaml.Dumper):
    """ YAML encoder for internal types."""

    def represent_tuple(self, data):
        """ Represents a tuple as a YAML list. """
        return self.represent_list(list(data))

    def represent_object(self, o):
        """ Represents toolkit objects as YAML-compatible values. """
        if isinstance(o, Grid):
            return self.represent_list(list(o))
        if isinstance(o, datetime.date):
            # NOTE(review): this branch returns a plain string rather than a
            # representer node; no date representer is registered below, so it
            # appears to be dead code — confirm before relying on it.
            return o.strftime('%Y/%m/%d')
        if isinstance(o, np.ndarray):
            return self.represent_list(o.tolist())
        return super().represent_object(o)
42 |
43 | YAMLEncoder.add_representer(collections.OrderedDict, YAMLEncoder.represent_dict)
44 | YAMLEncoder.add_representer(tuple, YAMLEncoder.represent_tuple)
45 | YAMLEncoder.add_representer(Grid, YAMLEncoder.represent_object)
46 | YAMLEncoder.add_representer(np.ndarray,YAMLEncoder.represent_object)
47 | YAMLEncoder.add_multi_representer(np.integer, YAMLEncoder.represent_int)
48 | YAMLEncoder.add_multi_representer(np.inexact, YAMLEncoder.represent_float)
--------------------------------------------------------------------------------
/vot/utilities/migration.py:
--------------------------------------------------------------------------------
1 | """ Migration utilities for old workspaces (legacy Matlab toolkit)"""
2 |
3 | import os
4 | import re
5 | import logging
6 |
7 | import yaml
8 | import numpy as np
9 |
10 | from vot.tracker import is_valid_identifier
11 | from vot.stack import resolve_stack
12 | from vot.workspace import WorkspaceException
13 |
def migrate_matlab_workspace(directory: str):
    """ Migrates a legacy matlab workspace to the new format.

    Converts per-sequence timing files into per-repetition .value files,
    collects tracker identifiers into a trackers.ini registry and writes a
    new config.yaml with the detected experiment stack.

    Args:
        directory (str): The directory of the workspace.

    Raises:
        WorkspaceException: If the workspace is already initialized.
        WorkspaceException: If the workspace is not a legacy workspace.
    """

    logger = logging.getLogger("vot")

    logger.info("Attempting to migrate workspace in %s", directory)

    def scan_text(pattern, content, default=None):
        """ Scans the text for a pattern and returns the first match.

        Args:
            pattern (str): The pattern to search for.
            content (str): The content to search in.
            default (str): The default value if no match is found.

        Returns:
            str: The first match or the default value.
        """
        # The match must be unique; zero or multiple matches yield the default.
        matches = re.findall(pattern, content)
        if not len(matches) == 1:
            return default
        return matches[0]

    config_file = os.path.join(directory, "config.yaml")
    if os.path.isfile(config_file):
        raise WorkspaceException("Workspace already initialized")

    old_config_file = os.path.join(directory, "configuration.m")
    if not os.path.isfile(old_config_file):
        raise WorkspaceException("Old workspace config not detected")

    # Extract the experiment stack name from the legacy Matlab configuration.
    with open(old_config_file, "r") as fp:
        content = fp.read()
        stack = scan_text("set\\_global\\_variable\\('stack', '([A-Za-z0-9-_]+)'\\)", content)
        if stack is None:
            raise WorkspaceException("Experiment stack could not be retrieved")

    tracker_ids = list()

    for tracker_dir in [x for x in os.scandir(os.path.join(directory, "results")) if x.is_dir()]:
        if not is_valid_identifier(tracker_dir.name):
            logger.info("Results directory %s is not a valid identifier, skipping.", tracker_dir.name)
            continue
        logger.debug("Scanning results for %s", tracker_dir.name)
        tracker_ids.append(tracker_dir.name)
        for experiment_dir in [x for x in os.scandir(tracker_dir.path) if x.is_dir()]:
            for sequence_dir in [x for x in os.scandir(experiment_dir.path) if x.is_dir()]:
                # The legacy format stored times for all repetitions as columns
                # of a single CSV file; the new format uses one file per repetition.
                timing_file = os.path.join(sequence_dir.path, "{}_time.txt".format(sequence_dir.name))
                if os.path.isfile(timing_file):
                    logger.debug("Migrating %s", timing_file)
                    times = np.genfromtxt(timing_file, delimiter=",")
                    if len(times.shape) == 1:
                        # A single column is read as a 1D vector; normalize to 2D.
                        times = np.reshape(times, (times.shape[0], 1))
                    for k in range(times.shape[1]):
                        # NOTE(review): the first all-zero column stops migration of
                        # all remaining columns — presumably trailing columns are
                        # unused padding; confirm against the legacy format.
                        if np.all(times[:, k] == 0):
                            break
                        # Values are divided by 1000 — presumably ms to seconds.
                        np.savetxt(os.path.join(sequence_dir.path, \
                            "%s_%03d_time.value" % (sequence_dir.name, k+1)), \
                            times[:, k] / 1000, fmt='%.6e')
                    os.unlink(timing_file)

    trackers = dict()

    for tid in tracker_ids:
        # Recover a human-readable label from the legacy tracker_<id>.m file
        # when present; fall back to the identifier itself.
        old_description = os.path.join(directory, "tracker_{}.m".format(tid))
        label = tid
        if os.path.isfile(old_description):
            with open(old_description, "r") as fp:
                content = fp.read()
                label = scan_text("tracker\\_label *= * ['\"](.*)['\"]", content, tid)
        trackers[tid] = dict(label=label, protocol="unknown", command="")

    if trackers:
        # Emit a minimal INI registry; protocol/command must be completed manually.
        with open(os.path.join(directory, "trackers.ini"), "w") as fp:
            for tid, tdata in trackers.items():
                fp.write("[" + tid + "]\n")
                for k, v in tdata.items():
                    fp.write(k + " = " + v + "\n")
                fp.write("\n\n")

    if resolve_stack(stack) is None:
        logger.warning("Stack %s not found, you will have to manually edit and correct config file.", stack)

    with open(config_file, 'w') as fp:
        yaml.dump(dict(stack=stack, registry=["."]), fp)

    # The legacy configuration.m is intentionally left in place for reference.
    #os.unlink(old_config_file)

    logger.info("Workspace %s migrated", directory)
--------------------------------------------------------------------------------
/vot/utilities/net.py:
--------------------------------------------------------------------------------
1 | """ Network utilities for the toolkit. """
2 |
3 | import os
4 | import re
5 | import shutil
6 | import tempfile
7 | from urllib.parse import urlparse, urljoin
8 |
9 | import requests
10 |
11 | from vot import ToolkitException, get_logger
12 |
class NetworkException(ToolkitException):
    """ Raised when a network operation fails or a remote resource is unavailable. """
16 |
def get_base_url(url):
    """ Returns the url with its last path segment removed.

    If the url contains no slash it is returned unchanged.

    Args:
        url (str): The url to parse.

    Returns:
        str: The base url."""
    head = url.rsplit('/', 1)
    return head[0]
26 |
def is_absolute_url(url):
    """ Returns True if the given url is absolute (has a network location).

    Args:
        url (str): The url to parse.

    Returns:
        bool: True if the url is absolute, False otherwise.
    """
    parsed = urlparse(url)
    return bool(parsed.netloc)
38 |
def join_url(url_base, url_path):
    """ Joins a base url with a path.

    Args:
        url_base (str): The base url.
        url_path (str): The path to join.

    Returns:
        str: The joined url.
    """
    # An already-absolute path (one with a network location) passes through.
    if bool(urlparse(url_path).netloc):
        return url_path
    return urljoin(url_base, url_path)
52 |
def get_url_from_gdrive_confirmation(contents):
    """ Returns the direct download url of a google drive file from the
    confirmation page.

    Args:
        contents (str): The contents of the confirmation page.

    Returns:
        str: The url of the file, or None if no download link was found.
    """
    url = ''
    for line in contents.splitlines():
        m = re.search(r'href="(\/uc\?export=download[^"]+)', line)
        if m:
            url = 'https://docs.google.com' + m.groups()[0]
            # Bug fix: unescape HTML entities in the extracted link; the
            # previous replace('&', '&') was a no-op.
            url = url.replace('&amp;', '&')
            return url
        m = re.search('confirm=([^;&]+)', line)
        if m:
            confirm = m.groups()[0]
            url = re.sub(r'confirm=([^;&]+)', r'confirm=' + confirm, url)
            return url
        m = re.search(r'"downloadUrl":"([^"]+)', line)
        if m:
            url = m.groups()[0]
            url = url.replace('\\u003d', '=')
            url = url.replace('\\u0026', '&')
            return url
80 |
81 |
def is_google_drive_url(url):
    """ Returns True if the given url is a google drive download url.

    Args:
        url (str): The url to parse.

    Returns:
        bool: True if the url is a google drive url, False otherwise.
    """
    # Bug fix: dots are escaped so that an unrelated host such as
    # "driveXgoogleXcom" can no longer match the pattern.
    m = re.match(r'^https?://drive\.google\.com/uc\?id=.*$', url)
    return m is not None
93 |
def download_json(url):
    """ Downloads and parses a JSON document from the given url.

    Args:
        url (str): The url to parse.

    Raises:
        NetworkException: If the request fails.

    Returns:
        dict: The JSON content.
    """
    try:
        response = requests.get(url)
        return response.json()
    except requests.exceptions.RequestException as e:
        raise NetworkException("Unable to read JSON file {}".format(e))
107 |
108 |
def download(url, output, callback=None, chunk_size=1024*32, retry=10):
    """ Downloads a file from the given url. Supports google drive urls.
    callback for progress report, automatically resumes download if connection is closed.

    Args:
        url (str): The url to download from.
        output (str): The output file path or file handle. If None, the name is
            derived from the url (or the Content-Disposition header for Google Drive).
        callback (function): The callback function for progress report.
        chunk_size (int): The chunk size for download.
        retry (int): The number of retries.

    Raises:
        NetworkException: If the file is not available or the download fails.
    """

    logger = get_logger()

    with requests.session() as sess:

        is_gdrive = is_google_drive_url(url)

        while True:
            res = sess.get(url, stream=True)

            if not res.status_code == 200:
                raise NetworkException("File not available")

            if 'Content-Disposition' in res.headers:
                # This is the file
                break
            if not is_gdrive:
                break

            # Need to redirect with confirmation
            gurl = get_url_from_gdrive_confirmation(res.text)

            if gurl is None:
                raise NetworkException("Permission denied for {}".format(gurl))
            url = gurl

        if output is None:
            if is_gdrive:
                m = re.search('filename="(.*)"',
                              res.headers['Content-Disposition'])
                output = m.groups()[0]
            else:
                output = os.path.basename(url)

        output_is_path = isinstance(output, str)

        if output_is_path:
            # Download into a temporary file first, copy to the destination
            # only after the transfer completes.
            tmp_file = tempfile.mktemp()
            filehandle = open(tmp_file, 'wb')
        else:
            tmp_file = None
            filehandle = output

        position = 0
        progress = False

        try:
            # Content-Length may be absent; total stays None in that case.
            total = res.headers.get('Content-Length')
            if total is not None:
                total = int(total)
            while True:
                try:
                    for chunk in res.iter_content(chunk_size=chunk_size):
                        filehandle.write(chunk)
                        position += len(chunk)
                        progress = True
                        if callback:
                            callback(position, total)

                    # Bug fix: guard against a missing Content-Length header;
                    # comparing an int with None raises TypeError.
                    if total is not None and position < total:
                        raise requests.exceptions.RequestException("Connection closed")

                    if tmp_file:
                        filehandle.close()
                        shutil.copy(tmp_file, output)
                    break

                except requests.exceptions.RequestException as e:
                    if not progress:
                        logger.warning("Error when downloading file, retrying")
                        retry-=1
                        if retry < 1:
                            raise NetworkException("Unable to download file {}".format(e))
                        res = sess.get(url, stream=True)
                        filehandle.seek(0)
                        # Bug fix: discard stale bytes from the failed attempt,
                        # otherwise a shorter restart leaves trailing garbage.
                        filehandle.truncate()
                        position = 0
                    else:
                        logger.warning("Error when downloading file, trying to resume download")
                        res = sess.get(url, stream=True, headers=({'Range': f'bytes={position}-'} if position > 0 else None))
                        progress = False

            if total is not None and position < total:
                raise NetworkException("Unable to download file")

        except IOError as e:
            raise NetworkException("Local I/O Error when downloading file: %s" % e)
        finally:
            try:
                if tmp_file:
                    os.remove(tmp_file)
            except OSError:
                pass

    return output
218 |
219 |
def download_uncompress(url, path):
    """ Downloads a file from the given url and uncompresses it to the given path.

    Args:
        url (str): The url to download from.
        path (str): The path to uncompress the file to.

    Raises:
        NetworkException: If the file is not available.
    """
    from vot.utilities import extract_files
    _, ext = os.path.splitext(urlparse(url).path)
    # Bug fix: mkstemp replaces the deprecated, race-prone mktemp; the
    # descriptor is closed immediately since download() reopens the path.
    handle, tmp_file = tempfile.mkstemp(suffix=ext)
    os.close(handle)
    try:
        download(url, tmp_file)
        extract_files(tmp_file, path)
    finally:
        if os.path.exists(tmp_file):
            os.unlink(tmp_file)
239 |
--------------------------------------------------------------------------------
/vot/utilities/notebook.py:
--------------------------------------------------------------------------------
1 | """ This module contains functions for visualization in Jupyter notebooks. """
2 |
3 | import os
4 | import io
5 | from threading import Thread, Condition
6 |
def is_notebook():
    """ Detects whether the code is executing inside a Jupyter notebook.

    Returns:
        bool: True if the current environment is a Jupyter notebook.
    """
    try:
        from IPython import get_ipython
        shell = get_ipython()
        if shell is None:
            raise ImportError("console")
        if 'IPKernelApp' not in shell.config:  # pragma: no cover
            raise ImportError("console")
        if 'VSCODE_PID' in os.environ:  # pragma: no cover
            raise ImportError("vscode")
    except ImportError:
        return False
    return True
25 |
if is_notebook():

    # Widget support is only defined when running inside a notebook.
    from IPython.display import display
    from ipywidgets import widgets
    from vot.utilities.draw import ImageDrawHandle

    class SequenceView(object):
        """ A widget for visualizing a sequence. """

        def __init__(self):
            """ Initializes a new instance of the SequenceView class.

            Args:
                sequence (Sequence): The sequence to visualize.
            """

            # NOTE(review): `sequence` is documented above but is not a parameter
            # of this constructor; every use below raises NameError at runtime.
            self._handle = ImageDrawHandle(sequence.frame(0).image())

            self._button_restart = widgets.Button(description='Restart')
            self._button_next = widgets.Button(description='Next')
            self._button_play = widgets.Button(description='Run')
            self._frame = widgets.Label(value="")
            self._frame.layout.display = "none"
            self._frame_feedback = widgets.Label(value="")
            self._image = widgets.Image(value="", format="png", width=sequence.size[0] * 2, height=sequence.size[1] * 2)

            # NOTE(review): `state` and `condition` are local variables that are
            # never used after construction — presumably meant to be attributes.
            state = dict(frame=0, auto=False, alive=True, region=None)
            condition = Condition()

            # NOTE(review): `frame`, `button_play` and `frame2` are undefined
            # names here; self._frame, self._button_play and self._frame_feedback
            # were probably intended.
            self._buttons = widgets.HBox(children=(frame, self._button_restart, self._button_next, button_play, frame2))

        # NOTE(review): missing `self` parameter — defined as if it were a
        # static helper; confirm intended usage.
        def _push_image(handle):
            """ Encodes the handle's current snapshot as PNG bytes.

            Args:
                handle (ImageDrawHandle): The image handle.
            """
            with io.BytesIO() as output:
                handle.snapshot.save(output, format="PNG")
                return output.getvalue()
66 |
def visualize_tracker(tracker: "Tracker", sequence: "Sequence"):
    """ Visualizes a tracker in a Jupyter notebook.

    Runs the tracker over the sequence in a background thread and displays an
    interactive widget with Restart/Next/Run controls. The groundtruth region
    is drawn in green, the tracker output in red.

    Args:
        tracker (Tracker): The tracker to visualize.
        sequence (Sequence): The sequence to visualize.
    """
    # Imported lazily so the module can be used outside notebook environments.
    from IPython.display import display
    from ipywidgets import widgets
    from vot.utilities.draw import ImageDrawHandle

    def encode_image(handle):
        """ Encodes an image so that it can be displayed in a Jupyter notebook.

        Args:
            handle (ImageDrawHandle): The image handle.

        Returns:
            bytes: The encoded image."""
        with io.BytesIO() as output:
            handle.snapshot.save(output, format="PNG")
            return output.getvalue()

    handle = ImageDrawHandle(sequence.frame(0).image())

    # Widget setup. "frame" is hidden; its value is mirrored into "frame2" via
    # jslink below so that a client-side round trip triggers on_update.
    button_restart = widgets.Button(description='Restart')
    button_next = widgets.Button(description='Next')
    button_play = widgets.Button(description='Run')
    frame = widgets.Label(value="")
    frame.layout.display = "none"
    frame2 = widgets.Label(value="")
    image = widgets.Image(value=encode_image(handle), format="png", width=sequence.size[0] * 2, height=sequence.size[1] * 2)

    # Mutable state shared between the UI callbacks and the worker thread;
    # access is coordinated through the condition variable below.
    state = dict(frame=0, auto=False, alive=True, region=None)
    condition = Condition()

    buttons = widgets.HBox(children=(frame, button_restart, button_next, button_play, frame2))

    image.value = encode_image(handle)

    def run():
        """ Runs the tracker. """

        runtime = tracker.runtime()

        while state["alive"]:

            # Frame 0 (re)initializes the tracker; later frames only update it.
            if state["frame"] == 0:
                state["region"], _, _ = runtime.initialize(sequence.frame(0), sequence.groundtruth(0))
            else:
                state["region"], _, _ = runtime.update(sequence.frame(state["frame"]))

            update_image()

            # Block until a button press (or auto-play echo) signals the next step.
            # NOTE(review): wait() is used without a predicate, so a spurious
            # wakeup would advance a frame — confirm acceptable here.
            with condition:
                condition.wait()

            if state["frame"] == len(sequence):
                state["alive"] = False
                continue

            state["frame"] = state["frame"] + 1


    def update_image():
        """ Updates the image. """
        handle.image(sequence.frame(state["frame"]).image())
        handle.style(color="green").region(sequence.frame(state["frame"]).groundtruth())
        if state["region"]:
            handle.style(color="red").region(state["region"])
        image.value = encode_image(handle)
        # NOTE(review): the label shows state["frame"] - 1 while the drawn frame
        # is state["frame"] — looks like an off-by-one; confirm intended.
        frame.value = "Frame: " + str(state["frame"] - 1)

    def on_click(button):
        """ Handles a button click. """
        if button == button_next:
            with condition:
                state["auto"] = False
                condition.notify()
        if button == button_restart:
            with condition:
                state["frame"] = 0
                condition.notify()
        if button == button_play:
            with condition:
                # Toggle auto-play; the label doubles as the play/stop indicator.
                state["auto"] = not state["auto"]
                button.description = "Stop" if state["auto"] else "Run"
                condition.notify()

    button_next.on_click(on_click)
    button_restart.on_click(on_click)
    button_play.on_click(on_click)
    # Mirror the hidden label client-side; each update bounces back and fires
    # on_update, which keeps auto-play advancing frame by frame.
    widgets.jslink((frame, "value"), (frame2, "value"))

    def on_update(_):
        """ Handles a widget update."""
        with condition:
            if state["auto"]:
                condition.notify()

    frame2.observe(on_update, names=("value", ))

    # NOTE(review): the worker thread is not a daemon and state["alive"] is
    # never set False externally, so the thread may outlive the notebook cell.
    thread = Thread(target=run)
    display(widgets.Box([widgets.VBox(children=(image, buttons))]))
    thread.start()
172 |
def visualize_results(experiment: "Experiment", sequence: "Sequence"):
    """ Visualizes the results of an experiment in a Jupyter notebook.

    Args:
        experiment (Experiment): The experiment to visualize.
        sequence (Sequence): The sequence to visualize.

    NOTE(review): this body is a verbatim copy of visualize_tracker — it never
    uses the ``experiment`` parameter and ``run()`` references an undefined
    name ``tracker``, so calling it raises NameError once the worker thread
    starts. It should load stored results from the experiment instead of
    running a tracker; fix requires the Experiment results API.
    """

    # Imported lazily so the module can be used outside notebook environments.
    from IPython.display import display
    from ipywidgets import widgets
    from vot.utilities.draw import ImageDrawHandle

    def encode_image(handle):
        """ Encodes an image so that it can be displayed in a Jupyter notebook.

        Args:
            handle (ImageDrawHandle): The image handle.

        Returns:
            bytes: The encoded image.
        """

        with io.BytesIO() as output:
            handle.snapshot.save(output, format="PNG")
            return output.getvalue()

    handle = ImageDrawHandle(sequence.frame(0).image())

    # Widget setup. "frame" is hidden; its value is mirrored into "frame2" via
    # jslink below so that a client-side round trip triggers on_update.
    button_restart = widgets.Button(description='Restart')
    button_next = widgets.Button(description='Next')
    button_play = widgets.Button(description='Run')
    frame = widgets.Label(value="")
    frame.layout.display = "none"
    frame2 = widgets.Label(value="")
    image = widgets.Image(value=encode_image(handle), format="png", width=sequence.size[0] * 2, height=sequence.size[1] * 2)

    # Mutable state shared between the UI callbacks and the worker thread;
    # access is coordinated through the condition variable below.
    state = dict(frame=0, auto=False, alive=True, region=None)
    condition = Condition()

    buttons = widgets.HBox(children=(frame, button_restart, button_next, button_play, frame2))

    image.value = encode_image(handle)

    def run():
        """ Runs the tracker. """

        # NOTE(review): ``tracker`` is undefined in this scope (copy-paste from
        # visualize_tracker) — this raises NameError when the thread runs.
        runtime = tracker.runtime()

        while state["alive"]:

            if state["frame"] == 0:
                state["region"], _, _ = runtime.initialize(sequence.frame(0), sequence.groundtruth(0))
            else:
                state["region"], _, _ = runtime.update(sequence.frame(state["frame"]))

            update_image()

            # Block until a button press (or auto-play echo) signals the next step.
            with condition:
                condition.wait()

            if state["frame"] == len(sequence):
                state["alive"] = False
                continue

            state["frame"] = state["frame"] + 1


    def update_image():
        """ Updates the image. """
        handle.image(sequence.frame(state["frame"]).image())
        handle.style(color="green").region(sequence.frame(state["frame"]).groundtruth())
        if state["region"]:
            handle.style(color="red").region(state["region"])
        image.value = encode_image(handle)
        # NOTE(review): label shows state["frame"] - 1 while the drawn frame is
        # state["frame"] — possibly off-by-one; confirm intended.
        frame.value = "Frame: " + str(state["frame"] - 1)

    def on_click(button):
        """ Handles a button click. """
        if button == button_next:
            with condition:
                state["auto"] = False
                condition.notify()
        if button == button_restart:
            with condition:
                state["frame"] = 0
                condition.notify()
        if button == button_play:
            with condition:
                # Toggle auto-play; the label doubles as the play/stop indicator.
                state["auto"] = not state["auto"]
                button.description = "Stop" if state["auto"] else "Run"
                condition.notify()

    button_next.on_click(on_click)
    button_restart.on_click(on_click)
    button_play.on_click(on_click)
    # Mirror the hidden label client-side; each update bounces back and fires
    # on_update, which keeps auto-play advancing frame by frame.
    widgets.jslink((frame, "value"), (frame2, "value"))

    def on_update(_):
        """ Handles a widget update."""
        with condition:
            if state["auto"]:
                condition.notify()

    frame2.observe(on_update, names=("value", ))

    thread = Thread(target=run)
    display(widgets.Box([widgets.VBox(children=(image, buttons))]))
    thread.start()
--------------------------------------------------------------------------------
/vot/version.py:
--------------------------------------------------------------------------------
1 | """
2 | Toolkit version
3 | """
4 | __version__ = '0.7.3'
--------------------------------------------------------------------------------
/vot/workspace/__init__.py:
--------------------------------------------------------------------------------
1 | """This module contains the Workspace class that represents the main junction of trackers, datasets and experiments."""
2 |
3 | import os
4 | import typing
5 | import importlib
6 |
7 | import yaml
8 | from lazy_object_proxy import Proxy
9 |
10 | from attributee import Attribute, Attributee, Nested, List, String, CoerceContext
11 |
12 | from .. import ToolkitException, get_logger
13 | from ..dataset import Dataset, load_dataset
14 | from ..tracker import Registry, Tracker
15 | from ..stack import Stack, resolve_stack
16 | from ..utilities import normalize_path
17 | from ..report import ReportConfiguration
18 | from .storage import LocalStorage, Storage, NullStorage
19 |
20 | _logger = get_logger()
21 |
class WorkspaceException(ToolkitException):
    """Raised for errors related to workspace handling."""
26 |
class StackLoader(Attribute):
    """Special attribute that converts a string (stack name) or a dictionary
    (inline stack definition) into a Stack object.
    """

    def coerce(self, value, context: typing.Optional[CoerceContext]):
        """Coerce a value to a Stack object.

        Args:
            value (typing.Any): Value to coerce
            context (typing.Optional[CoerceContext]): Coercion context

        Returns:
            Stack: Coerced value
        """
        # Importing these modules registers the available analyses and
        # experiments that a stack definition may reference.
        importlib.import_module("vot.analysis")
        importlib.import_module("vot.experiment")

        if not isinstance(value, str):
            # Inline definition: build the stack directly from the dictionary.
            return Stack(**value)

        # Named stack: resolve the stack file relative to the workspace.
        stack_file = resolve_stack(value, context.parent.directory)
        if stack_file is None:
            raise WorkspaceException("Experiment stack does not exist")

        stack = Stack.read(stack_file)
        stack._name = value
        return stack

    def dump(self, value: "Stack") -> str:
        """Dump a Stack object to a string or a dictionary.

        Args:
            value (Stack): Value to dump

        Returns:
            str: The stack name when it was loaded by name, otherwise the
            full serialized definition.
        """
        return value.dump() if value.name is None else value.name
70 |
class RegistryLoader(Attribute):
    """Special attribute that converts a list of strings input to a Registry object. The paths are appended to
    the global registry search paths.
    """

    def coerce(self, value, context: typing.Optional[CoerceContext]):
        """Coerce a list of path strings into a Registry object.

        Args:
            value (typing.Any): List of registry paths from the configuration
            context (typing.Optional[CoerceContext]): Coercion context

        Returns:
            Registry: Registry constructed from workspace and global paths
        """
        from vot import config, get_logger

        # Paths from the workspace config are interpreted relative to the
        # workspace directory.
        workspace_paths = list(List(String(transformer=lambda x, ctx: normalize_path(x, ctx.parent.directory))).coerce(value, context))

        # Global registry search paths are relative to the current directory.
        global_paths = [normalize_path(x, os.curdir) for x in config.registry]

        registry = Registry(workspace_paths + global_paths, root=context.parent.directory)
        # Remember only the workspace-local paths so dump() round-trips the
        # original configuration value.
        registry._paths = workspace_paths

        get_logger().debug("Found data for %d trackers", len(registry))

        return registry

    def dump(self, value: "Registry") -> typing.List[str]:
        """Dump the workspace-local registry paths back to a list of strings."""
        assert isinstance(value, Registry)
        return value._paths
94 |
class Workspace(Attributee):
    """Workspace class represents the main junction of trackers, datasets and experiments. Each workspace performs
    given experiments on a provided dataset.
    """

    # Tracker registry paths, relative to the workspace directory.
    registry = RegistryLoader()
    # Experiment stack, referenced by name or defined inline.
    stack = StackLoader()
    # Directory (relative to the workspace root) where sequences are stored.
    sequences = String(default="sequences")
    # Report generation configuration.
    report = Nested(ReportConfiguration)

    @staticmethod
    def exists(directory: str) -> bool:
        """Check if a workspace exists in a given directory.

        Args:
            directory (str): Directory to check

        Returns:
            bool: True if the workspace exists, False otherwise.
        """
        return os.path.isfile(os.path.join(directory, "config.yaml"))

    @staticmethod
    def initialize(directory: str, config: typing.Optional[typing.Dict] = None, download: bool = True) -> None:
        """Initialize a new workspace in a given directory with the given config

        Args:
            directory (str): Root for workspace storage
            config (typing.Optional[typing.Dict], optional): Workspace initial configuration. Defaults to None.
            download (bool, optional): Download the dataset immediately. Defaults to True.

        Raises:
            WorkspaceException: When a workspace cannot be created.
        """

        config_file = os.path.join(directory, "config.yaml")
        if Workspace.exists(directory):
            raise WorkspaceException("Workspace already initialized")

        os.makedirs(directory, exist_ok=True)

        with open(config_file, 'w') as fp:
            yaml.dump(config if config is not None else dict(), fp)

        os.makedirs(os.path.join(directory, "sequences"), exist_ok=True)
        os.makedirs(os.path.join(directory, "results"), exist_ok=True)

        if not os.path.isfile(os.path.join(directory, "trackers.ini")):
            open(os.path.join(directory, "trackers.ini"), 'w').close()

        if download:
            # Try to retrieve dataset from stack and download it. Guard against
            # a missing configuration or stack entry instead of raising
            # TypeError/KeyError (the original crashed when config was None).
            stack_name = (config or {}).get("stack")
            if stack_name is None:
                return
            stack_file = resolve_stack(stack_name, directory)
            dataset_directory = normalize_path((config or {}).get("sequences", "sequences"), directory)
            if stack_file is None:
                return
            dataset = None
            with open(stack_file, 'r') as fp:
                stack_metadata = yaml.load(fp, Loader=yaml.BaseLoader)
                # Some stacks may not declare a dataset; skip download then.
                dataset = (stack_metadata or {}).get("dataset")
            if dataset:
                Workspace.download_dataset(dataset, dataset_directory)

    @staticmethod
    def download_dataset(dataset: str, directory: str) -> None:
        """Download the dataset if no dataset is present already.

        Args:
            dataset (str): Dataset URL or ID
            directory (str): Directory where the dataset is saved

        """
        #TODO: this has to be improved now that we also support other datasets that may not have list.txt
        if os.path.exists(os.path.join(directory, "list.txt")):
            # Dataset already present; nothing to do (declared return is None,
            # the original inconsistently returned False here).
            return

        from vot.dataset import download_dataset
        download_dataset(dataset, directory)

        _logger.info("Download completed")

    @staticmethod
    def load(directory):
        """Load an existing workspace from a given location.

        Args:
            directory (str): Root directory of the workspace.

        Raises:
            WorkspaceException: If the directory does not contain an
                initialized workspace (no config.yaml).

        Returns:
            Workspace: The loaded workspace object.
        """
        directory = normalize_path(directory)
        config_file = os.path.join(directory, "config.yaml")
        if not os.path.isfile(config_file):
            raise WorkspaceException("Workspace not initialized")

        with open(config_file, 'r') as fp:
            config = yaml.load(fp, Loader=yaml.BaseLoader)
            return Workspace(directory, **config)

    def __init__(self, directory: str, **kwargs):
        """Do not call this constructor directly unless you know what you are doing,
        instead use the static Workspace.load method.

        Args:
            directory (str): Root directory of the workspace storage.
        """
        self._directory = directory

        # Storage is created lazily via a proxy so that a directory-less
        # workspace falls back to a no-op storage without upfront cost.
        self._storage = Proxy(lambda: LocalStorage(directory) if directory is not None else NullStorage())

        super().__init__(**kwargs)

        dataset_directory = normalize_path(self.sequences, directory)

        if self.stack.dataset is not None:
            Workspace.download_dataset(self.stack.dataset, dataset_directory)

        self._dataset = load_dataset(dataset_directory)

        # Register storage with all experiments in the stack
        for experiment in self.stack.experiments.values():
            experiment._storage = self._storage

    @property
    def directory(self) -> str:
        """Returns the root directory for the workspace.

        Returns:
            str: The absolute path to the root of the workspace.
        """
        return self._directory

    @property
    def dataset(self) -> Dataset:
        """Returns dataset associated with the workspace

        Returns:
            Dataset: The dataset object.
        """
        return self._dataset

    @property
    def storage(self) -> Storage:
        """Returns the storage object associated with this workspace.

        Returns:
            Storage: The storage object.
        """
        return self._storage

    def list_results(self, registry: "Registry") -> typing.List["Tracker"]:
        """Utility method that looks for all subfolders in the results folder and tries to resolve them
        as tracker references. It returns a list of Tracker objects, i.e. trackers that have at least
        some results or an existing results directory.

        Returns:
            [typing.List[Tracker]]: A list of trackers with results.
        """
        references = self._storage.substorage("results").folders()
        return registry.resolve(*references)
258 |
--------------------------------------------------------------------------------
/vot/workspace/tests.py:
--------------------------------------------------------------------------------
1 | """Tests for workspace related methods and classes."""
2 |
3 | import logging
4 | import tempfile
5 | import unittest
6 | from vot import get_logger
7 | from vot.workspace.storage import Cache, LocalStorage
8 |
9 | from vot.workspace import Workspace, NullStorage
10 |
class TestStacks(unittest.TestCase):
    """Tests for workspace related methods
    """

    def test_void_storage(self):
        """Test if void storage works: writes are discarded and reads yield None."""

        storage = NullStorage()

        with storage.write("test.data") as handle:
            handle.write("test")

        self.assertIsNone(storage.read("test.data"))

    def test_local_storage(self):
        """Test if local storage works: a written document is discoverable."""

        with tempfile.TemporaryDirectory() as testdir:
            storage = LocalStorage(testdir)

            with storage.write("test.txt") as handle:
                handle.write("Test")

            self.assertTrue(storage.isdocument("test.txt"))

            # TODO: more tests

    def test_workspace_create(self):
        """Test if workspace creation works
        """

        # logging.WARNING replaces the deprecated WARN alias; disables progress bar
        get_logger().setLevel(logging.WARNING)

        default_config = dict(stack="tests/basic", registry=["./trackers.ini"])

        with tempfile.TemporaryDirectory() as testdir:
            Workspace.initialize(testdir, default_config, download=True)
            Workspace.load(testdir)

    def test_cache(self):
        """Test if local storage cache works: insert, lookup, delete round-trip."""

        with tempfile.TemporaryDirectory() as testdir:

            cache = Cache(LocalStorage(testdir))

            # Idiomatic unittest assertions (assertIn/assertEqual) replace
            # assertTrue on boolean expressions for better failure messages.
            self.assertNotIn("test", cache)

            cache["test"] = 1

            self.assertIn("test", cache)
            self.assertEqual(cache["test"], 1)

            del cache["test"]

            with self.assertRaises(KeyError):
                cache["test"]
--------------------------------------------------------------------------------