├── .github
│   └── workflows
│       ├── publish.yml
│       ├── pylint.yml
│       └── python-package.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .pylintrc
├── LICENSE.txt
├── README.md
├── assets
│   └── visualization.jpg
├── ctc_metrics
│   ├── __init__.py
│   ├── metrics
│   │   ├── __init__.py
│   │   ├── biological
│   │   │   ├── __init__.py
│   │   │   ├── bc.py
│   │   │   ├── bio.py
│   │   │   ├── cca.py
│   │   │   ├── ct.py
│   │   │   ├── op_clb.py
│   │   │   └── tf.py
│   │   ├── clearmot
│   │   │   ├── __init__.py
│   │   │   └── mota.py
│   │   ├── hota
│   │   │   ├── __init__.py
│   │   │   ├── chota.py
│   │   │   └── hota.py
│   │   ├── identity_metrics
│   │   │   ├── __init__.py
│   │   │   └── idf1.py
│   │   ├── others
│   │   │   ├── __init__.py
│   │   │   ├── faf.py
│   │   │   └── mt_ml.py
│   │   ├── technical
│   │   │   ├── __init__.py
│   │   │   ├── det.py
│   │   │   ├── lnk.py
│   │   │   ├── op_csb.py
│   │   │   ├── op_ctb.py
│   │   │   ├── seg.py
│   │   │   └── tra.py
│   │   └── validation
│   │       ├── __init__.py
│   │       └── valid.py
│   ├── scripts
│   │   ├── __init__.py
│   │   ├── evaluate.py
│   │   ├── noise.py
│   │   ├── validate.py
│   │   └── visualize.py
│   └── utils
│       ├── __init__.py
│       ├── filesystem.py
│       ├── handle_results.py
│       └── representations.py
├── py-ctcmetrics.toml
├── requirements.txt
├── setup.py
├── test
│   ├── __init__.py
│   ├── prepare_test_data.sh
│   ├── test_metrics.py
│   ├── test_validate.py
│   └── utils.py
└── third_party
    ├── Evaluation software.pdf
    ├── LICENSE
    ├── Linux
    │   ├── DETMeasure
    │   ├── SEGMeasure
    │   └── TRAMeasure
    └── Win
        ├── DETMeasure.exe
        ├── SEGMeasure.exe
        ├── TRAMeasure.exe
        ├── cbia.lib.i3dalgo.dyn.rel.x64.15.dll
        ├── cbia.lib.i3dcore.dyn.rel.x64.15.dll
        ├── cbia.lib.tiff.dyn.rel.x64.15.dll
        ├── cbia.lib.z.dyn.rel.x64.15.dll
        └── wxbase311u_vc_x64_cbia.dll
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: PyPI
2 |
3 | on:
4 | release:
5 | types: [created]
6 |
7 | jobs:
8 | pypi-publish:
9 | name: Publish release to PyPI
10 | runs-on: ubuntu-latest
11 | environment:
12 | name: pypi
13 | url: https://pypi.org/p/py-ctcmetrics
14 | permissions:
15 | id-token: write
16 | steps:
17 | - uses: actions/checkout@v4
18 | - name: Set up Python
19 | uses: actions/setup-python@v4
20 | with:
21 | python-version: "3.x"
22 | - name: Install dependencies
23 | run: |
24 | python -m pip install --upgrade pip
25 | pip install setuptools wheel
26 | - name: Build package
27 | run: |
28 | python setup.py sdist bdist_wheel # Could also be python -m build
29 | - name: Publish package distributions to PyPI
30 | uses: pypa/gh-action-pypi-publish@release/v1
31 |
--------------------------------------------------------------------------------
/.github/workflows/pylint.yml:
--------------------------------------------------------------------------------
1 | name: Pylint
2 |
3 | on: [push]
4 |
5 | jobs:
6 | build:
7 | runs-on: ubuntu-latest
8 | strategy:
9 | matrix:
10 | python-version: ["3.9", "3.10", "3.11"]
11 | steps:
12 | - uses: actions/checkout@v4
13 | - name: Set up Python ${{ matrix.python-version }}
14 | uses: actions/setup-python@v4
15 | with:
16 | python-version: ${{ matrix.python-version }}
17 | - name: Install dependencies
18 | run: |
19 | python -m pip install --upgrade pip
20 | pip install pylint
21 | - name: Analysing the code with pylint
22 | run: |
23 | pylint $(git ls-files '*.py') --rcfile .pylintrc
24 |
--------------------------------------------------------------------------------
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 |
4 | name: Python package
5 |
6 | on:
7 | push:
8 | branches: [ "main", "clearmot_and_hota" ]
9 | pull_request:
10 | branches: [ "main" ]
11 |
12 | jobs:
13 | build:
14 |
15 | runs-on: ubuntu-latest
16 | strategy:
17 | fail-fast: false
18 | matrix:
19 | python-version: ["3.9", "3.10", "3.11"]
20 |
21 | steps:
22 | - uses: actions/checkout@v4
23 | - name: Set up Python ${{ matrix.python-version }}
24 | uses: actions/setup-python@v4
25 | with:
26 | python-version: ${{ matrix.python-version }}
27 | - name: Install dependencies
28 | run: |
29 | python -m pip install --upgrade pip
30 | python -m pip install flake8 pytest
31 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
32 | - name: Lint with flake8
33 | run: |
34 | # stop the build if there are Python syntax errors or undefined names
35 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
36 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
37 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
38 | - name: Test with pytest
39 | run: |
40 | ls .
41 | sh ./test/prepare_test_data.sh
42 | ls test
43 | pytest
44 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: local
3 | hooks:
4 | - id: pylint
5 | name: pylint
6 | entry: pylint
7 | language: system
8 | types: [python]
9 | args:
10 | [
11 | "-rn", # Only display messages
12 | "--rcfile=.pylintrc", # Link to your config file
13 | ]
14 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | [MAIN]
2 |
3 | # Specify a configuration file.
4 | #rcfile=
5 |
6 | # Python code to execute, usually for sys.path manipulation such as
7 | # pygtk.require().
8 | #init-hook=
9 |
10 | # Files or directories to be skipped. They should be base names, not
11 | # paths.
12 | ignore=CVS
13 |
14 | # Add files or directories matching the regex patterns to the ignore-list. The
15 | # regex matches against paths and can be in Posix or Windows format.
16 | ignore-paths=
17 |
18 | # Files or directories matching the regex patterns are skipped. The regex
19 | # matches against base names, not paths.
20 | ignore-patterns=^\.#
21 |
22 | # Pickle collected data for later comparisons.
23 | persistent=yes
24 |
25 | # List of plugins (as comma separated values of python modules names) to load,
26 | # usually to register additional checkers.
27 | load-plugins=
28 | pylint.extensions.check_elif,
29 | pylint.extensions.bad_builtin,
30 | pylint.extensions.docparams,
31 | pylint.extensions.for_any_all,
32 | pylint.extensions.set_membership,
33 | pylint.extensions.code_style,
34 | pylint.extensions.overlapping_exceptions,
35 | pylint.extensions.typing,
36 | pylint.extensions.redefined_variable_type,
37 | pylint.extensions.comparison_placement,
38 | pylint.extensions.mccabe,
39 |
40 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
41 | # number of processors available to use.
42 | jobs=0
43 |
44 | # When enabled, pylint would attempt to guess common misconfiguration and emit
45 | # user-friendly hints instead of false-positive error messages.
46 | suggestion-mode=yes
47 |
48 | # Allow loading of arbitrary C extensions. Extensions are imported into the
49 | # active Python interpreter and may run arbitrary code.
50 | unsafe-load-any-extension=no
51 |
52 | # A comma-separated list of package or module names from where C extensions may
53 | # be loaded. Extensions are loading into the active Python interpreter and may
54 | # run arbitrary code
55 | extension-pkg-allow-list=
56 |
57 | # Minimum supported python version
58 | py-version = 3.7.2
59 |
60 | # Control the amount of potential inferred values when inferring a single
61 | # object. This can help the performance when dealing with large functions or
62 | # complex, nested conditions.
63 | limit-inference-results=100
64 |
65 | # Specify a score threshold to be exceeded before program exits with error.
66 | fail-under=10.0
67 |
68 | # Return non-zero exit code if any of these messages/categories are detected,
69 | # even if score is above --fail-under value. Syntax same as enable. Messages
70 | # specified are enabled, while categories only check already-enabled messages.
71 | fail-on=
72 |
73 |
74 | [MESSAGES CONTROL]
75 |
76 | # Only show warnings with the listed confidence levels. Leave empty to show
77 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
78 | # confidence=
79 |
80 | # Enable the message, report, category or checker with the given id(s). You can
81 | # either give multiple identifier separated by comma (,) or put this option
82 | # multiple time (only on the command line, not in the configuration file where
83 | # it should appear only once). See also the "--disable" option for examples.
84 | enable=
85 | use-symbolic-message-instead,
86 | useless-suppression,
87 |
88 | # Disable the message, report, category or checker with the given id(s). You
89 | # can either give multiple identifiers separated by comma (,) or put this
90 | # option multiple times (only on the command line, not in the configuration
91 | # file where it should appear only once).You can also use "--disable=all" to
92 | # disable everything first and then re-enable specific checks. For example, if
93 | # you want to run only the similarities checker, you can use "--disable=all
94 | # --enable=similarities". If you want to run only the classes checker, but have
95 | # no Warning level messages displayed, use"--disable=all --enable=classes
96 | # --disable=W"
97 |
98 | disable=
99 | attribute-defined-outside-init,
100 | invalid-name,
101 | missing-docstring,
102 | protected-access,
103 | too-few-public-methods,
104 | # handled by black
105 | format,
106 | # We anticipate #3512 where it will become optional
107 | fixme,
108 | cyclic-import,
109 | import-error,
110 |
111 |
112 | [REPORTS]
113 |
114 | # Set the output format. Available formats are text, parseable, colorized, msvs
115 | # (visual studio) and html. You can also give a reporter class, eg
116 | # mypackage.mymodule.MyReporterClass.
117 | output-format=text
118 |
119 | # Tells whether to display a full report or only the messages
120 | reports=no
121 |
122 | # Python expression which should return a note less than 10 (10 is the highest
123 | # note). You have access to the variables 'fatal', 'error', 'warning', 'refactor', 'convention'
124 | # and 'info', which contain the number of messages in each category, as
125 | # well as 'statement', which is the total number of statements analyzed. This
126 | # score is used by the global evaluation report (RP0004).
127 | evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
128 |
129 | # Template used to display messages. This is a python new-style format string
130 | # used to format the message information. See doc for all details
131 | #msg-template=
132 |
133 | # Activate the evaluation score.
134 | score=yes
135 |
136 |
137 | [LOGGING]
138 |
139 | # Logging modules to check that the string format arguments are in logging
140 | # function parameter format
141 | logging-modules=logging
142 |
143 | # The type of string formatting that logging methods do. `old` means using %
144 | # formatting, `new` is for `{}` formatting.
145 | logging-format-style=old
146 |
147 |
148 | [MISCELLANEOUS]
149 |
150 | # List of note tags to take in consideration, separated by a comma.
151 | notes=FIXME,XXX,TODO
152 |
153 | # Regular expression of note tags to take in consideration.
154 | #notes-rgx=
155 |
156 |
157 | [SIMILARITIES]
158 |
159 | # Minimum lines number of a similarity.
160 | min-similarity-lines=6
161 |
162 | # Ignore comments when computing similarities.
163 | ignore-comments=yes
164 |
165 | # Ignore docstrings when computing similarities.
166 | ignore-docstrings=yes
167 |
168 | # Ignore imports when computing similarities.
169 | ignore-imports=yes
170 |
171 | # Signatures are removed from the similarity computation
172 | ignore-signatures=yes
173 |
174 |
175 | [VARIABLES]
176 |
177 | # Tells whether we should check for unused import in __init__ files.
178 | init-import=no
179 |
180 | # A regular expression matching the name of dummy variables (i.e. expectedly
181 | # not used).
182 | dummy-variables-rgx=_$|dummy
183 |
184 | # List of additional names supposed to be defined in builtins. Remember that
185 | # you should avoid defining new builtins when possible.
186 | additional-builtins=
187 |
188 | # List of strings which can identify a callback function by name. A callback
189 | # name must start or end with one of those strings.
190 | callbacks=cb_,_cb
191 |
192 | # Tells whether unused global variables should be treated as a violation.
193 | allow-global-unused-variables=yes
194 |
195 | # List of names allowed to shadow builtins
196 | allowed-redefined-builtins=
197 |
198 | # Argument names that match this expression will be ignored. Default to name
199 | # with leading underscore.
200 | ignored-argument-names=_.*
201 |
202 | # List of qualified module names which can have objects that can redefine
203 | # builtins.
204 | redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
205 |
206 |
207 | [FORMAT]
208 |
209 | # Maximum number of characters on a single line.
210 | max-line-length=120
211 |
212 | # Regexp for a line that is allowed to be longer than the limit.
213 | ignore-long-lines=^\s*(# )?<?https?://\S+>?$
214 |
215 | # Allow the body of an if to be on the same line as the test if there is no
216 | # else.
217 | single-line-if-stmt=no
218 |
219 | # Allow the body of a class to be on the same line as the declaration if body
220 | # contains single statement.
221 | single-line-class-stmt=no
222 |
223 | # Maximum number of lines in a module
224 | max-module-lines=1000
225 |
226 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
227 | # tab).
228 | indent-string=' '
229 |
230 | # Number of spaces of indent required inside a hanging or continued line.
231 | indent-after-paren=4
232 |
233 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
234 | expected-line-ending-format=
235 |
236 |
237 | [BASIC]
238 |
239 | # Good variable names which should always be accepted, separated by a comma
240 | good-names=i,j,k,ex,Run,_
241 |
242 | # Good variable names regexes, separated by a comma. If names match any regex,
243 | # they will always be accepted
244 | good-names-rgxs=
245 |
246 | # Bad variable names which should always be refused, separated by a comma
247 | bad-names=foo,bar,baz,toto,tutu,tata
248 |
249 | # Bad variable names regexes, separated by a comma. If names match any regex,
250 | # they will always be refused
251 | bad-names-rgxs=
252 |
253 | # Colon-delimited sets of names that determine each other's naming style when
254 | # the name regexes allow several styles.
255 | name-group=
256 |
257 | # Include a hint for the correct naming format with invalid-name
258 | include-naming-hint=no
259 |
260 | # Naming style matching correct function names.
261 | function-naming-style=snake_case
262 |
263 | # Regular expression matching correct function names
264 | function-rgx=[a-z_][a-z0-9_]{2,30}$
265 |
266 | # Naming style matching correct variable names.
267 | variable-naming-style=snake_case
268 |
269 | # Regular expression matching correct variable names
270 | variable-rgx=[a-z_][a-z0-9_]{2,30}$
271 |
272 | # Naming style matching correct constant names.
273 | const-naming-style=UPPER_CASE
274 |
275 | # Regular expression matching correct constant names
276 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
277 |
278 | # Naming style matching correct attribute names.
279 | attr-naming-style=snake_case
280 |
281 | # Regular expression matching correct attribute names
282 | attr-rgx=[a-z_][a-z0-9_]{2,}$
283 |
284 | # Naming style matching correct argument names.
285 | argument-naming-style=snake_case
286 |
287 | # Regular expression matching correct argument names
288 | argument-rgx=[a-z_][a-z0-9_]{2,30}$
289 |
290 | # Naming style matching correct class attribute names.
291 | class-attribute-naming-style=any
292 |
293 | # Regular expression matching correct class attribute names
294 | class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
295 |
296 | # Naming style matching correct class constant names.
297 | class-const-naming-style=UPPER_CASE
298 |
299 | # Regular expression matching correct class constant names. Overrides class-
300 | # const-naming-style.
301 | #class-const-rgx=
302 |
303 | # Naming style matching correct inline iteration names.
304 | inlinevar-naming-style=any
305 |
306 | # Regular expression matching correct inline iteration names
307 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
308 |
309 | # Naming style matching correct class names.
310 | class-naming-style=PascalCase
311 |
312 | # Regular expression matching correct class names
313 | class-rgx=[A-Z_][a-zA-Z0-9]+$
314 |
315 |
316 | # Naming style matching correct module names.
317 | module-naming-style=snake_case
318 |
319 | # Regular expression matching correct module names
320 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
321 |
322 |
323 | # Naming style matching correct method names.
324 | method-naming-style=snake_case
325 |
326 | # Regular expression matching correct method names
327 | method-rgx=[a-z_][a-z0-9_]{2,}$
328 |
329 | # Regular expression which can overwrite the naming style set by typevar-naming-style.
330 | #typevar-rgx=
331 |
332 | # Regular expression which should only match function or class names that do
333 | # not require a docstring. Use ^(?!__init__$)_ to also check __init__.
334 | no-docstring-rgx=__.*__
335 |
336 | # Minimum line length for functions/classes that require docstrings, shorter
337 | # ones are exempt.
338 | docstring-min-length=-1
339 |
340 | # List of decorators that define properties, such as abc.abstractproperty.
341 | property-classes=abc.abstractproperty
342 |
343 |
344 | [TYPECHECK]
345 |
346 | # Regex pattern to define which classes are considered mixins if ignore-mixin-
347 | # members is set to 'yes'
348 | mixin-class-rgx=.*MixIn
349 |
350 | # List of module names for which member attributes should not be checked
351 | # (useful for modules/projects where namespaces are manipulated during runtime
352 | # and thus existing member attributes cannot be deduced by static analysis). It
353 | # supports qualified module names, as well as Unix pattern matching.
354 | ignored-modules=
355 |
356 | # List of class names for which member attributes should not be checked (useful
357 | # for classes with dynamically set attributes). This supports the use of
358 | # qualified names.
359 | ignored-classes=SQLObject, optparse.Values, thread._local, _thread._local
360 |
361 | # List of members which are set dynamically and missed by pylint inference
362 | # system, and so shouldn't trigger E1101 when accessed. Python regular
363 | # expressions are accepted.
364 | generated-members=REQUEST,acl_users,aq_parent,argparse.Namespace
365 |
366 | # List of decorators that create context managers from functions, such as
367 | # contextlib.contextmanager.
368 | contextmanager-decorators=contextlib.contextmanager
369 |
370 | # Tells whether to warn about missing members when the owner of the attribute
371 | # is inferred to be None.
372 | ignore-none=yes
373 |
374 | # This flag controls whether pylint should warn about no-member and similar
375 | # checks whenever an opaque object is returned when inferring. The inference
376 | # can return multiple potential results while evaluating a Python object, but
377 | # some branches might not be evaluated, which results in partial inference. In
378 | # that case, it might be useful to still emit no-member and other checks for
379 | # the rest of the inferred objects.
380 | ignore-on-opaque-inference=yes
381 |
382 | # Show a hint with possible names when a member name was not found. The aspect
383 | # of finding the hint is based on edit distance.
384 | missing-member-hint=yes
385 |
386 | # The minimum edit distance a name should have in order to be considered a
387 | # similar match for a missing member name.
388 | missing-member-hint-distance=1
389 |
390 | # The total number of similar names that should be taken in consideration when
391 | # showing a hint for a missing member.
392 | missing-member-max-choices=1
393 |
394 | [SPELLING]
395 |
396 | # Spelling dictionary name. Available dictionaries: none. To make it working
397 | # install python-enchant package.
398 | spelling-dict=
399 |
400 | # List of comma separated words that should not be checked.
401 | spelling-ignore-words=
402 |
403 | # List of comma separated words that should be considered directives if they
404 | # appear and the beginning of a comment and should not be checked.
405 | spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:,pragma:,# noinspection
406 |
407 | # A path to a file that contains private dictionary; one word per line.
408 | spelling-private-dict-file=.pyenchant_pylint_custom_dict.txt
409 |
410 | # Tells whether to store unknown words to indicated private dictionary in
411 | # --spelling-private-dict-file option instead of raising a message.
412 | spelling-store-unknown-words=no
413 |
414 | # Limits count of emitted suggestions for spelling mistakes.
415 | max-spelling-suggestions=2
416 |
417 |
418 | [DESIGN]
419 |
420 | # Maximum number of arguments for function / method
421 | max-args=10
422 |
423 | # Maximum number of arguments for function / method
424 | max-positional-arguments=11
425 |
426 | # Maximum number of locals for function / method body
427 | max-locals=25
428 |
429 | # Maximum number of return / yield for function / method body
430 | max-returns=11
431 |
432 | # Maximum number of branch for function / method body
433 | max-branches=27
434 |
435 | # Maximum number of statements in function / method body
436 | max-statements=100
437 |
438 | # Maximum number of parents for a class (see R0901).
439 | max-parents=7
440 |
441 | # List of qualified class names to ignore when counting class parents (see R0901).
442 | ignored-parents=
443 |
444 | # Maximum number of attributes for a class (see R0902).
445 | max-attributes=11
446 |
447 | # Minimum number of public methods for a class (see R0903).
448 | min-public-methods=2
449 |
450 | # Maximum number of public methods for a class (see R0904).
451 | max-public-methods=25
452 |
453 | # Maximum number of boolean expressions in an if statement (see R0916).
454 | max-bool-expr=5
455 |
456 | # List of regular expressions of class ancestor names to
457 | # ignore when counting public methods (see R0903).
458 | exclude-too-few-public-methods=
459 |
460 | max-complexity=10
461 |
462 | [CLASSES]
463 |
464 | # List of method names used to declare (i.e. assign) instance attributes.
465 | defining-attr-methods=__init__,__new__,setUp,__post_init__
466 |
467 | # List of valid names for the first argument in a class method.
468 | valid-classmethod-first-arg=cls
469 |
470 | # List of valid names for the first argument in a metaclass class method.
471 | valid-metaclass-classmethod-first-arg=mcs
472 |
473 | # List of member names, which should be excluded from the protected access
474 | # warning.
475 | exclude-protected=_asdict,_fields,_replace,_source,_make
476 |
477 | # Warn about protected attribute access inside special methods
478 | check-protected-access-in-special-methods=no
479 |
480 | [IMPORTS]
481 |
482 | # List of modules that can be imported at any level, not just the top level
483 | # one.
484 | allow-any-import-level=
485 |
486 | # Allow wildcard imports from modules that define __all__.
487 | allow-wildcard-with-all=no
488 |
489 | # Analyse import fallback blocks. This can be used to support both Python 2 and
490 | # 3 compatible code, which means that the block might have code that exists
491 | # only in one or another interpreter, leading to false positives when analysed.
492 | analyse-fallback-blocks=no
493 |
494 | # Deprecated modules which should not be used, separated by a comma
495 | deprecated-modules=regsub,TERMIOS,Bastion,rexec
496 |
497 | # Create a graph of every (i.e. internal and external) dependencies in the
498 | # given file (report RP0402 must not be disabled)
499 | import-graph=
500 |
501 | # Create a graph of external dependencies in the given file (report RP0402 must
502 | # not be disabled)
503 | ext-import-graph=
504 |
505 | # Create a graph of internal dependencies in the given file (report RP0402 must
506 | # not be disabled)
507 | int-import-graph=
508 |
509 | # Force import order to recognize a module as part of the standard
510 | # compatibility libraries.
511 | known-standard-library=
512 |
513 | # Force import order to recognize a module as part of a third party library.
514 | known-third-party=enchant
515 |
516 | # Couples of modules and preferred modules, separated by a comma.
517 | preferred-modules=
518 |
519 |
520 | [EXCEPTIONS]
521 |
522 | # Exceptions that will emit a warning when being caught. Defaults to
523 | # "Exception"
524 | overgeneral-exceptions=builtins.Exception
525 |
526 |
527 | [TYPING]
528 |
529 | # Set to ``no`` if the app / library does **NOT** need to support runtime
530 | # introspection of type annotations. If you use type annotations
531 | # **exclusively** for type checking of an application, you're probably fine.
532 | # For libraries, evaluate if some users want to access the type hints at
533 | # runtime first, e.g., through ``typing.get_type_hints``. Applies to Python
534 | # versions 3.7 - 3.9
535 | runtime-typing = no
536 |
537 |
538 | [DEPRECATED_BUILTINS]
539 |
540 | # List of builtins function names that should not be used, separated by a comma
541 | bad-functions=map,input
542 |
543 |
544 | [REFACTORING]
545 |
546 | # Maximum number of nested blocks for function / method body
547 | max-nested-blocks=5
548 |
549 | # Complete name of functions that never returns. When checking for
550 | # inconsistent-return-statements if a never returning function is called then
551 | # it will be considered as an explicit return statement and no message will be
552 | # printed.
553 | never-returning-functions=sys.exit,argparse.parse_error
554 |
555 |
556 | [STRING]
557 |
558 | # This flag controls whether inconsistent-quotes generates a warning when the
559 | # character used as a quote delimiter is used inconsistently within a module.
560 | check-quote-consistency=no
561 |
562 | # This flag controls whether the implicit-str-concat should generate a warning
563 | # on implicit string concatenation in sequences defined over several lines.
564 | check-str-concat-over-line-jumps=no
565 |
566 |
567 | [CODE_STYLE]
568 |
569 | # Max line length for which to still emit suggestions. Used to prevent optional
570 | # suggestions which would get split by a code formatter (e.g., black). Will
571 | # default to the setting for ``max-line-length``.
572 | #max-line-length-suggestions=
573 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2023 - 2023, Timo Kaiser
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without modification,
5 | are permitted provided that the following conditions are met:
6 |
7 | 1. Redistributions of source code must retain the above copyright notice, this
8 | list of conditions and the following disclaimer.
9 |
10 | 2. Redistributions in binary form must reproduce the above copyright notice,
11 | this list of conditions and the following disclaimer in the documentation
12 | and/or other materials provided with the distribution.
13 |
14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
18 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
24 | POSSIBILITY OF SUCH DAMAGE.
25 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://opensource.org/licenses/BSD-2-Clause)
2 | [](https://github.com/TimoK93/ctc-metrics/actions/workflows/pylint.yml)
3 | [](https://github.com/TimoK93/ctc-metrics/actions/workflows/python-package.yml)
4 | [](https://github.com/CellTrackingChallenge/py-ctcmetrics/actions/workflows/publish.yml)
5 |
6 | # Py-CTCMetrics
7 | A Python implementation of the metrics used in the paper
8 | [CHOTA: A Higher Order Accuracy Metric for Cell Tracking](https://arxiv.org/abs/2408.11571) by
9 | *Timo Kaiser et al.* The code is
10 | designed to evaluate tracking results in the format of the
11 | [Cell-Tracking-Challenge](https://celltrackingchallenge.net/) but can also be used
12 | for custom results.
13 | The repository contains the metrics of the
14 | [Cell-Tracking-Challenge](https://celltrackingchallenge.net/),
15 | the [MOTChallenge](https://motchallenge.net/), and the
16 | [CHOTA](https://arxiv.org/abs/2408.11571) metric.
17 |
18 | Detailed descriptions of the metrics can be found in the [paper](https://arxiv.org/abs/2408.11571).
19 |
20 | ---
21 |
22 | ## Features at a Glance
23 |
24 | - **Validation**: Check if the tracking results are correctly formatted.
25 | - **Evaluation**: Evaluate the tracking results with respect to the ground truth using CTC, MotChallenge, and CHOTA metrics.
26 | - **Visualization**: Visualize the tracking results.
27 | - **Video Creation**: Create a video of the visualized tracking results for presentation purposes.
28 | - **Noise Evaluation**: Evaluate the impact of specific errors on the evaluation metrics.
29 |
30 | ---
31 |
32 | ## Requirements
33 |
34 | We tested the code with **Python 3.10**. Additional packages that will be
35 | installed automatically are listed in the [requirements.txt](requirements.txt).
36 |
37 | ---
38 |
39 | ## Installation
40 |
41 | The package can be installed via pip:
42 |
43 | ```bash
44 | pip install py-ctcmetrics
45 | ```
46 |
47 | or from the source code:
48 |
49 | ```bash
50 | git clone https://github.com/CellTrackingChallenge/py-ctcmetrics
51 | cd py-ctcmetrics
52 | pip install .
53 | ```
54 |
55 | ---
56 |
57 | ## Usage
58 |
59 |
60 | The package supports validation, evaluation, and visualization of tracking
61 | results. The following examples assume a directory that is structured in the
62 | CTC format as follows:
63 |
64 | ```bash
65 | ctc
66 | ├── train
67 | │ ├── challenge_x
68 | │ │ ├── 01_GT
69 | │ │ ├── 01_RES
70 | │ │ ├── 02_GT
71 | │ │ ├── 02_RES
72 | │ ├── challenge_y
73 | │ │ ├── 01_GT
74 | │ │ ├── 01_RES
75 | │ │ ├── 02_GT
76 | │ │ ├── 02_RES
77 | ```
78 | The directory ```ctc``` contains the training data. The subdirectories
79 | ```challenge_x``` and ```challenge_y``` contain the data for the different
80 | challenges. Each challenge directory contains subdirectories for the sequences
81 | ```01_GT```, ```01_RES```, ```02_GT```, and ```02_RES```. The directories
82 | ```01_GT``` and ```01_RES``` contain the ground truth and the tracking results
83 | for the sequence ```01```. The same applies to the directories ```02_GT``` and
84 | ```02_RES```.
85 |
86 |
87 | ### Validation
88 |
89 | Produced tracking results can be invalid for various reasons. To check
90 | whether the tracking results are correctly formatted according to the
91 | CTC format, use the ```ctc_validate``` command. The command only checks the
92 | formatting of the results; no ground truth is needed for this
93 | validation.
94 |
95 | To validate if ```challenge_x/01``` is correctly formatted, run the
96 | command
97 | ```bash
98 | ctc_validate --res "/ctc/train/challenge_x/01_RES"
99 | ```
100 | Moreover, you can recursively validate the tracking results for all
101 | challenges/sequences in a directory by adding the flag ```-r```:
102 | ```bash
103 | ctc_validate --res "/ctc/train" -r
104 | ```
105 | In this example, all four sequences will be validated.
106 |
107 | ### Evaluation
108 |
109 | To evaluate results against the ground truth, similar commands can be used.
110 | For example, to evaluate the sequence ```challenge_x/01```, run the command
111 | ```bash
112 | ctc_evaluate --gt "/ctc/train/challenge_x/01_GT" --res "/ctc/train/challenge_x/01_RES"
113 | ```
114 | or recursively for all sequences in a directory:
115 | ```bash
116 | ctc_evaluate --gt "/ctc/train" --res "/ctc/train" -r
117 | ```
118 | The ```gt``` argument specifies the path to the ground truth directory, and the
119 | ```res``` argument specifies the path to the results directory. The flag ```-r```
120 | recursively evaluates all sequences in the directory.
121 |
122 | By default, the code is executed using multiple processes, with one process
123 | per available CPU core. Multiprocessing decreases the execution time but
124 | also increases the memory consumption. If you need to limit the maximal
125 | number of processes, it can be specified with the argument
126 | ```--num-threads``` or ```-n```:
127 | 
128 | ```bash
129 | ctc_evaluate --gt "/ctc/train" --res "/ctc/train" -r -n 4
130 | ```
131 |
132 | The evaluation results are printed to the console. If you want to save the
133 | results to a csv file, you can use the argument ```--csv-file``` :
134 | ```bash
135 | ctc_evaluate --gt "/ctc/train" --res "/ctc/train" -r --csv-file "/ctc/results.csv"
136 | ```
137 | **Note:** The csv file will be overwritten if it already exists!
138 |
139 | The following table shows the available arguments:
140 |
141 | | Argument | Description | Default |
142 | |---------------|------------------------------------------------------| --- |
143 | | --gt | Path to the ground truth directory. | None |
144 | | --res | Path to the results directory. | None |
145 | | --recursive | Recursively evaluate all sequences in the directory. | False |
146 | | --csv-file | Path to a csv file to save the results. | None |
147 | | --num-threads | Number of threads to use for evaluation. | 1 |
148 |
149 | By default, all available metrics are evaluated. You can also select only the
150 | metrics you are interested in to avoid computing metrics that you do not need;
151 | an example follows the table. The arguments to select a subset of metrics are:
152 |
153 | | Argument | Description |
154 | | --- |-----------------------------------------------------------------|
155 | | --valid | Check if the result has valid format |
156 | | --det | The DET detection metric |
157 | | --seg | The SEG segmentation metric |
158 | | --tra | The TRA tracking metric |
159 | | --lnk | The LNK linking metric |
160 | | --ct | The CT (complete tracks) metric |
161 | | --tf | The TF (track fraction) metric |
162 | | --bc | The BC(i) (branching correctness) metric |
163 | | --cca | The CCA (cell cycle accuracy) metric |
164 | | --mota | The MOTA (Multiple Object Tracking Accuracy) metric |
165 | | --hota | The HOTA (Higher Order Tracking Accuracy) metric |
166 | | --idf1 | The IDF1 (ID F1) metric |
167 | | --chota | The CHOTA (Cell-specific Higher Order Tracking Accuracy) metric |
168 | | --mtml | The MT (Mostly Tracked) and ML (Mostly Lost) metrics |
169 | | --faf | The FAF (False Alarm per Frame) metric |
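
For example, to compute only the DET, SEG, and TRA metrics for all sequences,
the flags can be combined as follows (a sketch; the paths are placeholders):

```bash
ctc_evaluate --gt "/ctc/train" --res "/ctc/train" -r --det --seg --tra
```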
170 | ---
171 |
172 | To use the evaluation protocol in your python code, the code can be imported
173 | as follows:
174 |
175 | ```python
176 | from ctc_metrics import evaluate_sequence, validate_sequence
177 |
178 | # Validate the sequence
179 | res = validate_sequence("/ctc/train/challenge_x/01_RES")
180 | print(res["Valid"])
181 |
182 | # Evaluate the sequence
183 | res = evaluate_sequence("/ctc/train/challenge_x/01_RES/TRA", "/ctc/train/challenge_x/01_GT")
184 | print(res["DET"])
185 | print(res["SEG"])
186 | print(res["TRA"])
187 | ...
188 |
189 | ```
190 |
191 | ### Visualization
192 |
193 |
194 |
195 |
196 |
197 |
198 | You can visualize your tracking results with the following command:
199 |
200 | ```bash
201 | ctc_visualize --img "/ctc/train/challenge_x/01" --res "/ctc/train/challenge_x/01_RES"
202 | ```
203 |
204 | The command will show the visualizations of the tracking results. You can
205 | control the visualization with specific keys:
206 |
207 |
208 | | Key | Description |
209 | |-------|---------------------------------------------------------------------|
210 | | q | Quits the Application |
211 | | w | Start or Pause the auto visualization |
212 | | d | Move to the next frame |
213 | | a | Move to the previous frame |
214 | | l | Toggle the show labels option |
215 | | p | Toggle the show parents option |
216 | | s | Save the current frame to the visualization directory as .jpg image |
217 |
218 |
219 | There are additional arguments that can be used to specify the visualization.
220 | The following table shows the available arguments:
221 |
222 |
223 | | Argument | Description | Default |
224 | |-------------------|------------------------------------------------------------------------------------------|---------|
225 | | --img | The directory to the images **(required)** | |
226 | | --res | The directory to the result masks **(required)** | |
227 | | --viz | The directory to save the visualization | None |
228 | | --video-name | The path to the video if a video should be created | None |
229 | | --border-width | The width of the border. Either an integer or a string that describes the challenge name | None |
230 | | --show-no-labels  | Do not draw instance labels in the output                                                  | False   |
231 | | --show-no-parents | Do not draw parent labels in the output                                                    | False   |
232 | | --ids-to-show | The IDs of the instances to show. If defined, all others will be ignored. | None |
233 | | --start-frame | The frame to start the visualization | 0 |
234 | | --framerate | The framerate of the video | 10 |
235 | | --opacity | The opacity of the instance colors | 0.5 |
236 |
237 | **Note:** The argument `--video-name` describes the path to a video file. The file
238 | extension needs to be `.mp4`. If a video name is given, a video is created and
239 | the visualization is not shown on the screen. Please use the arguments to
240 | specify your visualization settings such as the opacity, border width, etc.
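
As a sketch (paths and values are placeholders), rendering a video instead of the
interactive view could look like this:

```bash
ctc_visualize --img "/ctc/train/challenge_x/01" --res "/ctc/train/challenge_x/01_RES" \
    --video-name "/ctc/videos/challenge_x_01.mp4" --framerate 10 --opacity 0.3
```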
241 |
242 | ### Impact of Errors and Noise on the Evaluation Metrics
243 |
244 | The evaluation metrics are sensitive to errors in the tracking results. To
245 | investigate the impact of detection and association errors on your dataset
246 | with respect to the evaluation metrics, the repository provides a script that
247 | automatically runs the evaluation with different noise levels. The script
248 | uses the ground truth of the data as an oracle tracking result. Then, noise
249 | is added to the oracle tracking result and the evaluation metrics are
250 | calculated for the noisy tracking results. The script independently evaluates
251 | the influence of randomly inserted false positives, false negatives, ID
252 | switches, and removals of correct result-to-reference matches.
253 | The results are stored in a predefined csv file. The script can be used as
254 | follows:
255 |
256 | ```bash
257 | ctc_noise --gt "/ctc/train/challenge_x/01_GT" --csv-file "/ctc/noise_experiments.csv"
258 | ```
259 |
260 | Similar to the validation and evaluation scripts, the script can be used
261 | recursively for all sequences in a directory by adding the flag ```-r```.
262 | Moreover, the number of threads can be specified with the argument
263 | ```--num-threads``` or ```-n```. Since metrics are sometimes prone to randomness
264 | in the distribution of the errors, the script can be run multiple times with
265 | different seeds to get a more robust evaluation. The number of seeds can be
266 | specified with the argument ```--repeats```. An example is shown below the table.
267 |
268 | The following table shows the available arguments:
269 |
270 | | Argument | Description | Default |
271 | |---------------|--------------------------------------------------------------------| --- |
272 | | --gt | Path to the ground truth directory. | None |
273 | | --csv-file | Path to a csv file to save the results. | None |
274 | | --recursive | Recursively evaluate all sequences in the directory. | False |
275 | | --num-threads | Number of threads to use for evaluation. | 1 |
276 | | --repeats | Number of repeats to run the evaluation. | 1 |
277 | | --num-false-pos | Number of false positive instances to add to the tracking results. | 500 |
278 | | --num-false-neg | Number of false negative instances to add to the tracking results. | 500 |
279 | | --num-idsw | Number of ID switches to add to the tracking results. | 500 |
280 | | --num-matches | Number of matches to add to the tracking results. | 500 |
281 | | --save-after | Save the intermediate results after a specific number of runs. | 100 |
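
For example, a recursive noise experiment over all sequences with several seeds
could be started as follows (a sketch; the paths are placeholders):

```bash
ctc_noise --gt "/ctc/train" --csv-file "/ctc/noise_experiments.csv" -r --repeats 5 -n 4
```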
282 |
283 |
284 | ---
285 |
286 | ## Notes
287 |
288 | - If you only have segmentation results and want to evaluate the *SEG* metric,
289 | it is important that you pass the *--seg* flag to the evaluation command.
290 | Otherwise, the code could flag your input as invalid, because the
291 | *res_track.txt* file is missing or inconsistent.
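
  For example, a segmentation-only evaluation (paths are placeholders) would be:

  ```bash
  ctc_evaluate --gt "/ctc/train/challenge_x/01_GT" --res "/ctc/train/challenge_x/01_RES" --seg
  ```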
292 |
293 | ---
294 |
295 | ## Contributing
296 |
297 | Contributions are welcome! For bug reports or requests please
298 | [submit an issue](https://github.com/TimoK93/ctc-metrics/issues). For new features
299 | please [submit a pull request](https://github.com/TimoK93/ctc-metrics/pulls).
300 |
301 | If you want to contribute, please check your code with pylint and the
302 | pre-commit hooks before submitting a pull request:
303 |
304 | ```bash
305 | pip install pre-commit pylint
306 | pre-commit run --all-files
307 | ```
308 |
309 | ---
310 |
311 | ## Acknowledgement and Citations
312 |
313 | The code was developed by Timo Kaiser on behalf of the [Institute of Information
314 | Processing](https://www.tnt.uni-hannover.de/) at the Leibniz University Hanover in conjunction with the
315 | organizers of the [Cell-Tracking-Challenge](https://celltrackingchallenge.net/).
316 |
317 |
318 | If you use this code or the CHOTA metric in your research, please cite the following paper:
319 | ```bibtex
320 | @inproceedings{kaiser2024chota,
321 | author = {Kaiser, Timo and Ulman, Vladimír and Rosenhahn, Bodo},
322 | title = {CHOTA: A Higher Order Accuracy Metric for Cell Tracking},
323 | booktitle = {European Conference on Computer Vision Workshops (ECCVW)},
324 | year = {2024},
325 | organization={Springer}
326 | }
327 | ```
328 |
329 | If you use CTC-Metrics (BC, CT, CCA, TF, SEG, TRA, DET, LNK) in your research, please cite the following paper:
330 | ```bibtex
331 | @article{thecelltrackingchallenge,
332 | author = {Maška, Martin and Ulman, Vladimír and Delgado-Rodriguez, Pablo and Gómez de Mariscal, Estibaliz and Necasova, Tereza and Guerrero Peña, Fidel Alejandro and Ing Ren, Tsang and Meyerowitz, Elliot and Scherr, Tim and Löffler, Katharina and Mikut, Ralf and Guo, Tianqi and Wang, Yin and Allebach, Jan and Bao, Rina and Al-Shakarji, Noor and Rahmon, Gani and Toubal, Imad Eddine and Palaniappan, K. and Ortiz-de-Solorzano, Carlos},
333 | year = {2023},
334 | month = {05},
335 | pages = {1-11},
336 | title = {The Cell Tracking Challenge: 10 years of objective benchmarking},
337 | volume = {20},
338 | journal = {Nature Methods},
339 | doi = {10.1038/s41592-023-01879-y}
340 | }
341 | ```
342 | and
343 | ```bibtex
344 | @article{matula2015cell,
345 | title={Cell tracking accuracy measurement based on comparison of acyclic oriented graphs},
346 | author={Matula, Pavel and Ma{\v{s}}ka, Martin and Sorokin, Dmitry V and Matula, Petr and Ortiz-de-Sol{\'o}rzano, Carlos and Kozubek, Michal},
347 | journal={PloS one},
348 | volume={10},
349 | number={12},
350 | pages={e0144959},
351 | year={2015},
352 | publisher={Public Library of Science San Francisco, CA USA}
353 | }
354 | ```
355 |
356 | If you use the HOTA metric in your research, please cite the following paper:
357 | ```bibtex
358 | @article{luiten2021hota,
359 | title={Hota: A higher order metric for evaluating multi-object tracking},
360 | author={Luiten, Jonathon and Osep, Aljosa and Dendorfer, Patrick and Torr, Philip and Geiger, Andreas and Leal-Taix{\'e}, Laura and Leibe, Bastian},
361 | journal={International journal of computer vision},
362 | volume={129},
363 | pages={548--578},
364 | year={2021},
365 | publisher={Springer}
366 | }
367 | ```
368 |
369 | If you use the IDF1 metric in your research, please cite the following paper:
370 | ```bibtex
371 | @inproceedings{ristani2016performance,
372 | title={Performance measures and a data set for multi-target, multi-camera tracking},
373 | author={Ristani, Ergys and Solera, Francesco and Zou, Roger and Cucchiara, Rita and Tomasi, Carlo},
374 | booktitle={European conference on computer vision},
375 | pages={17--35},
376 | year={2016},
377 | organization={Springer}
378 | }
379 | ```
380 |
381 | If you use the MOTA metric in your research, please cite the following paper:
382 | ```bibtex
383 | @article{bernardin2008evaluating,
384 | title={Evaluating multiple object tracking performance: the clear mot metrics},
385 | author={Bernardin, Keni and Stiefelhagen, Rainer},
386 | journal={EURASIP Journal on Image and Video Processing},
387 | volume={2008},
388 | pages={1--10},
389 | year={2008},
390 | publisher={Springer}
391 | }
392 | ```
393 |
394 | ---
395 |
396 | ## License
397 |
398 | This project is licensed under the BSD 2-Clause License - see the
399 | [LICENSE](LICENSE.txt) file for details.
400 |
401 |
402 |
--------------------------------------------------------------------------------
/assets/visualization.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/assets/visualization.jpg
--------------------------------------------------------------------------------
/ctc_metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from ctc_metrics.scripts.evaluate import evaluate_sequence
2 | from ctc_metrics.scripts.validate import validate_sequence
3 |
--------------------------------------------------------------------------------
/ctc_metrics/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from ctc_metrics.metrics.validation.valid import valid
2 | from ctc_metrics.metrics.biological.bc import bc
3 | from ctc_metrics.metrics.biological.ct import ct
4 | from ctc_metrics.metrics.biological.cca import cca
5 | from ctc_metrics.metrics.biological.tf import tf
6 | from ctc_metrics.metrics.technical.seg import seg
7 | from ctc_metrics.metrics.technical.tra import tra
8 | from ctc_metrics.metrics.technical.det import det
9 | from ctc_metrics.metrics.clearmot.mota import mota
10 | from ctc_metrics.metrics.hota.hota import hota
11 | from ctc_metrics.metrics.hota.chota import chota
12 | from ctc_metrics.metrics.identity_metrics.idf1 import idf1
13 | from ctc_metrics.metrics.others.mt_ml import mtml
14 | from ctc_metrics.metrics.others.faf import faf
15 | from ctc_metrics.metrics.technical.op_ctb import op_ctb
16 | from ctc_metrics.metrics.technical.op_csb import op_csb
17 | from ctc_metrics.metrics.biological.bio import bio
18 | from ctc_metrics.metrics.biological.op_clb import op_clb
19 | from ctc_metrics.metrics.technical.lnk import lnk
20 |
21 | ALL_METRICS = [
22 | "Valid", "CHOTA", "BC", "CT", "CCA", "TF", "SEG", "TRA", "DET", "MOTA",
23 | "HOTA", "IDF1", "MTML", "FAF", "LNK", "OP_CTB", "OP_CSB", "BIO", "OP_CLB"
24 | ]
25 |
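
A small, hypothetical usage sketch (not part of the file): the names imported
above can be used directly from the subpackage.

```python
from ctc_metrics.metrics import ALL_METRICS, bio

print(ALL_METRICS)                           # all metric keys handled by the evaluator
print(bio(ct=1.0, tf=1.0, bc=0.5, cca=0.5))  # 0.75, the average of the four inputs
```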
--------------------------------------------------------------------------------
/ctc_metrics/metrics/biological/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/ctc_metrics/metrics/biological/__init__.py
--------------------------------------------------------------------------------
/ctc_metrics/metrics/biological/bc.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def get_ids_that_ends_with_split(
5 | tracks: np.ndarray
6 | ):
7 | """
8 | Extracts the ids of tracks that end with a cell split.
9 |
10 | Args:
11 | tracks: The tracks to check. A numpy nd array with columns:
12 | - label
13 | - birth frame
14 | - end frame
15 | - parent
16 |
17 | Returns:
18 | The ids of tracks that end with a cell split stored in a numpy.ndarray.
19 | """
20 | parents, counts = np.unique(tracks[:, 3], return_counts=True)
21 | counts = counts[parents > 0]
22 | parents = parents[parents > 0]
23 | ends_with_split = parents[counts > 1]
24 | return ends_with_split
25 |
26 |
27 | def calculate_f1_score(
28 | tp: int,
29 | fp: int,
30 | fn: int
31 | ):
32 | """
33 | Calculates the f1 score.
34 |
35 | Args:
36 | tp: The number of true positives.
37 | fp: The number of false positives.
38 | fn: The number of false negatives.
39 |
40 | Returns:
41 | The f1 score.
42 | """
43 | precision = tp / max((tp + fp), 1)
44 | recall = tp / max((tp + fn), 1)
45 | f1_score = 2 * (precision * recall) / max((precision + recall), 0.0001)
46 | return f1_score
47 |
48 |
49 | def is_matching(
50 | id_comp: int,
51 | id_ref: int,
52 | mapped_ref: list,
53 | mapped_comp: list,
54 | ref_children: np.ndarray,
55 | comp_children: np.ndarray,
56 | tr: int,
57 | tc: int
58 | ):
59 | """
60 | Checks if the reference and the computed track match.
61 |
62 | Args:
63 | id_comp: The computed track id.
64 | id_ref: The reference track id.
65 | mapped_ref: The matched labels of the ground truth masks.
66 | mapped_comp: The matched labels of the result masks.
67 | ref_children: The children ids of the reference track.
68 | comp_children: The children ids of the computed track.
69 | tr: The frame of the reference track end.
70 | tc: The frame of the computed track end.
71 |
72 | Returns:
73 | True if the reference and the computed track match, False otherwise.
74 | """
75 | # Check if the number of children is the same
76 | if len(ref_children) != len(comp_children):
77 | return False
78 | # Compare parents
79 | t1, t2 = min(tr, tc), max(tr, tc)
80 | mr, mc = mapped_ref[t1], mapped_comp[t1]
81 | if np.sum(mc == id_comp) < 1 or np.sum(mr == id_ref) != 1:
82 | return False
83 | ind = np.argwhere(mr == id_ref).squeeze()
84 | if mc[ind] != id_comp:
85 | return False
86 | # Compare children
87 | mr, mc = np.asarray(mapped_ref[t2 + 1]), np.asarray(mapped_comp[t2 + 1])
88 | if not np.all(np.isin(comp_children, mc)):
89 | return False
90 | if not np.all(np.isin(mr[np.isin(mc, comp_children)], ref_children)):
91 | return False
92 | return True
93 |
94 |
95 | def bc(
96 | comp_tracks: np.ndarray,
97 | ref_tracks: np.ndarray,
98 | mapped_ref: list,
99 | mapped_comp: list,
100 | i: int
101 | ):
102 | """
103 | Computes the branching correctness metric. As described in the paper,
104 | "An objective comparison of cell-tracking algorithms."
105 | - Vladimir Ulman et al., Nature methods 2017
106 |
107 | Args:
108 | comp_tracks: The result tracks. A (n,4) numpy ndarray with columns:
109 | - label
110 | - birth frame
111 | - end frame
112 | - parent
113 | ref_tracks: The ground truth tracks. A (n,4) numpy ndarray with columns:
114 | - label
115 | - birth frame
116 | - end frame
117 | - parent
118 | mapped_ref: The matched labels of the ground truth masks. A list of
119 | length equal to the number of frames. Each element is a list with
120 | the matched labels of the ground truth masks in the respective
121 | frame. The elements are in the same order as the corresponding
122 | elements in mapped_comp.
123 | mapped_comp: The matched labels of the result masks. A list of length
124 | equal to the number of frames. Each element is a list with the
125 | matched labels of the result masks in the respective frame. The
126 | elements are in the same order as the corresponding elements in
127 | mapped_ref.
128 | i: The maximal allowed error in frames.
129 |
130 | Returns:
131 | The branching correctness metric.
132 | """
133 |
134 | # Extract relevant tracks with children in reference
135 | ends_with_split_ref = get_ids_that_ends_with_split(ref_tracks)
136 | t_ref = np.array([ref_tracks[ref_tracks[:, 0] == ref][0, 2]
137 | for ref in ends_with_split_ref])
138 | if len(ends_with_split_ref) == 0:
139 | return None
140 | # Extract relevant tracks with children in computed result
141 | ends_with_split_comp = get_ids_that_ends_with_split(comp_tracks)
142 | t_comp = np.asarray([comp_tracks[comp_tracks[:, 0] == comp][0, 2]
143 | for comp in ends_with_split_comp])
144 | if len(ends_with_split_comp) == 0:
145 | return 0
146 | # Find all matches between reference and computed branching events (mitosis)
147 | matches = []
148 | for comp, tc in zip(ends_with_split_comp, t_comp):
149 | # Find potential matches
150 | pot_matches = np.abs(t_ref - tc) <= i
151 |         if not np.any(pot_matches):
152 | continue
153 | comp_children = comp_tracks[comp_tracks[:, 3] == comp][:, 0]
154 | # Evaluate potential matches
155 | for ref, tr in zip(
156 | ends_with_split_ref[pot_matches],
157 | t_ref[pot_matches]
158 | ):
159 | ref_children = ref_tracks[ref_tracks[:, 3] == ref][:, 0]
160 | if is_matching(
161 | comp, ref, mapped_ref, mapped_comp, ref_children,
162 | comp_children, tr, tc
163 | ):
164 | matches.append((ref, comp))
165 | # Calculate BC(i)
166 | return calculate_f1_score(
167 | len(matches),
168 | len(ends_with_split_comp) - len(matches),
169 | len(ends_with_split_ref) - len(matches))
170 |
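A minimal worked sketch (illustrative counts, assuming calculate_f1_score defined above is in scope): BC(i) is simply the F1 score over matched branching (mitosis) events.

matches, n_ref_splits, n_comp_splits = 8, 10, 9   # hypothetical event counts
tp = matches                                      # matched branching events
fp = n_comp_splits - matches                      # computed splits with no reference partner
fn = n_ref_splits - matches                       # reference splits that were missed
print(calculate_f1_score(tp, fp, fn))             # 2*P*R/(P+R) with P=8/9, R=8/10 ≈ 0.842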
--------------------------------------------------------------------------------
/ctc_metrics/metrics/biological/bio.py:
--------------------------------------------------------------------------------
1 |
2 | def bio(
3 | ct: float,
4 | tf: float,
5 | bc: float,
6 | cca: float,
7 | ):
8 | """
9 | Computes the BIO. As described by
10 | [celltrackingchallenge](http://celltrackingchallenge.net/).
11 | It is the average of the CT, TF, BC, and CCA metrics. If a metric is not
12 | available, it is not considered in the average.
13 |
14 | Args:
15 | ct: The complete tracking metric.
16 | tf: The track fractions metric.
17 | bc: The branching correctness metric.
18 | cca: The cell cycle accuracy metric.
19 |
20 | Returns:
21 | The BIO metric.
22 | """
23 | total_metrics = 0
24 | if ct is not None:
25 | total_metrics += 1
26 | else:
27 | ct = 0
28 | if tf is not None:
29 | total_metrics += 1
30 | else:
31 | tf = 0
32 | if bc is not None:
33 | total_metrics += 1
34 | else:
35 | bc = 0
36 | if cca is not None:
37 | total_metrics += 1
38 | else:
39 | cca = 0
40 | bio_score = (ct + tf + bc + cca) / total_metrics
41 | return bio_score
42 |
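A small usage sketch (values are illustrative, assuming the bio function above is in scope): metrics that are None are simply dropped from the average.

score = bio(ct=0.6, tf=0.9, bc=0.75, cca=None)  # CCA unavailable, average over 3 metrics
print(score)                                    # (0.6 + 0.9 + 0.75) / 3 = 0.75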
--------------------------------------------------------------------------------
/ctc_metrics/metrics/biological/cca.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from ctc_metrics.metrics.biological.bc import get_ids_that_ends_with_split
4 |
5 |
6 | def is_valid_track(
7 | tracks: np.ndarray,
8 | ):
9 | """
10 | Extracts the boolean indices of valid tracks that start and end with a
11 | cell split.
12 |
13 | Args:
14 | tracks: The tracks to check.
15 |
16 | Returns:
17 | The boolean indices of valid tracks that start and end with a cell
18 | split.
19 | """
20 | ends_with_split = get_ids_that_ends_with_split(tracks)
21 | is_parent = np.isin(tracks[:, 0], ends_with_split)
22 | is_child = np.isin(tracks[:, 3], ends_with_split)
23 | valid = np.logical_and(is_parent, is_child)
24 | return valid
25 |
26 |
27 | def cca(
28 | comp_tracks: np.ndarray,
29 | ref_tracks: np.ndarray
30 | ):
31 | """
32 | Computes the cell cycle accuracy. As described in the paper,
33 | "An objective comparison of cell-tracking algorithms."
34 | - Vladimir Ulman et al., Nature methods 2017
35 |
36 | Args:
37 | comp_tracks: The result tracks. A (n,4) numpy ndarray with columns:
38 | - label
39 | - birth frame
40 | - end frame
41 | - parent
42 | ref_tracks: The ground truth tracks. A (n,4) numpy ndarray with columns:
43 | - label
44 | - birth frame
45 | - end frame
46 | - parent
47 |
48 | Returns:
49 | The cell cycle accuracy metric.
50 | """
51 |
52 | # Extract relevant tracks with parents and children in reference
53 | valid_ref = is_valid_track(ref_tracks)
54 | if np.sum(valid_ref) == 0:
55 | return None
56 | track_lengths_ref = ref_tracks[valid_ref, 2] - ref_tracks[valid_ref, 1]
57 | # Extract relevant tracks with parents and children in computed result
58 | valid_comp = is_valid_track(comp_tracks)
59 | if np.sum(valid_comp) == 0:
60 | return 0
61 | track_lengths_comp = comp_tracks[valid_comp, 2] - comp_tracks[valid_comp, 1]
62 | # Calculate CCA
63 | max_track_length = np.max(
64 | [np.max(track_lengths_ref), np.max(track_lengths_comp)])
65 |     hist_ref = np.zeros(max_track_length + 1)
66 | for i in track_lengths_ref:
67 | hist_ref[i] += 1
68 | hist_ref = hist_ref / np.sum(hist_ref)
69 | cum_hist_ref = np.cumsum(hist_ref)
70 |     hist_comp = np.zeros(max_track_length + 1)
71 | for i in track_lengths_comp:
72 | hist_comp[i] += 1
73 | hist_comp = hist_comp / np.sum(hist_comp)
74 | cum_hist_comp = np.cumsum(hist_comp)
75 | return float(1 - np.max(np.abs(cum_hist_ref - cum_hist_comp)))
76 |
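A self-contained numeric sketch of the same computation (the cell-cycle lengths are illustrative): CCA is one minus the largest deviation between the cumulative length histograms of reference and computed tracks.

import numpy as np

lengths_ref = np.array([4, 4, 6])    # hypothetical cell-cycle lengths (reference)
lengths_comp = np.array([4, 6, 6])   # hypothetical cell-cycle lengths (computed)
n = max(lengths_ref.max(), lengths_comp.max()) + 1
cum_ref = np.cumsum(np.bincount(lengths_ref, minlength=n) / len(lengths_ref))
cum_comp = np.cumsum(np.bincount(lengths_comp, minlength=n) / len(lengths_comp))
print(1 - np.max(np.abs(cum_ref - cum_comp)))  # 1 - |2/3 - 1/3| ≈ 0.667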
--------------------------------------------------------------------------------
/ctc_metrics/metrics/biological/ct.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from ctc_metrics.utils.representations import assign_comp_to_ref
4 |
5 |
6 | def ct(
7 | comp_tracks: np.ndarray,
8 | ref_tracks: np.ndarray,
9 | labels_ref: list,
10 | mapped_ref: list,
11 | mapped_comp: list
12 | ):
13 | """
14 | Computes the complete tracks metric. As described in the paper,
15 | "An objective comparison of cell-tracking algorithms."
16 | - Vladimir Ulman et al., Nature methods 2017
17 |
18 | Args:
19 | comp_tracks: The result tracks. A (n,4) numpy ndarray with columns:
20 | - label
21 | - birth frame
22 | - end frame
23 | - parent
24 | ref_tracks: The ground truth tracks. A (n,4) numpy ndarray with columns:
25 | - label
26 | - birth frame
27 | - end frame
28 | - parent
29 | labels_ref: The labels of the ground truth masks. A list of length
30 | equal to the number of frames. Each element is a list with the
31 | labels of the ground truth masks in the respective frame.
32 | mapped_ref: The matched labels of the ground truth masks. A list of
33 | length equal to the number of frames. Each element is a list with
34 | the matched labels of the ground truth masks in the respective
35 | frame. The elements are in the same order as the corresponding
36 | elements in mapped_comp.
37 | mapped_comp: The matched labels of the result masks. A list of length
38 | equal to the number of frames. Each element is a list with the
39 | matched labels of the result masks in the respective frame. The
40 | elements are in the same order as the corresponding elements in
41 | mapped_ref.
42 |
43 | Returns:
44 | The complete tracks metric.
45 | """
46 | track_assignments = assign_comp_to_ref(labels_ref, mapped_ref, mapped_comp)
47 | correct_tracks = 0
48 | for k, v in track_assignments.items():
49 | start_ref = ref_tracks[ref_tracks[:, 0] == k][0, 1]
50 | end_ref = ref_tracks[ref_tracks[:, 0] == k][0, 2]
51 | assigned_labels = np.unique(v[~np.isnan(v)])
52 | if len(assigned_labels) > 1:
53 | continue
54 | if assigned_labels[0] == 0:
55 | continue
56 | assignee = assigned_labels[0]
57 | start_comp = comp_tracks[comp_tracks[:, 0] == assignee][0, 1]
58 | end_comp = comp_tracks[comp_tracks[:, 0] == assignee][0, 2]
59 | if start_ref == start_comp and end_ref == end_comp:
60 | correct_tracks += 1
61 | T_rc = correct_tracks
62 | T_r = len(ref_tracks)
63 | T_c = len(comp_tracks)
64 | return float(2 * T_rc / (T_c + T_r))
65 |
66 |
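A worked sketch of the final ratio (counts are illustrative): CT relates the number of completely reconstructed reference tracks to the total number of reference and computed tracks.

T_rc, T_r, T_c = 21, 30, 28          # fully reconstructed, reference, computed tracks
print(2 * T_rc / (T_c + T_r))        # 42 / 58 ≈ 0.724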
--------------------------------------------------------------------------------
/ctc_metrics/metrics/biological/op_clb.py:
--------------------------------------------------------------------------------
1 |
2 | def op_clb(
3 | lnk: float,
4 | bio: float,
5 | ):
6 | """
7 | Computes the OP_CLB metric. As described by
8 | [celltrackingchallenge](http://celltrackingchallenge.net/).
9 |
10 | Args:
11 | lnk: The linking metric.
12 | bio: The biological metric.
13 |
14 | Returns:
15 | The OP_CLB metric.
16 | """
17 |
18 | op = 0.5 * lnk + 0.5 * bio
19 | return op
20 |
--------------------------------------------------------------------------------
/ctc_metrics/metrics/biological/tf.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from ctc_metrics.utils.representations import assign_comp_to_ref
4 |
5 |
6 | def calculate_fractions_fo_computed_tracks(
7 | ref_tracks: np.ndarray,
8 | labels_ref: list,
9 | mapped_ref: list,
10 | mapped_comp: list,
11 | ):
12 | """
13 | Returns a dictionary with the fractions of overlap for each computed track.
14 | Each entry in the dictionary is a dictionary with the computed track id as
15 | key and a new dictionary as value. In the sub-dictionary, the reference
16 | track id is the key and the fraction of overlap is the value.
17 |
18 | Args:
19 | ref_tracks: The ground truth tracks.
20 | labels_ref: The labels of the ground truth masks.
21 | mapped_ref: The matched labels of the ground truth masks.
22 | mapped_comp: The matched labels of the result masks.
23 |
24 | Returns:
25 | A dictionary with the fractions of overlap for each computed track.
26 | """
27 | track_assignments = assign_comp_to_ref(labels_ref, mapped_ref, mapped_comp)
28 | comp_fractions = {}
29 | for k, v in track_assignments.items():
30 | start_ref = ref_tracks[ref_tracks[:, 0] == k][0, 1]
31 | end_ref = ref_tracks[ref_tracks[:, 0] == k][0, 2]
32 | length = int(end_ref - start_ref + 1)
33 | array = v[start_ref:end_ref + 1]
34 | assigned_labels = np.unique(array)
35 | if len(assigned_labels) == 1 and assigned_labels[0] == 0:
36 | continue
37 | assignments = 0
38 | last_i = 0
39 | for i in array:
40 | if last_i != i or i == 0:
41 | assignments = 0
42 | if i > 0:
43 | assignments += 1
44 | if i not in comp_fractions:
45 | comp_fractions[i] = {}
46 | comp_fractions[i][k] = max(
47 | comp_fractions[i].get(k, 0), assignments / length)
48 | last_i = i
49 | return comp_fractions
50 |
51 |
52 | def tf(
53 | ref_tracks: np.ndarray,
54 | labels_ref: list,
55 | mapped_ref: list,
56 | mapped_comp: list
57 | ):
58 | """
59 | Computes the track fractions metric. As described in the paper,
60 | "An objective comparison of cell-tracking algorithms."
61 | - Vladimir Ulman et al., Nature methods 2017
62 |
63 | Args:
64 | ref_tracks: The ground truth tracks. A (n,4) numpy ndarray with columns:
65 | - label
66 | - birth frame
67 | - end frame
68 | - parent
69 | labels_ref: The labels of the ground truth masks. A list of length
70 | mapped_ref: The matched labels of the ground truth masks. A list of
71 | length equal to the number of frames. Each element is a list with
72 | the matched labels of the ground truth masks in the respective
73 | frame. The elements are in the same order as the corresponding
74 | elements in mapped_comp.
75 | mapped_comp: The matched labels of the result masks. A list of length
76 | equal to the number of frames. Each element is a list with the
77 | matched labels of the result masks in the respective frame. The
78 | elements are in the same order as the corresponding elements in
79 | mapped_ref.
80 |
81 | Returns:
82 | The track fractions metric.
83 | """
84 | comp_fractions = calculate_fractions_fo_computed_tracks(
85 | ref_tracks, labels_ref, mapped_ref, mapped_comp)
86 | # Calculate the track fractions with respect to the reference tracks
87 | tfs = {k: 0 for k in ref_tracks[:, 0]}
88 | for _, v in sorted(comp_fractions.items()):
89 | for k2, v2 in sorted(v.items()):
90 | if tfs[k2] == 1:
91 | continue
92 | if v2 > tfs[k2]:
93 | tfs[k2] = v2
94 | # ###
95 | # The next lines do not make sense, because it causes instabilities.
96 | # Exactly same results with different order of labels can lead to
97 | # different metrics. They are kept, because they are in the original
98 | # codebase for the CTC. Remove the break to solve the issue.
99 | # Note: Does not make real differences in most cases.
100 | # ###
101 | if v2 == 1:
102 | break
103 | # Filter out undetected tracks. They should not be counted.
104 | tfs = [x for k, x in tfs.items() if x > 0]
105 | if len(tfs) == 0:
106 | return 0
107 | return np.mean(np.asarray(tfs))
108 |
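A minimal sketch of the final averaging step (the fractions are illustrative): TF is the mean, over all detected reference tracks, of the largest fraction of a reference track that is covered by a single computed track.

import numpy as np

tfs = {1: 1.0, 2: 0.5, 3: 0.8}                  # hypothetical best fraction per reference track
print(np.mean(np.asarray(list(tfs.values()))))  # (1.0 + 0.5 + 0.8) / 3 ≈ 0.767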
--------------------------------------------------------------------------------
/ctc_metrics/metrics/clearmot/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/ctc_metrics/metrics/clearmot/__init__.py
--------------------------------------------------------------------------------
/ctc_metrics/metrics/clearmot/mota.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def mota(
5 | labels_ref: list,
6 | labels_comp: list,
7 | mapped_ref: list,
8 | mapped_comp: list
9 | ):
10 | """
11 | Computes the MOTA metric. As described in the paper,
12 | "Evaluating Multiple Object Tracking Performance:
13 | The CLEAR MOT Metrics."
14 | - Keni Bernardin and Rainer Stiefelhagen, EURASIP 2008
15 |
16 | Args:
17 | labels_comp: The labels of the computed masks. A list of length equal
18 | to the number of frames. Each element is a list with the labels of
19 | the computed masks in the respective frame.
20 | labels_ref: The labels of the ground truth masks. A list of length
21 | equal to the number of frames. Each element is a list with the
22 | labels of the ground truth masks in the respective frame.
23 | mapped_ref: The matched labels of the ground truth masks. A list of
24 | length equal to the number of frames. Each element is a list with
25 | the matched labels of the ground truth masks in the respective
26 | frame. The elements are in the same order as the corresponding
27 | elements in mapped_comp.
28 | mapped_comp: The matched labels of the result masks. A list of length
29 | equal to the number of frames. Each element is a list with the
30 | matched labels of the result masks in the respective frame. The
31 | elements are in the same order as the corresponding elements in
32 | mapped_ref.
33 |
34 | Returns:
35 | The MOTA tracks metric.
36 | """
37 | tp, fp, fn, idsw, multi_assignments = 0, 0, 0, 0, 0
38 |
39 | max_label_gt = int(np.max(np.concatenate(labels_ref)))
40 | matches = np.zeros(max_label_gt + 1)
41 | for ref, comp, m_ref, m_comp in zip(
42 | labels_ref, labels_comp, mapped_ref, mapped_comp):
43 | # Calculate metrics
44 | _, counts = np.unique(m_comp, return_counts=True)
45 | tp += len(m_ref)
46 | fn += len(ref) - len(m_ref)
47 | fp += len(comp) - len(m_comp) + np.sum(counts[counts > 1] - 1)
48 | multi_assignments += np.sum(counts[counts > 1] - 1)
49 | idsw += np.sum((matches[m_ref] != m_comp) & (matches[m_ref] != 0))
50 | # Update the match cache
51 | matches[m_ref] = m_comp
52 |
53 | mota_score = 1 - (fn + fp + idsw + multi_assignments) / (tp + fn)
54 | precision = tp / (tp + fp)
55 | recall = tp / (tp + fn)
56 |
57 | res = {
58 | "MOTA": mota_score,
59 | "TP": tp,
60 | "FP": fp,
61 | "FN": fn,
62 | "IDSW": idsw,
63 | "MULTI-ASSIGNMENTS": multi_assignments,
64 | "Precision": precision,
65 | "Recall": recall
66 | }
67 | return res
68 |
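A worked sketch of the final score with illustrative error counts, mirroring the formula above:

tp, fp, fn, idsw, multi = 950, 30, 50, 5, 2
print(1 - (fn + fp + idsw + multi) / (tp + fn))  # 1 - 87/1000 = 0.913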
--------------------------------------------------------------------------------
/ctc_metrics/metrics/hota/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/ctc_metrics/metrics/hota/__init__.py
--------------------------------------------------------------------------------
/ctc_metrics/metrics/hota/chota.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from ctc_metrics.utils.representations import track_confusion_matrix
4 |
5 |
6 | def cluster_clique(
7 | tracks: np.ndarray
8 | ):
9 | """
10 | Clusters the tracks into cliques. A clique is a set of tracks that are
11 | connected by parent-child relationships.
12 |
13 | Args:
14 | tracks: The result tracks.
15 |
16 | Returns:
17 | The cliques. A dictionary with the track id as key and the clique as
18 | value.
19 | """
20 | # Cluster ref cliques
21 | track_id = np.unique(tracks[:, 0]).astype(int)
22 | track_parents = np.asarray(
23 | [tracks[tracks[:, 0] == x][0, 3] for x in track_id]).astype(int)
24 | cliques = {x: [] for x in track_id}
25 |
26 | # forward
27 | for track in track_id:
28 | clique = np.asarray([track])
29 | while True:
30 | is_child = np.isin(track_parents, clique)
31 | is_not_in_clique = np.isin(track_id, clique, invert=True)
32 | new_children = track_id[is_child & is_not_in_clique]
33 | if len(new_children) == 0:
34 | break
35 | clique = np.concatenate((clique, new_children))
36 | cliques[track] = clique
37 |
38 | # backward
39 | for track in track_id:
40 | clique = np.zeros(0)
41 | for ancestor in track_id:
42 | if track in cliques[ancestor]:
43 | clique = np.concatenate((clique, [ancestor]))
44 | cliques[track] = np.concatenate((cliques[track], clique))
45 |
46 | for track in track_id:
47 | cliques[track] = np.unique(cliques[track]).astype(int)
48 |
49 | return cliques
50 |
51 |
52 | def chota(
53 | ref_tracks: np.ndarray,
54 | comp_tracks: np.ndarray,
55 | labels_ref: list,
56 | labels_comp: list,
57 | mapped_ref: list,
58 | mapped_comp: list
59 | ):
60 | """
61 |     Computes the CHOTA metric. It extends HOTA by scoring associations over
62 |     whole cell lineages (cliques of parent and child tracks).
63 | Args:
64 | ref_tracks: The ground truth tracks. A (n,4) numpy ndarray with columns:
65 | - label
66 | - birth frame
67 | - end frame
68 | - parent
69 | comp_tracks: The computed tracks. A (n,4) numpy ndarray with columns:
70 | - label
71 | - birth frame
72 | - end frame
73 | - parent
74 | labels_comp: The labels of the computed masks. A list of length equal
75 | to the number of frames. Each element is a list with the labels of
76 | the computed masks in the respective frame.
77 | labels_ref: The labels of the ground truth masks. A list of length
78 | equal to the number of frames. Each element is a list with the
79 | labels of the ground truth masks in the respective frame.
80 | mapped_ref: The matched labels of the ground truth masks. A list of
81 | length equal to the number of frames. Each element is a list with
82 | the matched labels of the ground truth masks in the respective
83 | frame. The elements are in the same order as the corresponding
84 | elements in mapped_comp.
85 | mapped_comp: The matched labels of the result masks. A list of length
86 | equal to the number of frames. Each element is a list with the
87 | matched labels of the result masks in the respective frame. The
88 | elements are in the same order as the corresponding elements in
89 | mapped_ref.
90 |
91 | Returns:
92 | The CHOTA tracks metric.
93 | """
94 | # Gather association data
95 | cliques_ref = cluster_clique(ref_tracks)
96 | cliques_comp = cluster_clique(comp_tracks)
97 | track_intersection = track_confusion_matrix(
98 | labels_ref, labels_comp, mapped_ref, mapped_comp)
99 | # Calculate Association scores
100 | chota_score = 0
101 | for i in range(1, int(np.max(np.concatenate(labels_ref))) + 1):
102 | for j in range(1, int(np.max(np.concatenate(labels_comp))) + 1):
103 | if track_intersection[i, j] > 0:
104 | cliques_ref_i = cliques_ref[i]
105 | cliques_comp_j = cliques_comp[j]
106 | roi1 = np.zeros_like(track_intersection, dtype=bool)
107 | roi2 = np.zeros_like(track_intersection, dtype=bool)
108 | roi1[cliques_ref_i, :] = True
109 | roi2[:, cliques_comp_j] = True
110 |                 # Calculate the clique-based association score
111 | tpa = np.sum(track_intersection[roi1 & roi2])
112 | fna = np.sum(track_intersection[cliques_ref_i, :]) - tpa
113 | fpa = np.sum(track_intersection[:, cliques_comp_j]) - tpa
114 | # Reweight and add to hota score
115 | num_pixels = track_intersection[i, j]
116 | a_corr = tpa / (tpa + fna + fpa)
117 | chota_score += num_pixels * a_corr
118 |     # Calculate the final CHOTA score
119 | tp = track_intersection[1:, 1:].sum()
120 | fp = track_intersection[0, 1:].sum()
121 | fn = track_intersection[1:, 0].sum()
122 | chota_score = np.sqrt(chota_score / (tp + fp + fn))
123 | return {"CHOTA": chota_score}
124 |
125 |
126 |
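A small sketch of the clique construction (the lineage is illustrative, assuming cluster_clique from this file is in scope): a track's clique contains the track itself together with all of its ancestors and descendants.

import numpy as np

tracks = np.array([
    [1, 0, 4, 0],   # label, birth frame, end frame, parent
    [2, 5, 9, 1],   # daughter of track 1
    [3, 5, 9, 1],   # daughter of track 1
    [4, 0, 9, 0],   # unrelated track
])
cliques = cluster_clique(tracks)
print(cliques[1])   # [1 2 3]: the mother together with both daughters
print(cliques[2])   # [1 2]:   a daughter together with its ancestor
print(cliques[4])   # [4]:     no lineage relations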
--------------------------------------------------------------------------------
/ctc_metrics/metrics/hota/hota.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from ctc_metrics.utils.representations import track_confusion_matrix
4 |
5 |
6 | def hota(
7 | labels_ref: list,
8 | labels_comp: list,
9 | mapped_ref: list,
10 | mapped_comp: list,
11 | ):
12 | """
13 | Computes the HOTA metric. As described in the paper,
14 | "HOTA: A Higher Order Metric for Evaluating Multi-Object Tracking"
15 |     - Luiten et al., International Journal of Computer Vision 2020
16 |
17 | Args:
18 | labels_comp: The labels of the computed masks. A list of length equal
19 | to the number of frames. Each element is a list with the labels of
20 | the computed masks in the respective frame.
21 | labels_ref: The labels of the ground truth masks. A list of length
22 | equal to the number of frames. Each element is a list with the
23 | labels of the ground truth masks in the respective frame.
24 | mapped_ref: The matched labels of the ground truth masks. A list of
25 | length equal to the number of frames. Each element is a list with
26 | the matched labels of the ground truth masks in the respective
27 | frame. The elements are in the same order as the corresponding
28 | elements in mapped_comp.
29 | mapped_comp: The matched labels of the result masks. A list of length
30 | equal to the number of frames. Each element is a list with the
31 | matched labels of the result masks in the respective frame. The
32 | elements are in the same order as the corresponding elements in
33 | mapped_ref.
34 |
35 | Returns:
36 | The HOTA tracks metric.
37 | """
38 | # Gather association data
39 | max_label_ref = int(np.max(np.concatenate(labels_ref)))
40 | max_label_comp = int(np.max(np.concatenate(labels_comp)))
41 | track_intersection = track_confusion_matrix(
42 | labels_ref, labels_comp, mapped_ref, mapped_comp)
43 | # Calculate Association scores
44 | hota_score = 0
45 | for i in range(1, max_label_ref + 1):
46 | for j in range(1, max_label_comp + 1):
47 | if track_intersection[i, j] > 0:
48 | # Calculate the HOTA score
49 | tpa = track_intersection[i, j]
50 | fna = np.sum(track_intersection[i, :]) - tpa
51 | fpa = np.sum(track_intersection[:, j]) - tpa
52 | a_corr = tpa / (tpa + fna + fpa)
53 | hota_score += tpa * a_corr
54 | # Calculate the HOTA score
55 | tp = track_intersection[1:, 1:].sum()
56 | fp = track_intersection[0, 1:].sum()
57 | fn = track_intersection[1:, 0].sum()
58 | hota_score = np.sqrt(hota_score / (tp + fp + fn))
59 |
60 | res = {
61 | "HOTA": hota_score,
62 | }
63 | return res
64 |
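A worked sketch of the final aggregation with illustrative counts: every true-positive association contributes its accuracy A = TPA / (TPA + FNA + FPA), and the sum is normalised by TP + FP + FN before taking the square root.

import numpy as np

assoc_sum, tp, fp, fn = 700.0, 900, 80, 100   # hypothetical accumulated values
print(np.sqrt(assoc_sum / (tp + fp + fn)))    # sqrt(700 / 1080) ≈ 0.805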
--------------------------------------------------------------------------------
/ctc_metrics/metrics/identity_metrics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/ctc_metrics/metrics/identity_metrics/__init__.py
--------------------------------------------------------------------------------
/ctc_metrics/metrics/identity_metrics/idf1.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.optimize import linear_sum_assignment
3 |
4 | from ctc_metrics.utils.representations import track_confusion_matrix
5 |
6 |
7 | def get_idf1_stats(
8 | track_intersection: np.ndarray,
9 | costs: np.ndarray,
10 | max_label_ref: int,
11 | max_label_comp: int
12 | ):
13 | """
14 | Computes the IDF1 stats.
15 |
16 | Args:
17 | track_intersection: The track intersection matrix.
18 | costs: The assignment costs matrix.
19 | max_label_ref: The maximum label of the reference masks.
20 | max_label_comp: The maximum label of the computed masks.
21 |
22 | Returns:
23 | The IDF1 stats.
24 | """
25 | # Get optimal assignment
26 | row_ind, col_ind = linear_sum_assignment(costs)
27 | # Accumulate trackwise number of matches
28 | all_gt = np.sum(track_intersection[1:], axis=1)
29 | all_comp = np.sum(track_intersection[:, 1:], axis=0)
30 | # Compute IDFN, IDFP
31 | IDFN = all_gt.sum()
32 | IDFP = all_comp.sum()
33 | for i, j in zip(row_ind, col_ind):
34 | if i < max_label_ref and j < max_label_comp:
35 | IDFN -= track_intersection[i + 1, j + 1]
36 | IDFP -= track_intersection[i + 1, j + 1]
37 |
38 | assert (track_intersection[1:, :].sum() - IDFN ==
39 | track_intersection[:, 1:].sum() - IDFP)
40 | # Compute IDTP
41 | IDTP = track_intersection[1:, :].sum() - IDFN
42 | # Compute IDF1, IDP, IDR
43 | IDP = IDTP / (IDTP + IDFP)
44 | IDR = IDTP / (IDTP + IDFN)
45 | IDF1 = 2 * IDTP / (2 * IDTP + IDFP + IDFN)
46 |
47 | return {
48 | "IDF1": IDF1,
49 | "IDP": IDP,
50 | "IDR": IDR,
51 | "IDTP": IDTP,
52 | "IDFP": IDFP,
53 | "IDFN": IDFN
54 | }
55 |
56 | def idf1(
57 | labels_ref: list,
58 | labels_comp: list,
59 | mapped_ref: list,
60 | mapped_comp: list
61 | ):
62 | """
63 | Computes the IDF1 metric. As described in the paper,
64 | "Performance Measures and a Data Set for
65 | Multi-Target, Multi-Camera Tracking"
66 | - Ristani et al., ECCV 2016
67 |
68 | Args:
69 | labels_comp: The labels of the computed masks. A list of length equal
70 | to the number of frames. Each element is a list with the labels of
71 | the computed masks in the respective frame.
72 | labels_ref: The labels of the ground truth masks. A list of length
73 | equal to the number of frames. Each element is a list with the
74 | labels of the ground truth masks in the respective frame.
75 | mapped_ref: The matched labels of the ground truth masks. A list of
76 | length equal to the number of frames. Each element is a list with
77 | the matched labels of the ground truth masks in the respective
78 | frame. The elements are in the same order as the corresponding
79 | elements in mapped_comp.
80 | mapped_comp: The matched labels of the result masks. A list of length
81 | equal to the number of frames. Each element is a list with the
82 | matched labels of the result masks in the respective frame. The
83 | elements are in the same order as the corresponding elements in
84 | mapped_ref.
85 |
86 | Returns:
87 | The idf1 tracks metric.
88 | """
89 |     # Calculate the comp-ref track intersections
90 | max_label_ref = int(np.max(np.concatenate(labels_ref)))
91 | max_label_comp = int(np.max(np.concatenate(labels_comp)))
92 | track_intersection = track_confusion_matrix(
93 | labels_ref, labels_comp, mapped_ref, mapped_comp
94 | )
95 |     # Create the assignment cost matrix with default infinite costs, plus
96 |     # rows/columns for the dummy nodes (no association)
97 | total_ids = max_label_ref + max_label_comp
98 | costs = np.ones((total_ids, total_ids)) * np.inf
99 | # Fill in the costs of intersection between tracks
100 | subcosts = np.zeros_like(track_intersection[1:, 1:])
101 | subcosts[track_intersection[1:, 1:] == 0] = np.inf
102 | costs[:max_label_ref, :max_label_comp] = subcosts
103 | costs[:max_label_ref, :max_label_comp] -= 2 * track_intersection[1:, 1:]
104 | costs[:max_label_ref, :max_label_comp] += np.sum(
105 | track_intersection[1:, :], axis=1)[:, None]
106 | costs[:max_label_ref, :max_label_comp] += np.sum(
107 | track_intersection[:, 1:], axis=0)[None, :]
108 | # Set the assignment costs to the dummy nodes
109 | costs[max_label_ref:, max_label_comp:] = 0
110 | for i, c in enumerate(np.sum(track_intersection[1:, :], axis=1)):
111 | costs[i, max_label_comp + i] = c
112 | for j, c in enumerate(np.sum(track_intersection[:, 1:], axis=0)):
113 | costs[max_label_ref + j, j] = c
114 |
115 | return get_idf1_stats(
116 | track_intersection, costs, max_label_ref, max_label_comp
117 | )
118 |
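A worked sketch of the final statistics (counts are illustrative): once the optimal identity assignment fixes IDTP, IDFP and IDFN, the scores follow directly.

IDTP, IDFP, IDFN = 850, 120, 150
print(IDTP / (IDTP + IDFP))                    # IDP ≈ 0.876
print(IDTP / (IDTP + IDFN))                    # IDR = 0.85
print(2 * IDTP / (2 * IDTP + IDFP + IDFN))     # IDF1 ≈ 0.863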
--------------------------------------------------------------------------------
/ctc_metrics/metrics/others/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/ctc_metrics/metrics/others/__init__.py
--------------------------------------------------------------------------------
/ctc_metrics/metrics/others/faf.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def faf(
4 | labels_comp: list,
5 | mapped_comp: list
6 | ):
7 | """
8 |     Computes the average number of false alarms per frame. As described by
9 | [motchallenge](https://motchallenge.net/).
10 |
11 | Args:
12 | labels_comp: The labels of the computed masks.
13 | mapped_comp: The matched labels of the result masks.
14 |
15 | Returns:
16 | The FAF metric.
17 | """
18 |
19 | fp = 0
20 | frames = len(labels_comp)
21 |
22 | for comp, m_comp in zip(labels_comp, mapped_comp):
23 | uniques, counts = np.unique(m_comp, return_counts=True)
24 | uniques = uniques[counts == 1]
25 | fp += len(comp) - len(uniques)
26 |
27 | faf_score = fp / frames
28 |
29 | res = {
30 | "FAF": faf_score,
31 | "Frames": frames,
32 | }
33 |
34 | return res
35 |
--------------------------------------------------------------------------------
/ctc_metrics/metrics/others/mt_ml.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from ctc_metrics.utils.representations import track_confusion_matrix
4 |
5 |
6 | def mtml(
7 | labels_ref: list,
8 | labels_comp: list,
9 | mapped_ref: list,
10 | mapped_comp: list
11 | ):
12 | """
13 | Computes the mostly tracked and mostly lost metric. As described by
14 | [motchallenge](https://motchallenge.net/).
15 |
16 | Args:
17 | labels_comp: The labels of the computed masks.
18 | labels_ref: The labels of the ground truth masks.
19 | mapped_ref: The matched labels of the ground truth masks.
20 | mapped_comp: The matched labels of the result masks.
21 |
22 | Returns:
23 | The mt and ml metrics.
24 | """
25 | # Gather association data
26 | track_intersection = track_confusion_matrix(
27 | labels_ref, labels_comp, mapped_ref, mapped_comp)
28 | # Calculate the metrics
29 | total_ref = np.sum(track_intersection[1:, :], axis=1)
30 | ratio = np.max(track_intersection[1:, :], axis=1) / np.maximum(total_ref, 1)
31 | valid_ref = total_ref > 0
32 | mt = np.sum(ratio[valid_ref] >= 0.8) / np.sum(valid_ref)
33 | ml = np.sum(ratio[valid_ref] < 0.2) / np.sum(valid_ref)
34 |
35 | res = {
36 | "MT": mt,
37 | "ML": ml,
38 | }
39 | return res
40 |
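A minimal sketch of the thresholds used above (the coverage ratios are illustrative): a reference track counts as mostly tracked when a single computed identity covers at least 80% of it, and as mostly lost when the best coverage stays below 20%.

import numpy as np

ratio = np.array([0.95, 0.85, 0.5, 0.1])            # best coverage per reference track
print(np.mean(ratio >= 0.8), np.mean(ratio < 0.2))  # MT = 0.5, ML = 0.25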
--------------------------------------------------------------------------------
/ctc_metrics/metrics/technical/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/ctc_metrics/metrics/technical/__init__.py
--------------------------------------------------------------------------------
/ctc_metrics/metrics/technical/det.py:
--------------------------------------------------------------------------------
1 |
2 | def det(
3 | NS: int,
4 | FN: int,
5 | FP: int,
6 | num_vertices: int,
7 | **_ignored
8 | ):
9 | """
10 | Calculate Detection (DET) metric.
11 |
12 | According to
13 | Cell Tracking Accuracy Measurement Based on Comparison of Acyclic
14 |     Oriented Graphs; Matula et al. 2015
15 |
16 | Args:
17 | NS: Split vertex operations.
18 | FN: Number of false negatives.
19 | FP: Number of false positives.
20 | num_vertices: Number of vertices in the graph.
21 | **_ignored: Ignored arguments.
22 |
23 | Returns:
24 | The Detection metric.
25 |
26 | """
27 | # Calculate AOGM_D
28 | w_ns = 5
29 | w_fn = 10
30 | w_fp = 1
31 | AOGM_D = w_ns * NS + w_fn * FN + w_fp * FP
32 | # Calculate AOGM_D0 (create graph from scratch)
33 | AOGM_D0 = w_fn * num_vertices # All false negatives
34 | # Calculate DET
35 | DET = 1 - min(AOGM_D, AOGM_D0) / AOGM_D0
36 | return float(DET)
37 |
38 |
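A worked sketch with illustrative operation counts: AOGM_D weights split, missing and spurious vertices, and DET normalises it against the cost of building the whole vertex set from scratch.

NS, FN, FP, num_vertices = 2, 10, 40, 500
AOGM_D = 5 * NS + 10 * FN + 1 * FP            # 150
AOGM_D0 = 10 * num_vertices                   # 5000
print(1 - min(AOGM_D, AOGM_D0) / AOGM_D0)     # DET = 0.97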
--------------------------------------------------------------------------------
/ctc_metrics/metrics/technical/lnk.py:
--------------------------------------------------------------------------------
1 |
2 | def lnk(
3 | ED: int,
4 | EA: int,
5 | EC: int,
6 | num_edges: int,
7 | **_ignored
8 | ):
9 | """
10 | Calculate Linking (LNK) metric.
11 |
12 | According to
13 | Cell Tracking Accuracy Measurement Based on Comparison of Acyclic
14 |     Oriented Graphs; Matula et al. 2015
15 |
16 | Args:
17 |         ED: Number of redundant edges.
18 | EA: Number of missing edges.
19 | EC: Number of wrong edge semantics.
20 | num_edges: Number of edges in the graph.
21 | _ignored: Ignored arguments.
22 |
23 | Returns:
24 | The Linking metric.
25 | """
26 | # Calculate AOGM_A
27 | w_ed = 1
28 | w_ea = 1.5
29 | w_ec = 1
30 | AOGM_A = w_ed * ED + w_ea * EA + w_ec * EC
31 | # Calculate AOGM_0 (create graph from scratch)
32 | # i.e, all vertices and edges are false negatives
33 | AOGM_0 = w_ea * num_edges
34 |     # Calculate LNK
35 | LNK = 1 - min(AOGM_A, AOGM_0) / AOGM_0
36 | return float(LNK)
37 |
38 |
--------------------------------------------------------------------------------
/ctc_metrics/metrics/technical/op_csb.py:
--------------------------------------------------------------------------------
1 |
2 | def op_csb(
3 | seg: float,
4 | det: float,
5 | ):
6 | """
7 | Computes the OP_CSB metric. As described by
8 | [celltrackingchallenge](http://celltrackingchallenge.net/).
9 |
10 | Args:
11 | seg: The segmentation metric.
12 | det: The detection metric.
13 |
14 | Returns:
15 | The OP_CSB metric.
16 | """
17 |
18 | op = 0.5 * seg + 0.5 * det
19 | return op
20 |
--------------------------------------------------------------------------------
/ctc_metrics/metrics/technical/op_ctb.py:
--------------------------------------------------------------------------------
1 |
2 | def op_ctb(
3 | seg: float,
4 | tra: float,
5 | ):
6 | """
7 | Computes the OP_CTB metric. As described by
8 | [celltrackingchallenge](http://celltrackingchallenge.net/).
9 |
10 | Args:
11 | seg: The segmentation metric.
12 | tra: The tracking metric.
13 |
14 | Returns:
15 | The OP_CTB metric.
16 | """
17 |
18 | op = 0.5 * seg + 0.5 * tra
19 | return op
20 |
--------------------------------------------------------------------------------
/ctc_metrics/metrics/technical/seg.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def seg(
5 | labels_ref: list,
6 | intersection_over_unions: list,
7 | ):
8 | """
9 | Calculates the segmentation metric. The metric describes the average overlap
10 |     between the reference labels and their assigned result labels.
11 |
12 | Args:
13 | labels_ref: A list of lists with the labels of the ground truth masks
14 | according to the respective frame.
15 | intersection_over_unions: A list of lists with the intersection over
16 | union values of matched reference and computed result labels.
17 |
18 | Returns:
19 |         The segmentation metric, i.e. the mean intersection over union over
20 |         all reference labels (unmatched reference labels count as zero).
21 |
22 | """
23 | number_of_reference_labels = np.sum([len(x) for x in labels_ref])
24 | intersection_over_unions = np.concatenate(intersection_over_unions)
25 | total_intersection = np.sum(intersection_over_unions)
26 | seg_measure = total_intersection / np.maximum(number_of_reference_labels, 1)
27 | return float(seg_measure)
28 |
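A self-contained numeric sketch (labels and IoUs are illustrative): unmatched reference labels implicitly contribute an IoU of zero because the sum of IoUs is divided by the total number of reference labels.

import numpy as np

labels_ref = [[1, 2], [1, 2, 3]]             # 5 reference objects in total
ious = [[0.9, 0.8], [0.7, 0.85]]             # only 4 of them were matched
n_ref = np.sum([len(x) for x in labels_ref])
print(np.sum(np.concatenate(ious)) / n_ref)  # 3.25 / 5 = 0.65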
--------------------------------------------------------------------------------
/ctc_metrics/metrics/technical/tra.py:
--------------------------------------------------------------------------------
1 |
2 | def tra(
3 | NS: int,
4 | FN: int,
5 | FP: int,
6 | ED: int,
7 | EA: int,
8 | EC: int,
9 | num_vertices: int,
10 | num_edges: int,
11 | **_ignored
12 | ):
13 | """
14 | Calculate Tracking (TRA) metric.
15 |
16 | According to
17 | Cell Tracking Accuracy Measurement Based on Comparison of Acyclic
18 |     Oriented Graphs; Matula et al. 2015
19 |
20 | Args:
21 | NS: Split vertex operations.
22 | FN: Number of false negatives.
23 | FP: Number of false positives.
24 |         ED: Number of redundant edges.
25 | EA: Number of missing edges.
26 | EC: Number of wrong edge semantics.
27 | num_vertices: Number of vertices in the graph.
28 | num_edges: Number of edges in the graph.
29 | _ignored: Ignored arguments.
30 |
31 | Returns:
32 | The Tracking metric.
33 | """
34 | # Calculate AOGM
35 | w_ns = 5
36 | w_fn = 10
37 | w_fp = 1
38 | w_ed = 1
39 | w_ea = 1.5
40 | w_ec = 1
41 | AOGM = w_ns * NS + w_fn * FN + w_fp * FP + w_ed * ED + w_ea * EA + w_ec * EC
42 | # Calculate AOGM_0 (create graph from scratch)
43 | # i.e, all vertices and edges are false negatives
44 | AOGM_0 = w_fn * num_vertices + w_ea * num_edges
45 |     # Calculate TRA
46 | TRA = 1 - min(AOGM, AOGM_0) / AOGM_0
47 | return float(TRA), AOGM, AOGM_0
48 |
49 |
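A worked sketch with illustrative counts: TRA extends the DET cost with weighted edge operations and normalises against the cost of building the full graph from scratch.

NS, FN, FP, ED, EA, EC = 2, 10, 40, 5, 8, 3
num_vertices, num_edges = 500, 480
AOGM = 5*NS + 10*FN + 1*FP + 1*ED + 1.5*EA + 1*EC   # 170.0
AOGM_0 = 10 * num_vertices + 1.5 * num_edges        # 5720.0
print(1 - min(AOGM, AOGM_0) / AOGM_0)               # TRA ≈ 0.9703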
--------------------------------------------------------------------------------
/ctc_metrics/metrics/validation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/ctc_metrics/metrics/validation/__init__.py
--------------------------------------------------------------------------------
/ctc_metrics/metrics/validation/valid.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | import numpy as np
3 |
4 |
5 | def valid_parents(
6 | tracks: np.ndarray,
7 | ):
8 | """
9 | Checks if all parents are >= 0.
10 |
11 | Args:
12 | tracks: The result tracks. Numpy array (n x 4) the columns are:
13 | 0: Label
14 | 1: Birth
15 | 2: End
16 | 3: Parent
17 |
18 | Returns:
19 | 1 if all parents are >= 0, 0 otherwise.
20 |
21 | """
22 | is_valid = 1
23 | for track in tracks:
24 | if track[3] < 0:
25 | warnings.warn(f"Invalid parent: {track}", UserWarning)
26 | is_valid = 0
27 | return int(is_valid)
28 |
29 |
30 | def unique_labels(
31 | tracks: np.ndarray,
32 | ):
33 | """
34 | Checks if all labels are unique.
35 |
36 | Args:
37 | tracks: The result tracks. Numpy array (n x 4) the columns are:
38 | 0: Label
39 | 1: Birth
40 | 2: End
41 | 3: Parent
42 |
43 | Returns:
44 | 1 if all labels are unique, 0 otherwise.
45 |
46 | """
47 | is_valid = 1
48 | labels = tracks[:, 0]
49 | if len(np.unique(labels)) != len(labels):
50 | warnings.warn("Labels are not unique.", UserWarning)
51 | is_valid = 0
52 | return int(is_valid)
53 |
54 |
55 | def valid_parent_links(
56 | tracks: np.ndarray,
57 | ):
58 | """
59 | Checks if all parent links are valid.
60 |
61 | Args:
62 | tracks: The result tracks. Numpy array (n x 4) the columns are:
63 | 0: Label
64 | 1: Birth
65 | 2: End
66 | 3: Parent
67 |
68 | Returns:
69 | 1 if all parent links are valid, 0 otherwise.
70 |
71 | """
72 | is_valid = 1
73 | for track in tracks:
74 | _, birth, _, parent = track
75 | if parent != 0:
76 | parent_idx = np.argwhere(tracks[:, 0] == parent).squeeze()
77 | assert parent_idx.size == 1, parent_idx
78 | parent_track = tracks[parent_idx]
79 | _, _, parent_end, _ = parent_track
80 | if parent_end >= birth:
81 | warnings.warn(
82 | f"Parent ends after child starts: {track} {parent_track}",
83 | UserWarning)
84 | is_valid = 0
85 | return int(is_valid)
86 |
87 |
88 | def valid_ends(
89 | tracks: np.ndarray,
90 | ):
91 | """
92 | Checks if the end is not before the birth.
93 |
94 | Args:
95 |         tracks: The result tracks. Numpy array (n x 4). The columns are:
96 | 0: Label
97 | 1: Birth
98 | 2: End
99 | 3: Parent
100 |
101 | Returns:
102 |         1 if no track ends before its birth, 0 otherwise.
103 |
104 | """
105 | is_valid = 1
106 | for track in tracks:
107 | i, birth, end, parent = track
108 | if end < birth:
109 | warnings.warn(
110 | f"Track ends before birth: {i} {birth} {end} {parent}",
111 | UserWarning)
112 | is_valid = 0
113 | return int(is_valid)
114 |
115 |
116 | def inspect_masks(
117 | frames: list,
118 | masks: list,
119 | labels_in_frames: list,
120 | ):
121 | """
122 | Inspects the masks for invalid labels.
123 |
124 | Args:
125 | frames: The frames to inspect.
126 | masks: The mask files to inspect.
127 | labels_in_frames: The present labels corresponding to the file in
128 | "masks".
129 |
130 | Returns:
131 | 1 if all labels are valid, 0 otherwise.
132 |
133 | """
134 | is_valid = 1
135 | for labels_in_frame, file, frame in zip(labels_in_frames, masks, frames):
136 | for label in labels_in_frame:
137 | if label != 0:
138 | if label not in frame:
139 | warnings.warn(
140 | f"Label {label} in mask but not in res_track {file}.",
141 | UserWarning)
142 | is_valid = 0
143 | for label in frame:
144 | if label not in labels_in_frame:
145 | warnings.warn(
146 | f"Label {label} in res_track but not in mask {file}.",
147 | UserWarning)
148 | is_valid = 0
149 | return int(is_valid)
150 |
151 |
152 | def no_empty_frames(
153 | frames: list,
154 | ):
155 | """
156 | Checks if there are empty frames.
157 |
158 | Args:
159 | frames: The frames to inspect.
160 |
161 | Returns:
162 | 1 if there are no empty frames, 0 otherwise.
163 |
164 | """
165 | is_valid = 1
166 | for i, f in enumerate(frames):
167 | if len(f) == 0:
168 | warnings.warn(f"Empty frame {i}.", UserWarning)
169 | is_valid = 0
170 | return int(is_valid)
171 |
172 |
173 | def no_empty_tracking_result(
174 | tracks: np.ndarray
175 | ):
176 | """
177 |     Checks if there is at least one detection in the results.
178 |
179 | Args:
180 | tracks: The tracks to inspect
181 |
182 | Returns:
183 | 1 if there are detections, 0 otherwise.
184 | """
185 | is_valid = 1
186 | if len(tracks) == 0:
187 | warnings.warn("No tracks in result.", UserWarning)
188 | is_valid = 0
189 | return is_valid
190 |
191 |
192 | def valid(
193 | masks: list,
194 | tracks: np.ndarray,
195 | labels_in_frames: list,
196 | ):
197 | """
198 | Checks if the cell tracking result is valid. The result is valid if...
199 | - ...all parents are >= 0
200 | - ...all labels are unique
201 |     - ...all parents end before their children are born
202 | (- ...all labels are used)
203 |     - ...all labels appear in the frames where they are supposed to be
204 | - ...frames are not empty
205 |
206 | Args:
207 | masks: The masks of the result.
208 | tracks: The result tracks.
209 | labels_in_frames: The present labels corresponding to the file in
210 | "masks".
211 |
212 | Returns:
213 | 1 if the result is valid, 0 otherwise.
214 |
215 | """
216 | is_valid = 1
217 | # If tracks is empty, the result is invalid
218 | is_valid = no_empty_tracking_result(tracks)
219 | # Get the labels in each frame
220 | num_frames = max(tracks[:, 2].max() + 1, len(masks))
221 | frames = [[] for _ in range(num_frames)]
222 | for track in tracks:
223 | label, birth, end, _ = track
224 | for frame in range(birth, end + 1):
225 | frames[frame].append(label)
226 | # Check if all parents are >= 0
227 | is_valid *= valid_parents(tracks)
228 | # Check if all labels are unique
229 | is_valid *= unique_labels(tracks)
230 |     # Check if parents end before their children are born
231 | is_valid *= valid_parent_links(tracks)
232 | # Check if end is not before birth
233 | is_valid *= valid_ends(tracks)
234 |     # Check if all labels appear in the frames where they are supposed to be
235 | is_valid *= inspect_masks(frames, masks, labels_in_frames)
236 |     # Check if there are empty frames
237 |     no_empty_frames(frames)  # Note: only warns; should this invalidate the result?
238 | return int(is_valid)
239 |
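A minimal sketch of one of the checks (the tracks are illustrative, assuming valid_parent_links from this file is in scope): a child may only be born after its parent track has ended.

import numpy as np

tracks = np.array([
    [1, 0, 4, 0],   # label, birth, end, parent
    [2, 5, 9, 1],   # born one frame after its parent ends -> valid
])
print(valid_parent_links(tracks))  # 1; a parent end >= child birth would warn and return 0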
--------------------------------------------------------------------------------
/ctc_metrics/scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/ctc_metrics/scripts/__init__.py
--------------------------------------------------------------------------------
/ctc_metrics/scripts/evaluate.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from os.path import join, basename
3 | from multiprocessing import Pool, cpu_count
4 | import numpy as np
5 |
6 | from ctc_metrics.metrics import (
7 | valid, det, seg, tra, ct, tf, bc, cca, mota, hota, idf1, chota, mtml, faf,
8 | op_ctb, op_csb, bio, op_clb, lnk
9 | )
10 | from ctc_metrics.metrics import ALL_METRICS
11 | from ctc_metrics.utils.handle_results import print_results, store_results
12 | from ctc_metrics.utils.filesystem import parse_directories, read_tracking_file,\
13 | parse_masks
14 | from ctc_metrics.utils.representations import match as match_tracks, \
15 | count_acyclic_graph_correction_operations, merge_tracks
16 |
17 |
18 | def match_computed_to_reference_masks(
19 | ref_masks: list,
20 | comp_masks: list,
21 | threads: int = 0,
22 | ):
23 | """
24 | Matches computed masks to reference masks.
25 |
26 | Args:
27 | ref_masks: The reference masks. A list of paths to the reference masks.
28 | comp_masks: The computed masks. A list of paths to the computed masks.
29 | threads: The number of threads to use. If 0, the number of threads
30 | is set to the number of available CPUs.
31 |
32 | Returns:
33 | The results stored in a dictionary. The dictionary contains the
34 | following keys:
35 | - labels_ref: The reference labels. A list of lists containing
36 | the labels of the reference masks.
37 | - labels_comp: The computed labels. A list of lists containing
38 | the labels of the computed masks.
39 | - mapped_ref: The mapped reference labels. A list of lists
40 | containing the mapped labels of the reference masks.
41 | - mapped_comp: The mapped computed labels. A list of lists
42 | containing the mapped labels of the computed masks.
43 | - ious: The intersection over union values. A list of lists
44 | containing the intersection over union values between mapped
45 | reference and computed masks.
46 | """
47 | labels_ref, labels_comp, mapped_ref, mapped_comp, ious = [], [], [], [], []
48 | if threads != 1:
49 | if threads == 0:
50 | threads = cpu_count()
51 | with Pool(threads) as p:
52 | matches = p.starmap(match_tracks, zip(ref_masks, comp_masks))
53 | else:
54 | matches = [match_tracks(*x) for x in zip(ref_masks, comp_masks)]
55 | for match in matches:
56 | labels_ref.append(match[0])
57 | labels_comp.append(match[1])
58 | mapped_ref.append(match[2])
59 | mapped_comp.append(match[3])
60 | ious.append(match[4])
61 | return {
62 | "labels_ref": labels_ref,
63 | "labels_comp": labels_comp,
64 | "mapped_ref": mapped_ref,
65 | "mapped_comp": mapped_comp,
66 | "ious": ious
67 | }
68 |
69 |
70 | def load_data(
71 | res: str,
72 | gt: str,
73 |     trajectory_data: bool = True,
74 |     segmentation_data: bool = True,
75 | threads: int = 0,
76 | ):
77 | """
78 | Load data that is necessary to calculate metrics from the given directories.
79 |
80 | Args:
81 | res: The path to the results.
82 | gt: The path to the ground truth.
83 | trajectory_data: A flag if trajectory data is available.
84 | segmentation_data: A flag if segmentation data is available.
85 | threads: The number of threads to use. If 0, the number of threads
86 | is set to the number of available CPUs.
87 |
88 | Returns:
89 | The computed tracks, the reference tracks, the trajectory data, the
90 | segmentation data, the computed masks and a flag if the results are
91 | valid.
92 |
93 | """
94 | # Read tracking files and parse mask files
95 | comp_tracks = read_tracking_file(join(res, "res_track.txt"))
96 | ref_tracks = read_tracking_file(join(gt, "TRA", "man_track.txt"))
97 | comp_masks = parse_masks(res)
98 | ref_tra_masks = parse_masks(join(gt, "TRA"))
99 |     assert len(ref_tra_masks) > 0, f"{gt}: No ground truth masks found!"
100 |     assert len(ref_tra_masks) == len(comp_masks), (
101 |         f"{res}: Number of result masks ({len(comp_masks)}) unequal to "
102 |         f"the number of ground truth masks ({len(ref_tra_masks)})!")
103 | # Match golden truth tracking masks to result masks
104 | traj = {}
105 | is_valid = 1
106 | if trajectory_data:
107 | traj = match_computed_to_reference_masks(
108 | ref_tra_masks, comp_masks, threads=threads)
109 | is_valid = valid(comp_masks, comp_tracks, traj["labels_comp"])
110 | # Match golden truth segmentation masks to result masks
111 | segm = {}
112 | if segmentation_data:
113 | ref_seg_masks = parse_masks(join(gt, "SEG"))
114 | _res_masks = [
115 | comp_masks[int(basename(x).replace(
116 | "man_seg", "").replace(".tif", "").replace("_", ""))]
117 | for x in ref_seg_masks
118 | ]
119 | segm = match_computed_to_reference_masks(
120 | ref_seg_masks, _res_masks, threads=threads)
121 | return comp_tracks, ref_tracks, traj, segm, comp_masks, is_valid
122 |
123 |
124 | def calculate_metrics(
125 | comp_tracks: np.ndarray,
126 | ref_tracks: np.ndarray,
127 | traj: dict,
128 | segm: dict,
129 | metrics: list = None,
130 | is_valid: bool = None,
131 | ): # pylint: disable=too-complex
132 | """
133 | Calculate metrics for given data.
134 |
135 | Args:
136 | comp_tracks: The computed tracks.A (n,4) numpy ndarray with columns:
137 | - label
138 | - birth frame
139 | - end frame
140 | - parent
141 | ref_tracks: The reference tracks. A (n,4) numpy ndarray with columns:
142 | - label
143 | - birth frame
144 | - end frame
145 | - parent
146 | traj: The frame-wise trajectory match data.
147 | segm: The frame-wise segmentation match data.
148 | metrics: The metrics to evaluate.
149 |         is_valid: A flag indicating whether the results are valid.
150 |
151 | Returns:
152 | The results stored in a dictionary.
153 | """
154 | # Check if results are valid
155 | results = {x: None for x in metrics}
156 | if not is_valid:
157 | print("Invalid results!")
158 | results["Valid"] = 0
159 | return results
160 |
161 | # Create merge tracks
162 | if traj:
163 | new_tracks, new_labels, new_mapped = merge_tracks(
164 | ref_tracks, traj["labels_ref"], traj["mapped_ref"])
165 | traj["ref_tracks_merged"] = new_tracks
166 | traj["labels_ref_merged"] = new_labels
167 | traj["mapped_ref_merged"] = new_mapped
168 | new_tracks, new_labels, new_mapped = merge_tracks(
169 | comp_tracks, traj["labels_comp"], traj["mapped_comp"])
170 | traj["comp_tracks_merged"] = new_tracks
171 | traj["labels_comp_merged"] = new_labels
172 | traj["mapped_comp_merged"] = new_mapped
173 |
174 | # Prepare intermediate results
175 | graph_operations = {}
176 | if "DET" in metrics or "TRA" in metrics:
177 | graph_operations = \
178 | count_acyclic_graph_correction_operations(
179 | ref_tracks, comp_tracks,
180 | traj["labels_ref"], traj["labels_comp"],
181 | traj["mapped_ref"], traj["mapped_comp"]
182 | )
183 |
184 | # Calculate metrics
185 | if "Valid" in metrics:
186 | results["Valid"] = is_valid
187 |
188 | if "CHOTA" in metrics:
189 | results.update(chota(
190 | traj["ref_tracks_merged"], traj["comp_tracks_merged"],
191 | traj["labels_ref_merged"], traj["labels_comp_merged"],
192 | traj["mapped_ref_merged"], traj["mapped_comp_merged"]))
193 |
194 | if "DET" in metrics:
195 | results["DET"] = det(**graph_operations)
196 |
197 | if "SEG" in metrics:
198 | results["SEG"] = seg(segm["labels_ref"], segm["ious"])
199 |
200 | if "TRA" in metrics:
201 | _tra, _aogm, _aogm0 = tra(**graph_operations)
202 | results["TRA"] = _tra
203 | results["AOGM"] = _aogm
204 | results["AOGM_0"] = _aogm0
205 | for key in ("NS", "FN", "FP", "ED", "EA", "EC"):
206 | results[f"AOGM_{key}"] = graph_operations[key]
207 |
208 | if "LNK" in metrics:
209 | results["LNK"] = lnk(**graph_operations)
210 |
211 | if "DET" in metrics and "SEG" in metrics:
212 | results["OP_CSB"] = op_csb(results["SEG"], results["DET"])
213 |
214 | if "SEG" in metrics and "TRA" in metrics:
215 | results["OP_CTB"] = op_ctb(results["SEG"], results["TRA"])
216 |
217 | if "CT" in metrics:
218 | results["CT"] = ct(
219 | comp_tracks, ref_tracks,
220 | traj["labels_ref"], traj["mapped_ref"], traj["mapped_comp"])
221 |
222 | if "TF" in metrics:
223 | results["TF"] = tf(
224 | ref_tracks,
225 | traj["labels_ref"], traj["mapped_ref"], traj["mapped_comp"])
226 |
227 | if "BC" in metrics:
228 | for i in range(4):
229 | results[f"BC({i})"] = bc(
230 | comp_tracks, ref_tracks,
231 | traj["mapped_ref"], traj["mapped_comp"],
232 | i=i)
233 |
234 | if "CCA" in metrics:
235 | results["CCA"] = cca(comp_tracks, ref_tracks)
236 |
237 | if "CT" in metrics and "BC" in metrics and \
238 | "CCA" in metrics and "TF" in metrics:
239 | for i in range(4):
240 | results[f"BIO({i})"] = bio(
241 | results["CT"], results["TF"],
242 | results[f"BC({i})"], results["CCA"])
243 |
244 | if "BIO" in results and "LNK" in results:
245 | for i in range(4):
246 | results[f"OP_CLB({i})"] = op_clb(
247 | results["LNK"], results[f"BIO({i})"])
248 |
249 | if "MOTA" in metrics:
250 | results.update(mota(
251 | traj["labels_ref_merged"], traj["labels_comp_merged"],
252 | traj["mapped_ref_merged"], traj["mapped_comp_merged"]))
253 |
254 | if "HOTA" in metrics:
255 | results.update(hota(
256 | traj["labels_ref_merged"], traj["labels_comp_merged"],
257 | traj["mapped_ref_merged"], traj["mapped_comp_merged"]))
258 |
259 | if "IDF1" in metrics:
260 | results.update(idf1(
261 | traj["labels_ref_merged"], traj["labels_comp_merged"],
262 | traj["mapped_ref_merged"], traj["mapped_comp_merged"]))
263 |
264 | if "MTML" in metrics:
265 | results.update(mtml(
266 | traj["labels_ref_merged"], traj["labels_comp_merged"],
267 | traj["mapped_ref_merged"], traj["mapped_comp_merged"]))
268 |
269 | if "FAF" in metrics:
270 | results.update(faf(
271 | traj["labels_comp_merged"], traj["mapped_comp_merged"]))
272 |
273 | return results
274 |
275 |
276 | def evaluate_sequence(
277 | res: str,
278 | gt: str,
279 | metrics: list = None,
280 | threads: int = 0,
281 | ):
282 | """
283 | Evaluates a single sequence.
284 |
285 | Args:
286 | res: The path to the results.
287 | gt: The path to the ground truth.
288 | metrics: The metrics to evaluate.
289 | threads: The number of threads to use. If 0, the number of threads
290 | is set to the number of available CPUs.
291 |
292 | Returns:
293 | The results stored in a dictionary.
294 | """
295 |
296 | print("Evaluate sequence: ", res, " with ground truth: ", gt, end="")
297 | # Verify all metrics
298 | if metrics is None:
299 | metrics = ALL_METRICS
300 |
301 | trajectory_data = True
302 | segmentation_data = True
303 |
304 | if metrics in [["SEG"], ["CCA"]]:
305 | trajectory_data = False
306 |
307 | if "SEG" not in metrics:
308 | segmentation_data = False
309 |
310 | comp_tracks, ref_tracks, traj, segm, _, is_valid = load_data(
311 | res, gt, trajectory_data, segmentation_data, threads)
312 |
313 | results = calculate_metrics(
314 | comp_tracks, ref_tracks, traj, segm, metrics, is_valid)
315 |
316 | print("with results: ", results, " done!")
317 |
318 | return results
319 |
320 |
321 | def evaluate_all(
322 | res_root: str,
323 | gt_root: str,
324 | metrics: list = None,
325 | threads: int = 0
326 | ):
327 | """
328 | Evaluate all sequences in a directory
329 |
330 | Args:
331 | res_root: The root directory of the results.
332 | gt_root: The root directory of the ground truth.
333 | metrics: The metrics to evaluate.
334 | threads: The number of threads to use. If 0, the number of threads
335 | is set to the number of available CPUs.
336 |
337 | Returns:
338 | The results stored in a dictionary.
339 | """
340 | results = []
341 | ret = parse_directories(res_root, gt_root)
342 | for res, gt, name in zip(*ret):
343 | results.append([name, evaluate_sequence(res, gt, metrics, threads)])
344 | return results
345 |
346 |
347 | def parse_args():
348 | """ Parse arguments """
349 | parser = argparse.ArgumentParser(description='Evaluates CTC-Sequences.')
350 | parser.add_argument('--res', type=str, required=True)
351 | parser.add_argument('--gt', type=str, required=True)
352 | parser.add_argument('-r', '--recursive', action="store_true")
353 | parser.add_argument('--csv-file', type=str, default=None)
354 | parser.add_argument('-n', '--num-threads', type=int, default=0)
355 | parser.add_argument('--valid', action="store_true")
356 | parser.add_argument('--det', action="store_true")
357 | parser.add_argument('--seg', action="store_true")
358 | parser.add_argument('--tra', action="store_true")
359 | parser.add_argument('--ct', action="store_true")
360 | parser.add_argument('--tf', action="store_true")
361 | parser.add_argument('--bc', action="store_true")
362 | parser.add_argument('--cca', action="store_true")
363 | parser.add_argument('--mota', action="store_true")
364 | parser.add_argument('--hota', action="store_true")
365 | parser.add_argument('--idf1', action="store_true")
366 | parser.add_argument('--chota', action="store_true")
367 | parser.add_argument('--mtml', action="store_true")
368 | parser.add_argument('--faf', action="store_true")
369 | parser.add_argument('--lnk', action="store_true")
370 | args = parser.parse_args()
371 | return args
372 |
373 |
374 | def main():
375 | """
376 | Main function that is called when the script is executed.
377 | """
378 | args = parse_args()
379 | # Prepare metric selection
380 | metrics = [metric for metric, flag in (
381 | ("Valid", args.valid),
382 | ("DET", args.det),
383 | ("SEG", args.seg),
384 | ("TRA", args.tra),
385 | ("CT", args.ct),
386 | ("TF", args.tf),
387 | ("BC", args.bc),
388 | ("CCA", args.cca),
389 | ("MOTA", args.mota),
390 | ("HOTA", args.hota),
391 | ("CHOTA", args.chota),
392 | ("IDF1", args.idf1),
393 | ("MTML", args.mtml),
394 | ("FAF", args.faf),
395 | ("LNK", args.lnk),
396 | ) if flag]
397 | metrics = metrics if metrics else None
398 | # Evaluate sequence or whole directory
399 | if args.recursive:
400 | res = evaluate_all(
401 | res_root=args.res, gt_root=args.gt, metrics=metrics,
402 | threads=args.num_threads
403 | )
404 | else:
405 | res = evaluate_sequence(
406 | res=args.res, gt=args.gt, metrics=metrics, threads=args.num_threads)
407 |     # Print and store the results
408 | print_results(res)
409 | if args.csv_file is not None:
410 | store_results(args.csv_file, res)
411 |
412 |
413 | if __name__ == "__main__":
414 | main()
415 |
--------------------------------------------------------------------------------
/ctc_metrics/scripts/noise.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from os.path import join, basename, dirname, exists
3 | import copy
4 | from multiprocessing import Pool, cpu_count
5 | import numpy as np
6 | import pandas as pd
7 |
8 | from ctc_metrics.metrics import ALL_METRICS
9 | from ctc_metrics.utils.filesystem import parse_directories, read_tracking_file, \
10 | parse_masks
11 |
12 | from ctc_metrics.scripts.evaluate import match_computed_to_reference_masks, \
13 | calculate_metrics
14 |
15 |
16 | def load_data(
17 | gt: str,
18 | threads: int = 0,
19 | ):
20 | """
21 | Load the data from the ground truth and use it as computed and reference
22 | data.
23 |
24 | Args:
25 | gt: The path to the ground truth.
26 | threads: The number of threads to use for multiprocessing.
27 |
28 | Returns:
29 | The computed and reference tracks, the trajectories and the segmentation
30 | masks.
31 | """
32 | # Read tracking files and parse mask files
33 | ref_tracks = read_tracking_file(join(gt, "TRA", "man_track.txt"))
34 | comp_tracks = np.copy(ref_tracks)
35 | ref_tra_masks = parse_masks(join(gt, "TRA"))
36 | comp_masks = ref_tra_masks
37 |     assert len(ref_tra_masks) > 0, f"{gt}: No ground truth masks found!"
38 |     # Match ground truth tracking masks to result masks
39 | traj = match_computed_to_reference_masks(
40 | ref_tra_masks, comp_masks, threads=threads)
41 |     # Segmentation matching is skipped; SEG is not evaluated in the noise tests
42 | segm = {}
43 | return comp_tracks, ref_tracks, traj, segm, comp_masks
44 |
45 |
46 | def remove_mitosis(
47 | comp_tracks: np.ndarray,
48 | num_to_remove: int,
49 | seed: int,
50 | ):
51 | """
52 |     Remove mitosis events by removing the mother-daughter relation.
53 |
54 | Args:
55 | comp_tracks: The computed tracks.
56 | num_to_remove: The number of mitosis events to remove.
57 | seed: The seed for the random number generator.
58 |
59 | Returns:
60 | The computed tracks with the mitosis events removed.
61 | """
62 | if num_to_remove == 0:
63 | return comp_tracks
64 | parents, counts = np.unique(
65 | comp_tracks[comp_tracks[:, 3] > 0, 3], return_counts=True)
66 | parents = parents[counts > 1]
67 | num_splits = min(num_to_remove, len(parents))
68 | np.random.seed(seed)
69 | np.random.shuffle(parents)
70 | for parent in parents[:num_splits]:
71 | comp_tracks[np.isin(comp_tracks[:, 3], parent), 3] = 0
72 | return comp_tracks
73 |
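# A toy sketch of the expected effect (values invented for illustration):
#
#     tracks = np.array([[1, 0, 3, 0],    # mother track
#                        [2, 4, 6, 1],    # daughter 1
#                        [3, 4, 6, 1]])   # daughter 2
#     tracks = remove_mitosis(tracks, num_to_remove=1, seed=0)
#     # -> the parent column of both daughters is set to 0, removing the mitosis link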
74 |
75 | def sample_fn(l_comp, max_num_candidates, seed):
76 | """
77 | Sample false negatives.
78 | """
79 | candidates = []
80 | for frame, x in enumerate(l_comp):
81 | for i, _ in enumerate(x):
82 | candidates.append((frame, i))
83 | np.random.seed(seed)
84 | np.random.shuffle(candidates)
85 | num_fn = min(max_num_candidates, len(candidates))
86 | return candidates[:num_fn]
87 |
88 |
89 | def add_false_negatives(
90 | comp_tracks: np.ndarray,
91 | traj: dict,
92 | noise_add_false_negative: int,
93 | seed: int
94 | ):
95 | """
96 | Add false negatives to the data.
97 |
98 | Args:
99 | comp_tracks: The computed tracks.
100 | traj: The trajectories.
101 | noise_add_false_negative: The number of false negatives to add.
102 | seed: The seed for the random number generator.
103 |
104 | Returns:
105 | The computed tracks and the trajectories with the false negatives added.
106 | """
107 | if noise_add_false_negative == 0:
108 | return comp_tracks, traj
109 |
110 | next_id = np.max(comp_tracks[:, 0]) + 1
111 | l_comp = traj["labels_comp"]
112 | m_comp = traj["mapped_comp"]
113 | m_ref = traj["mapped_ref"]
114 | for frame, i in sample_fn(l_comp, noise_add_false_negative, seed):
115 | if i >= len(l_comp[frame]):
116 | i = np.random.randint(len(l_comp[frame]))
117 | v = l_comp[frame][i]
118 | # Remove from current frame
119 | while v in m_comp[frame]:
120 | _i = m_comp[frame].index(v)
121 | m_comp[frame].pop(_i)
122 | m_ref[frame].pop(_i)
123 | l_comp[frame].pop(i)
124 | # Create new trajectory
125 | start, end = comp_tracks[comp_tracks[:, 0] == v, 1:3][0]
126 | if start == end:
127 | comp_tracks = comp_tracks[comp_tracks[:, 0] != v]
128 | comp_tracks[comp_tracks[:, 3] == v, 3] = 0
129 | elif frame == start:
130 | comp_tracks[comp_tracks[:, 0] == v, 1] += 1
131 | elif frame == end:
132 | comp_tracks[comp_tracks[:, 0] == v, 2] -= 1
133 | else:
134 | comp_tracks[comp_tracks[:, 0] == v, 2] = frame - 1
135 | comp_tracks[comp_tracks[:, 3] == v, 3] = next_id
136 | comp_tracks = np.concatenate(
137 | [comp_tracks, [[next_id, frame + 1, end, v]]], axis=0)
138 | for f in range(frame + 1, end + 1):
139 | _l_comp = np.asarray(l_comp[f])
140 | _m_comp = np.asarray(m_comp[f])
141 | _l_comp[_l_comp == v] = next_id
142 | _m_comp[_m_comp == v] = next_id
143 | l_comp[f] = _l_comp.tolist()
144 | m_comp[f] = _m_comp.tolist()
145 | next_id += 1
146 | return comp_tracks, traj
147 |
148 |
149 | def add_false_positives(
150 | comp_tracks: np.ndarray,
151 | traj: dict,
152 | noise_add_false_positive: int,
153 | seed: int,
154 | ):
155 | """
156 | Add false positives to the data.
157 |
158 | Args:
159 | comp_tracks: The computed tracks.
160 | traj: The trajectories.
161 | noise_add_false_positive: The number of false positives to add.
162 | seed: The seed for the random number generator.
163 |
164 | Returns:
165 | The computed tracks and the trajectories with the false positives added.
166 | """
167 | if noise_add_false_positive == 0:
168 | return comp_tracks, traj
169 |
170 | label = traj["labels_comp"]
171 | next_id = np.max(comp_tracks[:, 0]) + 1
172 | max_frame = np.max(comp_tracks[:, 2])
173 | fp_to_add = int(noise_add_false_positive)
174 | np.random.seed(seed)
175 | for _ in range(fp_to_add):
176 | frame = np.random.randint(max_frame + 1)
177 | comp_tracks = np.concatenate(
178 | [comp_tracks, [[next_id, frame, frame, 0]]], axis=0)
179 | label[frame].append(next_id)
180 | next_id += 1
181 |
182 | return comp_tracks, traj
183 |
184 |
185 | def remove_matches(
186 | comp_tracks: np.ndarray,
187 | traj: dict,
188 | noise_remove_matches: int,
189 | seed: int,
190 | ):
191 | """
192 | Remove ref-comp matches from the data.
193 |
194 | Args:
195 | comp_tracks: The computed tracks.
196 | traj: The trajectories.
197 | noise_remove_matches: The number of matches to remove.
198 | seed: The seed for the random number generator.
199 |
200 | Returns:
201 | The computed tracks and the trajectories with the matches removed.
202 | """
203 | if noise_remove_matches == 0:
204 | return comp_tracks, traj
205 |
206 | m_comp = traj["mapped_comp"]
207 | m_ref = traj["mapped_ref"]
208 | candidates = []
209 | for frame in range(1, len(m_comp)):
210 | for i in range(len(m_comp[frame])):
211 | candidates.append(frame)
212 | np.random.seed(seed)
213 | np.random.shuffle(candidates)
214 | num_unassoc = min(noise_remove_matches, len(candidates))
215 | for frame in candidates[:num_unassoc]:
216 | total_inds = len(m_comp[frame])
217 | i = np.random.randint(total_inds)
218 | m_comp[frame].pop(i)
219 | m_ref[frame].pop(i)
220 |
221 | return comp_tracks, traj
222 |
223 |
224 | def add_id_switches(
225 | comp_tracks: np.ndarray,
226 | traj: dict,
227 | noise_add_idsw: int,
228 | seed: int,
229 | ):
230 | """
231 | Add ID switches to the data.
232 |
233 | Args:
234 | comp_tracks: The computed tracks.
235 | traj: The trajectories.
236 | noise_add_idsw: The number of ID switches to add.
237 | seed: The seed for the random number generator.
238 |
239 | Returns:
240 | The computed tracks and the trajectories with the ID switches added.
241 | """
242 | if noise_add_idsw == 0:
243 | return comp_tracks, traj
244 |
245 | labels_comp = traj["labels_comp"]
246 | m_comp = traj["mapped_comp"]
247 | candidates = []
248 | for frame, x in enumerate(m_comp):
249 | if np.unique(x).shape[0] <= 1:
250 | continue
251 | for _ in range(len(np.unique(x)) - 1):
252 | candidates.append(frame)
253 | np.random.seed(seed)
254 | np.random.shuffle(candidates)
255 | num_unassoc = min(noise_add_idsw, len(candidates))
256 | for frame in candidates[:num_unassoc]:
257 | # Select two random indices
258 | comp = m_comp[frame]
259 | c1, c2 = np.random.choice(comp, 2, replace=False)
260 | end1 = int(comp_tracks[comp_tracks[:, 0] == c1, 2].squeeze())
261 | end2 = int(comp_tracks[comp_tracks[:, 0] == c2, 2].squeeze())
262 | children1 = comp_tracks[:, 3] == c1
263 | children2 = comp_tracks[:, 3] == c2
264 | # Swap the two indices
265 | for f in range(frame, max(end1, end2) + 1):
266 | _l_comp = np.asarray(labels_comp[f])
267 | _comp = np.asarray(m_comp[f])
268 | i1 = _comp == c1
269 | i2 = _comp == c2
270 | _comp[i1] = c2
271 | _comp[i2] = c1
272 | i1 = _l_comp == c1
273 | i2 = _l_comp == c2
274 | _l_comp[i1] = c2
275 | _l_comp[i2] = c1
276 | labels_comp[f] = _l_comp.tolist()
277 | m_comp[f] = _comp.tolist()
278 | i1 = comp_tracks[:, 0] == c1
279 | i2 = comp_tracks[:, 0] == c2
280 | comp_tracks[i1, 2] = end2
281 | comp_tracks[i2, 2] = end1
282 | comp_tracks[children1, 3] = c2
283 | comp_tracks[children2, 3] = c1
284 |
285 | return comp_tracks, traj
286 |
287 |
288 | def add_noise(
289 | comp_tracks: np.ndarray,
290 | traj: dict,
291 | seed: int = 0,
292 | noise_add_false_negative: int = 0,
293 | noise_add_false_positive: int = 0,
294 | noise_add_idsw: int = 0,
295 | noise_remove_matches: int = 0,
296 | noise_remove_mitosis: int = 0,
297 | ):
298 | """
299 | Add noise to the data.
300 |
301 | Args:
302 | comp_tracks: The computed tracks.
303 | traj: The trajectories.
304 | seed: The seed for the random number generator.
305 | noise_add_false_negative:
306 | Adds n false negatives to the data, where the parameter is n.
307 | noise_remove_mitosis:
308 |             Removes parent-daughter relations of n mitosis events, where the
309 | parameter describes n.
310 | noise_add_false_positive:
311 | Adds n false positives to the data, where the parameter is n.
312 | noise_add_idsw:
313 | Adds n ID switches to the data, where the parameter is n.
314 | noise_remove_matches:
315 | Removes n matches from the data, where the parameter is n.
316 | This produces n false negatives and n false positives.
317 |
318 | Returns:
319 | comp_tracks: Updated with noise applied.
320 | traj: Updated with noise applied.
321 | """
322 | comp_tracks = np.copy(comp_tracks)
323 | traj = copy.deepcopy(traj)
324 |
325 |     # Remove parent-daughter relations of mitosis events
326 | comp_tracks = remove_mitosis(
327 | comp_tracks, noise_remove_mitosis, seed)
328 |
329 | # Add false negatives
330 | comp_tracks, traj = add_false_negatives(
331 | comp_tracks, traj, noise_add_false_negative, seed)
332 |
333 | # Add false positives
334 | comp_tracks, traj = add_false_positives(
335 | comp_tracks, traj, noise_add_false_positive, seed)
336 |
337 | # Unmatch true positives
338 | comp_tracks, traj = remove_matches(
339 | comp_tracks, traj, noise_remove_matches, seed)
340 |
341 | # Add IDSw
342 | comp_tracks, traj = add_id_switches(
343 | comp_tracks, traj, noise_add_idsw, seed)
344 |
345 | return comp_tracks, traj
346 |
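# A minimal sketch of combining several perturbations (toy parameter values):
#
#     n_tracks, n_traj = add_noise(
#         comp_tracks, traj, seed=1,
#         noise_add_false_negative=10,
#         noise_add_idsw=5,
#     )
#     # The inputs are copied internally, so comp_tracks and traj stay unchanged.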
347 |
348 | def is_new_setting(
349 | setting: dict,
350 | path: str,
351 | name: str,
352 | df: pd.DataFrame = None,
353 | ):
354 | """
355 |     Check if the given parameter setting already exists in the csv file.
356 |
357 | Args:
358 | setting: The setting to check.
359 | path: The path to the csv file.
360 | name: The name of the sequence.
361 | df: The dataframe to check.
362 |
363 | Returns:
364 | True if the setting is new, False otherwise.
365 | """
366 | if exists(path):
367 | setting["name"] = name
368 | if df is None:
369 | df = pd.read_csv(path, index_col="index", sep=";")
370 | _df = df.copy()
371 | for k, v in setting.items():
372 | _df = _df[_df[k] == v]
373 | if len(_df) == 0:
374 | return True, df
375 | return False, df
376 | return True, df
377 |
378 |
379 | def append_results(
380 | path: str,
381 | results: list,
382 | ):
383 | """
384 | Append the results to the csv file.
385 |
386 | Args:
387 | path: The path to the csv file.
388 | results: The results to append.
389 | """
390 | # Check if the file exists
391 | results = [pd.DataFrame.from_dict(r, orient="index").T for r in results]
392 | if exists(path):
393 | df = pd.read_csv(path, index_col="index", sep=";")
394 | df = pd.concat([df] + results)
395 | df.reset_index(drop=True, inplace=True)
396 | else:
397 | df = pd.concat(results)
398 | df.to_csv(path, index_label="index", sep=";")
399 |
400 |
401 | def run_noisy_sample(
402 | comp_tracks: np.ndarray,
403 | ref_tracks: np.ndarray,
404 | traj: dict,
405 | segm: dict,
406 | metrics: list,
407 | name: str,
408 | setting: dict,
409 | default_setting: dict,
410 | ):
411 | """
412 | Run a noisy sample
413 |
414 | Args:
415 | comp_tracks: The computed tracks.
416 | ref_tracks: The reference tracks.
417 | traj: The trajectories.
418 | segm: The segmentation masks.
419 | metrics: The metrics to calculate.
420 | name: The name of the sequence.
421 | setting: The noise setting.
422 | default_setting: The default setting without noise.
423 |
424 | Returns:
425 | The results stored in a dictionary.
426 | """
427 | # Add noise to the data and calculate the metrics
428 | n_comp_tracks, n_traj = add_noise(
429 | comp_tracks, traj, **setting)
430 |
431 | results = {"name": name}
432 | results.update(default_setting)
433 |
434 | resulting_metrics = calculate_metrics(
435 | n_comp_tracks, ref_tracks, n_traj, segm, metrics,
436 | is_valid=True
437 | )
438 | results.update(resulting_metrics)
439 | return results
440 |
441 |
442 | def filter_existing_noise_settings(
443 | noise_settings: list,
444 | csv_file: str,
445 | name: str,
446 | ):
447 | """
448 | Filter and remove existing noise settings from the list of noise settings.
449 |
450 | Args:
451 | noise_settings: The list of noise settings.
452 | csv_file: The path to the csv file.
453 | name: The name of the sequence.
454 |
455 | Returns:
456 | The list of new noise settings.
457 | """
458 | df = None
459 | new_noise_settings = []
460 |
461 | for _, setting in enumerate(noise_settings):
462 | # Check if noise setting is new
463 | default_setting = {
464 | "seed": 0,
465 | "noise_add_false_positive": 0,
466 | "noise_add_false_negative": 0,
467 | "noise_add_idsw": 0,
468 | "noise_remove_matches": 0,
469 | "noise_remove_mitosis": 0,
470 | }
471 |
472 | default_setting.update(setting)
473 |
474 | is_new, df = is_new_setting(default_setting, csv_file, name, df)
475 | if not is_new:
476 | continue
477 | new_noise_settings.append((setting, default_setting))
478 | return new_noise_settings
479 |
480 |
481 | def create_noise_settings(
482 | repeats: int,
483 | num_false_neg: int,
484 | num_false_pos: int,
485 | num_idsw: int,
486 | num_matches: int,
487 | comp_tracks: np.ndarray,
488 | ref_tracks: np.ndarray,
489 | ):
490 | """
491 | Create a list of noise settings that should be executed from the given
492 | parameters.
493 |
494 | Args:
495 | repeats: The number of repeats for each noise setting.
496 | num_false_neg: The number of false negatives to add.
497 | num_false_pos: The number of false positives to add.
498 | num_idsw: The number of ID switches to add.
499 | num_matches: The number of matches to remove.
500 | comp_tracks: The computed tracks.
501 | ref_tracks: The reference tracks.
502 |
503 | Returns:
504 | The list of noise settings.
505 | """
506 | # Extract some statistics
507 | parents, counts = np.unique(
508 | comp_tracks[comp_tracks[:, 3] > 0, 3], return_counts=True)
509 | num_false_negs_max = int(np.sum(ref_tracks[:, 2] - ref_tracks[:, 1] + 1))
510 |
511 | # Create dictionary
512 | noise_settings = [{}]
513 |
514 | for i in range(0, repeats):
515 | # Add mitosis detection noise
516 | for x in range(1, len(parents[counts > 1]) + 1):
517 | noise_settings.append({"seed": i, "noise_remove_mitosis": x})
518 |
519 | # Add false negative noise
520 | for x in range(1, min(num_false_neg, num_false_negs_max)):
521 | noise_settings.append({"seed": i, "noise_add_false_negative": x})
522 |
523 | # Add false positive noise
524 | for x in range(1, num_false_pos):
525 | noise_settings.append({"seed": i, "noise_add_false_positive": x})
526 |
527 | # Add matching noise
528 | for x in range(1, min(num_matches, num_false_negs_max)):
529 | noise_settings.append({"seed": i, "noise_remove_matches": x})
530 |
531 | # Add ID switch noise
532 | for x in range(1, num_idsw):
533 | noise_settings.append({"seed": i, "noise_add_idsw": x})
534 |
535 | return noise_settings
536 |
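# A sketch of the returned list: it starts with an empty dict for the noise-free
# baseline, followed by single-parameter settings, e.g.:
#
#     [{},
#      {"seed": 0, "noise_remove_mitosis": 1}, ...,
#      {"seed": 0, "noise_add_false_negative": 1}, ...,
#      {"seed": 0, "noise_add_false_positive": 1}, ...]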
537 |
538 | def evaluate_sequence(
539 | gt: str,
540 | name: str,
541 | threads: int = 0,
542 | csv_file: str = None,
543 | save_after: int = 20,
544 | repeats: int = 10,
545 | num_false_neg: int = 500,
546 | num_false_pos: int = 500,
547 | num_idsw: int = 500,
548 | num_matches: int = 500,
549 | ):
550 | """
551 | Evaluates a single sequence
552 |
553 | Args:
554 | gt: The path to the ground truth.
555 | name: The name of the sequence.
556 | threads: The number of threads to use for multiprocessing.
557 | csv_file: The path to the csv file to store the results.
558 | save_after: Save results after n runs.
559 | repeats: The number of repeats for each noise setting.
560 | num_false_neg: The number of false negatives to add.
561 | num_false_pos: The number of false positives to add.
562 | num_idsw: The number of ID switches to add.
563 | num_matches: The number of matches to remove.
564 |
565 | """
566 |
567 | print("Run noise test on ", gt, end="...")
568 | # Prepare all metrics
569 | metrics = copy.deepcopy(ALL_METRICS)
570 | metrics.remove("Valid")
571 | metrics.remove("SEG")
572 |
573 | comp_tracks, ref_tracks, traj, segm, _ = load_data(gt, threads)
574 |
575 | # Selection of noise settings
576 | noise_settings = create_noise_settings(
577 | repeats, num_false_neg, num_false_pos, num_idsw, num_matches,
578 | comp_tracks, ref_tracks
579 | )
580 |
581 | # Filter existing noise settings
582 | new_noise_settings = filter_existing_noise_settings(
583 | noise_settings, csv_file, name
584 | )
585 |
586 | # Evaluate new noise settings
587 | if threads == 1:
588 | results_list = []
589 | for i, (setting, default_setting) in enumerate(new_noise_settings):
590 | print(
591 | f"\rRun noise test on {gt}, \t{i + 1}\t/ {len(new_noise_settings)}",
592 | end=""
593 | )
594 | # Add noise to the data and calculate the metrics
595 | results = run_noisy_sample(
596 | comp_tracks, ref_tracks, traj, segm, metrics,
597 | name, setting, default_setting
598 | )
599 | # Aggregate results and store them every n runs
600 | results_list.append(results)
601 | if len(results_list) == save_after or i + 1 == len(new_noise_settings):
602 | append_results(csv_file, results_list)
603 | results_list = []
604 |
605 | else:
606 | threads = cpu_count() if threads == 0 else threads
607 | with Pool(threads) as p:
608 | input_list = []
609 | for i, (setting, default_setting) in enumerate(new_noise_settings):
610 | print(
611 | f"\rRun noise test on {gt}, \t{i + 1}\t/ {len(new_noise_settings)}",
612 | end=""
613 | )
614 | # Add noise to the data and calculate the metrics
615 | input_list.append((
616 | comp_tracks, ref_tracks, traj, segm, metrics,
617 | name, setting, default_setting
618 | ))
619 |                 # Process in parallel and store the results every n runs
620 | if len(input_list) == save_after or i + 1 == len(new_noise_settings):
621 | results_list = p.starmap(run_noisy_sample, input_list)
622 | append_results(csv_file, results_list)
623 | input_list = []
624 | print("")
625 |
626 |
627 | def evaluate_all(
628 | gt_root: str,
629 | csv_file: str = None,
630 | threads: int = 0,
631 | **kwargs
632 | ):
633 | """
634 | Evaluate all sequences in a directory
635 |
636 | Args:
637 | gt_root: The root directory of the ground truth.
638 | csv_file: The path to the csv file to store the results.
639 | threads: The number of threads to use for multiprocessing.
640 | **kwargs: The noise settings.
641 |
642 | """
643 | ret = parse_directories(gt_root, gt_root)
644 | for _, gt, name in zip(*ret):
645 | evaluate_sequence(gt, name, threads, csv_file, **kwargs)
646 |
647 |
648 | def parse_args():
649 | """ Parse arguments """
650 | parser = argparse.ArgumentParser(description='Evaluates CTC-Sequences.')
651 | parser.add_argument('--gt', type=str, required=True)
652 | parser.add_argument('-r', '--recursive', action="store_true")
653 | parser.add_argument('--csv-file', type=str, default=None)
654 | parser.add_argument('-n', '--num-threads', type=int, default=0)
655 | parser.add_argument('--num-false-pos', type=int, default=500)
656 | parser.add_argument('--num-false-neg', type=int, default=500)
657 | parser.add_argument('--num-idsw', type=int, default=500)
658 | parser.add_argument('--num-matches', type=int, default=500)
659 | parser.add_argument('--save-after', type=int, default=100)
660 | parser.add_argument('--repeats', type=int, default=10)
661 | args = parser.parse_args()
662 | return args
663 |
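# A hypothetical invocation via the `ctc_noise` entry point (paths are placeholders):
#
#     ctc_noise --gt ./BF-C2DL-HSC/01_GT --csv-file ./noise_results.csv -n 8 --repeats 5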
664 |
665 | def main():
666 | """
667 | Main function that is called when the script is executed.
668 | """
669 | args = parse_args()
670 |
671 | # Evaluate sequence or whole directory
672 | experiments = {
673 | "num_false_pos": args.num_false_pos,
674 | "num_false_neg": args.num_false_neg,
675 | "num_idsw": args.num_idsw,
676 | "num_matches": args.num_matches,
677 | "repeats": args.repeats,
678 | "save_after": args.save_after,
679 | }
680 | if args.recursive:
681 | evaluate_all(args.gt, args.csv_file, args.num_threads, **experiments)
682 | else:
683 | challenge = basename(dirname(args.gt))
684 | sequence = basename(args.gt).replace("_GT", "")
685 | name = challenge + "_" + sequence
686 | evaluate_sequence(
687 | args.gt, name, args.num_threads, args.csv_file, **experiments
688 | )
689 |
690 |
691 | if __name__ == "__main__":
692 | main()
693 |
--------------------------------------------------------------------------------
/ctc_metrics/scripts/validate.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from os.path import join
3 | from multiprocessing import Pool, cpu_count
4 |
5 | from ctc_metrics.metrics import valid
6 | from ctc_metrics.utils.handle_results import print_results
7 | from ctc_metrics.utils.filesystem import \
8 | parse_directories, read_tracking_file, parse_masks
9 | from ctc_metrics.utils.representations import match as match_tracks
10 |
11 |
12 | def validate_sequence(
13 | res: str,
14 | threads: int = 0,
15 | ):
16 | """
17 | Validates a single sequence
18 |
19 | Args:
20 | res: The path to the results.
21 | threads: The number of threads to use. If 0, the number of threads
22 | is set to the number of available CPUs.
23 |
24 | Returns:
25 | The results stored in a dictionary.
26 | """
27 | print("\r", res, end="")
28 | res_tracks = read_tracking_file(join(res, "res_track.txt"))
29 | res_masks = parse_masks(res)
30 | assert len(res_masks) > 0, res
31 | args = zip([None for x in res_masks], res_masks)
32 | if threads != 1:
33 | if threads == 0:
34 | threads = cpu_count()
35 | with Pool(threads) as p:
36 | matches = p.starmap(match_tracks, args)
37 | else:
38 | matches = [match_tracks(*x) for x in args]
39 | labels_gt, labels_res, mapped_gt, mapped_res = [], [], [], []
40 | for match in matches:
41 | labels_gt.append(match[0])
42 | labels_res.append(match[1])
43 | mapped_gt.append(match[2])
44 | mapped_res.append(match[3])
45 | results = {"Valid": valid(res_masks, res_tracks, labels_res)}
46 | print("\r", end="")
47 | return results
48 |
49 |
50 | def validate_all(
51 | res_root: str,
52 | threads: int = 0,
53 | ):
54 | """
55 | Evaluate all sequences in a directory
56 |
57 | Args:
58 | res_root: The path to the result directory.
59 | threads: The number of threads to use. If 0, the number of threads
60 | is set to the number of available CPUs.
61 |
62 | Returns:
63 | The results stored in a dictionary.
64 | """
65 | results = []
66 | ret = parse_directories(res_root, None)
67 | for res, _, name in zip(*ret):
68 | results.append([name, validate_sequence(res, threads)])
69 | return results
70 |
71 |
72 | def parse_args():
73 | """ Parses the arguments. """
74 | parser = argparse.ArgumentParser(description='Validates CTC-Sequences.')
75 | parser.add_argument('--res', type=str, required=True)
76 | parser.add_argument('-r', '--recursive', action="store_true")
77 | parser.add_argument('-n', '--num-threads', type=int, default=0)
78 | args = parser.parse_args()
79 | return args
80 |
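# A hypothetical invocation via the `ctc_validate` entry point (path is a placeholder):
#
#     ctc_validate --res ./BF-C2DL-HSC/01_RES -n 4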
81 |
82 | def main():
83 | """
84 | Main function that is called when the script is executed.
85 | """
86 | args = parse_args()
87 | if args.recursive:
88 | res = validate_all(args.res, args.num_threads)
89 | else:
90 | res = validate_sequence(args.res, args.num_threads)
91 | print_results(res)
92 |
93 |
94 | if __name__ == "__main__":
95 | main()
96 |
97 |
--------------------------------------------------------------------------------
/ctc_metrics/scripts/visualize.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from os import listdir, makedirs
3 | from os.path import join
4 | import tifffile as tiff
5 | import cv2
6 | import numpy as np
7 |
8 | from ctc_metrics.utils.filesystem import read_tracking_file
9 |
10 |
11 | SHOW_BORDER = True
12 | BORDER_WIDTH = {
13 | "BF-C2DL-HSC": 25,
14 | "BF-C2DL-MuSC": 25,
15 | "Fluo-N2DL-HeLa": 25,
16 | "PhC-C2DL-PSC": 25,
17 | "Fluo-N2DH-SIM+": 0,
18 | "DIC-C2DH-HeLa": 50,
19 | "Fluo-C2DL-Huh7": 50,
20 | "Fluo-C2DL-MSC": 50,
21 | "Fluo-N2DH-GOWT1": 50,
22 | "PhC-C2DH-U373": 50,
23 | }
24 |
25 | np.random.seed(0)
26 | PALETTE = np.random.randint(0, 256, (10000, 3))
27 |
28 |
29 | def get_palette_color(i):
30 | i = i % PALETTE.shape[0]
31 | return PALETTE[i]
32 |
33 |
34 | def visualize(
35 | img_dir: str,
36 | res_dir: str,
37 | viz_dir: str = None,
38 | video_name: str = None,
39 | border_width: str = None,
40 | show_labels: bool = True,
41 | show_parents: bool = True,
42 | ids_to_show: list = None,
43 | start_frame: int = 0,
44 | framerate: int = 30,
45 | opacity: float = 0.5,
46 | ): # pylint: disable=too-many-arguments,too-complex
47 | """
48 | Visualizes the tracking results.
49 |
50 | Args:
51 | img_dir: str
52 | The path to the images.
53 | res_dir: str
54 | The path to the results.
55 | viz_dir: str
56 | The path to save the visualizations.
57 | video_name: str
58 | The path to the video if a video should be created. Note that no
59 | visualization is available during video creation.
60 | border_width: str or int
61 | The width of the border. Either an integer or a string that
62 | describes the challenge name.
63 | show_labels: bool
64 | Print instance labels to the output.
65 | show_parents: bool
66 | Print parent labels to the output.
67 | ids_to_show: list
68 | The IDs of the instances to show. All others will be ignored.
69 | start_frame: int
70 | The frame to start the visualization.
71 | framerate: int
72 | The framerate of the video.
73 | opacity: float
74 | The opacity of the instance colors.
75 |
76 | """
77 | # Define initial video parameters
78 | wait_time = max(1, round(1000 / framerate))
79 | if border_width is None:
80 | border_width = 0
81 | elif isinstance(border_width, str):
82 | try:
83 | border_width = int(border_width)
84 | except ValueError as exc:
85 | if border_width in BORDER_WIDTH:
86 | border_width = BORDER_WIDTH[border_width]
87 | else:
88 | raise ValueError(
89 | f"Border width '{border_width}' not recognized. "
90 | f"Existing datasets: {BORDER_WIDTH.keys()}"
91 | ) from exc
92 |
93 | # Load image and tracking data
94 | images = [x for x in sorted(listdir(img_dir)) if x.endswith(".tif")]
95 | results = [x for x in sorted(listdir(res_dir)) if x.endswith(".tif")]
96 | parents = {
97 | l[0]: l[3] for l in read_tracking_file(join(res_dir, "res_track.txt"))
98 | }
99 |
100 | # Create visualization directory
101 | if viz_dir:
102 | makedirs(viz_dir, exist_ok=True)
103 |
104 | video_writer = None
105 |
106 | # Loop through all images
107 | while start_frame < len(images):
108 | # Read image file
109 | img_name, res_name = images[start_frame], results[start_frame]
110 | img_path, res_path, = join(img_dir, img_name), join(res_dir, res_name)
111 | print(f"\rFrame {img_name} (of {len(images)})", end="")
112 |
113 | # Visualize the image
114 | viz = create_colored_image(
115 | cv2.imread(img_path),
116 | tiff.imread(res_path),
117 | labels=show_labels,
118 | frame=start_frame,
119 | parents=parents if show_parents else None,
120 | ids_to_show=ids_to_show,
121 | opacity=opacity,
122 | )
123 | if border_width > 0:
124 | viz = cv2.rectangle(
125 | viz,
126 | (border_width, border_width),
127 | (viz.shape[1] - border_width, viz.shape[0] - border_width),
128 | (0, 0, 255), 1
129 | )
130 |
131 | # Save the visualization
132 | if video_name is not None:
133 | if video_writer is None:
134 | video_path = join(
135 | viz_dir, f"{video_name.replace('.mp4', '')}.mp4")
136 | video_writer = cv2.VideoWriter(
137 | video_path,
138 | cv2.VideoWriter_fourcc(*"mp4v"),
139 | framerate,
140 | (viz.shape[1], viz.shape[0])
141 | )
142 | video_writer.write(viz)
143 | start_frame += 1
144 | continue
145 |
146 | # Show the video
147 | cv2.imshow("VIZ", viz)
148 | key = cv2.waitKey(wait_time)
149 | if key == ord("q"):
150 | # Quit the visualization
151 | break
152 | if key == ord("w"):
153 | # Start or stop the auto visualization
154 | if wait_time == 0:
155 | wait_time = max(1, round(1000 / framerate))
156 | else:
157 | wait_time = 0
158 | elif key == ord("d"):
159 | # Move to the next frame
160 | start_frame += 1
161 | wait_time = 0
162 | elif key == ord("a"):
163 | # Move to the previous frame
164 | start_frame -= 1
165 | wait_time = 0
166 | elif key == ord("l"):
167 | # Toggle the show labels option
168 | show_labels = not show_labels
169 | elif key == ord("p"):
170 | # Toggle the show parents option
171 | show_parents = not show_parents
172 | elif key == ord("s"):
173 | # Save the visualization
174 | if viz_dir is None:
175 | print("Please define the '--viz' argument to save the "
176 | "visualizations.")
177 | continue
178 | viz_path = join(viz_dir, img_name) + ".jpg"
179 | cv2.imwrite(viz_path, viz)
180 | else:
181 | # Move to the next frame
182 | start_frame += 1
183 |
184 |
185 | def create_colored_image(
186 | img: np.ndarray,
187 | res: np.ndarray,
188 | labels: bool = False,
189 | opacity: float = 0.5,
190 | ids_to_show: list = None,
191 | frame: int = None,
192 | parents: dict = None,
193 | ):
194 | """
195 | Creates a colored image from the input image and the results.
196 |
197 | Args:
198 | img: np.ndarray
199 | The input image.
200 | res: np.ndarray
201 | The results.
202 | labels: bool
203 | Print instance labels to the output.
204 | opacity: float
205 | The opacity of the instance colors.
206 | ids_to_show: list
207 | The IDs of the instances to show. All others will be ignored.
208 | frame: int
209 | The frame number.
210 | parents: dict
211 | The parent dictionary.
212 |
213 | Returns:
214 | The colored image.
215 | """
216 | img = np.clip(img, 0, 255).astype(np.uint8)
217 | kernel = np.ones((3, 3), dtype=np.uint8)
218 | for i in np.unique(res):
219 | if i == 0:
220 | continue
221 | if ids_to_show is not None:
222 | if i not in ids_to_show:
223 | continue
224 | mask = res == i
225 | contour = (mask * 255).astype(np.uint8) - \
226 | cv2.erode((mask * 255).astype(np.uint8), kernel)
227 | contour = contour != 0
228 | img[mask] = (
229 | np.round((1 - opacity) * img[mask] + opacity * get_palette_color(i))
230 | )
231 | img[contour] = get_palette_color(i)
232 | if frame is not None:
233 | cv2.putText(img, str(frame), (10, 30),
234 | cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
235 | if labels:
236 | # Print label to the center of the object
237 | y, x = np.where(mask)
238 | y, x = np.mean(y), np.mean(x)
239 | text = str(i)
240 | if parents is not None:
241 | if i in parents:
242 | if parents[i] != 0:
243 | text += f"({parents[i]})"
244 | cv2.putText(img, text, (int(x), int(y)),
245 | cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
246 | return img
247 |
248 |
249 | def parse_args():
250 | """ Parses the arguments. """
251 |     parser = argparse.ArgumentParser(description='Visualizes CTC-Sequences.')
252 | parser.add_argument(
253 | '--img', type=str, required=True,
254 | help='The path to the images.'
255 | )
256 | parser.add_argument(
257 | '--res', type=str, required=True, help='The path to the results.'
258 | )
259 | parser.add_argument(
260 | '--viz', type=str, default=None,
261 | help='The path to save the visualizations.'
262 | )
263 | parser.add_argument(
264 | '--video-name', type=str, default=None,
265 | help='The path to the video if a video should be created. Note that no '
266 | 'visualization is available during video creation.'
267 | )
268 | parser.add_argument(
269 | '--border-width', type=str, default=None,
270 | help='The width of the border. Either an integer or a string that '
271 | 'describes the challenge name.'
272 | )
273 | parser.add_argument(
274 | '--show-no-labels', action="store_false",
275 | help='Print no instance labels to the output.'
276 | )
277 | parser.add_argument(
278 | '--show-no-parents', action="store_false",
279 | help='Print no parent labels to the output.'
280 | )
281 | parser.add_argument(
282 | '--ids-to-show', type=int, nargs='+', default=None,
283 | help='The IDs of the instances to show. All others will be ignored.'
284 | )
285 | parser.add_argument(
286 | '--start-frame', type=int, default=0,
287 | help='The frame to start the visualization.'
288 | )
289 | parser.add_argument(
290 | '--framerate', type=int, default=10,
291 | help='The framerate of the video.'
292 | )
293 | parser.add_argument(
294 | '--opacity', type=float, default=0.5,
295 | help='The opacity of the instance colors.'
296 | )
297 | args = parser.parse_args()
298 | return args
299 |
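# A hypothetical invocation via the `ctc_visualize` entry point (paths are placeholders):
#
#     ctc_visualize --img ./BF-C2DL-HSC/01 --res ./BF-C2DL-HSC/01_RES \
#         --viz ./viz --border-width BF-C2DL-HSC --framerate 10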
300 |
301 | def main():
302 | """
303 | Main function that is called when the script is executed.
304 | """
305 | args = parse_args()
306 | visualize(
307 | args.img,
308 | args.res,
309 | viz_dir=args.viz,
310 | video_name=args.video_name,
311 | border_width=args.border_width,
312 | show_labels=args.show_no_labels,
313 | show_parents=args.show_no_parents,
314 | ids_to_show=args.ids_to_show,
315 | start_frame=args.start_frame,
316 | framerate=args.framerate,
317 | opacity=args.opacity,
318 | )
319 |
320 |
321 | if __name__ == "__main__":
322 | main()
323 |
--------------------------------------------------------------------------------
/ctc_metrics/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/ctc_metrics/utils/__init__.py
--------------------------------------------------------------------------------
/ctc_metrics/utils/filesystem.py:
--------------------------------------------------------------------------------
1 | import os
2 | from os import listdir
3 | from os.path import join, exists, isdir
4 | import numpy as np
5 |
6 |
7 | def parse_directories(
8 | input_dir: str,
9 | gt_dir: str = None,
10 | ):
11 | """
12 | Parses a directory and searches for challenges and their respective
13 | result/ground-truth subdirectories.
14 |
15 | Args:
16 | input_dir: The directory to parse.
17 | gt_dir: The directory containing the ground truth.
18 |
19 | Returns:
20 | A tuple of three lists containing the result directories, the
21 | ground-truth directories and the names of the challenges/sequences.
22 | """
23 |
24 | # Parse sequences to evaluate
25 | challenges = [x for x in sorted(listdir(input_dir))
26 | if isdir(join(input_dir, x))]
27 | assert len(challenges) > 0, f"No challenges found in {input_dir}"
28 | sequence_appendices = ["01", "02"]
29 | res_dirs, gt_dirs, names = [], [], []
30 | for challenge in challenges:
31 | sequences = [
32 | x[0:2] for x in sorted(listdir(join(input_dir, challenge))) if
33 | isdir(join(input_dir, challenge, x))
34 | ]
35 | for sequence in sequences:
36 | if sequence in sequence_appendices:
37 | res_dir = join(input_dir, challenge, sequence + "_RES")
38 | if res_dir in res_dirs:
39 | continue
40 | res_dirs.append(res_dir)
41 | if gt_dir is not None:
42 | _gt_dir = join(gt_dir, challenge, sequence + "_GT")
43 | if not exists(_gt_dir):
44 | _gt_dir = None
45 | gt_dirs.append(_gt_dir)
46 | else:
47 | gt_dirs.append(None)
48 | names.append(challenge + "_" + sequence)
49 | return res_dirs, gt_dirs, names
50 |
51 |
52 | def read_tracking_file(
53 | path: str,
54 | ):
55 | """
56 | Reads a text file representing an acyclic graph for the whole video.
57 | Every line corresponds to a single track that is encoded by four numbers
58 | separated by a space:
59 | L B E P where
60 | L - a unique label of the track (label of markers, 16-bit positive value)
61 | B - a zero-based temporal index of the frame in which the track begins
62 | E - a zero-based temporal index of the frame in which the track ends
63 | P - label of the parent track (0 is used when no parent is defined)
64 |
65 | Args:
66 | path: Path to the text file.
67 |
68 | Returns:
69 | A numpy array of shape (N, 4) where N is the number of tracks.
70 | Each row represents a track and contains four numbers: L B E P
71 | """
72 | if not exists(path):
73 | return None
74 | with open(path, "r", encoding="utf-8") as f:
75 | lines = f.readlines()
76 | if len(lines) == 0:
77 | return np.zeros((0, 4))
78 |     separator = " " if " " in lines[0] else "\t"
79 |     lines = [x.strip().split(separator) for x in lines]
80 | lines = [[int(y) for y in x if y != ""] for x in lines]
81 | return np.array(lines)
82 |
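# A toy man_track.txt and the array it parses to (values invented for illustration):
#
#     1 0 10 0      -> track 1 spans frames 0..10, no parent
#     2 11 20 1     -> track 2 spans frames 11..20, parent track 1
#     3 11 20 1     -> track 3 spans frames 11..20, parent track 1
#
#     read_tracking_file("man_track.txt")
#     # -> array([[ 1,  0, 10,  0],
#     #           [ 2, 11, 20,  1],
#     #           [ 3, 11, 20,  1]])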
83 |
84 | def parse_masks(
85 | directory: str
86 | ):
87 | """
88 |     Reads all frame files in a directory and returns a sorted list of frame paths.
89 |
90 | Args:
91 | directory: The directory to read.
92 |
93 | Returns:
94 | A sorted list of frame paths.
95 | """
96 | files = sorted(os.listdir(directory))
97 | files = [x for x in files if x.endswith(".tif")]
98 | _files = []
99 | for x in files:
100 | if x.count("_") == 3:
101 | # This is a 3D mask file with slices. Remove the slice number.
102 | x = "_".join(x.split("_")[0:3]) + ".tif"
103 | if x not in _files:
104 | _files.append(join(directory, x))
105 | files = sorted(_files)
106 | return files
107 |
--------------------------------------------------------------------------------
/ctc_metrics/utils/handle_results.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | def print_results(results: dict):
4 | """
5 | Prints the results in a nice table.
6 |
7 | Args:
8 | results: A dictionary containing the results.
9 | """
10 |
11 | def print_line(metrics: dict):
12 | """
13 | Prints a line of the table.
14 |
15 | Args:
16 |             metrics: A dictionary containing the metric values for the line.
17 | """
18 |
19 | print(*[f"{k}: {'N/A' if v is None else float(v):.5},\t" for k, v
20 | in metrics.items()])
21 |
22 | if isinstance(results, dict):
23 | print_line(results)
24 | elif isinstance(results, list):
25 | for res in results:
26 | print(res[0], end=":\t\t")
27 | print_line(res[1])
28 |
29 |
30 | def store_results(
31 | path: str,
32 | results: dict,
33 | delimiter: str = ";"):
34 | """
35 | Stores the results in a csv file.
36 |
37 | Args:
38 | path: The path to the csv file.
39 | results: A dictionary containing the results.
40 | delimiter: The delimiter for the csv file.
41 | """
42 | if not path.endswith(".csv"):
43 | path += ".csv"
44 | if isinstance(results, dict):
45 | keys = results.keys()
46 | with open(path, "w+", encoding="utf-8") as f:
47 | f.write(delimiter.join(keys) + "\n")
48 | f.write(delimiter.join([str(v) for v in results.values()]) + "\n")
49 | elif isinstance(results, list):
50 | keys = results[0][1].keys()
51 | with open(path, "w+", encoding="utf-8") as f:
52 | f.write('dataset'+delimiter+delimiter.join(keys) + "\n")
53 | for dataset, res in results:
54 | f.write(dataset+delimiter)
55 | f.write(delimiter.join([str(v) for v in res.values()]) + "\n")
56 |
--------------------------------------------------------------------------------
/ctc_metrics/utils/representations.py:
--------------------------------------------------------------------------------
1 | import os.path
2 |
3 | import numpy as np
4 | import tifffile as tiff
5 | from sklearn.metrics import confusion_matrix
6 | from scipy.sparse import lil_array
7 |
8 |
9 | def track_confusion_matrix(
10 | labels_ref: list,
11 | labels_comp: list,
12 | mapped_ref: list,
13 | mapped_comp: list
14 | ):
15 | """
16 | Computes the confusion matrix for the input data.
17 |
18 | Args:
19 | labels_ref: The labels of the ground truth masks.
20 | labels_comp: The labels of the result masks.
21 | mapped_ref: The matched labels of the ground truth masks.
22 | mapped_comp: The matched labels of the result masks.
23 |
24 | Returns:
25 |         The confusion matrix. The size of the confusion matrix is
26 | max_label_ref + 1 x max_label_comp + 1
27 | where [:, 0] contains the false negatives and [0, :] contains the
28 | false positives. The rest of the matrix contains the true positives.
29 |
30 | """
31 | max_label_ref = int(np.max(np.concatenate(labels_ref)))
32 | max_label_comp = int(np.max(np.concatenate(labels_comp)))
33 |
34 | # Gather association data
35 | track_intersection = np.zeros((max_label_ref + 1, max_label_comp + 1))
36 |
37 | for ref, comp, m_ref, m_comp in zip(
38 | labels_ref, labels_comp, mapped_ref, mapped_comp):
39 | # Fill track intersection matrix
40 | ref = np.asarray(ref)
41 | comp = np.asarray(comp).astype(int)
42 | m_ref = np.asarray(m_ref).astype(int)
43 | m_comp = np.asarray(m_comp).astype(int)
44 | if len(m_ref) > 0:
45 | track_intersection[m_ref, m_comp] += 1
46 | fna = ref[np.isin(ref, m_ref, invert=True)]
47 | track_intersection[fna, 0] += 1
48 | fpa = comp[np.isin(comp, m_comp, invert=True)].astype(int)
49 | track_intersection[0, fpa] += 1
50 |
51 | return track_intersection
52 |
53 |
54 | def match(
55 | ref_path: str,
56 | comp_path: str
57 | ):
58 | """
59 | Matches the labels of the masks from the reference and computed result path.
60 | A label is matched if the intersection of a computed and reference mask is
61 | greater than 50% of the area of the reference mask.
62 |
63 | Args:
64 | ref_path: Path to the reference mask.
65 | comp_path: Path to the computed mask.
66 |
67 | Returns:
68 | A tuple of five numpy arrays. The first array contains the existing
69 | labels in the reference mask. The second array contains the
70 | existing labels in the computed mask. The third array contains the
71 |         matched labels in the reference mask. The fourth array contains
72 | the corresponding matched labels in the computed mask. The fifth
73 | array contains the intersection over union (IoU) for each matched
74 | label pair.
75 | """
76 | # Read the input data
77 | if comp_path is None:
78 | map_ref = tiff.imread(ref_path)
79 | labels_ref = np.unique(map_ref)
80 | labels_ref = labels_ref[labels_ref != 0]
81 | return labels_ref.tolist(), [], [], [], []
82 | if ref_path is None:
83 | # For trivial cases where only one mask should be analysed
84 | ref_path = comp_path
85 | if os.path.exists(ref_path):
86 | # No slices
87 | map_ref = tiff.imread(ref_path)
88 | map_com = tiff.imread(comp_path)
89 | else:
90 | # Slices in 3D dataset segmentation data
91 | dirname, base = os.path.dirname(ref_path), os.path.basename(ref_path)
92 | base = base.replace(".tif", "")
93 | slice_files = [x for x in os.listdir(dirname) if
94 | x.endswith(".tif") and x.startswith(base)]
95 | slice_files = sorted(slice_files)
96 | map_ref = [tiff.imread(os.path.join(dirname, x)) for x in slice_files]
97 | map_ref = np.stack(map_ref, axis=0)
98 | slices = [x.replace(base+"_", "").replace(".tif", "")
99 | for x in slice_files]
100 | slices = [int(x) for x in slices]
101 | map_com = tiff.imread(comp_path)
102 | _map_com = [map_com[x] for x in slices]
103 | map_com = np.stack(_map_com, axis=0)
104 | # Get the labels of the two masks (including background label 0)
105 | labels_ref, labels_comp = np.unique(map_ref), np.unique(map_com)
106 | if ref_path == comp_path:
107 | # For trivial cases where only one mask should be analysed
108 | iou = np.ones(len(labels_ref))
109 | labels_ref = labels_ref[labels_ref != 0]
110 | labels_comp = labels_comp[labels_comp != 0]
111 | return labels_ref.tolist(), labels_comp.tolist(), labels_ref.tolist(),\
112 | labels_comp.tolist(), iou.tolist()
113 | # Add offset to separate the labels of the two masks
114 | offset = int(np.max(labels_ref) + 1)
115 | map_com += offset
116 | # Compute the confusion matrix
117 | cm = confusion_matrix(map_ref.flatten(), map_com.flatten())
118 | sum_ref = np.sum(cm, axis=1, keepdims=True)
119 | sum_comp = np.sum(cm, axis=0, keepdims=True)
120 | # Compute the intersection over reference
121 | intersection_over_ref = cm / np.maximum(sum_ref, 1)
122 | # Compute the intersection over union (relevant to calculate SEG)
123 | intersection_over_union = cm / np.maximum(sum_ref + sum_comp - cm, 1)
124 | # Remove the background label and redundant parts of the matrix
125 | intersection_over_ref = \
126 | intersection_over_ref[1:len(labels_ref), 1 + len(labels_ref):]
127 | intersection_over_union = \
128 | intersection_over_union[1:len(labels_ref), 1 + len(labels_ref):]
129 | # Find matches according to AOGM (min 50% of ref needs to be covered)
130 | intersection_over_ref[intersection_over_ref <= 0.5] = 0
131 | intersection_over_ref[intersection_over_ref > 0.5] = 1
132 | # Create mapping between reference and computed labels
133 | rows, cols = np.nonzero(intersection_over_ref)
134 | labels_ref = labels_ref[1:]
135 | labels_comp = labels_comp[1:]
136 | mapped_ref = labels_ref[rows].tolist()
137 | mapped_comp = labels_comp[cols].tolist()
138 | iou = intersection_over_union[rows, cols].tolist()
139 | assert np.unique(mapped_ref).size == len(mapped_ref), \
140 | f"Reference node assigned to multiple computed nodes! " \
141 | f"{ref_path} {labels_ref, labels_comp, mapped_ref, mapped_comp}"
142 | labels_ref = labels_ref.tolist()
143 | labels_comp = labels_comp.tolist()
144 | return labels_ref, labels_comp, mapped_ref, mapped_comp, iou
145 |
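# Worked example of the matching rule (toy numbers): a reference label covering
# 100 pixels is matched to a computed label only if their overlap exceeds 50 pixels
# (intersection over reference > 0.5); the IoU reported for such a pair is
# overlap / (ref_area + comp_area - overlap), e.g. 60 / (100 + 120 - 60) = 0.375.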
146 |
147 | def create_edge_mapping(
148 | tracks: np.ndarray,
149 | labels: list,
150 | V_tp: np.ndarray,
151 | cum_inds: np.ndarray,
152 | ):
153 | """
154 |     Creates the edge mapping for the input tracks. The edge mapping is an
155 |     np.ndarray with the following style:
156 | [[ind1, id1, det_test1, t1, ind2, id2, det_test2, t2, semantic]]
157 | where ind1 and ind2 are the indices of the nodes in V_tp, id1 and
158 | id2 are the labels of the vertices, det_test1 and det_test2 are the
159 | detection test of the vertices, t1 and t2 are the time steps of the
160 | vertices and semantic is the semantic label of the edge (0 for track link,
161 | 1 for parent link).
162 |
163 | Args:
164 | tracks: The tracks. If None, no edges will be created
165 | labels: The labels of the ground truth masks.
166 | V_tp: The detection test matrix.
167 | cum_inds: The cumulative indices of the vertices per frame.
168 |
169 | Returns:
170 | The edge mapping.
171 | """
172 | if tracks is None:
173 | return np.zeros((0, 9))
174 | all_edges = []
175 | # Add track links
176 | ind_v = 0
177 | current_t = 0
178 | for l_gt1, l_gt2 in zip(labels[:-1], labels[1:]):
179 | l_gt1, l_gt2 = np.array(l_gt1), np.array(l_gt2)
180 | mapping = l_gt1[:, None] == l_gt2[None, :]
181 | ind1, ind2 = np.where(mapping)
182 | id1, id2 = l_gt1[ind1], l_gt2[ind2]
183 | t1, t2 = np.ones_like(id1) * current_t, np.ones_like(id1) * current_t + 1
184 | ind1, ind2 = ind1 + ind_v, ind2 + ind_v + len(l_gt1)
185 | edges = np.stack([
186 | ind1, id1, V_tp[ind1], t1,
187 | ind2, id2, V_tp[ind2], t2,
188 | np.zeros_like(id1)], axis=1)
189 | all_edges.append(edges)
190 | ind_v += len(l_gt1)
191 | current_t += 1
192 | # Add track and parent links from track file
193 | for track in tracks:
194 | label2, birth2, _, parent2 = track
195 | if parent2 == 0:
196 | continue
197 | label1, _, end1, _ = tracks[tracks[:, 0] == parent2][0]
198 | ind1 = np.argwhere(labels[end1] == label1)[0] + cum_inds[end1]
199 | ind2 = np.argwhere(labels[birth2] == label2)[0] + cum_inds[birth2]
200 | ind1, ind2 = int(ind1), int(ind2)
201 | edges = np.asarray(
202 | [ind1, label1, int(V_tp[ind1]), end1, ind2, label2, int(V_tp[ind2]), birth2, 1]
203 | )[None, :]
204 | all_edges.append(edges)
205 | return np.concatenate(all_edges, axis=0).astype(int)
206 |
207 |
208 | def create_detection_test_matrix(
209 | num_V_C: int,
210 | num_V_R: int,
211 | labels_ref: list,
212 | labels_comp: list,
213 | mapped_ref: list,
214 | mapped_comp: list,
215 | ):
216 | """
217 | Creates the detection test matrix for the input tracks. The detection test
218 | is stored as a sparse matrix with num_V_C rows and num_V_R columns. The
219 | matrix is filled with 1 if the vertex is a match and 0 otherwise.
220 |
221 | Args:
222 | num_V_C: The number of vertices in the computed graph.
223 | num_V_R: The number of vertices in the reference graph.
224 | labels_ref: The labels of the ground truth masks.
225 | labels_comp: The labels of the result masks.
226 | mapped_ref: The matched labels of the ground truth masks.
227 | mapped_comp: The matched labels of the result masks.
228 |
229 | Returns:
230 | The detection test matrix.
231 | """
232 | det_test = lil_array((num_V_C, num_V_R))
233 | ind_v_r = 0
234 | ind_v_c = 0
235 | for l_gt, l_res, m_gt, m_res in zip(
236 | labels_ref, labels_comp, mapped_ref, mapped_comp
237 | ):
238 | i_r = np.searchsorted(l_gt, m_gt, sorter=np.argsort(l_gt)) + ind_v_r
239 | i_c = np.searchsorted(l_res, m_res, sorter=np.argsort(l_res)) + ind_v_c
240 | det_test[i_c, i_r] = 1
241 | ind_v_r += len(l_gt)
242 | ind_v_c += len(l_res)
243 | return det_test
244 |
245 |
246 | def count_acyclic_graph_correction_operations(
247 | ref_tracks: np.ndarray,
248 | comp_tracks: np.ndarray,
249 | labels_ref: list,
250 | labels_comp: list,
251 | mapped_ref: list,
252 | mapped_comp: list,
253 | ):
254 | """
255 | Calculates the necessary operations to correct the result tracks to match
256 | the ground truth tracks. The operations are counted according to:
257 | Cell Tracking Accuracy Measurement Based on Comparison of Acyclic
258 |         Oriented Graphs; Matula et al. 2015
259 |
260 | Args:
261 | comp_tracks: The result tracks.
262 | ref_tracks: The ground truth tracks.
263 | labels_ref: The labels of the ground truth masks.
264 | labels_comp: The labels of the result masks.
265 | mapped_ref: The matched labels of the ground truth masks.
266 | mapped_comp: The matched labels of the result masks.
267 |
268 | Returns:
269 | NS, FN, FP, ED, EA, EC, num_vertices, num_edges
270 | """
271 | # Count vertices in the input data
272 | stats = {}
273 | stats["num_vertices_R"] = np.sum([len(l) for l in labels_ref])
274 | stats["num_vertices_C"] = np.sum([len(l) for l in labels_comp])
275 | stats["num_vertices"] = stats["num_vertices_R"]
276 | # Cumulate the number of vertices per frame
277 | cum_inds_R = np.cumsum([0] + [len(l) for l in labels_ref])
278 | cum_inds_C = np.cumsum([0] + [len(l) for l in labels_comp])
279 | # Perform "detection test"
280 | det_test = create_detection_test_matrix(
281 | stats["num_vertices_C"], stats["num_vertices_R"],
282 | labels_ref, labels_comp, mapped_ref, mapped_comp
283 | )
284 | # Classify vertices to tp, fp, fn and vs
285 | assignments_r = np.sum(det_test, axis=0)
286 | assignments_c = np.sum(det_test, axis=1)
287 | assert np.max(assignments_r) <= 1
288 | V_tp_r = assignments_r == 1
289 | V_tp_c = assignments_c == 1
290 | stats["TP"] = np.sum(V_tp_r)
291 | stats["FN"] = np.sum(~V_tp_r)
292 | stats["FP"] = np.sum(assignments_c == 0)
293 | stats["VS"] = np.sum(assignments_c > 1)
294 | stats["NS"] = stats["TP"] - (np.sum(V_tp_c) + stats["VS"])
295 | # Mapping from reference to computed
296 | det_test[~V_tp_c, :] = 0
297 | comp, ref = det_test.nonzero()
298 | assert len(comp) == np.sum(V_tp_c)
299 | comp_to_ref = np.zeros(stats["num_vertices_C"]) * np.nan
300 | comp_to_ref[comp] = ref
301 | assert np.all(np.sort(comp) == comp)
302 | # Create edge mapping ...
303 | # ... for reference
304 | E_R = create_edge_mapping(ref_tracks, labels_ref, V_tp_r, cum_inds_R)
305 | # ... for computed
306 | E_C = create_edge_mapping(comp_tracks, labels_comp, V_tp_c, cum_inds_C)
307 | # Stop calculation if no edges are present in the computed graph (e.g. if
308 | # only segmentation is present)
309 | if E_C.size == 0:
310 | stats["ED"] = 0
311 | stats["EA"] = len(E_R)
312 | stats["EC"] = 0
313 | stats["num_edges"] = len(E_R)
314 | return stats
315 | # Reduce the computed graph to an induced subgraph with only uniquely
316 | # matched vertices
317 | E_C = E_C[(E_C[:, 2] * E_C[:, 6]) == 1]
318 | # Add mapping to Reference graph such that E_C is:
319 | # ind1,id1,det_tst1,t1,ind2,id2,det_test2,t2,sem,ind1_R,ind2_R
320 | E_C = np.concatenate([
321 | E_C,
322 | comp_to_ref[E_C[:, 0]][:, None].astype(int),
323 | comp_to_ref[E_C[:, 4]][:, None].astype(int)
324 | ], axis=1)
325 | assert not np.any(np.isnan(E_C))
326 |     # Map the computed edges to the reference edges
327 | unique_edge_ids_R = (E_R[:, 0] * 10 ** len(str(stats["num_vertices_R"]))
328 | + E_R[:, 4])
329 | unique_edge_ids_C = (E_C[:, 9] * 10 ** len(str(stats["num_vertices_R"]))
330 | + E_C[:, 10])
331 | if unique_edge_ids_R.size > 0:
332 | assert np.max(np.unique(unique_edge_ids_R, return_counts=True)[1]) == 1
333 | if unique_edge_ids_C.size > 0:
334 | assert np.max(np.unique(unique_edge_ids_C, return_counts=True)[1]) == 1
335 | isin_R = np.isin(unique_edge_ids_C, unique_edge_ids_R)
336 | isin_C = np.isin(unique_edge_ids_R, unique_edge_ids_C)
337 | E_R_mapped = E_R[isin_C]
338 | E_C_mapped = E_C[isin_R]
339 | E_R_mapped = E_R_mapped[np.argsort(unique_edge_ids_R[isin_C])]
340 | E_C_mapped = E_C_mapped[np.argsort(unique_edge_ids_C[isin_R])]
341 | # Calculate relevant edge statistics
342 | stats["ED"] = len(E_C[~isin_R])
343 | stats["EA"] = np.sum(~isin_C)
344 | stats["EC"] = len(E_C_mapped[E_C_mapped[:, 8] != E_R_mapped[:, 8]])
345 | stats["num_edges"] = len(E_R)
346 | return stats
347 |
348 |
349 | def assign_comp_to_ref(
350 | labels_ref: list,
351 | mapped_ref: list,
352 | mapped_comp: list,
353 | ):
354 | """
355 | Assigns the computed labels to the reference labels.
356 |
357 | Args:
358 | labels_ref: The labels of the ground truth masks.
359 | mapped_ref: The matched labels of the ground truth masks.
360 | mapped_comp: The matched labels of the result masks.
361 |
362 | Returns:
363 | A dictionary with the reference labels as keys and the computed
364 | labels as values.
365 | """
366 | all_labels = np.unique(np.concatenate(labels_ref))
367 | max_frame = len(labels_ref)
368 | track_assignments = {
369 | k: np.zeros(max_frame) * np.nan for k in all_labels
370 | }
371 | frame = 0
372 | for l_gt, m_gt, m_res in zip(
373 | labels_ref, mapped_ref, mapped_comp
374 | ):
375 | for i in l_gt:
376 | if i in m_gt:
377 | m = m_res[int(np.argwhere(np.asarray(m_gt) == i)[0])]
378 | track_assignments[i][frame] = m
379 | counts = np.sum(np.asarray(m_res) == m)
380 | if counts > 1:
381 | track_assignments[i][frame] = 0
382 | else:
383 | track_assignments[i][frame] = 0
384 | frame += 1
385 | return track_assignments
386 |
387 |
388 | def merge_tracks(
389 | tracks: np.ndarray,
390 | labels: list,
391 | mapped: list
392 | ):
393 | """
394 |     Merges tracks that belong to the same cell trajectory. This can happen if
395 | a cell is invisible for a few frames and the tracking algorithm splits the
396 | track into two or more tracks. The resulting data is a relabelled version
397 | of the input data.
398 |
399 | Args:
400 | tracks: The tracks.
401 | labels: The labels of the masks.
402 | mapped: The matched labels of the masks.
403 |
404 | Returns:
405 |         The same data as the input, but relabelled.
406 | """
407 | tracks = np.copy(tracks)
408 | labels = [np.copy(x) for x in labels]
409 | mapped = [np.copy(x) for x in mapped]
410 | # Find tracks that belong together
411 | parents, cnts = np.unique(tracks[tracks[:, 3] > 0, 3], return_counts=True)
412 | parents = parents[cnts == 1].tolist()
413 | children = [tracks[tracks[:, 3] == p][0, 0] for p in parents]
414 |
415 | # Merge tracks
416 | mapping = {x: x for x in np.unique(tracks[:, 0])}
417 | for parent, child in zip(parents, children):
418 | for k, v in mapping.items():
419 | if v == child:
420 | mapping[k] = mapping[parent]
421 |
422 | # Relabel such that the labels are continuous
423 | remaining_labels = sorted(np.unique(list(mapping.values())))
424 | new_labels = list(range(1, len(remaining_labels) + 1))
425 | for k, v in mapping.items():
426 | new_v = new_labels[remaining_labels.index(v)]
427 | mapping[k] = new_v
428 |
429 | lut = np.zeros(np.max(list(mapping.keys())) + 1, dtype=int)
430 | for k, v in mapping.items():
431 | lut[k] = v
432 |
433 | # Relabel tracks
434 | new_tracks = np.copy(tracks)
435 | for k, v in mapping.items():
436 | new_tracks[tracks[:, 0] == k, 0] = v
437 | new_tracks[tracks[:, 3] == k, 3] = v
438 | ids, counts = np.unique(new_tracks[:, 0], return_counts=True)
439 | ids = ids[counts > 1]
440 | for i in ids:
441 | inds = np.argwhere(new_tracks[:, 0] == i).flatten()
442 | start = np.min(new_tracks[inds, 1])
443 | end = np.max(new_tracks[inds, 2])
444 | new_tracks[inds, 1] = start
445 | new_tracks[inds, 2] = end
446 | new_tracks = np.delete(new_tracks, inds[1:], axis=0)
447 |
448 | # Relabel labels and mapped
449 | new_labels = [[lut[x] for x in i] for i in labels]
450 | new_mapped = [[lut[x] for x in i] for i in mapped]
451 |
452 | return new_tracks, new_labels, new_mapped
453 |
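# A minimal, hypothetical call to merge_tracks. Track 2 is track 1's only
# child, so the parent link is treated as an interrupted trajectory rather
# than a division and the two tracks are merged under one continuous label;
# the labels and mapped lists are relabelled through the same lookup table:
#
#     tracks = np.array([
#         [1, 0, 3, 0],   # label 1, frames 0-3, no parent
#         [2, 4, 7, 1],   # label 2, frames 4-7, parent 1 (its only child)
#     ])
#     labels = [[1], [1], [1], [1], [2], [2], [2], [2]]
#     new_tracks, new_labels, _ = merge_tracks(tracks, labels, mapped=labels)
#     # new_tracks -> [[1, 0, 7, 0]]
#     # new_labels -> [[1], [1], [1], [1], [1], [1], [1], [1]]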
454 |
455 |
456 |
457 |
--------------------------------------------------------------------------------
/py-ctcmetrics.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/requirements.txt
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name="py-ctcmetrics",
5 | version="1.1.0",
6 | packages=find_packages(),
7 | install_requires=[
8 | "numpy",
9 | "opencv-python",
10 | "scikit-learn",
11 | "scipy",
12 | "tifffile",
13 | "imagecodecs",
14 | "pandas"
15 | ],
16 | author="Timo Kaiser",
17 | author_email="kaiser@tnt.uni-hannover.de",
18 | description="Metrics for Cell Tracking Challenges",
19 | long_description="Metrics for Cell Tracking Challenges",
20 | entry_points={
21 | 'console_scripts': [
22 | 'ctc_evaluate = ctc_metrics.scripts.evaluate:main',
23 | 'ctc_validate = ctc_metrics.scripts.validate:main',
24 | 'ctc_noise = ctc_metrics.scripts.noise:main',
25 | 'ctc_visualize = ctc_metrics.scripts.visualize:main',
26 | # Add more scripts here if needed
27 | ],
28 | },
29 | )
30 |
--------------------------------------------------------------------------------
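The entry_points block above wires each console command to a main() function in
ctc_metrics.scripts, so after the package is installed (for example with
`pip install .`) the commands become available on the PATH. A minimal,
hypothetical driver that does the same thing as the ctc_evaluate command,
assuming main() parses its own command-line arguments:

# Hypothetical equivalent of the ctc_evaluate console script declared in
# setup.py: it only dispatches to the referenced entry point, which is assumed
# to read its arguments from the command line itself.
from ctc_metrics.scripts.evaluate import main

if __name__ == "__main__":
    main()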
/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/test/__init__.py
--------------------------------------------------------------------------------
/test/prepare_test_data.sh:
--------------------------------------------------------------------------------
1 | DOWNLOAD_LINK="https://www.tnt.uni-hannover.de/de/project/MPT/data/CTC/test_dataset_ctc.zip"
2 | wget -q -O test_dataset_ctc.zip $DOWNLOAD_LINK
3 | unzip -q test_dataset_ctc.zip
4 |
--------------------------------------------------------------------------------
/test/test_metrics.py:
--------------------------------------------------------------------------------
1 | from test.utils import test_seq_res, test_seq_gt, test_det, test_seg, \
2 | test_tra, test_ct, test_tf, test_bc0, test_bc1, test_bc2, test_bc3, test_cca
3 | from ctc_metrics import evaluate_sequence
4 |
5 |
6 | def test_metric_det():
7 | max_d = 0.00001
8 | metrics = evaluate_sequence(
9 | test_seq_res, test_seq_gt, metrics=["DET"]
10 | )
11 | assert abs(metrics["DET"] - test_det) < max_d, \
12 | f"{metrics['DET']} != {test_det}"
13 |
14 |
15 | def test_metric_seg():
16 | max_d = 0.00001
17 | metrics = evaluate_sequence(
18 | test_seq_res, test_seq_gt, metrics=["SEG"]
19 | )
20 | assert abs(metrics["SEG"] - test_seg) < max_d, \
21 | f"{metrics['SEG']} != {test_seg}"
22 |
23 |
24 | def test_metric_tra():
25 | max_d = 0.00001
26 | metrics = evaluate_sequence(
27 | test_seq_res, test_seq_gt, metrics=["TRA"]
28 | )
29 | assert abs(metrics["TRA"] - test_tra) < max_d, \
30 | f"{metrics['TRA']} != {test_tra}"
31 |
32 |
33 | def test_metric_ct():
34 | max_d = 0.00001
35 | metrics = evaluate_sequence(
36 | test_seq_res, test_seq_gt, metrics=["CT"]
37 | )
38 | assert abs(metrics["CT"] - test_ct) < max_d, \
39 | f"{metrics['CT']} != {test_ct}"
40 |
41 |
42 | def test_metric_tf():
43 | max_d = 0.00001
44 | metrics = evaluate_sequence(
45 | test_seq_res, test_seq_gt, metrics=["TF"]
46 | )
47 | assert abs(metrics["TF"] - test_tf) < max_d, \
48 | f"{metrics['TF']} != {test_tf}"
49 |
50 |
51 | def test_metric_bc():
52 | max_d = 0.00001
53 | metrics = evaluate_sequence(
54 | test_seq_res, test_seq_gt, metrics=["BC"]
55 | )
56 | assert abs(metrics["BC(0)"] - test_bc0) < max_d, \
57 | f"{metrics['BC(0)']} != {test_bc0}"
58 | assert abs(metrics["BC(1)"] - test_bc1) < max_d, \
59 | f"{metrics['BC(1)']} != {test_bc1}"
60 | assert abs(metrics["BC(2)"] - test_bc2) < max_d, \
61 | f"{metrics['BC(2)']} != {test_bc2}"
62 | assert abs(metrics["BC(3)"] - test_bc3) < max_d, \
63 | f"{metrics['BC(3)']} != {test_bc3}"
64 |
65 |
66 | def test_metric_cca():
67 | metrics = evaluate_sequence(
68 | test_seq_res, test_seq_gt, metrics=["CCA"]
69 | )
70 | assert abs(metrics["CCA"] - test_cca) < 0.00001, \
71 | f"{metrics['CCA']} != {test_cca}"
72 |
73 |
--------------------------------------------------------------------------------
/test/test_validate.py:
--------------------------------------------------------------------------------
1 | from test.utils import test_seq_res
2 | from ctc_metrics import validate_sequence
3 |
4 |
5 | def test_validate_sequence():
6 | res = validate_sequence(test_seq_res)
7 | assert bool(res["Valid"]) is True, f"{bool(res['Valid'])} != True"
8 |
--------------------------------------------------------------------------------
/test/utils.py:
--------------------------------------------------------------------------------
1 |
2 | test_root = "test_dataset_ctc/train"
3 | test_seq_res = "test_dataset_ctc/train/BF-C2DL-HSC/01_RES"
4 | test_seq_gt = "test_dataset_ctc/train/BF-C2DL-HSC/01_GT"
5 |
6 |
7 | test_det = 0.95377521
8 | test_seg = 0.903990207454052
9 | test_tra = 0.9588616
10 | test_ct = 0.0190476
11 | test_tf = 0.72417699
12 | test_bc0 = 0.434782
13 | test_bc1 = 0.47826086
14 | test_bc2 = 0.47826086
15 | test_bc3 = 0.47826086
16 | test_cca = 0.060606060606061
17 |
18 |
--------------------------------------------------------------------------------
/third_party/Evaluation software.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Evaluation software.pdf
--------------------------------------------------------------------------------
/third_party/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 2-Clause License
2 |
3 | Copyright (c) 2023, Timo Kaiser
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice, this
9 | list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright notice,
12 | this list of conditions and the following disclaimer in the documentation
13 | and/or other materials provided with the distribution.
14 |
15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 |
--------------------------------------------------------------------------------
/third_party/Linux/DETMeasure:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Linux/DETMeasure
--------------------------------------------------------------------------------
/third_party/Linux/SEGMeasure:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Linux/SEGMeasure
--------------------------------------------------------------------------------
/third_party/Linux/TRAMeasure:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Linux/TRAMeasure
--------------------------------------------------------------------------------
/third_party/Win/DETMeasure.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Win/DETMeasure.exe
--------------------------------------------------------------------------------
/third_party/Win/SEGMeasure.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Win/SEGMeasure.exe
--------------------------------------------------------------------------------
/third_party/Win/TRAMeasure.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Win/TRAMeasure.exe
--------------------------------------------------------------------------------
/third_party/Win/cbia.lib.i3dalgo.dyn.rel.x64.15.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Win/cbia.lib.i3dalgo.dyn.rel.x64.15.dll
--------------------------------------------------------------------------------
/third_party/Win/cbia.lib.i3dcore.dyn.rel.x64.15.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Win/cbia.lib.i3dcore.dyn.rel.x64.15.dll
--------------------------------------------------------------------------------
/third_party/Win/cbia.lib.tiff.dyn.rel.x64.15.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Win/cbia.lib.tiff.dyn.rel.x64.15.dll
--------------------------------------------------------------------------------
/third_party/Win/cbia.lib.z.dyn.rel.x64.15.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Win/cbia.lib.z.dyn.rel.x64.15.dll
--------------------------------------------------------------------------------
/third_party/Win/wxbase311u_vc_x64_cbia.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CellTrackingChallenge/py-ctcmetrics/0b3acb005199c71dcf516c533de35dfae176aeaf/third_party/Win/wxbase311u_vc_x64_cbia.dll
--------------------------------------------------------------------------------