├── tests
├── __init__.py
├── fixtures
│ ├── __init__.py
│ ├── roi.py
│ ├── plots.py
│ └── helpers.py
├── test_unit
│ ├── __init__.py
│ ├── test_validators
│ │ ├── __init__.py
│ │ └── test_array_validators.py
│ ├── test_napari_plugin
│ │ └── test_meta_widget.py
│ ├── test_deprecations.py
│ ├── test_roi
│ │ ├── test_normal.py
│ │ ├── test_polygon_boundary.py
│ │ ├── test_instantiate.py
│ │ ├── test_plot.py
│ │ ├── test_points_within_roi.py
│ │ └── test_conditions.py
│ ├── conftest.py
│ ├── test_cli_entrypoint.py
│ ├── test_logging.py
│ ├── test_kinematics
│ │ └── test_kinetic_energy.py
│ ├── test_plots
│ │ └── test_trajectory.py
│ └── test_reports.py
├── test_integration
│ ├── __init__.py
│ ├── test_filtering.py
│ ├── test_netcdf.py
│ ├── test_kinematics_vector_transform.py
│ └── test_io.py
└── conftest.py
├── movement
├── io
│ └── __init__.py
├── napari
│ ├── __init__.py
│ ├── napari.yaml
│ ├── meta_widget.py
│ └── convert.py
├── utils
│ ├── __init__.py
│ ├── reports.py
│ └── logging.py
├── validators
│ ├── __init__.py
│ └── arrays.py
├── plots
│ ├── __init__.py
│ └── trajectory.py
├── roi
│ ├── __init__.py
│ ├── conditions.py
│ ├── polygon.py
│ └── line.py
├── __init__.py
├── kinematics
│ └── __init__.py
└── cli_entrypoint.py
├── docs
├── source
│ ├── _static
│ │ ├── data_icon.png
│ │ ├── Forward-Vector.png
│ │ ├── dark-logo-niu.png
│ │ ├── dark-logo-swc.png
│ │ ├── dark-logo-ucl.png
│ │ ├── light-logo-niu.png
│ │ ├── light-logo-swc.png
│ │ ├── light-logo-ucl.png
│ │ ├── movement_logo.png
│ │ ├── dark-logo-gatsby.png
│ │ ├── dataset_structure.png
│ │ ├── light-logo-gatsby.png
│ │ ├── movement_favicon.png
│ │ ├── movement_overview.png
│ │ ├── Cartesian-vs-Polar.png
│ │ ├── Vector-Subtraction.png
│ │ ├── dark-wellcome-logo.png
│ │ ├── light-wellcome-logo.png
│ │ ├── napari_bboxes_layer.png
│ │ ├── napari_plugin_data_tracks.png
│ │ ├── napari_plugin_video_reader.png
│ │ ├── napari_plugin_video_slider.png
│ │ ├── napari_points_layer_tooltip.png
│ │ ├── blog_posts
│ │ │ ├── roadmap-jan-feb-2025.png
│ │ │ └── displacement_old_vs_new.png
│ │ ├── napari_tracks_layer_head_length.png
│ │ ├── js
│ │ │ └── contributors.js
│ │ └── css
│ │ │ └── custom.css
│ ├── community
│ │ ├── contributing.rst
│ │ ├── code-of-conduct.rst
│ │ ├── license.md
│ │ ├── related-projects.md
│ │ ├── index.md
│ │ ├── mission-scope.md
│ │ ├── roadmaps.md
│ │ └── resources.md
│ ├── blog
│ │ ├── index.md
│ │ ├── displacement-vectors.md
│ │ └── movement-v0_0_21.md
│ ├── environment.yml
│ ├── _templates
│ │ ├── autosummary
│ │ │ ├── function.rst
│ │ │ ├── module.rst
│ │ │ └── class.rst
│ │ ├── api_index_head.rst
│ │ ├── footer_start.html
│ │ └── footer_end.html
│ ├── snippets
│ │ └── connect-with-us.md
│ ├── user_guide
│ │ ├── index.md
│ │ └── installation.md
│ └── index.md
├── requirements.txt
├── Makefile
├── make.bat
├── convert_admonitions.py
└── make_api.py
├── examples
├── GALLERY_HEADER.rst
├── advanced
│ └── GALLERY_HEADER.rst
└── load_and_explore_poses.py
├── MANIFEST.in
├── .github
├── dependabot.yml
└── workflows
│ ├── update_contributors_list.yml
│ ├── conda_install_check.yml
│ ├── docs_build_and_deploy.yml
│ └── test_and_deploy.yml
├── .cruft.json
├── CITATION.CFF
├── LICENSE
├── .gitignore
├── .pre-commit-config.yaml
├── README.md
└── pyproject.toml
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/movement/io/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/movement/napari/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/movement/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/fixtures/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_unit/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/movement/validators/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_integration/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_unit/test_validators/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/source/_static/data_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/data_icon.png
--------------------------------------------------------------------------------
/docs/source/_static/Forward-Vector.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/Forward-Vector.png
--------------------------------------------------------------------------------
/docs/source/_static/dark-logo-niu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/dark-logo-niu.png
--------------------------------------------------------------------------------
/docs/source/_static/dark-logo-swc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/dark-logo-swc.png
--------------------------------------------------------------------------------
/docs/source/_static/dark-logo-ucl.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/dark-logo-ucl.png
--------------------------------------------------------------------------------
/docs/source/_static/light-logo-niu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/light-logo-niu.png
--------------------------------------------------------------------------------
/docs/source/_static/light-logo-swc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/light-logo-swc.png
--------------------------------------------------------------------------------
/docs/source/_static/light-logo-ucl.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/light-logo-ucl.png
--------------------------------------------------------------------------------
/docs/source/_static/movement_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/movement_logo.png
--------------------------------------------------------------------------------
/docs/source/_static/dark-logo-gatsby.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/dark-logo-gatsby.png
--------------------------------------------------------------------------------
/docs/source/_static/dataset_structure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/dataset_structure.png
--------------------------------------------------------------------------------
/docs/source/_static/light-logo-gatsby.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/light-logo-gatsby.png
--------------------------------------------------------------------------------
/docs/source/_static/movement_favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/movement_favicon.png
--------------------------------------------------------------------------------
/docs/source/_static/movement_overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/movement_overview.png
--------------------------------------------------------------------------------
/docs/source/community/contributing.rst:
--------------------------------------------------------------------------------
1 | .. _target-contributing:
2 | .. include:: ../../../CONTRIBUTING.md
3 | :parser: myst_parser.sphinx_
4 |
--------------------------------------------------------------------------------
/docs/source/_static/Cartesian-vs-Polar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/Cartesian-vs-Polar.png
--------------------------------------------------------------------------------
/docs/source/_static/Vector-Subtraction.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/Vector-Subtraction.png
--------------------------------------------------------------------------------
/docs/source/_static/dark-wellcome-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/dark-wellcome-logo.png
--------------------------------------------------------------------------------
/docs/source/_static/light-wellcome-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/light-wellcome-logo.png
--------------------------------------------------------------------------------
/docs/source/_static/napari_bboxes_layer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/napari_bboxes_layer.png
--------------------------------------------------------------------------------
/docs/source/community/code-of-conduct.rst:
--------------------------------------------------------------------------------
1 | .. _target-code-of-conduct:
2 | .. include:: ../../../CODE_OF_CONDUCT.md
3 | :parser: myst_parser.sphinx_
4 |
--------------------------------------------------------------------------------
/docs/source/_static/napari_plugin_data_tracks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/napari_plugin_data_tracks.png
--------------------------------------------------------------------------------
/docs/source/_static/napari_plugin_video_reader.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/napari_plugin_video_reader.png
--------------------------------------------------------------------------------
/docs/source/_static/napari_plugin_video_slider.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/napari_plugin_video_slider.png
--------------------------------------------------------------------------------
/docs/source/_static/napari_points_layer_tooltip.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/napari_points_layer_tooltip.png
--------------------------------------------------------------------------------
/docs/source/_static/blog_posts/roadmap-jan-feb-2025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/blog_posts/roadmap-jan-feb-2025.png
--------------------------------------------------------------------------------
/docs/source/_static/napari_tracks_layer_head_length.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/napari_tracks_layer_head_length.png
--------------------------------------------------------------------------------
/docs/source/community/license.md:
--------------------------------------------------------------------------------
1 | # License
2 |
3 | [The 3-Clause BSD License](https://opensource.org/license/bsd-3-clause/)
4 |
5 | ```{include} ../../../LICENSE
6 | ```
7 |
--------------------------------------------------------------------------------
/docs/source/_static/blog_posts/displacement_old_vs_new.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuroinformatics-unit/movement/HEAD/docs/source/_static/blog_posts/displacement_old_vs_new.png
--------------------------------------------------------------------------------
/examples/GALLERY_HEADER.rst:
--------------------------------------------------------------------------------
1 | .. _target-examples:
2 |
3 | Examples
4 | --------
5 |
6 | Below is a gallery of examples showcasing the core functionality and concepts
7 | of ``movement``.
8 |
--------------------------------------------------------------------------------
/docs/source/blog/index.md:
--------------------------------------------------------------------------------
1 | # Blog
2 |
3 | ```{postlist}
4 | :list-style: circle
5 | :category:
6 | :date: "%B %d, %Y"
7 | :format: "{date} | {title}, by {author}"
8 | :excerpts:
9 | ```
10 |
--------------------------------------------------------------------------------
/examples/advanced/GALLERY_HEADER.rst:
--------------------------------------------------------------------------------
1 | .. _target-examples-advanced:
2 |
3 | Advanced
4 | ~~~~~~~~
5 |
6 | These examples are intended for experienced users who want to leverage the
7 | full power of ``movement``.
8 |
--------------------------------------------------------------------------------
/docs/source/environment.yml:
--------------------------------------------------------------------------------
1 | # Requirements for Binder
2 | # i.e. what is needed to run the example notebooks
3 | channels:
4 | - conda-forge
5 | dependencies:
6 | - python=3.12
7 | - pytables
8 | - pip:
9 | - movement
10 | - networkx
11 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | -e .[napari]
2 | ablog
3 | linkify-it-py
4 | myst-parser
5 | nbsphinx
6 | pydata-sphinx-theme
7 | setuptools-scm
8 | sphinx
9 | sphinx-autodoc-typehints
10 | sphinx-design
11 | sphinx-gallery
12 | sphinx-notfound-page
13 | sphinx-sitemap
14 |
--------------------------------------------------------------------------------
/docs/source/_templates/autosummary/function.rst:
--------------------------------------------------------------------------------
1 | {{ name | escape | underline}}
2 |
3 | .. currentmodule:: {{ module }}
4 |
5 | .. auto{{ objtype }}:: {{ objname }}
6 |
7 | .. minigallery:: {{ module }}.{{ objname }}
8 | :add-heading: Examples using ``{{ objname }}``
9 |
--------------------------------------------------------------------------------
/movement/plots/__init__.py:
--------------------------------------------------------------------------------
1 | """Plotting utilities for ``movement`` datasets."""
2 |
3 | from movement.plots.occupancy import plot_occupancy
4 | from movement.plots.trajectory import plot_centroid_trajectory
5 |
6 | __all__ = ["plot_occupancy", "plot_centroid_trajectory"]
7 |
--------------------------------------------------------------------------------
/movement/napari/napari.yaml:
--------------------------------------------------------------------------------
1 | name: movement
2 | display_name: movement
3 | contributions:
4 | commands:
5 | - id: movement.make_widget
6 | python_name: movement.napari.meta_widget:MovementMetaWidget
7 | title: movement
8 | widgets:
9 | - command: movement.make_widget
10 | display_name: movement
11 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include *.md
3 | include CITATION.CFF
4 | include movement/napari/napari.yaml
5 | exclude .pre-commit-config.yaml
6 | exclude .cruft.json
7 |
8 | recursive-exclude * __pycache__
9 | recursive-exclude * *.py[co]
10 | recursive-exclude docs *
11 | recursive-exclude examples *
12 | recursive-exclude tests *
13 |
--------------------------------------------------------------------------------
/docs/source/_templates/api_index_head.rst:
--------------------------------------------------------------------------------
1 | ..
2 | This file is auto-generated.
3 |
4 | .. _target-api:
5 |
6 | API reference
7 | =============
8 |
9 | Information on specific functions, classes, and methods.
10 |
11 | .. rubric:: Modules
12 |
13 | .. autosummary::
14 | :toctree: api
15 | :recursive:
16 | :nosignatures:
17 |
--------------------------------------------------------------------------------
/docs/source/community/related-projects.md:
--------------------------------------------------------------------------------
1 | # Related projects
2 |
3 | The following projects cover related needs and have served as inspiration for this project:
4 | * [DLC2Kinematics](https://github.com/AdaptiveMotorControlLab/DLC2Kinematics)
5 | * [PyRat](https://github.com/pyratlib/pyrat)
6 | * [Kino](https://github.com/BrancoLab/Kino)
7 | * [WAZP](https://github.com/SainsburyWellcomeCentre/WAZP)
8 |
--------------------------------------------------------------------------------
/docs/source/_templates/footer_start.html:
--------------------------------------------------------------------------------
1 |
2 | {% trans sphinx_version=sphinx_version|e %}Created using Sphinx {{ sphinx_version }}.{% endtrans %}
3 |
4 |
5 |
6 | {{ _("Built with the") }}
7 | PyData Sphinx Theme
8 | {{ theme_version }}.
9 |
10 |
--------------------------------------------------------------------------------
/movement/roi/__init__.py:
--------------------------------------------------------------------------------
1 | """Utilities for representing and analysing regions of interest."""
2 |
3 | from movement.roi.base import BaseRegionOfInterest
4 | from movement.roi.conditions import compute_region_occupancy
5 | from movement.roi.line import LineOfInterest
6 | from movement.roi.polygon import PolygonOfInterest
7 |
8 | __all__ = [
9 | "compute_region_occupancy",
10 | "LineOfInterest",
11 | "PolygonOfInterest",
12 | "BaseRegionOfInterest",
13 | ]
14 |
--------------------------------------------------------------------------------
/movement/__init__.py:
--------------------------------------------------------------------------------
1 | from importlib.metadata import PackageNotFoundError, version
2 |
3 | from movement.utils.logging import logger
4 |
5 | try:
6 | __version__ = version("movement")
7 | except PackageNotFoundError: # pragma: no cover
8 | # package is not installed
9 | pass
10 |
11 | # set xarray global options
12 | import xarray as xr
13 |
14 | xr.set_options(keep_attrs=True, display_expand_data=False)
15 |
16 | # Configure logging to stderr and a file
17 | logger.configure()
18 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "github-actions" # See documentation for possible values
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "monthly"
12 |
--------------------------------------------------------------------------------
/tests/test_unit/test_napari_plugin/test_meta_widget.py:
--------------------------------------------------------------------------------
1 | """Test the napari plugin meta widget."""
2 |
3 | from movement.napari.meta_widget import MovementMetaWidget
4 |
5 |
6 | def test_meta_widget_instantiation(make_napari_viewer_proxy):
7 | """Test that the meta widget can be properly instantiated."""
8 | viewer = make_napari_viewer_proxy()
9 | meta_widget = MovementMetaWidget(viewer)
10 |
11 | assert len(meta_widget.collapsible_widgets) == 1
12 |
13 | first_widget = meta_widget.collapsible_widgets[0]
14 | assert first_widget._text == "Load tracked data"
15 | assert first_widget.isExpanded()
16 |
--------------------------------------------------------------------------------
/docs/source/_templates/autosummary/module.rst:
--------------------------------------------------------------------------------
1 | {{ fullname | escape | underline }}
2 |
3 | .. rubric:: Description
4 |
5 | .. automodule:: {{ fullname }}
6 |
7 | .. currentmodule:: {{ fullname }}
8 |
9 | {% if classes %}
10 | .. rubric:: Classes
11 |
12 | .. autosummary::
13 | :toctree: .
14 | :nosignatures:
15 | {% for class in classes %}
16 | {{ class.split('.')[-1] }}
17 | {% endfor %}
18 |
19 | {% endif %}
20 |
21 | {% if functions %}
22 | .. rubric:: Functions
23 |
24 | .. autosummary::
25 | :toctree: .
26 | :nosignatures:
27 | {% for function in functions %}
28 | {{ function.split('.')[-1] }}
29 | {% endfor %}
30 |
31 | {% endif %}
32 |
--------------------------------------------------------------------------------
/.cruft.json:
--------------------------------------------------------------------------------
1 | {
2 | "template": "https://github.com/neuroinformatics-unit/python-cookiecutter",
3 | "commit": "ac40e82e88d29ce57d6cf8aa372aa1af4bde8308",
4 | "checkout": null,
5 | "context": {
6 | "cookiecutter": {
7 | "full_name": "Niko Sirmpilatze",
8 | "email": "niko.sirbiladze@gmail.com",
9 | "github_username_or_organization": "neuroinformatics-unit",
10 | "package_name": "movement",
11 | "github_repository_url": "provide later",
12 | "module_name": "movement",
13 | "short_description": "Analysis of body movement",
14 | "license": "BSD-3",
15 | "create_docs": "yes",
16 | "_copy_without_render": [
17 | ".github/*"
18 | ],
19 | "_template": "https://github.com/neuroinformatics-unit/python-cookiecutter"
20 | }
21 | },
22 | "directory": null
23 | }
24 |
--------------------------------------------------------------------------------
/docs/source/community/index.md:
--------------------------------------------------------------------------------
1 | # Community
2 |
3 | `movement` is made possible by the generous contributions of [many people](target-people).
4 |
5 | We welcome and encourage contributions in any form—whether it is fixing a bug,
6 | developing a new feature, or improving the documentation—as long as you follow our
7 | [code of conduct](target-code-of-conduct).
8 |
9 | To help you get started, we have prepared a statement on the project's [mission and scope](target-mission),
10 | a [roadmap](target-roadmaps) outlining our current priorities, and a detailed [contributing guide](target-contributing).
11 |
12 | (target-connect-with-us)=
13 | ## Connect with us
14 | ```{include} ../snippets/connect-with-us.md
15 | ```
16 |
17 |
18 | ```{toctree}
19 | :maxdepth: 2
20 | :hidden:
21 |
22 | people
23 | mission-scope
24 | roadmaps
25 | contributing
26 | resources
27 | related-projects
28 | code-of-conduct
29 | license
30 | ```
31 |
--------------------------------------------------------------------------------
/docs/source/_templates/autosummary/class.rst:
--------------------------------------------------------------------------------
1 | {{ name | escape | underline}}
2 |
3 | .. currentmodule:: {{ module }}
4 |
5 | .. autoclass:: {{ objname }}
6 | :members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 | {% block methods %}
11 | {% set ns = namespace(has_public_methods=false) %}
12 |
13 | {% if methods %}
14 | {% for item in methods %}
15 | {% if not item.startswith('_') %}
16 | {% set ns.has_public_methods = true %}
17 | {% endif %}
18 | {%- endfor %}
19 | {% endif %}
20 |
21 | {% if ns.has_public_methods %}
22 | .. rubric:: {{ _('Methods') }}
23 |
24 | .. autosummary::
25 | {% for item in methods %}
26 | {% if not item.startswith('_') %}
27 | ~{{ name }}.{{ item }}
28 | {% endif %}
29 | {%- endfor %}
30 | {% endif %}
31 | {% endblock %}
32 |
33 | .. minigallery:: {{ module }}.{{ objname }}
34 | :add-heading: Examples using ``{{ objname }}``
35 |
--------------------------------------------------------------------------------
/docs/source/_static/js/contributors.js:
--------------------------------------------------------------------------------
1 | document.addEventListener('DOMContentLoaded', () => {
2 | const contributorsDiv = document.querySelector('.contributors-table');
3 | const contributorsTable = document.createElement('table');
4 | const tbody = document.createElement('tbody');
5 | // Get all elements
6 | const allContributors = Array.from(contributorsDiv.querySelectorAll('td'));
7 | const rows = [];
8 | while (allContributors.length) {
9 | const row = allContributors.splice(0, 5); // 5 columns per row
10 | rows.push(row);
11 | }
12 | rows.forEach(row => {
13 | const tr = document.createElement('tr');
14 | row.forEach(td => tr.appendChild(td));
15 | tbody.appendChild(tr);
16 | });
17 | // Replace existing content with the new table
18 | contributorsDiv.innerHTML = '';
19 | contributorsTable.appendChild(tbody);
20 | contributorsDiv.appendChild(contributorsTable);
21 | });
22 |
--------------------------------------------------------------------------------
/movement/napari/meta_widget.py:
--------------------------------------------------------------------------------
1 | """The main napari widget for the ``movement`` package."""
2 |
3 | from napari.viewer import Viewer
4 | from qt_niu.collapsible_widget import CollapsibleWidgetContainer
5 |
6 | from movement.napari.loader_widgets import DataLoader
7 |
8 |
9 | class MovementMetaWidget(CollapsibleWidgetContainer):
10 | """The widget to rule all ``movement`` napari widgets.
11 |
12 | This is a container of collapsible widgets, each responsible
13 | for handling specific tasks in the movement napari workflow.
14 | """
15 |
16 | def __init__(self, napari_viewer: Viewer, parent=None):
17 | """Initialize the meta-widget."""
18 | super().__init__()
19 |
20 | # Add the data loader widget
21 | self.add_widget(
22 | DataLoader(napari_viewer, parent=self),
23 | collapsible=True,
24 | widget_title="Load tracked data",
25 | )
26 |
27 | self.loader = self.collapsible_widgets[0]
28 | self.loader.expand() # expand the loader widget by default
29 |
--------------------------------------------------------------------------------
/docs/source/snippets/connect-with-us.md:
--------------------------------------------------------------------------------
1 | | | Platform | Come here to |
2 | |---|----------|---------------|
3 | | {fas}`comments` | [Zulip](movement-zulip:) | Ask general questions, seek user support, chat with `movement` users and developers. |
4 | | {fab}`github` | [GitHub](movement-github:) | [Open an issue](https://github.com/neuroinformatics-unit/movement/issues) to report a bug or request a new feature. Open a pull request to contribute code or documentation (see [contributing guide](target-contributing)). Star the [repository](movement-github:) if you like `movement`. |
5 | | {fas}`video` | Community Calls | Meet the team and chat about any aspect of `movement` development. [Follow this Zulip topic](movement-community-calls:) to receive updates about upcoming calls. These typically run every other Friday from 11:00 to 11:45 (London, U.K. time). |
6 | | {fab}`bluesky` {fab}`mastodon` | Social Media | Follow us on [Bluesky](https://bsky.app/profile/neuroinformatics.dev) and [Mastodon](https://mastodon.online/@neuroinformatics) for project announcements, opportunities and upcoming events. |
7 |
--------------------------------------------------------------------------------
/.github/workflows/update_contributors_list.yml:
--------------------------------------------------------------------------------
1 | name: Contributors
2 |
3 | # Update the contributors list in the documentation monthly
4 | # on the default branch main using the Contributors-Readme-Action GitHub Action.
5 | # As the branch is protected, the action will create a pull request.
6 | # Alternatively, the action can be triggered manually using the workflow_dispatch event.
7 | on:
8 | schedule:
9 | - cron: '0 0 1 * *' # Runs at midnight on the first day of every month
10 | workflow_dispatch:
11 |
12 | jobs:
13 | update_contributors_list:
14 | name: Update Contributors List
15 | runs-on: ubuntu-latest
16 | permissions:
17 | contents: write
18 | pull-requests: write
19 | steps:
20 | - name: Contribute List
21 | uses: akhilmhdh/contributors-readme-action@v2.3.11
22 | with:
23 | readme_path: docs/source/community/people.md
24 | commit_message: 'Update contributors list'
25 | pr_title_on_protected: 'Contributors-Readme-Action: Update contributors list'
26 | env:
27 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
28 |
--------------------------------------------------------------------------------
/movement/kinematics/__init__.py:
--------------------------------------------------------------------------------
1 | """Compute variables derived from ``position`` data."""
2 |
3 | from movement.kinematics.distances import compute_pairwise_distances
4 | from movement.kinematics.kinematics import (
5 | compute_acceleration,
6 | compute_backward_displacement,
7 | compute_displacement,
8 | compute_forward_displacement,
9 | compute_path_length,
10 | compute_speed,
11 | compute_time_derivative,
12 | compute_velocity,
13 | )
14 | from movement.kinematics.kinetic_energy import compute_kinetic_energy
15 | from movement.kinematics.orientation import (
16 | compute_forward_vector,
17 | compute_forward_vector_angle,
18 | compute_head_direction_vector,
19 | )
20 |
21 | __all__ = [
22 | "compute_displacement",
23 | "compute_forward_displacement",
24 | "compute_backward_displacement",
25 | "compute_velocity",
26 | "compute_acceleration",
27 | "compute_speed",
28 | "compute_path_length",
29 | "compute_time_derivative",
30 | "compute_pairwise_distances",
31 | "compute_forward_vector",
32 | "compute_head_direction_vector",
33 | "compute_forward_vector_angle",
34 | "compute_kinetic_energy",
35 | ]
36 |
--------------------------------------------------------------------------------
/docs/source/_templates/footer_end.html:
--------------------------------------------------------------------------------
1 |
19 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | # -W: if there are warnings, treat them as errors and exit with status 1.
7 | SPHINXOPTS ?= -W
8 | SPHINXBUILD ?= sphinx-build
9 | SOURCEDIR = source
10 | BUILDDIR = build
11 |
12 | # Put it first so that "make" without argument is like "make help".
13 | help:
14 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
15 |
16 | .PHONY: help Makefile
17 |
18 | # Generate the API documentation
19 | api_index.rst:
20 | python make_api.py
21 |
22 | # Generate the snippets/admonitions.md file
23 | # by converting the admonitions in the repo's README.md to MyST format
24 | admonitions.md:
25 | python convert_admonitions.py
26 |
27 | # Remove all generated files
28 | clean:
29 | rm -rf ./build
30 | rm -f ./source/api_index.rst
31 | rm -rf ./source/api
32 | rm -rf ./source/examples
33 | rm -rf ./source/snippets/admonitions.md
34 |
35 | # Catch-all target: route all unknown targets to Sphinx using the new
36 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
37 | %: Makefile api_index.rst admonitions.md
38 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
39 |
--------------------------------------------------------------------------------
/CITATION.CFF:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | title: movement
3 | message: >-
4 | If you use movement in your work, please cite the following Zenodo DOI.
5 | type: software
6 | authors:
7 | - given-names: Nikoloz
8 | family-names: Sirmpilatze
9 | orcid: 'https://orcid.org/0000-0003-1778-2427'
10 | email: niko.sirbiladze@gmail.com
11 | - given-names: Chang Huan
12 | family-names: Lo
13 | - given-names: Sofía
14 | family-names: Miñano
15 | - given-names: Brandon D.
16 | family-names: Peri
17 | - given-names: Dhruv
18 | family-names: Sharma
19 | - given-names: Laura
20 | family-names: Porta
21 | - given-names: Iván
22 | family-names: Varela
23 | - given-names: Adam L.
24 | family-names: Tyson
25 | email: code@adamltyson.com
26 | identifiers:
27 | - type: doi
28 | value: 10.5281/zenodo.12755724
29 | description: 'A collection of archived snapshots of movement on Zenodo.'
30 | repository-code: 'https://github.com/neuroinformatics-unit/movement'
31 | url: 'https://movement.neuroinformatics.dev/'
32 | abstract: >-
33 | Python tools for analysing body movements across space and time.
34 | keywords:
35 | - behavior
36 | - behaviour
37 | - kinematics
38 | - neuroscience
39 | - animal
40 | - motion
41 | - tracking
42 | - pose
43 | license: BSD-3-Clause
44 |
--------------------------------------------------------------------------------
/docs/source/user_guide/index.md:
--------------------------------------------------------------------------------
1 | # User guide
2 |
3 | Start by [installing the package](installation.md) and
4 | [loading your own tracking data](input_output.md), or playing with some
5 | [sample data](target-sample-data) provided with the package.
6 |
7 | Before you dive deeper, we highly recommend reading about the structure
8 | and usage of [movement datasets](movement_dataset.md), which are a central
9 | concept in the package.
10 |
11 | ::::{grid} 1 1 2 2
12 | :gutter: 3
13 |
14 | :::{grid-item-card} {fas}`wrench;sd-text-primary` Installation
15 | :link: installation
16 | :link-type: doc
17 |
18 | Install the package with `conda` or `pip`.
19 | :::
20 |
21 | :::{grid-item-card} {fas}`download;sd-text-primary` Input/Output
22 | :link: input_output
23 | :link-type: doc
24 |
25 | Load and save tracking data.
26 | :::
27 |
28 | :::{grid-item-card} {fas}`table;sd-text-primary` The movement datasets
29 | :link: movement_dataset
30 | :link-type: doc
31 |
32 | Learn about our data structures.
33 | :::
34 |
35 | :::{grid-item-card} {fas}`line-chart;sd-text-primary` Graphical User Interface
36 | :link: gui
37 | :link-type: doc
38 |
39 | Use our `napari` plugin to view and explore your data interactively.
40 | :::
41 |
42 | ::::
43 |
44 |
45 | ```{toctree}
46 | :maxdepth: 2
47 | :hidden:
48 |
49 | installation
50 | input_output
51 | movement_dataset
52 | gui
53 | ```
54 |
--------------------------------------------------------------------------------
/.github/workflows/conda_install_check.yml:
--------------------------------------------------------------------------------
1 | # Run weekly checks to ensure movement can be installed from conda-forge
2 | # alongside napari and a Qt backend without conflicts.
3 | name: conda install check
4 |
5 | on:
6 | schedule:
7 | # Weekly cron job at 12:00 AM UTC on Mondays.
8 | - cron: '0 0 * * 1'
9 | workflow_dispatch:
10 |
11 | jobs:
12 | conda_install_check:
13 | name: Conda install check (${{ matrix.os }} py${{ matrix.python-version }})
14 | runs-on: ${{ matrix.os }}
15 |
16 | strategy:
17 | matrix:
18 | os: [ubuntu-latest, windows-latest, macos-latest]
19 | python-version: ["3.11", "3.12", "3.13"]
20 |
21 | defaults:
22 | run:
23 | shell: bash -l {0} # Required for conda activation
24 |
25 | steps:
26 | - uses: actions/checkout@v6
27 | # pinning to a specific sha1 is recommended by this action's docs
28 | - uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0
29 | with:
30 | python-version: ${{ matrix.python-version }}
31 | auto-update-conda: true
32 | channels: conda-forge
33 | conda-remove-defaults: true
34 | activate-environment: "movement-test"
35 | - name: Check conda installation
36 | run: |
37 | conda install -c conda-forge movement napari pyqt
38 | movement info
39 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 | set SPHINXOPTS=-W
13 |
14 | %SPHINXBUILD% >NUL 2>NUL
15 | if errorlevel 9009 (
16 | echo.
17 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
18 | echo.installed, then set the SPHINXBUILD environment variable to point
19 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
20 | echo.may add the Sphinx directory to PATH.
21 | echo.
22 | echo.If you don't have Sphinx installed, grab it from
23 | echo.https://www.sphinx-doc.org/
24 | exit /b 1
25 | )
26 |
27 | if "%1" == "" goto help
28 |
29 | :process_targets
30 | if "%1" == "clean" (
31 | echo Removing auto-generated files...
32 | rmdir /S /Q %BUILDDIR%
33 | del /Q %SOURCEDIR%\api_index.rst
34 | rmdir /S /Q %SOURCEDIR%\api\
35 | rmdir /S /Q %SOURCEDIR%\examples\
36 | del /Q %SOURCEDIR%\snippets\admonitions.md
37 | ) else (
38 | echo Generating API documentation...
39 | python make_api.py
40 |
41 | echo Converting admonitions...
42 | python convert_admonitions.py
43 |
44 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
45 | )
46 |
47 | shift
48 | if not "%1" == "" goto process_targets
49 |
50 | goto end
51 |
52 | :help
53 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
54 |
55 | :end
56 | popd
57 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Copyright (c) 2023, University College London
3 |
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of movement nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *,cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask instance folder
57 | instance/
58 |
59 | # Sphinx documentation
60 | docs/build/
61 | docs/source/examples/
62 | docs/source/api/
63 | docs/source/api_index.rst
64 | docs/source/snippets/admonitions.md
65 | sg_execution_times.rst
66 |
67 | # MkDocs documentation
68 | /site/
69 |
70 | # PyBuilder
71 | target/
72 |
73 | # Pycharm and VSCode
74 | .idea/
75 | venv/
76 | .vscode/
77 |
78 | # IPython Notebook
79 | .ipynb_checkpoints
80 |
81 | # pyenv
82 | .python-version
83 |
84 | # OS
85 | .DS_Store
86 |
87 | # written by setuptools_scm
88 | **/_version.py
89 |
90 | # Vale configuration
91 | .vale.ini
92 |
93 | # pre-commit and pytest cache
94 | .*_cache/
95 |
96 | # uv related
97 | uv.lock
98 |
--------------------------------------------------------------------------------
/docs/source/_static/css/custom.css:
--------------------------------------------------------------------------------
1 | html[data-theme=dark] {
2 | --pst-color-primary: #04B46D;
3 | --pst-color-link: var(--pst-color-primary);
4 | }
5 |
6 | html[data-theme=light] {
7 | --pst-color-primary: #03A062;
8 | --pst-color-link: var(--pst-color-primary);
9 | }
10 |
11 | body .bd-article-container {
12 | max-width: 100em !important;
13 | }
14 |
15 | .col {
16 | flex: 0 0 50%;
17 | max-width: 50%;
18 | }
19 |
20 | .img-sponsor {
21 | height: 50px;
22 | padding: 5px;
23 | }
24 |
25 | .img-sponsor-large {
26 | height: 10vh;
27 | padding: 5px;
28 | }
29 |
30 | .things-in-a-row {
31 | display: flex;
32 | flex-wrap: wrap;
33 | justify-content: space-between;
34 | }
35 |
36 | .contributors-table table {
37 | width: 100%;
38 | }
39 |
40 | .contributors-table td{
41 | padding: 2px;
42 | min-width: 90px;
43 | vertical-align: top;
44 | text-align: center;
45 | }
46 |
47 | /* Disable decoration for all but movement backrefs */
48 | a[class^="sphx-glr-backref-module-"],
49 | a[class^="sphx-glr-backref-type-"] {
50 | text-decoration: none;
51 | }
52 |
53 | a[class^="sphx-glr-backref-module-movement"] {
54 | text-decoration: underline;
55 | }
56 |
57 | /* Container for colour swatches */
58 | .colour-grid {
59 | display: flex;
60 | flex-wrap: wrap;
61 | gap: 2rem;
62 | justify-content: center; /* or flex-start if you prefer left-aligned */
63 | margin: 1.5rem 0;
64 | background: transparent;
65 | }
66 |
67 | /* Individual swatch card */
68 | .colour-card {
69 | text-align: center;
70 | background: transparent;
71 | }
72 |
73 | /* Make sure the SVGs don't get weird inline spacing */
74 | .colour-card svg {
75 | display: block;
76 | margin: 0 auto 0.5rem;
77 | }
78 |
--------------------------------------------------------------------------------
/docs/source/index.md:
--------------------------------------------------------------------------------
1 | (target-movement)=
2 | # movement
3 |
4 | A Python toolbox for analysing animal body movements across space and time.
5 |
6 | ::::{grid} 1 2 2 3
7 | :gutter: 3
8 |
9 | :::{grid-item-card} {fas}`book;sd-text-primary` User guide
10 | :link: user_guide/index
11 | :link-type: doc
12 |
13 | Installation, supported formats and key concepts.
14 | :::
15 |
16 | :::{grid-item-card} {fas}`chalkboard-user;sd-text-primary` Examples
17 | :link: examples/index
18 | :link-type: doc
19 |
20 | A gallery of examples using `movement`.
21 | :::
22 |
23 | :::{grid-item-card} {fas}`comments;sd-text-primary` Join the movement
24 | :link: community/index
25 | :link-type: doc
26 |
27 | How to connect with us and contribute.
28 | :::
29 | ::::
30 |
31 | 
32 |
33 | ## Overview
34 |
35 | Deep learning methods for motion tracking have revolutionised a range of
36 | scientific disciplines, from neuroscience and biomechanics, to conservation
37 | and ethology. Tools such as [DeepLabCut](dlc:) and [SLEAP](sleap:)
38 | now allow researchers to track animal movements
39 | in videos with remarkable accuracy, without requiring physical markers.
40 | However, there is still a need for standardised, easy-to-use methods
41 | to process the tracks generated by these tools.
42 |
43 | `movement` aims to provide a consistent, modular interface for analysing
44 | motion tracks, enabling steps such as data cleaning, visualisation,
45 | and motion quantification. We aim to support all popular animal tracking
46 | frameworks and file formats.
47 |
Find out more in our [mission and scope](target-mission) statement and on our [roadmap](target-roadmaps).
49 |
50 | ```{include} /snippets/admonitions.md
51 | ```
52 |
53 | ## Citation
54 | ```{include} ../../README.md
55 | :start-after: '## Citation'
56 | :end-before: '## License'
57 | ```
58 |
59 | ```{toctree}
60 | :maxdepth: 2
61 | :hidden:
62 |
63 | user_guide/index
64 | examples/index
65 | community/index
66 | api_index
67 | blog/index
68 | ```
69 |
--------------------------------------------------------------------------------
/tests/test_unit/test_deprecations.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import MagicMock
2 |
3 | import pytest
4 |
5 | import movement.kinematics as kinematics
6 |
7 |
@pytest.mark.parametrize(
    "deprecated_function, mocked_inputs, check_in_message",
    [
        (
            kinematics.compute_displacement,
            {"data": MagicMock(dims=["time", "space"])},
            ["compute_forward_displacement", "compute_backward_displacement"],
        ),
    ],
)
def test_deprecation(deprecated_function, mocked_inputs, check_in_message):
    """Test that calling a deprecated function emits a DeprecationWarning
    whose message names the function and mentions its replacement(s)
    listed in ``check_in_message``.
    """
    with pytest.warns(DeprecationWarning) as record:
        _ = deprecated_function(**mocked_inputs)

    # Exactly one warning of the expected category should be raised.
    assert len(record) == 1
    assert isinstance(record[0].message, DeprecationWarning)
    assert f"{deprecated_function.__name__}` is deprecated" in str(
        record[0].message
    )

    # The message should point users at all suggested replacements.
    assert all(
        message in str(record[0].message) for message in check_in_message
    )
34 |
35 |
36 | # ---------------- Backwards compatibility tests ----------------
37 |
38 |
@pytest.mark.parametrize(
    "valid_dataset",
    ["valid_poses_dataset", "valid_bboxes_dataset"],
)
def test_backwards_compatibility_displacement(valid_dataset, request):
    """Check that the deprecated ``compute_displacement`` equals the
    negation of ``compute_backward_displacement`` on valid datasets.
    """
    dataset = request.getfixturevalue(valid_dataset)
    position = dataset.position

    # The deprecated function must still work, modulo its warning.
    with pytest.warns(DeprecationWarning):
        deprecated_output = kinematics.compute_displacement(position)

    reference = -kinematics.compute_backward_displacement(position)
    assert deprecated_output.equals(reference), (
        "compute_displacement should produce the same output as "
        "the negative of compute_backward_displacement"
    )
57 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Fixtures and configurations shared by the entire test suite."""
2 |
3 | from glob import glob
4 |
5 | import numpy as np
6 | import pytest
7 | from _pytest.logging import LogCaptureFixture
8 |
9 | from movement.sample_data import fetch_dataset_paths, list_datasets
10 | from movement.utils.logging import logger
11 |
12 |
13 | def _to_module_string(path: str) -> str:
14 | """Convert a file path to a module string."""
15 | return path.replace("/", ".").replace("\\", ".").replace(".py", "")
16 |
17 |
# Auto-register every fixture module under tests/fixtures/ as a pytest
# plugin, skipping dunder files such as __init__.py.
pytest_plugins = [
    _to_module_string(fixture)
    for fixture in glob("tests/fixtures/*.py")
    if "__" not in fixture
]
23 |
24 |
def pytest_sessionstart(session):
    """Set up logging to file and fetch test dataset file paths."""
    # Set up log file in a temporary directory.
    # NOTE(review): this reads pytest's private ``_tmp_path_factory``
    # attribute — confirm it still exists when upgrading pytest.
    tmp_path_factory = session.config._tmp_path_factory
    pytest.LOG_FILE = logger.configure(
        log_file_name=".movement-test",
        log_directory=tmp_path_factory.mktemp(".movement"),
        console=False,  # log to file only, keeping test output clean
    )
    # Fetch test dataset file paths as a dictionary keyed by file name.
    # Each dataset exposes either a "poses" or a "bboxes" path; store
    # whichever one is present.
    pytest.DATA_PATHS = {}
    for file_name in list_datasets():
        paths_dict = fetch_dataset_paths(file_name)
        data_path = paths_dict.get("poses") or paths_dict.get("bboxes")
        pytest.DATA_PATHS[file_name] = data_path
40 |
41 |
@pytest.fixture
def caplog(caplog: LogCaptureFixture):
    """Override pytest's ``caplog`` fixture with a loguru-aware version.

    A temporary loguru sink forwarding to ``caplog.handler`` is installed
    for the duration of the test and removed afterwards, so loguru
    messages become visible to ``caplog``.
    """

    def _passes_caplog_level(record):
        # Forward only records at or above caplog's current handler level.
        return record["level"].no >= caplog.handler.level

    sink_id = logger.add(
        caplog.handler,
        format="{message}",
        level="DEBUG",
        filter=_passes_caplog_level,
        enqueue=False,
    )
    yield caplog
    logger.remove(sink_id)
56 |
57 |
58 | @pytest.fixture(scope="session")
59 | def rng():
60 | """Return a random number generator with a fixed seed."""
61 | return np.random.default_rng(seed=42)
62 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | exclude: 'conf.py'
2 | # Configuring https://pre-commit.ci/ bot
3 | ci:
4 | autoupdate_schedule: monthly
5 | repos:
6 | - repo: https://github.com/pre-commit/pre-commit-hooks
7 | rev: v6.0.0
8 | hooks:
9 | - id: check-added-large-files
10 | - id: check-docstring-first
11 | - id: check-executables-have-shebangs
12 | - id: check-case-conflict
13 | - id: check-merge-conflict
14 | - id: check-symlinks
15 | - id: check-yaml
16 | - id: check-toml
17 | - id: debug-statements
18 | - id: end-of-file-fixer
19 | - id: mixed-line-ending
20 | args: [--fix=lf]
21 | - id: name-tests-test
22 | args: ["--pytest-test-first"]
23 | exclude: ^tests/fixtures/
24 | - id: requirements-txt-fixer
25 | - id: trailing-whitespace
26 | - repo: https://github.com/pre-commit/pygrep-hooks
27 | rev: v1.10.0
28 | hooks:
29 | - id: rst-backticks
30 | - id: rst-directive-colons
31 | - id: rst-inline-touching-normal
32 | - repo: https://github.com/astral-sh/ruff-pre-commit
33 | rev: v0.14.7
34 | hooks:
35 | - id: ruff
36 | - id: ruff-format
37 | - repo: https://github.com/pre-commit/mirrors-mypy
38 | rev: v1.19.0
39 | hooks:
40 | - id: mypy
41 | additional_dependencies:
42 | - attrs
43 | - types-setuptools
44 | - pandas-stubs
45 | - types-attrs
46 | - types-PyYAML
47 | - types-requests
48 | - repo: https://github.com/mgedmin/check-manifest
49 | rev: "0.51"
50 | hooks:
51 | - id: check-manifest
52 | args: [--no-build-isolation]
53 | additional_dependencies: [setuptools-scm, wheel]
54 | - repo: https://github.com/codespell-project/codespell
55 | # Configuration for codespell is in pyproject.toml
56 | rev: v2.4.1
57 | hooks:
58 | - id: codespell
59 |
--------------------------------------------------------------------------------
/tests/test_unit/test_roi/test_normal.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import numpy as np
4 | import pytest
5 | from numpy.typing import ArrayLike
6 |
7 | from movement.roi import LineOfInterest
8 |
9 | SQRT_2 = np.sqrt(2.0)
10 |
11 |
@pytest.mark.parametrize(
    ["segment", "point", "expected_normal"],
    [
        pytest.param(
            "segment_of_y_equals_x",
            (0.0, 1.0),
            (-1.0 / SQRT_2, 1.0 / SQRT_2),
            id="Normal pointing to half-plane with point (0, 1)",
        ),
        pytest.param(
            "segment_of_y_equals_x",
            (1.0, 0.0),
            (1.0 / SQRT_2, -1.0 / SQRT_2),
            id="Normal pointing to half-plane with point (1, 0)",
        ),
        pytest.param(
            LineOfInterest([(0.5, 0.5), (1.0, 1.0)]),
            (1.0, 0.0),
            (1.0 / SQRT_2, -1.0 / SQRT_2),
            id="Segment does not start at origin",
        ),
        pytest.param(
            "segment_of_y_equals_x",
            (1.0, 2.0),
            (-1.0 / SQRT_2, 1.0 / SQRT_2),
            id="Necessary to extend segment to compute normal.",
        ),
        pytest.param(
            LineOfInterest([(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)]),
            (0.5, 0.5),
            ValueError("Normal is not defined for multi-segment geometries."),
            id="Multi-segment lines do not have normals.",
        ),
    ],
)
def test_normal(
    segment: LineOfInterest,
    point: ArrayLike,
    expected_normal: np.ndarray | Exception,
    request,
) -> None:
    """Check ``LineOfInterest.normal`` against expected unit normals.

    ``segment`` may be a fixture name (resolved via ``request``) or a
    ``LineOfInterest`` instance. ``expected_normal`` may be an Exception
    instance, in which case the same error type and message must be raised.
    """
    # String cases refer to fixtures; resolve them to actual objects.
    if isinstance(segment, str):
        segment = request.getfixturevalue(segment)

    if isinstance(expected_normal, Exception):
        # Expect the same exception type with the exact message.
        with pytest.raises(
            type(expected_normal), match=re.escape(str(expected_normal))
        ):
            segment.normal(point)
    else:
        computed_normal = segment.normal(point)
        assert np.allclose(computed_normal, expected_normal)
64 |
--------------------------------------------------------------------------------
/.github/workflows/docs_build_and_deploy.yml:
--------------------------------------------------------------------------------
1 | name: Docs
2 |
3 | # Generate the documentation on all merges to main, all pull requests, or by
4 | # manual workflow dispatch. The build job can be used as a CI check that the
5 | # docs still build successfully. The deploy job which moves the generated
6 | # html to the gh-pages branch and triggers a GitHub pages deployment
7 | # only runs when a tag is pushed or when the workflow is manually dispatched
8 | # from the main branch.
9 | on:
10 | push:
11 | branches:
12 | - main
13 | tags:
14 | - '*'
15 | pull_request:
16 | merge_group:
17 | workflow_dispatch:
18 |
19 | jobs:
20 |
21 | linting:
22 | # scheduled workflows should not run on forks
23 | if: |
24 | (github.event_name == 'schedule' &&
25 | github.repository_owner == 'neuroinformatics-unit' &&
26 | github.ref == 'refs/heads/main') ||
27 | (github.event_name != 'schedule')
28 | runs-on: ubuntu-latest
29 | steps:
30 | - uses: neuroinformatics-unit/actions/lint@v2
31 |
32 | build_sphinx_docs:
33 | name: Build Sphinx Docs
34 | runs-on: ubuntu-latest
35 | steps:
36 | - uses: actions/cache@v4
37 | with:
38 | path: |
39 | ~/.movement/*
40 | key: cached-test-data-${{ runner.os }}
41 | restore-keys: cached-test-data
42 | - uses: neuroinformatics-unit/actions/build_sphinx_docs@main
43 | with:
44 | python-version: 3.12
45 | use-make: true
46 | fetch-tags: true
47 | use-artifactci: lazy
48 |
49 | deploy_sphinx_docs:
50 | name: Deploy Sphinx Docs
51 | needs: build_sphinx_docs
52 | permissions:
53 | contents: write
54 | if: |
55 | (github.event_name == 'push' && github.ref_type == 'tag') ||
56 | (github.event_name == 'push' && github.ref == 'refs/heads/main') ||
57 | (github.event_name == 'workflow_dispatch' && github.ref == 'refs/heads/main')
58 | runs-on: ubuntu-latest
59 | steps:
60 | - uses: neuroinformatics-unit/actions/deploy_sphinx_docs_multiversion@main
61 | with:
62 | secret_input: ${{ secrets.GITHUB_TOKEN }}
63 | use-make: true
64 | switcher-url: https://movement.neuroinformatics.dev/latest/_static/switcher.json
65 |
--------------------------------------------------------------------------------
/tests/test_unit/conftest.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Callable
2 |
3 | import numpy as np
4 | import pytest
5 | import xarray as xr
6 |
7 |
8 | @pytest.fixture(scope="session")
9 | def push_into_range() -> Callable[
10 | [xr.DataArray | np.ndarray, float, float], xr.DataArray | np.ndarray
11 | ]:
12 | """Return a function for wrapping angles.
13 |
14 | This is a factory fixture that returns a method for wrapping angles
15 | into a user-specified range.
16 | """
17 |
18 | def _push_into_range(
19 | numeric_values: xr.DataArray | np.ndarray,
20 | lower: float = -180.0,
21 | upper: float = 180.0,
22 | ) -> xr.DataArray | np.ndarray:
23 | """Coerce values into the range (lower, upper].
24 |
25 | Primarily used to wrap returned angles into a particular range,
26 | such as (-pi, pi].
27 |
28 | The interval width is the value ``upper - lower``.
29 | Each element in ``values`` that starts less than or equal to the
30 | ``lower`` bound has multiples of the interval width added to it,
31 | until the result lies in the desirable interval.
32 |
33 | Each element in ``values`` that starts greater than the ``upper``
34 | bound has multiples of the interval width subtracted from it,
35 | until the result lies in the desired interval.
36 | """
37 | translated_values = (
38 | numeric_values.values.copy()
39 | if isinstance(numeric_values, xr.DataArray)
40 | else numeric_values.copy()
41 | )
42 |
43 | interval_width = upper - lower
44 | if interval_width <= 0:
45 | raise ValueError(
46 | f"Upper bound ({upper}) must be strictly "
47 | f"greater than lower bound ({lower})"
48 | )
49 |
50 | while np.any(
51 | (translated_values <= lower) | (translated_values > upper)
52 | ):
53 | translated_values[translated_values <= lower] += interval_width
54 | translated_values[translated_values > upper] -= interval_width
55 |
56 | if isinstance(numeric_values, xr.DataArray):
57 | translated_values = numeric_values.copy(
58 | deep=True, data=translated_values
59 | )
60 | return translated_values
61 |
62 | return _push_into_range
63 |
--------------------------------------------------------------------------------
/.github/workflows/test_and_deploy.yml:
--------------------------------------------------------------------------------
1 | name: Tests
2 |
3 | on:
4 | push:
5 | branches:
6 | - '*'
7 | tags:
8 | - '*'
9 | pull_request:
10 | merge_group:
11 |
12 | jobs:
13 | linting:
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: neuroinformatics-unit/actions/lint@v2
17 |
18 | manifest:
19 | name: Check Manifest
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: neuroinformatics-unit/actions/check_manifest@v2
23 |
24 | test:
25 | needs: [linting, manifest]
26 | name: ${{ matrix.os }} py${{ matrix.python-version }}
27 | runs-on: ${{ matrix.os }}
28 | strategy:
29 | matrix:
30 | # Run all supported Python versions on linux
31 | python-version: ["3.11", "3.12", "3.13"]
32 | os: [ubuntu-latest]
33 | # Include 1 MacOS Silicon (latest) and 1 Windows run
34 | include:
35 | - os: macos-latest
36 | python-version: "3.13"
37 | - os: windows-latest
38 | python-version: "3.13"
39 |
40 | steps:
41 | # these libraries enable testing on Qt on linux
42 | - uses: pyvista/setup-headless-display-action@v4
43 | with:
44 | qt: true
45 | - name: Cache Test Data
46 | uses: actions/cache@v4
47 | with:
48 | path: |
49 | ~/.movement/*
50 | key: cached-test-data-${{ runner.os }}
51 | restore-keys: cached-test-data
52 | - uses: neuroinformatics-unit/actions/test@v2
53 | with:
54 | python-version: ${{ matrix.python-version }}
55 | secret-codecov-token: ${{ secrets.CODECOV_TOKEN }}
56 |
57 | build_sdist_wheels:
    name: Build source distribution and wheels
59 | needs: [test]
60 | if: github.event_name == 'push' && github.ref_type == 'tag'
61 | runs-on: ubuntu-latest
62 | steps:
63 | - uses: neuroinformatics-unit/actions/build_sdist_wheels@v2
64 |
65 |
66 | upload_all:
67 | name: Publish build distributions
68 | needs: [build_sdist_wheels]
69 | if: github.event_name == 'push' && github.ref_type == 'tag'
70 | runs-on: ubuntu-latest
71 | steps:
72 | - uses: actions/download-artifact@v6
73 | with:
74 | name: artifact
75 | path: dist
76 | - uses: pypa/gh-action-pypi-publish@release/v1
77 | with:
78 | user: __token__
79 | password: ${{ secrets.TWINE_API_KEY }}
80 |
--------------------------------------------------------------------------------
/tests/test_unit/test_validators/test_array_validators.py:
--------------------------------------------------------------------------------
1 | import re
2 | from contextlib import nullcontext as does_not_raise
3 |
4 | import pytest
5 |
6 | from movement.validators.arrays import validate_dims_coords
7 |
8 |
def expect_value_error_with_message(error_msg):
    """Return a context manager expecting a ValueError with this exact message."""
    # Escape the message so regex metacharacters are matched literally.
    escaped_msg = re.escape(error_msg)
    return pytest.raises(ValueError, match=escaped_msg)
12 |
13 |
# Each case is a tuple: (dims_coords: dict, exact_coords: bool,
# expected_exception: context manager).
valid_cases = [
    ({"time": []}, False, does_not_raise()),
    ({"time": []}, True, does_not_raise()),
    ({"time": [0, 1]}, False, does_not_raise()),
    ({"space": ["x", "y"]}, False, does_not_raise()),
    ({"space": ["x", "y"]}, True, does_not_raise()),
    ({"time": [], "space": []}, False, does_not_raise()),
    ({"time": [], "space": ["x", "y"]}, False, does_not_raise()),
]  # Valid cases (no error)

invalid_cases = [
    (
        {"spacetime": []},
        False,
        expect_value_error_with_message(
            "Input data must contain ['spacetime'] as dimensions."
        ),
    ),
    (
        {"time": [0, 100], "space": ["x", "y"]},
        False,
        expect_value_error_with_message(
            "Input data must contain [100] in the 'time' coordinates."
        ),
    ),
    (
        {"space": ["x", "y", "z"]},
        False,
        expect_value_error_with_message(
            "Input data must contain ['z'] in the 'space' coordinates."
        ),
    ),
    (
        {"space": ["x"]},
        True,
        expect_value_error_with_message(
            "Dimension 'space' must only contain ['x'] as coordinates, "
        ),
    ),
]  # Invalid cases (raise ValueError)
55 |
56 |
@pytest.mark.parametrize(
    "required_dims_coords, exact_coords, expected_exception",
    valid_cases + invalid_cases,
)
def test_validate_dims_coords(
    valid_poses_dataset,  # fixture from conftest.py
    required_dims_coords,
    exact_coords,
    expected_exception,
):
    """Check validate_dims_coords on valid and invalid dim/coord specs."""
    data = valid_poses_dataset["position"]
    with expected_exception:
        validate_dims_coords(
            data, required_dims_coords, exact_coords=exact_coords
        )
73 |
--------------------------------------------------------------------------------
/movement/utils/reports.py:
--------------------------------------------------------------------------------
1 | """Utility functions for reporting missing data."""
2 |
3 | import numpy as np
4 | import xarray as xr
5 |
6 | from movement.utils.logging import logger
7 | from movement.validators.arrays import validate_dims_coords
8 |
9 |
def report_nan_values(da: xr.DataArray, label: str | None = None) -> str:
    """Report the number and percentage of data points that are NaN.

    The number of NaNs are counted for each element along the required
    ``time`` dimension.
    If the DataArray has the ``space`` dimension, the ``space`` dimension
    is reduced by checking if any values in the ``space`` coordinates
    are NaN, e.g. a 2D point is considered as NaN if any of its x or y
    coordinates are NaN.

    Parameters
    ----------
    da : xarray.DataArray
        The input data with ``time`` as a required dimension.
    label : str, optional
        Label to identify the data in the report. If not provided,
        the name of the DataArray is used. If the DataArray has no
        name, "data" is used as the label.

    Returns
    -------
    str
        A string containing the report.

    """
    validate_dims_coords(da, {"time": []})
    label = label or da.name or "data"
    nan_report = f"Missing points (marked as NaN) in {label}:"
    # Count NaNs along time; a point counts as NaN if any of its
    # spatial coordinates is NaN
    nan_count = (
        da.isnull().any("space").sum("time")
        if "space" in da.dims
        else da.isnull().sum("time")
    )
    # Drop coord labels without NaNs
    # NOTE(review): `other=0` is passed together with `drop=True`; some
    # older xarray versions disallowed `other` when dropping — confirm
    # the minimum supported xarray version accepts this combination.
    nan_count = nan_count.where(nan_count > 0, other=0, drop=True)
    # Nothing surviving the drop means there were no NaNs at all
    if nan_count.size == 0 or nan_count.isnull().all():
        return f"No missing points (marked as NaN) in {label}."
    total_count = da.time.size
    # Format each count as "n/total (pct%)"
    nan_count_str = (
        nan_count.astype(int).astype(str)
        + f"/{total_count} ("
        + (nan_count / total_count * 100).round(2).astype(str)
        + "%)"
    )
    # Stack all dimensions except for the last, so the result can be
    # rendered as a 2D table via pandas
    nan_count_df = (
        nan_count_str.stack(new_dim=nan_count_str.dims[:-1])
        if len(nan_count_str.dims) > 1
        else nan_count_str
    ).to_pandas()
    # presumably to_pandas() can yield a bare numpy array (no to_string)
    # for low-dimensional inputs, hence the isinstance guard — verify
    nan_count_df = (
        nan_count_df.to_string()
        if not isinstance(nan_count_df, np.ndarray)
        else nan_count_df
    )
    nan_report += f"\n\n{nan_count_df}"
    logger.info(nan_report)
    return nan_report
68 |
--------------------------------------------------------------------------------
/tests/fixtures/roi.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import xarray as xr
4 |
5 | from movement.roi import LineOfInterest, PolygonOfInterest
6 |
7 |
@pytest.fixture
def segment_of_y_equals_x() -> LineOfInterest:
    """Return the line segment joining (0, 0) and (1, 1)."""
    endpoints = [(0, 0), (1, 1)]
    return LineOfInterest(endpoints)
12 |
13 |
@pytest.fixture
def unit_square_pts() -> np.ndarray:
    """Corners of a unit-length square, as a (4, 2) float array.

    The lower-left corner sits at (0, 0), and the corners are listed
    clockwise around the centre of the would-be square.
    """
    corners = [
        (0.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
        (1.0, 0.0),
    ]
    return np.array(corners, dtype=float)
30 |
31 |
@pytest.fixture
def triangle_pts():
    """Return the vertices of a right-angled triangle."""
    vertices = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)]
    return vertices
36 |
37 |
@pytest.fixture
def unit_square_hole(unit_square_pts: np.ndarray) -> np.ndarray:
    """Hole in the shape of a 0.5 side-length square centred on (0.5, 0.5)."""
    # Shrink the unit square by half, then shift it towards the centre.
    # The multiplication allocates a new array, so the source fixture
    # is left untouched.
    return (unit_square_pts * 0.5) + 0.25
42 |
43 |
@pytest.fixture
def unit_square(unit_square_pts: np.ndarray) -> PolygonOfInterest:
    """Unit square region named "Unit square".

    Note: the parameter annotation was corrected from ``xr.DataArray``
    to ``np.ndarray`` to match what the ``unit_square_pts`` fixture
    actually returns.
    """
    return PolygonOfInterest(unit_square_pts, name="Unit square")
47 |
48 |
@pytest.fixture
def unit_square_with_hole(
    unit_square_pts: np.ndarray, unit_square_hole: np.ndarray
) -> PolygonOfInterest:
    """Unit square with a central square hole of side length 0.5.

    Note: the parameter annotations were corrected from ``xr.DataArray``
    to ``np.ndarray`` to match what the upstream fixtures return.
    """
    return PolygonOfInterest(
        unit_square_pts, holes=[unit_square_hole], name="Unit square with hole"
    )
56 |
57 |
@pytest.fixture
def triangle(triangle_pts) -> PolygonOfInterest:
    """Triangular region of interest named "triangle"."""
    return PolygonOfInterest(triangle_pts, name="triangle")
62 |
63 |
@pytest.fixture
def triangle_different_name(triangle_pts) -> PolygonOfInterest:
    """Same triangle, but registered under a different name."""
    return PolygonOfInterest(triangle_pts, name="pizza_slice")
68 |
69 |
@pytest.fixture
def triangle_moved_01(triangle_pts) -> PolygonOfInterest:
    """Triangle translated by 0.01 along both the x and y axes."""
    shifted = [(px + 0.01, py + 0.01) for px, py in triangle_pts]
    return PolygonOfInterest(shifted, name="triangle")
76 |
77 |
@pytest.fixture
def triangle_moved_100(triangle_pts) -> PolygonOfInterest:
    """Triangle translated by 1.00 along both the x and y axes."""
    shifted = [(px + 1.0, py + 1.0) for px, py in triangle_pts]
    return PolygonOfInterest(shifted, name="triangle")
84 |
--------------------------------------------------------------------------------
/examples/load_and_explore_poses.py:
--------------------------------------------------------------------------------
1 | """Load and explore pose tracks
2 | ===============================
3 |
4 | Load and explore an example dataset of pose tracks.
5 | """
6 |
7 | # %%
8 | # Imports
9 | # -------
10 |
11 | from movement import sample_data
12 | from movement.io import load_poses
13 | from movement.plots import plot_centroid_trajectory
14 |
15 | # %%
16 | # Define the file path
17 | # --------------------
18 | # This should be a file output by one of our supported pose estimation
19 | # frameworks (e.g., DeepLabCut, SLEAP), containing predicted pose tracks.
20 | # For example, the path could be something like:
21 |
22 | # uncomment and edit the following line to point to your own local file
23 | # file_path = "/path/to/my/data.h5"
24 |
25 | # %%
26 | # For the sake of this example, we will use the path to one of
27 | # the sample datasets provided with ``movement``.
28 |
29 | file_path = sample_data.fetch_dataset_paths(
30 | "SLEAP_three-mice_Aeon_proofread.analysis.h5"
31 | )["poses"]
32 | print(file_path)
33 |
34 | # %%
35 | # Load the data into movement
36 | # ---------------------------
37 |
38 | ds = load_poses.from_sleap_file(file_path, fps=50)
39 | print(ds)
40 |
41 | # %%
42 | # The loaded dataset contains two data variables:
43 | # ``position`` and ``confidence``.
44 | # To get the position data:
45 | position = ds.position
46 |
47 | # %%
48 | # Select and plot data with xarray
49 | # --------------------------------
50 | # You can use :meth:`xarray.DataArray.sel` or :meth:`xarray.Dataset.sel` to
51 | # index into ``xarray`` data arrays and datasets.
52 | # For example, we can get a ``DataArray`` containing only data
53 | # for a single keypoint of the first individual:
54 |
55 | da = position.sel(individuals="AEON3B_NTP", keypoints="centroid")
56 | print(da)
57 |
58 | # %%
59 | # We could plot the x, y coordinates of this keypoint over time,
60 | # using ``xarray``'s built-in plotting methods:
61 | da.plot.line(x="time", row="space", aspect=2, size=2.5)
62 |
63 | # %%
64 | # Similarly we could plot the same keypoint's x, y coordinates
65 | # for all individuals:
66 |
67 | da = position.sel(keypoints="centroid")
68 | da.plot.line(x="time", row="individuals", aspect=2, size=2.5)
69 |
70 | # %%
71 | # Trajectory plots
72 | # ----------------
73 | # We are not limited to ``xarray``'s built-in plots.
74 | # The :mod:`movement.plots` module provides some additional
75 | # visualisations, like :func:`plot_centroid_trajectory()\
76 | # `.
77 |
78 | mouse_name = "AEON3B_TP1"
79 | fig, ax = plot_centroid_trajectory(position, individual=mouse_name)
80 | fig.show()
81 |
--------------------------------------------------------------------------------
/tests/test_unit/test_roi/test_polygon_boundary.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import shapely
4 |
5 | from movement.roi.line import LineOfInterest
6 | from movement.roi.polygon import PolygonOfInterest
7 |
8 |
@pytest.mark.parametrize(
    ["exterior_boundary", "interior_boundaries"],
    [
        # Use tuple literals instead of tuple()/tuple([...]) calls
        pytest.param("unit_square_pts", (), id="No holes"),
        pytest.param(
            "unit_square_pts", ("unit_square_hole",), id="One hole"
        ),
        pytest.param(
            "unit_square_pts",
            (
                np.array([[0.0, 0.0], [0.0, 0.25], [0.25, 0.0]]),
                np.array([[0.75, 0.0], [1.0, 0.25], [1.0, 0.0]]),
            ),
            id="Corners shaved off",
        ),
    ],
)
def test_boundary(exterior_boundary, interior_boundaries, request) -> None:
    """Check a polygon's exterior/interior boundaries and holes.

    Boundary fixtures may be passed either as fixture names (strings,
    resolved via ``request``) or directly as coordinate arrays.
    """
    if isinstance(exterior_boundary, str):
        exterior_boundary = request.getfixturevalue(exterior_boundary)
    interior_boundaries = tuple(
        request.getfixturevalue(ib) if isinstance(ib, str) else ib
        for ib in interior_boundaries
    )
    tolerance = 1.0e-8

    polygon = PolygonOfInterest(
        exterior_boundary, holes=interior_boundaries, name="Holey"
    )
    expected_exterior = shapely.LinearRing(exterior_boundary)
    expected_interiors = tuple(
        shapely.LinearRing(ib) for ib in interior_boundaries
    )
    expected_holes = tuple(shapely.Polygon(ib) for ib in interior_boundaries)

    computed_exterior = polygon.exterior_boundary
    computed_interiors = polygon.interior_boundaries
    computed_holes = polygon.holes

    assert isinstance(computed_exterior, LineOfInterest)
    assert expected_exterior.equals_exact(computed_exterior.region, tolerance)
    assert isinstance(computed_interiors, tuple)
    assert isinstance(computed_holes, tuple)
    assert len(computed_interiors) == len(expected_interiors)
    assert len(computed_holes) == len(expected_holes)
    assert len(computed_holes) == len(computed_interiors)
    # Lengths are asserted equal above, so plain zip is safe here
    for expected_line, interior_line in zip(
        expected_interiors, computed_interiors
    ):
        assert isinstance(interior_line, LineOfInterest)
        assert expected_line.equals_exact(interior_line.region, tolerance)
    for expected_hole, interior_hole in zip(expected_holes, computed_holes):
        assert isinstance(interior_hole, PolygonOfInterest)
        assert expected_hole.equals_exact(interior_hole.region, tolerance)
65 |
--------------------------------------------------------------------------------
/tests/fixtures/plots.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import xarray as xr
4 |
5 |
@pytest.fixture
def one_individual():
    """Sample data for plot testing.

    Data has five keypoints for one cross-shaped mouse that is centred
    around the origin and moves forwards along the positive y axis with
    steps of 1.

    Keypoint starting position (x, y):
    - left (-1, 0)
    - centre (0, 0)
    - right (1, 0)
    - snout (0, 1)
    - tail (0, -1)

    """
    n_frames = 4
    individuals = ["id_0"]
    keypoints = ["left", "centre", "right", "snout", "tail"]
    space = ["x", "y"]
    # (x offset, y offset) of each keypoint relative to the centre,
    # which itself travels along y = 0, 1, 2, ...
    offsets = {
        "left": (-1, 0),
        "centre": (0, 0),
        "right": (1, 0),
        "snout": (0, 1),
        "tail": (0, -1),
    }

    frames = np.arange(n_frames)
    position_data = np.zeros(
        (n_frames, len(space), len(keypoints), len(individuals))
    )
    for kpt_idx, kpt_name in enumerate(keypoints):
        x_offset, y_offset = offsets[kpt_name]
        position_data[:, 0, kpt_idx, 0] = x_offset  # constant x
        position_data[:, 1, kpt_idx, 0] = frames + y_offset  # moving y

    return xr.DataArray(
        position_data,
        name="position",
        dims=["time", "space", "keypoints", "individuals"],
        coords={
            "time": frames,
            "space": space,
            "keypoints": keypoints,
            "individuals": individuals,
        },
    )
59 |
60 |
@pytest.fixture
def two_individuals(one_individual):
    """Return a position array with two cross-shaped mice.

    The 0-th mouse moves forwards along the positive y axis (same as
    ``one_individual``); the 1-st mouse moves in the opposite direction,
    i.e. with its snout towards the negative side of the y axis.

    The left and right keypoints are not mirrored for id_1, so this
    mouse is moving flipped around on its back.
    """
    mirrored = one_individual.copy()
    mirrored.loc[dict(space="y")] = mirrored.sel(space="y") * -1
    mirrored = mirrored.assign_coords(individuals=["id_1"])
    return xr.concat([one_individual.copy(), mirrored], "individuals")
76 |
--------------------------------------------------------------------------------
/tests/test_unit/test_cli_entrypoint.py:
--------------------------------------------------------------------------------
1 | import builtins
2 | import subprocess
3 | import sys
4 | from contextlib import nullcontext as does_not_raise
5 | from unittest.mock import patch
6 |
7 | import pytest
8 |
9 | from movement.cli_entrypoint import main
10 |
11 |
# ``does_not_raise(msg)`` is ``nullcontext(enter_result=msg)``: entering it
# yields ``msg``, which the test expects to find in the printed output.
# The ``pytest.raises`` case never reaches the assertions, since ``main()``
# raises inside the ``with`` block.
@pytest.mark.parametrize(
    "command, expected_exception",
    [
        (
            ["movement", "info"],
            does_not_raise("Platform: "),
        ),  # Valid arg: expect "Platform: " in the printed info
        (
            ["movement", "invalid"],
            pytest.raises(SystemExit),
        ),  # Invalid arg: argparse exits
        (["movement"], does_not_raise("usage: movement")),  # Empty arg
    ],
)
def test_entrypoint_command(command, expected_exception):
    """Test the entrypoint with different commands: 'info', 'invalid', ''."""
    with (
        patch("sys.argv", command),
        patch("builtins.print") as mock_print,
        expected_exception as e,
    ):
        main()  # raises SystemExit for the invalid command
        printed_message = " ".join(map(str, mock_print.call_args.args))
        # ``e`` is the expected substring yielded by ``does_not_raise``
        assert e in printed_message
36 |
37 |
# Keep a handle on the real import machinery before patching it.
original_import = builtins.__import__


def fake_import(name, globals, locals, fromlist, level):
    """Pretend that napari is not installed."""
    if name != "napari":
        return original_import(name, globals, locals, fromlist, level)
    raise ImportError("No module named 'napari'")
46 |
47 |
def test_info_without_napari_installed():
    """Test that 'movement info' reports napari as not installed."""
    with (
        patch("sys.argv", ["movement", "info"]),
        patch("builtins.print") as mock_print,
        patch("builtins.__import__", side_effect=fake_import),
    ):
        main()
        output = " ".join(str(arg) for arg in mock_print.call_args.args)
        assert "napari: not installed" in output
58 |
59 |
@pytest.mark.parametrize(
    "run_side_effect, expected_message",
    [
        (None, ""),  # No error
        (subprocess.CalledProcessError(1, "napari"), "error occurred while"),
    ],
)
def test_launch_command(run_side_effect, expected_message, capsys):
    """Test the 'launch' command.

    subprocess.run is mocked so that napari is never actually launched.
    """
    with (
        patch("sys.argv", ["movement", "launch"]),
        patch("subprocess.run", side_effect=run_side_effect) as mock_run,
    ):
        main()
        # subprocess.run should be invoked once, as:
        # <python> -m napari -w movement
        mock_run.assert_called_once()
        launch_args = mock_run.call_args[0][0]
        assert launch_args[0] == sys.executable
        assert launch_args[1:] == ["-m", "napari", "-w", "movement"]
        # Any error message should have been printed to stdout
        captured = capsys.readouterr()
        assert expected_message in captured.out
85 |
--------------------------------------------------------------------------------
/tests/test_integration/test_filtering.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from movement.filtering import (
4 | filter_by_confidence,
5 | interpolate_over_time,
6 | savgol_filter,
7 | )
8 | from movement.io import load_poses
9 | from movement.sample_data import fetch_dataset_paths
10 |
11 |
@pytest.fixture
def sample_dataset():
    """Return a single-animal sample dataset, with time unit in frames."""
    dataset_paths = fetch_dataset_paths(
        "DLC_single-mouse_EPM.predictions.h5"
    )
    return load_poses.from_dlc_file(dataset_paths["poses"])
20 |
21 |
@pytest.mark.parametrize("window", [3, 5, 6, 13])
def test_nan_propagation_through_filters(sample_dataset, window, helpers):
    """Test NaN propagation is as expected when passing a DataArray through
    filter by confidence, Savgol filter and interpolation.

    For the ``savgol_filter``, the number of NaNs is expected to increase
    at most by the filter's window length minus one (``window - 1``)
    multiplied by the number of consecutive NaNs in the input data.

    Note: the three filtering stages below are applied in sequence, each
    one mutating ``sample_dataset`` in place via ``Dataset.update``.
    """
    # Compute number of low confidence keypoints
    # (0.6 here presumably matches filter_by_confidence's default
    # threshold — confirm against the filtering module)
    n_low_confidence_kpts = (sample_dataset.confidence.data < 0.6).sum()

    # Check filter position by confidence creates correct number of NaNs
    sample_dataset.update(
        {
            "position": filter_by_confidence(
                sample_dataset.position,
                sample_dataset.confidence,
            )
        }
    )
    n_total_nans_input = helpers.count_nans(sample_dataset.position)

    # Each filtered point contributes one NaN per spatial coordinate
    assert (
        n_total_nans_input
        == n_low_confidence_kpts * sample_dataset.sizes["space"]
    )

    # Compute maximum expected increase in NaNs due to filtering
    n_consecutive_nans_input = helpers.count_consecutive_nans(
        sample_dataset.position
    )
    max_nans_increase = (window - 1) * n_consecutive_nans_input

    # Apply savgol filter and check that number of NaNs is within threshold
    sample_dataset.update(
        {
            "position": savgol_filter(
                sample_dataset.position, window, polyorder=2
            )
        }
    )

    n_total_nans_savgol = helpers.count_nans(sample_dataset.position)

    # Check that filtering does not reduce number of nans
    assert n_total_nans_savgol >= n_total_nans_input
    # Check that the increase in nans is below the expected threshold
    assert n_total_nans_savgol - n_total_nans_input <= max_nans_increase

    # Interpolate data (without max_gap) and with extrapolation
    # and check it eliminates all NaNs
    sample_dataset.update(
        {
            "position": interpolate_over_time(
                sample_dataset.position, fill_value="extrapolate"
            )
        }
    )
    assert helpers.count_nans(sample_dataset.position) == 0
81 |
--------------------------------------------------------------------------------
/tests/test_integration/test_netcdf.py:
--------------------------------------------------------------------------------
1 | """Test saving movement datasets to NetCDF files."""
2 |
3 | import pandas as pd
4 | import pytest
5 | import xarray as xr
6 |
7 | from movement.filtering import filter_by_confidence, rolling_filter
8 | from movement.kinematics import compute_forward_vector, compute_speed
9 | from movement.transforms import scale
10 |
11 |
@pytest.fixture
def processed_dataset(valid_poses_dataset):
    """Poses dataset with filtered, smoothed and scaled position variables."""
    dataset = valid_poses_dataset.copy()
    dataset["position_filtered"] = filter_by_confidence(
        dataset["position"], dataset["confidence"], threshold=0.5
    )
    dataset["position_smoothed"] = rolling_filter(
        dataset["position"], window=3, min_periods=2, statistic="median"
    )
    dataset["position_scaled"] = scale(
        dataset["position_smoothed"], factor=1 / 10, space_unit="cm"
    )
    return dataset
26 |
27 |
@pytest.fixture
def dataset_with_derived_variables(valid_poses_dataset):
    """Poses dataset augmented with speed and forward-vector variables."""
    dataset = valid_poses_dataset.copy()
    dataset["speed"] = compute_speed(dataset["position"])
    dataset["forward_vector"] = compute_forward_vector(
        dataset["position"], "left", "right"
    )
    return dataset
37 |
38 |
@pytest.fixture
def dataset_with_datetime_index(valid_poses_dataset):
    """Create a dataset with a pd.DatetimeIndex as the time coordinate."""
    ds = valid_poses_dataset.copy()
    timestamps = pd.date_range(
        start=pd.Timestamp.now(),
        periods=ds.sizes["time"],
        freq=pd.Timedelta(seconds=1),
    )
    # BUG FIX: assign_coords returns a new object rather than mutating
    # in place, so its result must be captured — previously the returned
    # dataset still had the original integer time coordinate.
    ds = ds.assign_coords(time=timestamps)
    return ds
50 |
51 |
@pytest.mark.parametrize(
    "dataset",
    [
        "valid_poses_dataset",
        "valid_poses_dataset_with_nan",
        "valid_bboxes_dataset",  # time unit is in frames
        "valid_bboxes_dataset_in_seconds",
        "valid_bboxes_dataset_with_nan",
        "processed_dataset",
        "dataset_with_derived_variables",
        "dataset_with_datetime_index",
    ],
)
@pytest.mark.parametrize("engine", ["netcdf4", "scipy", "h5netcdf"])
def test_ds_save_and_load_netcdf(dataset, engine, tmp_path, request):
    """Round-trip a movement dataset through a NetCDF file.

    The loaded Dataset must match the original, across all 3 NetCDF
    engines supported by xarray.
    """
    original = request.getfixturevalue(dataset)
    out_file = tmp_path / "test_dataset.nc"
    original.to_netcdf(out_file, engine=engine)
    round_trip = xr.load_dataset(out_file)
    xr.testing.assert_allclose(round_trip, original)
    assert round_trip.attrs == original.attrs
78 |
79 |
def test_da_save_and_load_netcdf(valid_poses_dataset, tmp_path):
    """Round-trip a DataArray through a NetCDF file and compare."""
    original = valid_poses_dataset["position"]
    out_file = tmp_path / "test_dataarray.nc"
    original.to_netcdf(out_file)
    round_trip = xr.load_dataarray(out_file)
    xr.testing.assert_allclose(round_trip, original)
    assert round_trip.attrs == original.attrs
88 |
--------------------------------------------------------------------------------
/docs/convert_admonitions.py:
--------------------------------------------------------------------------------
1 | """Convert admonitions GitHub Flavored Markdown (GFM) to MyST Markdown."""
2 |
3 | import re
4 | from pathlib import Path
5 |
# Admonition types supported by both GFM and MyST
# (compared case-insensitively)
VALID_TYPES = {"note", "tip", "important", "warning", "caution"}
8 |
9 |
def convert_gfm_admonitions_to_myst_md(
    input_path: Path, output_path: Path, exclude: set[str] | None = None
):
    """Convert admonitions from GitHub Flavored Markdown to MyST.

    Extracts GitHub Flavored Markdown admonitions from the input file and
    writes them to the output file as MyST Markdown admonitions,
    preserving the original admonition type and order.

    Parameters
    ----------
    input_path : Path
        Path to the input file containing GitHub Flavored Markdown.
    output_path : Path
        Path to the output file to write the MyST Markdown admonitions.
    exclude : set[str], optional
        Set of admonition types to exclude from conversion (case-insensitive).
        Default is None.

    """
    excluded_types = {name.lower() for name in (exclude or set())}

    source_text = input_path.read_text(encoding="utf-8")

    # Match "> [!TYPE]" followed by any number of "> " continuation lines
    pattern = r"(^> \[!(\w+)\]\n(?:^> .*\n?)*)"

    # Convert each match, keeping only valid, non-excluded admonitions
    admonitions = [
        converted
        for m in re.finditer(pattern, source_text, re.MULTILINE)
        if (converted := _process_match(m, excluded_types))
    ]

    if admonitions:
        # Write all admonitions to a single file
        output_path.write_text("\n".join(admonitions) + "\n", encoding="utf-8")
        print(f"Admonitions written to {output_path}")
    else:
        print("No GitHub Markdown admonitions found.")
52 |
53 |
def _process_match(match: re.Match, excluded_types: set[str]) -> str | None:
    """Process a regex match and return the converted admonition if valid."""
    # Group 2 of the pattern captures the admonition type
    adm_type = match.group(2).lower()
    if adm_type in excluded_types or adm_type not in VALID_TYPES:
        return None

    # Strip the "> " quote prefix from every content line,
    # skipping the "> [!TYPE]" header line
    body_lines = [
        line[2:].strip()
        for line in match.group(0).split("\n")
        if line.startswith("> ") and not line.startswith("> [!")
    ]
    content = "\n".join(body_lines).strip()

    # Emit the MyST ":::{type}" fenced admonition
    return f":::{{{adm_type}}}\n{content}\n:::\n"
71 |
72 |
73 | if __name__ == "__main__":
74 | # Path to the README.md file
75 | # (1 level above the current script)
76 | docs_dir = Path(__file__).resolve().parent
77 | readme_path = docs_dir.parent / "README.md"
78 |
79 | # Path to the output file
80 | # (inside the docs/source/snippets directory)
81 | snippets_dir = docs_dir / "source" / "snippets"
82 | target_path = snippets_dir / "admonitions.md"
83 |
84 | # Call the function
85 | convert_gfm_admonitions_to_myst_md(
86 | readme_path, target_path, exclude={"note"}
87 | )
88 |
--------------------------------------------------------------------------------
/docs/source/community/mission-scope.md:
--------------------------------------------------------------------------------
1 | (target-mission)=
2 | # Mission & Scope
3 |
4 | ## Mission
5 |
6 | `movement` aims to **facilitate the study of animal behaviour**
7 | by providing a suite of **Python tools to analyse body movements**
8 | across space and time.
9 |
10 | ## Scope
11 |
12 | At its core, `movement` handles the position and/or orientation
13 | of one or more individuals over time.
14 |
15 | There are a few common ways of representing animal motion from video
16 | recordings: an animal's position could be reduced to that of a single keypoint
17 | tracked on its body (usually the centroid), or instead a set of keypoints
18 | (often referred to as the pose) to better capture its orientation as well as
the positions of limbs and appendages. The animal's position could also be
20 | tracked as a bounding box drawn around each individual, or as a segmentation
21 | mask that indicates the pixels belonging to each individual. Depending on the
22 | research question or the application, one or other format may be more
23 | convenient. The spatial coordinates of these representations may be defined
24 | in 2D (x, y) or 3D (x, y, z).
25 |
26 | Animal tracking frameworks such as [DeepLabCut](dlc:) or [SLEAP](sleap:) can
27 | generate keypoint representations from video data by detecting body parts and
28 | tracking them across frames. In the context of `movement`, we refer to these
29 | trajectories as _tracks_: we use _pose tracks_ to refer to the trajectories
30 | of a set of keypoints, _bounding box tracks_ to refer to the trajectories
31 | of bounding box centroids, or _motion tracks_ in the more general case.
32 |
33 | Our vision is to present a **consistent interface for representing motion
34 | tracks** along with **modular and accessible analysis tools**. We aim to
35 | support data from a range of animal tracking frameworks, in **2D or 3D**,
36 | tracking **single or multiple individuals**. As such, `movement` can be
37 | considered as operating downstream of tools like DeepLabCut and SLEAP.
38 | The focus is on providing functionalities for data cleaning, visualisation,
39 | and motion quantification (see the [Roadmap](target-roadmaps) for details).
40 |
41 | In the study of animal behaviour, motion tracks are often used to extract and
42 | label discrete actions, sometimes referred to as behavioural syllables or
43 | states. While `movement` is not designed for such tasks, it can be used to
44 | generate features that are relevant for action recognition.
45 |
46 | ## Design principles
47 |
48 | `movement` is committed to:
49 | - __Ease of installation and use__. We aim for a cross-platform installation and are mindful of dependencies that may compromise this goal.
50 | - __User accessibility__, catering to varying coding expertise by offering both a GUI and a Python API.
51 | - __Comprehensive documentation__, enriched with tutorials and examples.
52 | - __Robustness and maintainability__ through high test coverage.
53 | - __Scientific accuracy and reproducibility__ by validating inputs and outputs.
54 | - __Performance and responsiveness__, especially for large datasets, using parallel processing where appropriate.
55 | - __Modularity and flexibility__. We envision `movement` as a platform for new tools and analyses, offering users the building blocks to craft their own workflows.
56 |
57 | Some of these principles are shared with, and were inspired by, napari's [Mission and Values](napari:community/mission_and_values) statement.
58 |
--------------------------------------------------------------------------------
/movement/validators/arrays.py:
--------------------------------------------------------------------------------
1 | """Validators for data arrays."""
2 |
3 | from collections.abc import Hashable
4 |
5 | import xarray as xr
6 |
7 | from movement.utils.logging import logger
8 |
9 |
def validate_dims_coords(
    data: xr.DataArray,
    required_dim_coords: dict[str, list[str] | list[Hashable]],
    exact_coords: bool = False,
) -> None:
    """Validate dimensions and coordinates in a data array.

    This function raises a ValueError if the specified dimensions and
    coordinates are not present in the input data array. By default,
    each dimension must contain *at least* the specified coordinates.
    Pass ``exact_coords=True`` to require that each dimension contains
    *exactly* the specified coordinates (and no others).

    Parameters
    ----------
    data : xarray.DataArray
        The input data array to validate.
    required_dim_coords : dict of {str: list of str | list of Hashable}
        A dictionary mapping required dimensions to a list of required
        coordinate values along each dimension.
    exact_coords : bool, optional
        If False (default), checks only that the listed coordinates
        exist in each dimension. If True, checks that each dimension
        has exactly the specified coordinates and no more.
        The exactness check is completely skipped for dimensions with
        no required coordinates.

    Examples
    --------
    Validate that a data array contains the dimension 'time'. No specific
    coordinates are required.

    >>> validate_dims_coords(data, {"time": []})

    Validate that a data array contains the dimensions 'time' and 'space',
    and that the 'space' dimension contains the coordinates 'x' and 'y'.

    >>> validate_dims_coords(data, {"time": [], "space": ["x", "y"]})

    Enforce that 'space' has *only* 'x' and 'y', and no other coordinates:

    >>> validate_dims_coords(data, {"space": ["x", "y"]}, exact_coords=True)

    Raises
    ------
    ValueError
        If the input data does not contain the required dimension(s)
        and/or the required coordinate(s).

    """
    # Accumulate all problems, so the error reports everything at once
    problems: list[str] = []

    # 1. Check that all required dimensions are present
    missing_dims = [dim for dim in required_dim_coords if dim not in data.dims]
    if missing_dims:
        problems.append(
            f"Input data must contain {missing_dims} as dimensions.\n"
        )

    # 2. For each dimension, check the presence of required coords
    for dim, coords in required_dim_coords.items():
        dim_coords_in_data = data.coords.get(dim, [])
        missing_coords = [c for c in coords if c not in dim_coords_in_data]
        if missing_coords:
            problems.append(
                f"Input data must contain {missing_coords} "
                f"in the '{dim}' coordinates.\n"
            )

        # 3. If exact_coords is True, verify no extra coords exist
        if exact_coords and coords:
            extra_coords = [c for c in dim_coords_in_data if c not in coords]
            if extra_coords:
                problems.append(
                    f"Dimension '{dim}' must only contain "
                    f"{coords} as coordinates, "
                    f"but it also has {list(extra_coords)}.\n"
                )

    if problems:
        raise logger.error(ValueError("".join(problems)))
90 |
--------------------------------------------------------------------------------
/docs/source/community/roadmaps.md:
--------------------------------------------------------------------------------
1 | (target-roadmaps)=
2 | # Roadmaps
3 |
4 | This page outlines **current development priorities** and aims to **guide core developers** and to **encourage community contributions**. It is a living document and will be updated as the project evolves.
5 |
6 | The roadmaps are **not meant to limit** `movement` features, as we are open to suggestions and contributions. Join our [Zulip chat](movement-zulip:) to share your ideas. We will take community feedback into account when planning future releases.
7 |
8 | ## Long-term vision
9 | The following features are being considered for the first stable version `v1.0`.
10 |
11 | - __Import/Export motion tracks from/to diverse formats__. We aim to interoperate with leading tools for animal tracking and behaviour classification, and to enable conversions between their formats.
12 | - __Standardise the representation of motion tracks__. We represent tracks as [xarray data structures](xarray:user-guide/data-structures.html) to allow for labelled dimensions and performant processing.
13 | - __Interactively visualise motion tracks__. We are experimenting with [napari](napari:) as a visualisation and GUI framework.
14 | - __Clean motion tracks__, including, but not limited to, handling of missing values, filtering, smoothing, and resampling.
15 | - __Derive kinematic variables__ like velocity, acceleration, joint angles, etc., focusing on those prevalent in neuroscience and ethology.
16 | - __Integrate spatial data about the animal's environment__ for combined analysis with motion tracks. This covers regions of interest (ROIs) such as the arena in which the animal is moving and the location of objects within it.
17 | - __Define and transform coordinate systems__. Coordinates can be relative to the camera, environment, or the animal itself (egocentric).
18 | - __Provide common metrics for specialised applications__. These applications could include gait analysis, pupillometry, spatial
19 | navigation, social interactions, etc.
20 | - __Integrate with neurophysiological data analysis tools__. We eventually aim to facilitate combined analysis of motion and neural data.
21 |
22 | ## Focus areas for 2025
23 |
24 | - Annotate space by defining regions of interest programmatically and via our [GUI](target-gui).
25 | - Annotate time by defining events of interest programmatically and via our [GUI](target-gui).
26 | - Enable workflows for aligning motion tracks with concurrently recorded neurophysiological signals.
27 | - Enrich the interactive visualisation of motion tracks in `napari`, providing more customisation options.
28 | - Enable the saving of filtered tracks and derived kinematic variables to disk.
29 | - Implement metrics useful for analysing spatial navigation, social interactions, and collective behaviour.
30 |
31 | ## Version 0.1
32 | We released version `v0.1` of `movement` in March 2025, providing a basic set of features to demonstrate the project's potential and to gather feedback from users. Our minimum requirements for this milestone were:
33 |
34 | - [x] Ability to import pose tracks from [DeepLabCut](dlc:), [SLEAP](sleap:) and [LightningPose](lp:) into a common `xarray.Dataset` structure.
35 | - [x] At least one function for cleaning the pose tracks.
36 | - [x] Ability to compute velocity and acceleration from pose tracks.
37 | - [x] Public website with [documentation](target-movement).
38 | - [x] Package released on [PyPI](https://pypi.org/project/movement/).
39 | - [x] Package released on [conda-forge](https://anaconda.org/conda-forge/movement).
40 | - [x] Ability to visualise pose tracks using [napari](napari:). We aim to represent pose tracks as napari [layers](napari:howtos/layers/index.html), overlaid on video frames.
41 |
--------------------------------------------------------------------------------
/movement/cli_entrypoint.py:
--------------------------------------------------------------------------------
1 | """CLI entrypoint for the ``movement`` package."""
2 |
3 | import argparse
4 | import platform
5 | import subprocess
6 | import sys
7 |
8 | import numpy as np
9 | import pandas as pd
10 | import xarray as xr
11 |
12 | import movement
13 |
14 | ASCII_ART = r"""
15 | _ __ ___ _____ _____ _ __ ___ ___ _ __ | |_
16 | | '_ ` _ \ / _ \ \ / / _ \ '_ ` _ \ / _ \ '_ \| __|
17 | | | | | | | (_) \ V / __/ | | | | | __/ | | | |_
18 | |_| |_| |_|\___/ \_/ \___|_| |_| |_|\___|_| |_|\__|
19 |
20 | .******
21 | ,******/*******
22 | ,*******/******(### **
23 | ,/*************(###((**/****/**
24 | ,*/******/*****(####(***/***/**/***(#.
25 | .*******/****((###(****/**/***/**(#####//////
26 | ...*.****(####(****/**/****/*(#####//////////////
27 | .,.*...###(*****/*/****/*(#####///////////////###
28 | ...*.,.#(#,,,/******/(##(##///////////////#######
29 | ..**...###,,,*,,,(#####///////////////###########
30 | ...*..*(##,,,*,,,###.////////////(###############
31 | .../...###/,,*,,,###...,.////(###################
32 | ...*...(##,,,*/,,###.,.,...####################(#
33 | ...###,*,*,,,###...,...################(#####
34 | ,,,*,*,##(..*,...############(######(
35 | ,*,,,###...,..*########(######(
36 | ,#/ .../...####(######(
37 | ,..,...(######(
38 | ,..###(
39 | """
40 |
41 |
42 | def main() -> None:
43 | """Entrypoint for the CLI."""
44 | parser = argparse.ArgumentParser(prog="movement")
45 | subparsers = parser.add_subparsers(dest="command", title="commands")
46 |
47 | # Add 'info' command
48 | info_parser = subparsers.add_parser(
49 | "info", help="output diagnostic information about the environment"
50 | )
51 | info_parser.set_defaults(func=info)
52 |
53 | # Add 'launch' command
54 | launch_parser = subparsers.add_parser(
55 | "launch", help="launch the movement plugin in napari"
56 | )
57 | launch_parser.set_defaults(func=launch)
58 |
59 | args = parser.parse_args()
60 | if args.command is None:
61 | help_message = parser.format_help()
62 | print(help_message)
63 | else:
64 | args.func()
65 |
66 |
67 | def info() -> None:
68 | """Output diagnostic information."""
69 | text = (
70 | f"{ASCII_ART}\n"
71 | f" movement: {movement.__version__}\n"
72 | f" Python: {platform.python_version()}\n"
73 | f" NumPy: {np.__version__}\n"
74 | f" xarray: {xr.__version__}\n"
75 | f" pandas: {pd.__version__}\n"
76 | )
77 |
78 | try:
79 | import napari
80 |
81 | text += f" napari: {napari.__version__}\n"
82 | except ImportError:
83 | text += " napari: not installed\n"
84 |
85 | text += f" Platform: {platform.platform()}\n"
86 | print(text)
87 |
88 |
89 | def launch() -> None:
90 | """Launch the movement plugin in napari."""
91 | try:
92 | # Use sys.executable to ensure the correct Python interpreter is used
93 | subprocess.run(
94 | [sys.executable, "-m", "napari", "-w", "movement"], check=True
95 | )
96 | except subprocess.CalledProcessError as e:
97 | # if subprocess.run() fails with non-zero exit code
98 | print(
99 | "\nAn error occurred while launching the movement plugin "
100 | f"for napari:\n {e}"
101 | )
102 |
103 |
104 | if __name__ == "__main__": # pragma: no cover
105 | main()
106 |
--------------------------------------------------------------------------------
/tests/fixtures/helpers.py:
--------------------------------------------------------------------------------
1 | """Helpers fixture for ``movement`` test modules."""
2 |
3 | import pytest
4 | import xarray as xr
5 |
6 |
7 | class Helpers:
8 | """General helper methods for ``movement`` test modules."""
9 |
10 | @staticmethod
11 | def assert_valid_dataset(dataset, expected_values):
12 | """Assert the dataset is a valid ``movement`` Dataset.
13 |
14 | The validation includes:
15 | - checking the dataset is an xarray Dataset
16 | - checking the expected variables are present and are of the right
17 | shape and type
18 | - checking the confidence array shape matches the position array
19 | - checking the dimensions and coordinates against the expected values
20 | - checking the coordinates' names and size
21 | - checking the metadata attributes
22 |
23 | Parameters
24 | ----------
25 | dataset : xr.Dataset
26 | The dataset to validate.
27 | expected_values : dict
28 | A dictionary containing the expected values for the dataset.
29 | It must contain the following keys:
30 |
31 | - dim_names: list of expected dimension names as defined in
32 | movement.validators.datasets
33 | - vars_dims: dictionary of data variable names and the
34 | corresponding dimension sizes
35 |
36 | Optional keys include:
37 |
38 | - file_path: Path to the source file
39 | - fps: int, frames per second
40 | - source_software: str, name of the software used to generate
41 | the dataset
42 |
43 | """
44 | # Check dataset is an xarray Dataset
45 | assert isinstance(dataset, xr.Dataset)
46 |
47 | # Expected variables are present and of right shape/type
48 | for var, ndim in expected_values.get("vars_dims").items():
49 | data_var = dataset.get(var)
50 | assert isinstance(data_var, xr.DataArray)
51 | assert data_var.ndim == ndim
52 | position_shape = dataset.position.shape
53 |
54 | # Confidence has the same shape as position, except for the space dim
55 | assert (
56 | dataset.confidence.shape == position_shape[:1] + position_shape[2:]
57 | )
58 |
59 | # Check the dims and coords
60 | expected_dim_names = expected_values.get("dim_names")
61 | expected_dim_length_dict = dict(
62 | zip(expected_dim_names, position_shape, strict=True)
63 | )
64 | assert expected_dim_length_dict == dataset.sizes
65 |
66 | # Check the coords
67 | for dim in expected_dim_names[1:]:
68 | assert all(isinstance(s, str) for s in dataset.coords[dim].values)
69 | assert all(coord in dataset.coords["space"] for coord in ["x", "y"])
70 |
71 | # Check the metadata attributes
72 | expected_file_path = expected_values.get("file_path")
73 | source_file = getattr(dataset, "source_file", None)
74 | assert source_file == (
75 | expected_file_path.as_posix()
76 | if expected_file_path is not None
77 | else None
78 | )
79 | assert dataset.source_software == expected_values.get(
80 | "source_software"
81 | )
82 | fps = getattr(dataset, "fps", None)
83 | assert fps == expected_values.get("fps")
84 |
85 | @staticmethod
86 | def count_nans(da):
87 | """Count number of NaNs in a DataArray."""
88 | return da.isnull().sum().item()
89 |
90 | @staticmethod
91 | def count_consecutive_nans(da):
92 | """Count occurrences of consecutive NaNs in a DataArray."""
93 | return (da.isnull().astype(int).diff("time") != 0).sum().item()
94 |
95 |
96 | @pytest.fixture
97 | def helpers():
98 | """Return an instance of the ``Helpers`` class."""
99 | return Helpers
100 |
--------------------------------------------------------------------------------
/movement/plots/trajectory.py:
--------------------------------------------------------------------------------
1 | """Wrappers to plot movement data."""
2 |
3 | import xarray as xr
4 | from matplotlib import pyplot as plt
5 |
6 | DEFAULT_PLOTTING_ARGS = {
7 | "s": 15,
8 | "marker": "o",
9 | "alpha": 1.0,
10 | }
11 |
12 |
13 | def plot_centroid_trajectory(
14 | da: xr.DataArray,
15 | individual: str | None = None,
16 | keypoints: str | list[str] | None = None,
17 | ax: plt.Axes | None = None,
18 | **kwargs,
19 | ) -> tuple[plt.Figure, plt.Axes]:
20 | """Plot centroid trajectory.
21 |
22 | This function plots the trajectory of the centroid
23 | of multiple keypoints for a given individual. By default, the trajectory
24 | is colored by time (using the default colormap). Pass a different colormap
25 | through ``cmap`` if desired. If a single keypoint is passed, the trajectory
26 | will be the same as the trajectory of the keypoint.
27 |
28 | Parameters
29 | ----------
30 | da : xr.DataArray
31 | A data array containing position information, with `time` and `space`
32 | as required dimensions. Optionally, it may have `individuals` and/or
33 | `keypoints` dimensions.
34 | individual : str, optional
35 | The name of the individual to be plotted. By default, the first
36 | individual is plotted.
37 | keypoints : str, list[str], optional
38 | The name of the keypoint to be plotted, or a list of keypoint names
39 | (their centroid will be plotted). By default, the centroid of all
40 | keypoints is plotted.
41 | ax : matplotlib.axes.Axes or None, optional
42 | Axes object on which to draw the trajectory. If None, a new
43 | figure and axes are created.
44 | **kwargs : dict
45 | Additional keyword arguments passed to
46 | :meth:`matplotlib.axes.Axes.scatter`.
47 |
48 | Returns
49 | -------
50 | (figure, axes) : tuple of (matplotlib.pyplot.Figure, matplotlib.axes.Axes)
51 | The figure and axes containing the trajectory plot.
52 |
53 | """
54 | if isinstance(individual, list):
55 | raise ValueError("Only one individual can be selected.")
56 |
57 | selection = {}
58 |
59 | if "individuals" in da.dims:
60 | if individual is None:
61 | selection["individuals"] = da.individuals.values[0]
62 | else:
63 | selection["individuals"] = individual
64 |
65 | if "keypoints" in da.dims:
66 | if keypoints is None:
67 | selection["keypoints"] = da.keypoints.values
68 | else:
69 | selection["keypoints"] = keypoints
70 |
71 | plot_point = da.sel(**selection)
72 |
73 | # If there are multiple selected keypoints, calculate the centroid
74 | plot_point = (
75 | plot_point.mean(dim="keypoints", skipna=True)
76 | if "keypoints" in plot_point.dims and plot_point.sizes["keypoints"] > 1
77 | else plot_point
78 | )
79 |
80 | plot_point = plot_point.squeeze() # Only space and time should remain
81 |
82 | fig, ax = plt.subplots(figsize=(6, 6)) if ax is None else (ax.figure, ax)
83 |
84 | # Merge default plotting args with user-provided kwargs
85 | for key, value in DEFAULT_PLOTTING_ARGS.items():
86 | kwargs.setdefault(key, value)
87 |
88 | colorbar = False
89 | if "c" not in kwargs:
90 | kwargs["c"] = plot_point.time
91 | colorbar = True
92 |
93 | # Plot the scatter, colouring by time or user-provided colour
94 | sc = ax.scatter(
95 | plot_point.sel(space="x"),
96 | plot_point.sel(space="y"),
97 | **kwargs,
98 | )
99 |
100 | ax.set_xlabel("x")
101 | ax.set_ylabel("y")
102 | ax.set_title("Trajectory")
103 |
104 | # Add 'colorbar' for time dimension if no colour was provided by user
105 | time_label = "Time"
106 | fig.colorbar(sc, ax=ax, label=time_label).solids.set(
107 | alpha=1.0
108 | ) if colorbar else None
109 |
110 | return fig, ax
111 |
--------------------------------------------------------------------------------
/docs/source/community/resources.md:
--------------------------------------------------------------------------------
1 | (target-resources)=
2 | # Resources
3 |
4 | Feel free to use and share the following resources when
5 | communicating about `movement`, whether to promote it, teach it,
6 | or acknowledge it in your own work.
7 |
8 | ## Brand & logo
9 |
10 | Use the project name `movement` in lowercase, even at the beginning of a sentence.
11 |
12 | The official graphic assets for `movement` can be found below and on [Zenodo](https://doi.org/10.5281/zenodo.17902182),
13 | shared under the [CC BY 4.0 license](https://creativecommons.org/licenses/by/4.0/).
14 |
15 | ::::{grid} 1 1 3 3
16 | :gutter: 1 1 2 2
17 |
18 | :::{grid-item-card}
19 | :columns: 3
20 | :img-top: ../_static/movement_logo.svg
21 | Primary logo
22 | +++
23 | [SVG](../_static/movement_logo.svg)
24 | [PNG](../_static/movement_logo.png)
25 | :::
26 | :::{grid-item-card}
27 | :columns: 3
28 | :img-top: ../_static/movement_favicon.svg
29 | Compact logo
30 | +++
31 | [SVG](../_static/movement_favicon.svg)
32 | [PNG](../_static/movement_favicon.png)
33 | :::
34 |
35 | :::{grid-item-card}
36 | :columns: 6
37 | :img-top: ../_static/movement_overview.png
38 | Overview figure
39 | +++
40 | [PNG](../_static/movement_overview.png)
41 | :::
42 |
43 | ::::
44 |
45 | ## Typeface & colours
46 |
47 | The typeface appearing in `movement` graphics is [Barlow](https://fonts.google.com/specimen/Barlow), in various weights and styles.
48 |
49 | The colours are taken from the [ColorBrewer](https://colorbrewer2.org) **Set2** palette:
50 |
51 |
52 |
53 |
56 | Green Sheen
57 | RGB: 102 194 165
58 | HEX: #66c2a5
59 |
60 |
61 |
64 | Ceil
65 | RGB: 141 160 203
66 | HEX: #8da0cb
67 |
68 |
69 |
72 | Atomic Tangerine
73 | RGB: 252 141 98
74 | HEX: #fc8d62
75 |
76 |
77 |
78 |
79 | ## Presentations & media
80 |
81 | A selection of talks, posters, and blogposts about `movement`.
82 |
83 | | Type | Venue | Date | Link |
84 | |-------|-------|------|------|
85 | | Talk | [CBIAS 2025](https://www.crick.ac.uk/whats-on/crick-bioimage-analysis-symposium-2025) | Nov 2025 | [Slides](https://neuroinformatics.dev/slides-movement-cbias2025/) |
86 | | Blogpost | [UCL-ARC Showcase](https://www.ucl.ac.uk/advanced-research-computing/arc-showcase) | May 2025 | [URL](https://www.ucl.ac.uk/advanced-research-computing/case-studies/2025/may/movement-python-package-simplifies-analysis-animals-motion) |
87 | | Poster | [ASAB Spring 2025](https://asabspring2025.github.io) | Apr 2025 | [PDF on Zenodo](https://doi.org/10.5281/zenodo.17924159) |
88 | | Talk | [ABIDE](https://abide.ics.ulisboa.pt/en/) Seminar | Feb 2025 | [Video on YouTube](https://www.youtube.com/watch?v=GXBQsqqZZTg) |
89 |
90 |
91 | ## Teaching materials
92 |
93 | `movement` is being taught as part of the
94 | [Animals in Motion](https://neuroinformatics.dev/open-software-summer-school/2026/animals-in-motion.html)
95 | workshop at the [Neuroinformatics Unit Open Software Summer School](https://neuroinformatics.dev/open-software-summer-school).
96 |
97 | :::{dropdown} Online handbook
98 | :open:
99 | :icon: book
100 | :color: success
101 |
102 | Check out the open workshop handbook at
103 | [animals-in-motion.neuroinformatics.dev](https://animals-in-motion.neuroinformatics.dev).
104 | :::
105 |
--------------------------------------------------------------------------------
/movement/roi/conditions.py:
--------------------------------------------------------------------------------
1 | """Functions for computing condition arrays involving RoIs."""
2 |
3 | from collections import defaultdict
4 | from collections.abc import Sequence
5 |
6 | import numpy as np
7 | import xarray as xr
8 |
9 | from movement.roi.base import BaseRegionOfInterest
10 |
11 |
12 | def compute_region_occupancy(
13 | data,
14 | regions: Sequence[BaseRegionOfInterest],
15 | ) -> xr.DataArray:
16 | """Return a condition array indicating if points were inside regions.
17 |
18 | The function returns a boolean DataArray where each element indicates
19 | whether a point in the input ``data`` lies within the corresponding RoIs
20 | in ``regions``. The original dimensions of ``data`` are preserved, except
21 | for the ``space`` dimension which is replaced by the ``region``
22 | dimension. The ``region`` dimension has a number of elements equal to
23 | the number of RoIs in the ``regions`` argument and it's coordinate names
24 | correspond to the names of the given RoIs.
25 |
26 | Parameters
27 | ----------
28 | data : xarray.DataArray
29 | Spatial data to check for inclusion within the ``regions``. Must be
30 | compatible with the ``position`` argument to :func:`contains_point()\
31 | `.
32 | regions : Sequence[BaseRegionOfInterest]
33 | Regions of Interest that the points in ``data`` will be checked
34 | against, to see if they lie inside.
35 |
36 | Returns
37 | -------
38 | xarray.DataArray
39 | A boolean ``DataArray`` providing occupancy information.
40 |
41 | Examples
42 | --------
43 | >>> import numpy as np
44 | >>> import xarray as xr
45 | >>> from movement.roi import PolygonOfInterest, compute_region_occupancy
46 | >>> square = PolygonOfInterest(
47 | ... [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)], name="square"
48 | ... )
49 | >>> triangle = PolygonOfInterest(
50 | ... [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)], name="triangle"
51 | ... )
52 | >>> data = xr.DataArray(
53 | ... data=np.array([[0.25, 0.25], [0.75, 0.75]]),
54 | ... dims=["time", "space"],
55 | ... coords={"space": ["x", "y"]},
56 | ... )
57 | >>> occupancies = compute_region_occupancy(data, [square, triangle])
58 | >>> occupancies.sel(region="square").values
59 | np.array([True, True])
60 | >>> occupancies.sel(region="triangle").values
61 | np.array([True, False])
62 |
63 | Notes
64 | -----
65 | When RoIs in ``regions`` have identical names, a suffix
66 | will be appended to their name in the form of "_X", where "X" is a number
67 | starting from 0. These numbers are zero-padded depending on the maximum
68 | number of regions with identical names (e.g. if there are 100 RoIs with the
69 | same name, "00" will be appended to the first of them)
70 |
71 | Regions with unique names will retain their original name as their
72 | corresponding coordinate name.
73 |
74 | """
75 | number_of_times_name_appears: defaultdict[str, int] = defaultdict(int)
76 | for r in regions:
77 | number_of_times_name_appears[r.name] += 1
78 |
79 | duplicate_names_max_chars = {
80 | key: int(np.ceil(np.log10(value)).item())
81 | for key, value in number_of_times_name_appears.items()
82 | if value > 1
83 | }
84 | duplicate_names_used: defaultdict[str, int] = defaultdict(int)
85 |
86 | occupancies = {}
87 | for r in regions:
88 | name = r.name
89 | if name in duplicate_names_max_chars:
90 | name_suffix = str(duplicate_names_used[name]).zfill(
91 | duplicate_names_max_chars[name]
92 | )
93 | duplicate_names_used[name] += 1
94 | name = f"{name}_{name_suffix}"
95 | occupancies[name] = r.contains_point(data)
96 |
97 | return xr.concat(occupancies.values(), dim="region").assign_coords(
98 | region=list(occupancies.keys())
99 | )
100 |
--------------------------------------------------------------------------------
/tests/test_integration/test_kinematics_vector_transform.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import numpy as np
4 | import pytest
5 | import xarray as xr
6 |
7 | import movement.kinematics as kin
8 | from movement.utils import vector
9 |
10 | # Displacement vectors in polar coordinates
11 | # for individual 0, with 10 time points and 2 space dimensions
12 | # moving along x = y in the x positive, y positive direction
13 |
14 | # forward displacement (rho = √2, phi = π/4)
15 | forward_displacement_polar = np.vstack(
16 | [
17 | np.tile([math.sqrt(2), math.pi / 4], (9, 1)),
18 | np.zeros((1, 2)),
19 | # at time t=10, the forward displacement Cartesian vector is (x=0,y=0)
20 | ]
21 | )
22 |
23 |
24 | @pytest.mark.parametrize(
25 | "valid_dataset", ["valid_poses_dataset", "valid_bboxes_dataset"]
26 | )
27 | @pytest.mark.parametrize(
28 | "kinematic_variable, expected_kinematics_polar",
29 | [
30 | (
31 | "forward_displacement",
32 | [
33 | forward_displacement_polar,
34 | # Individual 0, rho = √2, phi = 45deg = π/4
35 | forward_displacement_polar * np.array([[1, -1]]),
36 | # Individual 1, rho = √2, phi = -45deg = -π/4
37 | ],
38 | ),
39 | (
40 | "backward_displacement",
41 | [
42 | np.roll(
43 | forward_displacement_polar * np.array([[1, -3]]),
44 | shift=1,
45 | axis=0,
46 | ),
47 | # Individual 0, rho = √2, phi = -135deg = -3π/4
48 | np.roll(
49 | forward_displacement_polar * np.array([[1, 3]]),
50 | shift=1,
51 | axis=0,
52 | ),
53 | # Individual 1, rho = √2, phi = 135deg = 3π/4
54 | ],
55 | ),
56 | (
57 | "velocity",
58 | [
59 | np.tile(
60 | [math.sqrt(2), math.pi / 4], (10, 1)
61 | ), # Individual O, rho=√2, phi=45deg=π/4
62 | np.tile(
63 | [math.sqrt(2), -math.pi / 4], (10, 1)
64 | ), # Individual 1, rho=√2, phi=-45deg=-π/4
65 | ],
66 | ),
67 | (
68 | "acceleration",
69 | [
70 | np.zeros((10, 2)), # Individual 0
71 | np.zeros((10, 2)), # Individual 1
72 | ],
73 | ),
74 | ],
75 | )
76 | def test_cart2pol_transform_on_kinematics(
77 | valid_dataset, kinematic_variable, expected_kinematics_polar, request
78 | ):
79 | """Test transformation between Cartesian and polar coordinates
80 | with various kinematic properties.
81 | """
82 | ds = request.getfixturevalue(valid_dataset)
83 | kinematic_array_cart = getattr(kin, f"compute_{kinematic_variable}")(
84 | ds.position
85 | )
86 | assert kinematic_array_cart.name == kinematic_variable
87 | kinematic_array_pol = vector.cart2pol(kinematic_array_cart)
88 |
89 | # Build expected data array
90 | expected_array_pol = xr.DataArray(
91 | np.stack(expected_kinematics_polar, axis=-1),
92 | # Stack along the "individuals" axis
93 | dims=["time", "space", "individuals"],
94 | )
95 | if "keypoints" in ds.position.coords:
96 | expected_array_pol = expected_array_pol.expand_dims(
97 | {"keypoints": ds.position.coords["keypoints"].size}
98 | )
99 | expected_array_pol = expected_array_pol.transpose(
100 | "time", "space", "keypoints", "individuals"
101 | )
102 |
103 | # Compare the values of the kinematic_array against the expected_array
104 | np.testing.assert_allclose(
105 | kinematic_array_pol.values, expected_array_pol.values
106 | )
107 |
108 | # Check we can recover the original Cartesian array
109 | kinematic_array_cart_recover = vector.pol2cart(kinematic_array_pol)
110 | xr.testing.assert_allclose(
111 | kinematic_array_cart, kinematic_array_cart_recover
112 | )
113 |
--------------------------------------------------------------------------------
/tests/test_unit/test_roi/test_instantiate.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Any
3 |
4 | import numpy as np
5 | import pytest
6 | import shapely
7 |
8 | from movement.roi.base import BaseRegionOfInterest
9 |
10 |
11 | @pytest.mark.parametrize(
12 | ["input_pts", "kwargs_for_creation", "expected_results"],
13 | [
14 | pytest.param(
15 | "unit_square_pts",
16 | {"dimensions": 2, "closed": False},
17 | {"is_closed": True, "dimensions": 2, "name": "Un-named region"},
18 | id="Polygon, closed is ignored",
19 | ),
20 | pytest.param(
21 | "unit_square_pts",
22 | {"dimensions": 1, "closed": False},
23 | {"is_closed": False, "dimensions": 1},
24 | id="Line segment(s)",
25 | ),
26 | pytest.param(
27 | "unit_square_pts",
28 | {"dimensions": 1, "closed": True},
29 | {"is_closed": True, "dimensions": 1},
30 | id="Looped lines",
31 | ),
32 | pytest.param(
33 | "unit_square_pts",
34 | {"dimensions": 2, "name": "elephant"},
35 | {"is_closed": True, "dimensions": 2, "name": "elephant"},
36 | id="Explicit name",
37 | ),
38 | pytest.param(
39 | np.array([[0.0, 0.0], [1.0, 0.0]]),
40 | {"dimensions": 2},
41 | ValueError("Need at least 3 points to define a 2D region (got 2)"),
42 | id="Too few points (2D)",
43 | ),
44 | pytest.param(
45 | np.array([[0.0, 0.0]]),
46 | {"dimensions": 1},
47 | ValueError("Need at least 2 points to define a 1D region (got 1)"),
48 | id="Too few points (1D)",
49 | ),
50 | pytest.param(
51 | np.array([[0.0, 0.0], [1.0, 0.0]]),
52 | {"dimensions": 1},
53 | {"is_closed": False},
54 | id="Borderline enough points (1D)",
55 | ),
56 | pytest.param(
57 | np.array([[0.0, 0.0], [1.0, 0.0]]),
58 | {"dimensions": 1, "closed": True},
59 | ValueError("Cannot create a loop from a single line segment."),
60 | id="Cannot close single line segment.",
61 | ),
62 | pytest.param(
63 | "unit_square_pts",
64 | {"dimensions": 3, "closed": False},
65 | ValueError(
66 | "Only regions of interest of dimension 1 or 2 "
67 | "are supported (requested 3)"
68 | ),
69 | id="Bad dimensionality",
70 | ),
71 | ],
72 | )
73 | def test_creation(
74 | input_pts,
75 | kwargs_for_creation: dict[str, Any],
76 | expected_results: dict[str, Any] | Exception,
77 | request,
78 | ) -> None:
79 | if isinstance(input_pts, str):
80 | input_pts = request.getfixturevalue(input_pts)
81 |
82 | if isinstance(expected_results, Exception):
83 | with pytest.raises(
84 | type(expected_results), match=re.escape(str(expected_results))
85 | ):
86 | BaseRegionOfInterest(input_pts, **kwargs_for_creation)
87 | else:
88 | roi = BaseRegionOfInterest(input_pts, **kwargs_for_creation)
89 |
90 | expected_dim = kwargs_for_creation.pop("dimensions", 2)
91 | expected_closure = kwargs_for_creation.pop("closed", False)
92 | if expected_dim == 2:
93 | assert isinstance(roi.region, shapely.Polygon)
94 | assert len(roi.coords) == len(input_pts) + 1
95 | string_should_contain = "-gon"
96 | elif expected_closure:
97 | assert isinstance(roi.region, shapely.LinearRing)
98 | assert len(roi.coords) == len(input_pts) + 1
99 | string_should_contain = "line segment(s)"
100 | else:
101 | assert isinstance(roi.region, shapely.LineString)
102 | assert len(roi.coords) == len(input_pts)
103 | string_should_contain = "line segment(s)"
104 | assert string_should_contain in roi.__str__()
105 | assert string_should_contain in roi.__repr__()
106 |
107 | for attribute_name, expected_value in expected_results.items():
108 | assert getattr(roi, attribute_name) == expected_value
109 |
--------------------------------------------------------------------------------
/docs/make_api.py:
--------------------------------------------------------------------------------
1 | """Generate the API index and autosummary pages for ``movement`` modules.
2 |
3 | This script generates the top-level API index file (``api_index.rst``)
4 | for all modules in the `movement` package, except for those specified
5 | in ``EXCLUDE_MODULES``.
6 | This script also allows "package modules" that aggregate submodules
7 | via their ``__init__.py`` files (e.g. ``movement.kinematics``) to be added
8 | to the API index, rather than listing each submodule separately.
9 | These modules are specified in ``PACKAGE_MODULES`` and will have their
10 | autosummary pages generated.
11 | """
12 |
13 | import importlib
14 | import inspect
15 | import os
16 | import sys
17 | from pathlib import Path
18 |
19 | from jinja2 import FileSystemLoader
20 | from jinja2.sandbox import SandboxedEnvironment
21 | from sphinx.ext.autosummary.generate import _underline
22 | from sphinx.util import rst
23 |
24 | # Single-file modules to exclude from the API index
25 | EXCLUDE_MODULES = {
26 | "movement.cli_entrypoint",
27 | "movement.napari.loader_widgets",
28 | "movement.napari.meta_widget",
29 | }
30 |
31 | # Modules with __init__.py that expose submodules explicitly
32 | PACKAGE_MODULES = {"movement.kinematics", "movement.plots", "movement.roi"}
33 |
34 | # Configure paths
35 | SCRIPT_DIR = Path(__file__).resolve().parent
36 | MOVEMENT_ROOT = SCRIPT_DIR.parent
37 | SOURCE_PATH = Path("source")
38 | TEMPLATES_PATH = SOURCE_PATH / "_templates"
39 |
40 | os.chdir(SCRIPT_DIR)
41 | sys.path.insert(0, str(MOVEMENT_ROOT))
42 |
43 |
def get_modules():
    """Return the set of dotted module names to be documented.

    Walks every ``.py`` file under the ``movement`` package and converts
    its path to a dotted module name. Packages listed in
    ``PACKAGE_MODULES`` are kept as a single entry (their ``__init__.py``
    aggregates submodules), so their children are dropped, as are the
    modules in ``EXCLUDE_MODULES``.
    """
    # Gather all modules and their paths
    module_names = set()
    for path in sorted((MOVEMENT_ROOT / "movement").rglob("*.py")):
        module_name = str(
            path.relative_to(MOVEMENT_ROOT).with_suffix("")
        ).replace(os.sep, ".")
        if path.name == "__init__.py":
            # Keep the package itself (e.g. movement.kinematics) only if
            # it is explicitly listed; other __init__.py files are skipped
            parent = module_name.rsplit(".", 1)[0]
            if parent in PACKAGE_MODULES:
                module_names.add(parent)
        else:
            module_names.add(module_name)
    # Submodules of package modules are documented via their parent
    # package page, so exclude them from the index.
    # (lowercase name: this is a local variable, not a module constant)
    package_module_children = {
        name
        for name in module_names
        if any(name.startswith(parent + ".") for parent in PACKAGE_MODULES)
    }
    return module_names - EXCLUDE_MODULES - package_module_children
66 |
67 |
def get_members(module_name):
    """Return sorted lists of (functions, classes) defined in a module.

    Respects the module's ``__all__`` if present, otherwise falls back
    to ``dir()``. Names that resolve to neither a function nor a class
    (constants, submodules, missing names) are ignored.
    """
    mod = importlib.import_module(module_name)
    functions = []
    classes = []
    for name in getattr(mod, "__all__", dir(mod)):
        # getattr default None: a name listed in __all__ may be absent
        obj = getattr(mod, name, None)
        if inspect.isfunction(obj):
            functions.append(name)  # no need to wrap in an f-string
        elif inspect.isclass(obj):
            classes.append(name)
    return sorted(functions), sorted(classes)
80 |
81 |
def write_autosummary_module_page(module_name, output_path):
    """Render an .rst page with autosummary listings for *module_name*."""
    funcs, klasses = get_members(module_name)
    loader = FileSystemLoader(TEMPLATES_PATH)
    environment = SandboxedEnvironment(loader=loader)
    # Register the filters that sphinx's autosummary templates rely on
    environment.filters["escape"] = rst.escape
    environment.filters["underline"] = _underline
    rendered = environment.get_template("autosummary/module.rst").render(
        fullname=module_name,
        underline="=" * len(module_name),
        classes=klasses,
        functions=funcs,
    )
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(rendered)
98 |
99 |
def make_api_index(module_names):
    """Create a top-level API index file listing the specified modules."""
    # Each entry is indented to sit inside the head template's toctree
    entries = "\n".join(
        f"    {name}" for name in sorted(module_names)
    )
    head = (TEMPLATES_PATH / "api_index_head.rst").read_text()
    (SOURCE_PATH / "api_index.rst").write_text(head + "\n" + entries)
108 |
109 |
if __name__ == "__main__":
    # Generate autosummary pages for the aggregating package modules,
    # which are not covered by the regular per-module autosummary run
    for module_name in PACKAGE_MODULES:
        output_path = SOURCE_PATH / "api" / f"{module_name}.rst"
        write_autosummary_module_page(module_name, output_path)
    # Generate the top-level API index listing all documented modules
    make_api_index(get_modules())
117 |
--------------------------------------------------------------------------------
/tests/test_unit/test_roi/test_plot.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Any
3 |
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import pytest
7 | from matplotlib.lines import Line2D
8 | from matplotlib.patches import PathPatch
9 |
10 | from movement.roi import LineOfInterest, PolygonOfInterest
11 | from movement.roi.base import BaseRegionOfInterest
12 |
13 |
@pytest.fixture
def havarti() -> PolygonOfInterest:
    """Wedge-shaped RoI with several holes.

    Havarti is a type of cheese that typically has holes and divots.
    """
    exterior = [(0, 0), (4, 0), (4, 0.5), (0, 2)]
    # Four interior holes of various shapes and sizes
    holes = [
        [(0.5, 0.5), (0.5, 0.75), (0.625, 0.625)],
        [(3, 0.25), (3.25, 0.5), (3.25, 0.75), (2.75, 0.75), (2.625, 0.5)],
        [(1.0, 0.1), (2, 0.1), (1.75, 0.2), (1.5, 0.3), (1.25, 0.2)],
        [(0.5, 1.75), (0.75, 1.5), (1.0, 1.25), (1.25, 1.25), (1.5, 1.4)],
    ]
    return PolygonOfInterest(exterior, holes=holes, name="Havarti")
28 |
29 |
@pytest.fixture
def decaoctagonal_doughnut() -> PolygonOfInterest:
    """18-sided doughnut.

    This region matches (approximately) to the arena in the
    "SLEAP_three-mice_Aeon_proofread.analysis.h5" dataset.
    """
    centre = np.array([712.5, 541])
    width = 40.0
    extent = 1090.0
    n_pts = 18

    # Unit-circle vertices, starting at the top and stepping anticlockwise
    angles = np.pi / 2.0 + (2.0 * np.arange(n_pts) * np.pi) / n_pts
    unit_shape = np.exp(angles * 1.0j)

    def ring(radius):
        # Scale the unit shape and shift it onto the arena centre,
        # returning an (n_pts, 2) array of (x, y) vertices
        scaled = radius * unit_shape
        return np.array([scaled.real, scaled.imag]).transpose() + centre

    outer_boundary = ring(extent / 2.0)
    inner_boundary = ring((extent - width) / 2.0)
    return PolygonOfInterest(
        outer_boundary, holes=[inner_boundary], name="Arena"
    )
62 |
63 |
@pytest.mark.parametrize(
    ["region_to_plot", "kwargs"],
    [
        pytest.param("unit_square", {}, id="Unit square"),
        pytest.param("unit_square_with_hole", {}, id="Unit square with hole"),
        pytest.param(
            "havarti",
            {
                "facecolor": "yellow",
                "edgecolor": "black",
                "ax": "new",  # Interpreted by test as create & pass in an axis
            },
            id="Cheese",
        ),
        pytest.param(
            "decaoctagonal_doughnut",
            {"facecolor": ("black", 0.0)},  # Transparency hack
            id="Decaoctagonal doughnut",
        ),
        pytest.param(
            LineOfInterest([(0.0, 0.0), (1.0, 0.0)]), {}, id="Segment"
        ),
        pytest.param(
            LineOfInterest([(0.0, 0.0), (1.0, 0), (1.0, 1.0), (0.0, 1)]),
            {},
            id="Multi-segment",
        ),
    ],
)
def test_plot(
    region_to_plot: BaseRegionOfInterest,
    kwargs: dict[str, Any],
    request,
) -> None:
    # String parameters refer to fixtures; resolve them first
    if isinstance(region_to_plot, str):
        region_to_plot = request.getfixturevalue(region_to_plot)

    if kwargs.get("ax") is not None:
        # Simulate passing in an existing axis, so we do not need
        # to capture the axis returned by plot()
        _, ax = plt.subplots(1, 1)
        kwargs["ax"] = ax
        region_to_plot.plot(**kwargs)
    else:
        # Simulate creation of a new figure and axis by plot()
        kwargs["ax"] = None
        _, ax = region_to_plot.plot(**kwargs)
    plt.close()

    # 2D regions render as a single PathPatch; 1D regions as a single Line2D
    is_2d = region_to_plot.dimensions == 2
    expected_n_patches, expected_n_lines = (1, 0) if is_2d else (0, 1)
    assert len(ax.patches) == expected_n_patches
    assert len(ax.lines) == expected_n_lines
    if is_2d:
        assert type(ax.patches[0]) is PathPatch
    else:
        assert type(ax.lines[0]) is Line2D
118 |
119 |
def test_requires_explicit_implementation() -> None:
    """Test that the BaseRegionOfInterest class cannot be plotted."""
    region = BaseRegionOfInterest([(0.0, 0.0), (1.0, 0.0)], dimensions=1)
    expected_msg = re.escape("_plot must be implemented by subclass.")

    with pytest.raises(NotImplementedError, match=expected_msg):
        region.plot()
129 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://pypi.org/project/movement)
2 | [](https://pypi.org/project/movement)
3 | [](https://anaconda.org/conda-forge/movement)
4 | [](https://pepy.tech/project/movement)
5 | [](https://opensource.org/licenses/BSD-3-Clause)
6 | [](https://github.com/neuroinformatics-unit/movement/actions)
7 | [](https://codecov.io/gh/neuroinformatics-unit/movement)
8 | [](https://mybinder.org/v2/gh/neuroinformatics-unit/movement/gh-pages?filepath=notebooks/examples)
9 | [](https://github.com/astral-sh/ruff)
10 | [](https://github.com/pre-commit/pre-commit)
11 | [](https://neuroinformatics.zulipchat.com/#narrow/stream/406001-Movement/topic/Welcome!)
12 | [](https://zenodo.org/doi/10.5281/zenodo.12755724)
13 |
14 | # movement
15 |
16 | A Python toolbox for analysing animal body movements across space and time.
17 |
18 |
19 | 
20 |
21 | ## Quick install
22 |
23 | Create and activate a conda environment with movement installed (including the GUI):
24 | ```bash
25 | conda create -n movement-env -c conda-forge movement napari pyqt
26 | conda activate movement-env
27 | ```
28 |
29 |
30 | > [!Note]
31 | > Read the [documentation](https://movement.neuroinformatics.dev/latest) for more information, including [full installation instructions](https://movement.neuroinformatics.dev/latest/user_guide/installation.html) and [examples](https://movement.neuroinformatics.dev/latest/examples/index.html).
32 |
33 | ## Overview
34 |
35 | Deep learning methods for motion tracking have revolutionised a range of
36 | scientific disciplines, from neuroscience and biomechanics, to conservation
37 | and ethology. Tools such as
38 | [DeepLabCut](https://www.mackenziemathislab.org/deeplabcut) and
39 | [SLEAP](https://sleap.ai/) now allow researchers to track animal movements
40 | in videos with remarkable accuracy, without requiring physical markers.
41 | However, there is still a need for standardised, easy-to-use methods
42 | to process the tracks generated by these tools.
43 |
44 | `movement` aims to provide a consistent, modular interface for analysing
45 | motion tracks, enabling steps such as data cleaning, visualisation,
46 | and motion quantification. We aim to support all popular animal tracking
47 | frameworks and file formats.
48 |
49 | Find out more on our [mission and scope](https://movement.neuroinformatics.dev/latest/community/mission-scope.html) statement and our [roadmap](https://movement.neuroinformatics.dev/latest/community/roadmaps.html).
50 |
51 |
52 |
53 | > [!Tip]
54 | > If you prefer analysing your data in R, we recommend checking out the
55 | > [animovement](https://animovement.dev/) toolbox, which is similar in scope.
56 | > We are working together with its developer
57 | > to gradually converge on common data standards and workflows.
58 |
59 |
60 |
61 | ## Join the movement
62 |
63 | `movement` is made possible by the generous contributions of many [people](https://movement.neuroinformatics.dev/latest/community/people.html).
64 |
65 | We welcome and encourage contributions in any form—whether it is fixing a bug, developing a new feature, or improving the documentation—as long as you follow our [code of conduct](CODE_OF_CONDUCT.md).
66 |
67 | Go to our [community page](https://movement.neuroinformatics.dev/latest/community/index.html) to find out how to connect with us and get involved.
68 |
69 |
70 | ## Citation
71 |
72 | If you use movement in your work, please cite the following Zenodo DOI:
73 |
74 | > Nikoloz Sirmpilatze, Chang Huan Lo, Sofía Miñano, Brandon D. Peri, Dhruv Sharma, Laura Porta, Iván Varela & Adam L. Tyson (2024). neuroinformatics-unit/movement. Zenodo. https://zenodo.org/doi/10.5281/zenodo.12755724
75 |
76 | ## License
77 | ⚖️ [BSD 3-Clause](./LICENSE)
78 |
79 | ## Package template
80 | This package layout and configuration (including pre-commit hooks and GitHub actions) have been copied from the [python-cookiecutter](https://github.com/neuroinformatics-unit/python-cookiecutter) template.
81 |
--------------------------------------------------------------------------------
/tests/test_integration/test_io.py:
--------------------------------------------------------------------------------
1 | import h5py
2 | import numpy as np
3 | import pytest
4 | import xarray as xr
5 | from pytest import DATA_PATHS
6 |
7 | from movement.io import load_poses, save_poses
8 |
9 |
@pytest.fixture(params=["dlc.h5", "dlc.csv"])
def dlc_output_file(request, tmp_path):
    """Return the output file path for a DLC .h5 or .csv file."""
    # Parametrised over both supported DLC output formats
    filename = request.param
    return tmp_path / filename
14 |
15 |
@pytest.mark.parametrize(
    "dlc_poses_df", ["valid_dlc_poses_df", "valid_dlc_3d_poses_df"]
)
def test_load_and_save_to_dlc_style_df(dlc_poses_df, request):
    """Test that loading pose tracks from a DLC-style DataFrame and
    converting back to a DataFrame returns the same data values.
    """
    source_df = request.getfixturevalue(dlc_poses_df)
    # Round-trip: DataFrame -> Dataset -> DataFrame
    ds = load_poses.from_dlc_style_df(source_df)
    round_tripped_df = save_poses.to_dlc_style_df(ds, split_individuals=False)
    np.testing.assert_allclose(round_tripped_df.values, source_df.values)
27 |
28 |
def test_save_and_load_dlc_file(dlc_output_file, valid_poses_dataset):
    """Test that saving pose tracks to DLC .h5 and .csv files and then
    loading them back in returns the same Dataset.
    """
    # Round-trip: Dataset -> DLC file -> Dataset
    save_poses.to_dlc_file(
        valid_poses_dataset, dlc_output_file, split_individuals=False
    )
    reloaded_ds = load_poses.from_dlc_file(dlc_output_file)
    xr.testing.assert_allclose(reloaded_ds, valid_poses_dataset)
38 |
39 |
def test_convert_sleap_to_dlc_file(sleap_file, dlc_output_file):
    """Test that pose tracks loaded from SLEAP .slp and .h5 files,
    when converted to DLC .h5 and .csv files and re-loaded return
    the same Datasets.
    """
    ds_from_sleap = load_poses.from_sleap_file(sleap_file)
    # Convert to a DLC-style file and load it back
    save_poses.to_dlc_file(
        ds_from_sleap, dlc_output_file, split_individuals=False
    )
    ds_from_dlc = load_poses.from_dlc_file(dlc_output_file)
    xr.testing.assert_allclose(ds_from_sleap, ds_from_dlc)
49 |
50 |
@pytest.mark.parametrize(
    "sleap_h5_file, fps",
    [
        ("SLEAP_single-mouse_EPM.analysis.h5", 30),
        ("SLEAP_three-mice_Aeon_proofread.analysis.h5", None),
        ("SLEAP_three-mice_Aeon_mixed-labels.analysis.h5", 50),
    ],
)
def test_to_sleap_analysis_file_returns_same_h5_file_content(
    sleap_h5_file, fps, new_h5_file
):
    """Test that saving pose tracks (loaded from a SLEAP analysis
    file) to a SLEAP-style .h5 analysis file returns the same file
    contents.
    """
    ds = load_poses.from_sleap_file(DATA_PATHS.get(sleap_h5_file), fps=fps)
    save_poses.to_sleap_analysis_file(ds, new_h5_file)

    # Datasets whose values should survive the round-trip unchanged
    datasets_to_compare = ["track_occupancy", "tracks", "point_scores"]
    with (
        h5py.File(ds.source_file, "r") as file_in,
        h5py.File(new_h5_file, "r") as file_out,
    ):
        assert set(file_in.keys()) == set(file_out.keys())
        for key in datasets_to_compare:
            np.testing.assert_allclose(file_in[key][:], file_out[key][:])
82 |
83 |
@pytest.mark.parametrize(
    "file",
    [
        "DLC_single-wasp.predictions.h5",
        "DLC_two-mice.predictions.csv",
        "SLEAP_single-mouse_EPM.analysis.h5",
        "SLEAP_three-mice_Aeon_proofread.predictions.slp",
    ],
)
def test_to_sleap_analysis_file_source_file(file, new_h5_file):
    """Test that saving pose tracks (loaded from valid source files)
    to a SLEAP-style .h5 analysis file stores the .slp labels path
    only when the source file is a .slp file.
    """
    file_path = DATA_PATHS.get(file)
    # Pick the loader matching the source file's origin
    loader = (
        load_poses.from_dlc_file
        if file.startswith("DLC")
        else load_poses.from_sleap_file
    )
    ds = loader(file_path)
    save_poses.to_sleap_analysis_file(ds, new_h5_file)

    with h5py.File(new_h5_file, "r") as f:
        stored_labels_path = f["labels_path"][()].decode()
        if file_path.suffix == ".slp":
            assert file_path.name in stored_labels_path
        else:
            assert stored_labels_path == ""
110 |
111 |
def test_save_and_load_to_nwb_file(valid_poses_dataset):
    """Test that saving pose tracks to NWBFile and then loading
    the file back in returns the same Dataset.
    """
    # NWB stores one individual per file; merge the re-loaded singles
    nwb_files = save_poses.to_nwb_file(valid_poses_dataset)
    ds = xr.merge(
        [load_poses.from_nwb_file(nwb_file) for nwb_file in nwb_files]
    )
    # Change expected differences to match valid_poses_dataset
    ds["time"] = ds.time.astype(int)
    for attr in ("time_unit", "source_file"):
        ds.attrs[attr] = valid_poses_dataset.attrs[attr]
    del ds.attrs["fps"]
    xr.testing.assert_allclose(ds, valid_poses_dataset)
125 |
--------------------------------------------------------------------------------
/tests/test_unit/test_logging.py:
--------------------------------------------------------------------------------
1 | import json
2 | import warnings
3 |
4 | import pytest
5 | import xarray as xr
6 | from loguru import logger as loguru_logger
7 |
8 | from movement.utils.logging import (
9 | MovementLogger,
10 | log_to_attrs,
11 | logger,
12 | showwarning,
13 | )
14 |
15 | log_methods = ["debug", "info", "warning", "error", "exception"]
16 |
17 |
def assert_log_entry_in_file(expected_components, log_file):
    """Assert that a log entry with the expected components is
    found in the log file.
    """
    with open(log_file) as f:
        lines = f.readlines()
    # A matching entry is a single line containing every component
    entry_found = any(
        all(component in line for component in expected_components)
        for line in lines
    )
    assert entry_found, (
        f"Expected log entry with components {expected_components} "
        "not found in log file."
    )
31 |
32 |
@pytest.mark.parametrize("method", log_methods)
def test_log_to_file(method):
    """Ensure the correct logger method is called and
    the expected message is in the logfile.
    """
    message = f"{method} message"
    getattr(logger, method)(message)
    # ``exception`` logs at the ERROR level
    expected_level = "ERROR" if method == "exception" else method.upper()
    # Check if a matching log entry is found in the log file
    assert_log_entry_in_file([expected_level, message], pytest.LOG_FILE)
44 |
45 |
def test_showwarning():
    """Ensure the custom ``showwarning`` function is called when a
    warning is issued.
    """
    message = "This is a deprecation warning"
    category = DeprecationWarning
    # Route warnings through movement's custom handler
    warnings.showwarning = showwarning
    warnings.warn(message=message, category=category, stacklevel=2)
    # The category name and message should appear in the log file
    assert_log_entry_in_file([category.__name__, message], pytest.LOG_FILE)
60 |
61 |
def test_logger_repr():
    """Ensure the custom logger's representation equals the loguru logger."""
    movement_logger = MovementLogger()
    assert repr(movement_logger) == repr(loguru_logger)
65 |
66 |
@pytest.mark.parametrize(
    "input_data",
    ["valid_poses_dataset", "valid_bboxes_dataset"],
)
@pytest.mark.parametrize(
    "selector_fn, expected_selector_type",
    [
        (lambda ds: ds, xr.Dataset),  # take full dataset
        (lambda ds: ds.position, xr.DataArray),  # take position DataArray
    ],
)
@pytest.mark.parametrize(
    "extra_kwargs",
    [{}, {"extra1": 42}],
    ids=["no_extra_kwargs", "with_extra_kwargs"],
)
def test_log_to_attrs(
    input_data, selector_fn, expected_selector_type, extra_kwargs, request
):
    """Test that the ``log_to_attrs()`` decorator saves
    log entries to the dataset's or data array's ``log``
    attribute.
    """

    @log_to_attrs
    def fake_func(data, arg, kwarg=None, **kwargs):
        return data

    # Apply the decorated operation to the dataset or data array
    data_in = selector_fn(request.getfixturevalue(input_data))
    data_out = fake_func(data_in, "test1", kwarg="test2", **extra_kwargs)

    # The output type is preserved and carries a log attribute
    assert isinstance(data_out, expected_selector_type)
    assert "log" in data_out.attrs

    # The log attribute is a JSON-serialised list with a single entry
    log_entries = json.loads(data_out.attrs["log"])
    assert isinstance(log_entries, list)
    assert len(log_entries) == 1

    entry = log_entries[0]
    assert entry["operation"] == "fake_func"
    assert entry["arg"] == "'test1'"  # repr() puts quotes around strings
    if extra_kwargs:
        assert entry["kwargs"] == "{'extra1': 42}"
    else:
        assert "kwargs" not in entry
116 |
117 |
def test_log_to_attrs_json_decode_error(valid_poses_dataset):
    """Test that a JSON decode error in the log attribute is handled."""

    @log_to_attrs
    def fake_func(data):
        return data

    # Corrupt the dataset's log attribute with invalid JSON
    valid_poses_dataset.attrs["log"] = '[{"invalid_json": "missing_quote}]'

    # Trigger the decorator, which must cope with the broken log
    result = fake_func(valid_poses_dataset)

    # A warning about the undecodable log should reach the log file
    assert_log_entry_in_file(
        ["WARNING", "Failed to decode existing log in attributes"],
        pytest.LOG_FILE,
    )

    # Only the fresh entry from the fake_func call should remain
    entries = json.loads(result.attrs["log"])
    assert len(entries) == 1
    assert entries[0]["operation"] == "fake_func"
142 |
--------------------------------------------------------------------------------
/tests/test_unit/test_kinematics/test_kinetic_energy.py:
--------------------------------------------------------------------------------
1 | from contextlib import nullcontext as does_not_raise
2 |
3 | import numpy as np
4 | import pytest
5 | import xarray as xr
6 |
7 | from movement.kinematics.kinetic_energy import compute_kinetic_energy
8 |
9 |
@pytest.mark.parametrize("decompose", [True, False])
def test_basic_shape_and_values(decompose):
    """Basic sanity check with simple data."""
    # shape: (time, individuals, keypoints, space)
    data = np.array([[[[1, 0], [0, 1], [1, 1]]], [[[2, 0], [0, 2], [2, 2]]]])
    position = xr.DataArray(
        data,
        dims=["time", "individuals", "keypoints", "space"],
        coords={
            "time": [0, 1],
            "individuals": [0],
            "keypoints": [0, 1, 2],
            "space": ["x", "y"],
        },
    )
    result = compute_kinetic_energy(position, decompose=decompose)
    # Kinetic energy can never be negative
    assert (result >= 0).all()
    if not decompose:
        assert set(result.dims) == {"time", "individuals"}
        assert result.shape == (2, 1)
    else:
        # Decomposition adds an "energy" dimension with two components
        assert set(result.dims) == {"time", "individuals", "energy"}
        assert list(result.coords["energy"].values) == [
            "translational",
            "internal",
        ]
        assert result.shape == (2, 1, 2)
38 |
39 |
def test_uniform_linear_motion(valid_poses_dataset):
    """Uniform rigid motion:
    expect translational energy > 0, internal ≈ 0.
    """
    position = valid_poses_dataset.copy(deep=True)["position"]
    energy = compute_kinetic_energy(position, decompose=True)
    # All motion is shared by the keypoints, so no internal component
    assert np.allclose(energy.sel(energy="translational"), 3)
    assert np.allclose(energy.sel(energy="internal"), 0)
50 |
51 |
@pytest.fixture
def spinning_dataset():
    """Create synthetic rotational-only dataset."""
    n_frames = 10
    n_keypoints = 4
    radius = 1.0
    base_angles = np.linspace(0, 2 * np.pi, n_frames)

    # Each keypoint sits 90 degrees further around the circle;
    # broadcasting yields an (time, keypoints) array of angles
    keypoint_offsets = np.arange(n_keypoints) * np.pi / 2
    angles = base_angles[:, np.newaxis] + keypoint_offsets
    coords_xy = np.stack(
        [radius * np.cos(angles), radius * np.sin(angles)], axis=-1
    )
    # Insert a singleton axis for the single individual
    positions = coords_xy[:, np.newaxis, :, :]

    return xr.DataArray(
        positions,
        dims=["time", "individuals", "keypoints", "space"],
        coords={
            "time": np.arange(n_frames),
            "individuals": ["id0"],
            "keypoints": [f"k{i}" for i in range(n_keypoints)],
            "space": ["x", "y"],
        },
    )
78 |
79 |
def test_pure_rotation(spinning_dataset):
    """In pure rotational motion, translational energy ≈ 0."""
    energy = compute_kinetic_energy(spinning_dataset, decompose=True)
    # The centroid stays put, so all energy is internal
    assert np.allclose(energy.sel(energy="translational"), 0)
    assert (energy.sel(energy="internal") > 0).all()
87 |
88 |
@pytest.mark.parametrize(
    "masses",
    [
        {"centroid": 2.0, "left": 2.0, "right": 2.0},
        {"centroid": 0.4, "left": 0.3, "right": 0.3},
    ],
)
def test_weighted_kinetic_energy(valid_poses_dataset, masses):
    """Kinetic energy should scale linearly with individual's total mass
    if velocity is constant.
    """
    position = valid_poses_dataset.copy(deep=True)["position"]
    unweighted = compute_kinetic_energy(position)
    weighted = compute_kinetic_energy(position, masses=masses)
    # Expected scaling: total mass relative to the default unit masses
    scale = sum(masses.values()) / position.sizes["keypoints"]
    xr.testing.assert_allclose(weighted, unweighted * scale)
106 |
107 |
@pytest.mark.parametrize(
    "valid_poses_dataset, keypoints, expected_exception",
    [
        pytest.param(
            "multi_individual_array",
            None,
            does_not_raise(),
            id="3-keypoints (sufficient)",
        ),
        pytest.param(
            "multi_individual_array",
            ["centroid"],
            pytest.raises(ValueError, match="At least 2 keypoints"),
            id="3-keypoints 1-selected (insufficient)",
        ),
        pytest.param(
            "single_keypoint_array",
            None,
            pytest.raises(ValueError, match="At least 2 keypoints"),
            id="1-keypoint (insufficient)",
        ),
    ],
    indirect=["valid_poses_dataset"],
)
def test_insufficient_keypoints(
    valid_poses_dataset, keypoints, expected_exception
):
    """Function should raise error if fewer than 2 keypoints."""
    position = valid_poses_dataset["position"]
    with expected_exception:
        compute_kinetic_energy(position, keypoints=keypoints, decompose=True)
142 |
--------------------------------------------------------------------------------
/docs/source/user_guide/installation.md:
--------------------------------------------------------------------------------
1 | (target-installation)=
2 | # Installation
3 |
4 | ## Create a virtual environment
5 |
While not strictly required, we strongly recommend installing `movement` in a
7 | clean virtual environment, using tools such as
8 | [conda](conda:) or [uv](uv:getting-started/installation/).
9 |
10 | ::::{tab-set}
11 | :::{tab-item} conda
12 | Create and activate a new [conda environment](conda:user-guide/tasks/manage-environments.html):
13 | ```sh
14 | conda create -y -n movement-env -c conda-forge python=3.13
15 | conda activate movement-env
16 | ```
17 |
18 | We used `movement-env` as the environment name, but you can choose any name you prefer.
19 | :::
20 |
21 | :::{tab-item} uv
22 | Create and activate a new [virtual environment](uv:pip/environments/) inside your project directory:
23 |
24 | ```sh
25 | uv venv --python=3.13
26 |
27 | source .venv/bin/activate # On macOS and Linux
28 | .venv\Scripts\activate # On Windows PowerShell
29 | ```
30 | :::
31 | ::::
32 |
33 | ## Install the package
34 | With your environment activated, install `movement` using one of the methods below.
35 |
36 | ::::{tab-set}
37 | :::{tab-item} From conda-forge using conda
38 | Install the core package:
39 | ```sh
40 | conda install -c conda-forge movement
41 | ```
42 |
43 | If you wish to use the GUI, which requires [napari](napari:), run instead:
44 | ```sh
45 | conda install -c conda-forge movement napari pyqt
46 | ```
47 | You may exchange `pyqt` for `pyside6` if you prefer a different Qt backend.
48 | See [napari's installation guide](napari:tutorials/fundamentals/installation.html)
49 | for more details on available backends.
50 |
51 | :::
52 |
53 | :::{tab-item} From PyPI using pip
54 | Install the core package:
55 | ```sh
56 | pip install movement
57 | ```
58 | If you wish to use the GUI, which requires [napari](napari:), run instead:
59 | ```sh
60 | pip install "movement[napari]"
61 | ```
62 | :::
63 |
64 | :::{tab-item} From PyPI using uv
65 | Install the core package:
66 | ```sh
67 | uv pip install movement
68 | ```
69 | If you wish to use the GUI, which requires [napari](napari:), run instead:
70 | ```sh
71 | uv pip install "movement[napari]"
72 | ```
73 | :::
74 |
75 | ::::
76 |
77 | :::{dropdown} Note for Apple Silicon users with macOS 13 or earlier
78 | :color: info
79 | :icon: info
80 |
81 | If you are using macOS 13 or earlier on Apple Silicon (M-series),
82 | we recommend installing `movement` via `conda-forge`.
83 | Alternatively, upgrade to macOS 14 to use any of the installation methods above.
84 | :::
85 |
86 | :::{admonition} For developers
87 | :class: tip
88 |
89 | If you would like to contribute to `movement`, see our [contributing guide](target-contributing)
90 | for detailed developer setup instructions and coding guidelines.
91 | :::
92 |
93 | ## Verify the installation
94 | With your virtual environment activated, run:
95 | ```sh
96 | movement info
97 | ```
98 | You should see a printout including the version numbers of `movement`
99 | and some of its dependencies.
100 |
101 | To test the GUI installation:
102 |
103 | ```sh
104 | movement launch
105 | ```
106 |
107 | This is equivalent to running `napari -w movement` and should open the `napari`
108 | window with the `movement` widget docked on the right-hand side.
109 |
110 | ## Update the package
111 |
112 | :::{dropdown} Always update using the same package manager used for installation
113 | :icon: info
114 | :color: info
115 |
116 | If your environment was created with `conda`, first check which channel `movement` was installed from before updating.
117 | Run `conda list movement` in your active `conda` environment and look at the **Channel** column:
118 | - If the channel is `conda-forge`, update using `conda`.
119 | - If the channel is `pypi`, update using `pip`.
120 |
121 | :::
122 |
123 |
124 | ::::{tab-set}
125 | :::{tab-item} conda
126 | ```sh
127 | conda update -c conda-forge movement -y
128 | ```
129 | :::
130 |
131 | :::{tab-item} pip
132 | ```sh
133 | pip install -U movement
134 | ```
135 | :::
136 |
137 | :::{tab-item} uv
138 | ```sh
139 | uv pip install -U movement
140 | ```
141 | :::
142 | ::::
143 |
144 |
145 | If the above fails, try installing `movement` in a fresh new environment to avoid dependency conflicts.
146 |
147 | First remove the existing environment:
148 |
149 | ::::{tab-set}
150 | :::{tab-item} conda
151 | ```sh
152 | conda env remove -n movement-env
153 | ```
154 |
155 | This command assumes your environment is named `movement-env`.
156 | If you are unsure about the name, you can get a list of the environments
157 | on your system with `conda env list`.
158 | :::
159 |
160 | :::{tab-item} uv
161 | Delete the `.venv` folder in your project directory.
162 |
163 | ```powershell
164 | rm -rf .venv # On macOS and Linux
165 | rmdir /s /q .venv # On Windows PowerShell
166 | ```
167 |
168 | Optionally, you can clean the `uv` cache for unused packages:
169 | ```sh
170 | uv cache prune
171 | ```
172 | :::
173 | ::::
174 |
175 | Once the environment has been removed, you can create a new one following the [instructions](#create-a-virtual-environment) above.
176 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "movement"
3 | authors = [
4 | { name = "Nikoloz Sirmpilatze", email = "niko.sirbiladze@gmail.com" },
5 | { name = "Chang Huan Lo", email = "changhuan.lo@ucl.ac.uk" },
6 | { name = "Sofía Miñano", email = "s.minano@ucl.ac.uk" },
7 | ]
8 | description = "A Python toolbox for analysing animal body movements across space and time"
9 | readme = "README.md"
10 | requires-python = ">=3.11.0"
11 | dynamic = ["version"]
12 |
13 | license = { text = "BSD-3-Clause" }
14 |
15 | dependencies = [
16 | "numpy>=2.0.0",
17 | "pandas",
18 | "h5py",
19 | "netCDF4<1.7.3",
20 | "tables>=3.10.1",
21 | "attrs",
22 | "pooch",
23 | "tqdm",
24 | "shapely",
25 | "sleap-io",
26 | "xarray[accel,io,viz]",
27 | "PyYAML",
28 | "napari-video>=0.2.13",
29 | "pyvideoreader>=0.5.3", # since switching to depend on openCV-headless
30 | "qt-niu", # needed for collapsible widgets
31 | "loguru",
32 | "pynwb",
33 | "ndx-pose>=0.2.1",
34 | ]
35 |
36 | classifiers = [
37 | "Development Status :: 3 - Alpha",
38 | "Programming Language :: Python",
39 | "Programming Language :: Python :: 3",
40 | "Programming Language :: Python :: 3.11",
41 | "Programming Language :: Python :: 3.12",
42 | "Programming Language :: Python :: 3.13",
43 | "Operating System :: OS Independent",
44 | "License :: OSI Approved :: BSD License",
45 | "Framework :: napari",
46 | ]
47 |
48 | # Entry point for napari plugin
49 | entry-points."napari.manifest".movement = "movement.napari:napari.yaml"
50 |
51 | [project.urls]
52 | "Homepage" = "https://github.com/neuroinformatics-unit/movement"
53 | "Bug Tracker" = "https://github.com/neuroinformatics-unit/movement/issues"
54 | "Documentation" = "https://movement.neuroinformatics.dev/"
55 | "Source Code" = "https://github.com/neuroinformatics-unit/movement"
56 | "User Support" = "https://neuroinformatics.zulipchat.com/#narrow/stream/406001-Movement"
57 |
58 | [project.optional-dependencies]
59 | napari = ["napari[all]>=0.6.0"]
60 | dev = [
61 | "pytest",
62 | "pytest-cov",
63 | "pytest-mock",
64 | "coverage",
65 | "tox",
66 | "mypy",
67 | "pre-commit",
68 | "ruff",
69 | "codespell",
70 | "setuptools_scm",
71 | "pandas-stubs",
72 | "types-attrs",
73 | "check-manifest",
74 | "types-PyYAML",
75 | "types-requests",
76 | "pytest-qt",
77 | "movement[napari]",
78 | ]
79 |
80 | [project.scripts]
81 | movement = "movement.cli_entrypoint:main"
82 |
83 | [build-system]
84 | requires = ["setuptools>=45", "wheel", "setuptools_scm[toml]>=6.2"]
85 | build-backend = "setuptools.build_meta"
86 |
87 | [tool.setuptools]
88 | include-package-data = true
89 |
90 | [tool.setuptools.packages.find]
91 | include = ["movement*"]
92 | exclude = ["tests", "docs*"]
93 |
94 |
95 | [tool.pytest.ini_options]
96 | addopts = "--cov=movement"
97 |
98 | [tool.setuptools_scm]
99 |
100 | [tool.check-manifest]
101 | ignore = [
102 | ".yaml",
103 | "tox.ini",
104 | "tests/",
105 | "tests/test_unit/",
106 | "tests/test_integration/",
107 | "docs/",
108 | "docs/source/",
109 | ]
110 |
111 | [[tool.mypy.overrides]]
112 | module = ["pooch.*", "h5py.*", "sleap_io.*"]
113 | ignore_missing_imports = true
114 |
115 | [tool.ruff]
116 | line-length = 79
117 | exclude = ["__init__.py", "build", ".eggs"]
118 | fix = true
119 |
120 | [tool.ruff.lint]
121 | # See https://docs.astral.sh/ruff/rules/
122 | ignore = [
123 | "D203", # one blank line before class
124 | "D213", # multi-line-summary second line
125 | ]
126 | select = [
127 | "E", # pycodestyle errors
128 | "F", # Pyflakes
129 | "UP", # pyupgrade
130 | "I", # isort
131 | "B", # flake8 bugbear
132 | "SIM", # flake8 simplify
133 | "C90", # McCabe complexity
134 | "D", # pydocstyle
135 | "NPY201", # checks for syntax that was deprecated in numpy2.0
136 | ]
137 | per-file-ignores = { "tests/*" = [
138 | "D100", # missing docstring in public module
139 | "D205", # missing blank line between summary and description
140 | "D103", # missing docstring in public function
141 | ], "examples/*" = [
142 | "B018", # Found useless expression
143 | "D103", # Missing docstring in public function
144 | "D400", # first line should end with a period.
145 | "D415", # first line should end with a period, question mark...
146 | "D205", # missing blank line between summary and description
147 | ] }
148 |
149 | [tool.ruff.format]
150 | docstring-code-format = true # Also format code in docstrings
151 |
152 | [tool.codespell]
153 | skip = '.git,.tox,*.svg'
154 | check-hidden = true
155 |
156 | [tool.tox]
157 | legacy_tox_ini = """
158 | [tox]
159 | requires =
160 | tox>=4
161 | tox-gh-actions
162 | envlist = py{311,312,313}
163 | isolated_build = True
164 |
165 | [gh-actions]
166 | python =
167 | 3.11: py311
168 | 3.12: py312
169 | 3.13: py313
170 |
171 | [testenv]
172 | passenv =
173 | CI
174 | GITHUB_ACTIONS
175 | DISPLAY
176 | XAUTHORITY
177 | NUMPY_EXPERIMENTAL_ARRAY_FUNCTION
178 | PYVISTA_OFF_SCREEN
179 | extras =
180 | dev
181 | commands =
182 | pytest -v --color=yes --cov=movement --cov-report=xml
183 | """
184 |
--------------------------------------------------------------------------------
/docs/source/blog/displacement-vectors.md:
--------------------------------------------------------------------------------
1 | ---
2 | blogpost: true
3 | date: November 7, 2025
4 | author: Carlo Castoldi
5 | location: Milan, Italy
6 | category: update
7 | language: English
8 | ---
9 |
10 | # Replacing displacement vectors for greater clarity
11 |
12 | This post introduces recent improvements to how ``movement`` computes displacement vectors, making the definitions more explicit, flexible, and intuitive for users.
13 |
14 | ## Background
15 |
16 | Computing kinematic properties is a core functionality since early versions of `movement`, when they were first introduced by [Chang Huan Lo](https://github.com/lochhh) in [#106](https://github.com/neuroinformatics-unit/movement/pull/106).
17 | For a library dedicated to analysing motion-tracking data, quantifying how far a tracked point moves between consecutive frames is fundamental. This measure underpins subsequent computations, such as the total distance travelled along a path. That's why we introduced the `compute_displacement` function early on, and why it features in our {ref}`compute and visualise kinematics ` example.
18 |
19 | Its original implementation, however, produced results that were difficult to interpret. For a given individual and keypoint at timestep `t`, displacement was defined as the vector pointing from the previous position at `t-1` to the current position at `t`. This definition is somewhat counter-intuitive: it identifies the last spatial translation used by the keypoint to reach its current position. It indicates where the point _came from_ rather than where it is _going_.
20 |
21 | For this reason, during the Hackday at [Open Software Week 2025](https://neuroinformatics.dev/open-software-summer-school/2025/index.html)—and as my first contribution to `movement`—I [volunteered](https://github.com/neuroinformatics-unit/osw25-hackday/issues/16) to develop a more intuitive interface for displacement vectors, under the supervision of [Sofía Miñano](https://github.com/sfmig).
22 | These improvements were introduced in [#657](https://github.com/neuroinformatics-unit/movement/pull/657) through a collaborative effort. The update provides a simpler, well-tested, and better-documented implementation that makes displacement computations easier to understand and use.
23 |
24 | 
25 |
26 | ## What's new?
27 |
28 | {mod}`kinematics ` has two new sister functions:
29 |
30 | - {func}`compute_forward_displacement `, computing the vector defined at time `t` that goes from the position in the current frame to the position in the next frame, at `t+1`.
31 | - {func}`compute_backward_displacement `, computing the vector defined at time `t` that goes from the position in the current frame to the position in the previous frame, at `t-1`.
32 |
33 | These functions replace the previous, more ambiguous `compute_displacement`, which has now been deprecated.
34 | The new API makes the directionality of displacement **explicit**, giving users greater **flexibility**. Depending on the context, one can now choose between forward- or backward-oriented vectors instead of relying on a single implicit definition.
35 |
36 | If you need a drop-in replacement for the old behaviour, you can use:
37 |
38 | ```python
39 | import movement.kinematics as kin
40 |
41 | # Instead of:
42 | displacement = kin.compute_displacement(ds.position)
43 |
44 | # Use:
45 | displacement = -kin.compute_backward_displacement(ds.position)
46 | ```
47 |
48 | __Related changes__
49 |
50 | We slightly modified the behaviour of vector conversion from Cartesian to polar coordinates. For simplicity and interpretability, {func}`cart2pol ` now always sets the angle `phi` to 0 when the vector's norm `rho` is 0, rather than following the [C standard](https://www.iso.org/standard/29237.html) for [`arctan2`](https://en.wikipedia.org/wiki/Atan2). This change should not affect existing workflows, as a zero-length vector has an undefined direction—meaning it could point in any direction, and assigning `phi = 0` is a safe, neutral choice.
51 |
52 | ## Reflections
53 |
54 | I would like to extend my sincere gratitude to the [Neuroinformatics Unit](https://neuroinformatics.dev/) for fostering an exceptional open environment that has even inspired me to enhance my own projects. Their efforts have motivated me to make [BraiAn](https://silvalab.codeberg.page/BraiAn/) more accessible to inexperienced researchers, to improve its interoperability and to develop automated pipelines for software verification.
55 |
56 | I am firmly convinced that bridging the gap between experimental laboratories is crucial for enabling reproducible and comparable results across research groups. I have long _believed_ that the development and adoption of shared standards and widely accepted platforms can facilitate this goal. The Neuroinformatics Unit has not only reinforced my conviction but also _demonstrated_, through their remarkable work, that this vision can be turned into a practical reality.
57 |
--------------------------------------------------------------------------------
/tests/test_unit/test_plots/test_trajectory.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | from matplotlib import pyplot as plt
4 |
5 | from movement.plots.trajectory import plot_centroid_trajectory
6 |
7 | plt.switch_backend("Agg") # to avoid pop-up window
8 |
9 |
@pytest.mark.parametrize(
    ["image", "selection", "expected_data"],
    [
        pytest.param(
            True,
            {"keypoints": ["left", "right"]},
            np.array([[0, 0], [0, 1], [0, 2], [0, 3]], dtype=float),
            id="left-right + image",
        ),
        pytest.param(
            False,
            {"keypoints": ["snout", "tail"]},
            np.array([[0, 0], [0, 1], [0, 2], [0, 3]], dtype=float),
            id="snout-tail",
        ),
        pytest.param(
            False,
            {"keypoints": "centre"},
            np.array([[0, 0], [0, 1], [0, 2], [0, 3]], dtype=float),
            id="centre",
        ),
        pytest.param(
            False,
            {},
            np.array([[0, 0], [0, 1], [0, 2], [0, 3]], dtype=float),
            id="no specified keypoints or individuals",
        ),
        pytest.param(
            False,
            {"individual": "id_0"},
            np.array([[0, 0], [0, 1], [0, 2], [0, 3]], dtype=float),
            id="only individual specified",
        ),
        pytest.param(
            False,
            {"keypoints": ["centre", "snout"]},
            np.array([[0, 0.5], [0, 1.5], [0, 2.5], [0, 3.5]], dtype=float),
            id="centre-snout",
        ),
        pytest.param(
            True,
            {"keypoints": ["centre", "snout"]},
            np.array([[0, 0.5], [0, 1.5], [0, 2.5], [0, 3.5]], dtype=float),
            id="centre-snout + image",
        ),
    ],
)
def test_trajectory_plot(one_individual, image, selection, expected_data):
    """Check the plotted centroid trajectory against expected coordinates."""
    _, axes = plt.subplots()
    if image:
        # Plot on top of a background image, to check that its presence
        # does not alter the plotted trajectory data.
        axes.imshow(np.zeros((10, 10)))
    _, axes = plot_centroid_trajectory(one_individual, ax=axes, **selection)
    plotted_data = axes.collections[0].get_offsets().data
    np.testing.assert_array_almost_equal(plotted_data, expected_data)
66 |
67 |
@pytest.mark.parametrize(
    ["selection"],
    [
        pytest.param(
            {"keypoints": "centre"},
            id="no_keypoints",
        ),
        pytest.param(
            {"individuals": "id_0"},
            id="no_individuals",
        ),
        pytest.param(
            {"keypoints": "centre", "individuals": "id_0"},
            id="only_time_space",
        ),
    ],
)
def test_trajectory_dropped_dim(two_individuals, selection):
    """Check the trajectory plot when dimensions have been squeezed out.

    Selecting a single coordinate along a dimension and squeezing drops
    that dimension from the data array; plotting should still work.
    """
    squeezed = two_individuals.sel(**selection).squeeze()
    _, ax = plot_centroid_trajectory(squeezed)
    plotted_data = ax.collections[0].get_offsets().data
    expected = np.array([[0, 0], [0, 1], [0, 2], [0, 3]], dtype=float)
    np.testing.assert_array_almost_equal(plotted_data, expected)
96 |
97 |
@pytest.mark.parametrize(
    ["selection", "expected_data"],
    [
        pytest.param(
            {},
            np.array([[0, 0], [0, 1], [0, 2], [0, 3]], dtype=float),
            id="default",
        ),
        pytest.param(
            {"individual": "id_0"},
            np.array([[0, 0], [0, 1], [0, 2], [0, 3]], dtype=float),
            id="id_0",
        ),
        pytest.param(
            {"keypoints": ["snout", "tail"]},
            np.array([[0, 0], [0, 1], [0, 2], [0, 3]], dtype=float),
            id="snout-tail",
        ),
        pytest.param(
            {"individual": "id_0", "keypoints": ["tail"]},
            np.array([[0, -1], [0, 0], [0, 1], [0, 2]], dtype=float),
            id="tail id_0",
        ),
        pytest.param(
            {"individual": "id_1", "keypoints": ["tail"]},
            np.array([[0, 1], [0, 0], [0, -1], [0, -2]], dtype=float),
            id="tail id_1",
        ),
    ],
)
def test_trajectory_two_crosses(two_individuals, selection, expected_data):
    """Check centroid trajectories for a dataset with two individuals."""
    _, ax = plot_centroid_trajectory(two_individuals, **selection)
    plotted_data = ax.collections[0].get_offsets().data
    np.testing.assert_array_almost_equal(plotted_data, expected_data)
133 |
134 |
def test_trajectory_multiple_individuals(two_individuals):
    """Selecting more than one individual should raise a ValueError."""
    expected_error = "Only one individual can be selected."
    with pytest.raises(ValueError, match=expected_error):
        plot_centroid_trajectory(two_individuals, individual=["id_0", "id_1"])
141 |
--------------------------------------------------------------------------------
/docs/source/blog/movement-v0_0_21.md:
--------------------------------------------------------------------------------
1 | ---
2 | blogpost: true
3 | date: Dec 5, 2024
4 | author: Niko Sirmpilatze
5 | location: London, England
6 | category: release
7 | language: English
8 | ---
9 |
10 | # Release v0.0.21 and next steps
11 |
12 | _This is our inaugural blogpost, containing a summary of the `v0.0.21` release and a preview of what's coming next in 2025._
13 |
14 | ## What's new in movement v0.0.21?
15 |
16 | :::{tip}
17 | See our [installation guide](target-installation) for instructions on how to
18 | install the latest version or upgrade from an existing installation.
19 | :::
20 |
21 | __Input/Output__
22 |
23 | - We have added the {func}`movement.io.load_poses.from_multiview_files` function to support loading pose tracking data from multiple camera views.
24 | - We have made several small improvements to reading bounding box tracks. See our new {ref}`example ` to learn more about working with bounding boxes.
25 | - We have added a new {ref}`example ` on using `movement` to convert pose tracking data between different file formats.
26 |
27 | __Kinematics__
28 |
29 | The {mod}`kinematics ` module has been moved from `movement.analysis.kinematics` to `movement.kinematics` and packs a number of new functions:
30 | - {func}`compute_forward_vector `
31 | - {func}`compute_head_direction_vector `
32 | - {func}`compute_pairwise_distances `
33 | - {func}`compute_speed `
34 | - {func}`compute_path_length `
35 |
36 | __Breaking changes__
37 |
38 | - We have dropped support for using filtering and
39 | kinematic functions via the `move` accessor syntax,
40 | because we've found the concept hard to convey to new users. All functions are henceforth solely accessible by importing them from the relevant modules. Having one way of doing things simplifies the mental model for users and reduces the maintenance effort on our side. See an example below:
41 |
42 | ```python
43 | # Instead of:
44 | position_filt = ds.move.median_filter(window=5)
45 | velocity = ds.move.compute_velocity()
46 |
47 | # Use:
48 | from movement.filtering import median_filter
49 | from movement.kinematics import compute_velocity
50 |
51 | position_filt = median_filter(ds.position, window=5)
52 | velocity = compute_velocity(ds.position)
53 | ```
54 | - We have slightly modified the [structure of movement datasets](target-poses-and-bboxes-dataset), by changing the order of dimensions. This should have no effect when indexing data by dimension names, i.e. using the {meth}`xarray.Dataset.sel` or {meth}`xarray.Dataset.isel` methods. However, you may need to update your code if you are using Numpy-style indexing, for example:
55 |
56 | ```python
57 | # Indexing with dimension names (recommended, works always)
58 | position = ds.position.isel(
59 | individuals=0, keypoints=-1 # first individual, last keypoint
60 | )
61 |
62 | # Numpy-style indexing with the old dimension order (will no longer work)
63 | position = ds.position[:, 0, -1, :] # time, individuals, keypoints, space
64 |
65 | # Numpy-style indexing with the updated dimension order (use this instead)
66 | position = ds.position[:, :, -1, 0] # time, space, keypoints, individuals
67 | ```
68 |
69 |
70 | ## Looking to v0.1 and beyond
71 |
72 | Over the last 1.5 years, we have gradually built up the core functionalities we envisioned for `movement` version `v0.1`,
73 | as described in our [roadmap](target-roadmaps).
74 | These have included [input/output support](target-io) for a few popular animal tracking frameworks, as well as methods for data cleaning and computing kinematic variables.
75 |
76 | What we're still missing is a [napari](napari:) plugin for `movement`, which we envision both as an interactive visualisation framework for motion tracking data as well as a graphical user interface for `movement`.
77 | We have been working on a minimal version of this plugin for a while and are expecting to ship it as part of the `v0.1` release in early 2025.
78 |
79 | After `v0.1`, we'll be switching to [semantic versioning](https://semver.org/), as it applies to MINOR (new features) and PATCH (bug fixes) versions. Until we are ready for a `v1` MAJOR version, we cannot commit to backward compatibility, but any breaking changes will be clearly communicated in the release notes.
80 |
81 | ## Announcing movement Community Calls
82 |
83 | We are committed to fostering openness, transparency, and a strong sense of
84 | community within the `movement` project.
85 | Starting next year, we will host regular Community Calls via Zoom.
86 |
87 | The calls will take place every second Friday from **11:00 to 11:45 GMT**,
88 | beginning on **10 January 2025**.
89 | These calls are open to anyone interested in contributing to `movement` or
90 | sharing feedback on the project's progress and direction.
91 |
92 | A few days before each call, we will post an announcement on Zulip with the Zoom link and agenda.
93 | We encourage everyone who's interested in
94 | joining to follow this [Zulip topic](movement-community-calls:)
95 | to stay updated.
96 |
--------------------------------------------------------------------------------
/tests/test_unit/test_roi/test_points_within_roi.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import xarray as xr
4 |
5 | from movement.roi.line import LineOfInterest
6 |
7 |
@pytest.fixture
def diagonal_line() -> LineOfInterest:
    """Return a diagonal line-of-interest.

    The RoI runs from the origin (0, 0) to (1, 1).
    """
    start, end = (0, 0), (1, 1)
    return LineOfInterest([start, end])
15 |
16 |
@pytest.mark.parametrize(
    ["point", "include_boundary", "inside"],
    [
        pytest.param([0, 0], True, True, id="on starting point"),
        pytest.param([0, 0], False, False, id="on excluded starting point"),
        pytest.param([0.5, 0.5], True, True, id="inside LoI"),
        pytest.param(
            [0.5, 0.5], False, True, id="inside LoI (exclude boundary)"
        ),
        pytest.param([2.0, 2.0], True, False, id="outside LoI"),
        pytest.param([0.5, 0.5, 0.5], True, True, id="3D point inside LoI"),
        pytest.param([0.1, 0.2, 0.5], True, False, id="3D point outside LoI"),
    ],
)
def test_point_within_line(
    diagonal_line, point, include_boundary, inside
) -> None:
    """Check point-containment for a line of interest.

    A line's boundary consists of its end points.
    """
    is_inside = diagonal_line.contains_point(point, include_boundary)
    assert is_inside == inside
39 |
40 |
@pytest.mark.parametrize(
    ["point", "include_boundary", "inside"],
    [
        pytest.param([0, 0], True, True, id="on exterior boundary"),
        pytest.param([0, 0], False, False, id="on excluded exterior boundary"),
        pytest.param([0.25, 0.25], True, True, id="on hole boundary"),
        pytest.param(
            [0.25, 0.25], False, False, id="on excluded hole boundary"
        ),
        pytest.param([0.5, 0.5], True, False, id="inside hole"),
        pytest.param([0.1, 0.1], True, True, id="inside RoI"),
        pytest.param([2.0, 2.0], True, False, id="outside RoI"),
        pytest.param([0.5, 0.5, 0.5], True, False, id="3D point inside hole"),
        pytest.param([0.1, 0.1, 0.5], True, True, id="3D point inside RoI"),
    ],
)
def test_point_within_polygon(
    unit_square_with_hole, point, include_boundary, inside
) -> None:
    """Check point-containment for a polygonal RoI with a hole."""
    is_inside = unit_square_with_hole.contains_point(point, include_boundary)
    assert is_inside == inside
64 |
65 |
@pytest.mark.parametrize(
    ["points", "expected"],
    [
        pytest.param(
            xr.DataArray(
                np.array([[0.15, 0.15], [0.1, 0.1], [0.80, 0.80]]),
                dims=["points", "space"],
            ),
            xr.DataArray([True, True, True], dims=["points"]),
            id="3 points inside RoI",
        ),
        pytest.param(
            xr.DataArray(
                np.array([[0.55, 0.55], [0.5, 0.5], [0.7, 0.7], [0.6, 0.6]]),
                dims=["points", "space"],
            ),
            xr.DataArray([False, False, False, False], dims=["points"]),
            id="4 points inside hole",
        ),
        pytest.param(
            xr.DataArray(
                np.array([[0.55, 0.55], [0.1, 0.1], [0.7, 0.7], [0, 0]]),
                dims=["points", "space"],
            ),
            xr.DataArray([False, True, False, True], dims=["points"]),
            id="2 points inside hole, 1 point inside RoI, 1 on RoI boundary",
        ),
        pytest.param(
            xr.DataArray(
                np.array([[2, 2], [-2, -2]]),
                dims=["points", "space"],
            ),
            xr.DataArray([False, False], dims=["points"]),
            id="2 points outside RoI",
        ),
    ],
)
def test_points_within_polygon(
    unit_square_with_hole, points, expected
) -> None:
    """Check containment of many points supplied as an xr.DataArray."""
    computed = unit_square_with_hole.contains_point(points)
    xr.testing.assert_equal(computed, expected)
111 |
112 |
@pytest.mark.parametrize(
    ["points", "expected_shape", "expected_dims"],
    [
        pytest.param(
            xr.DataArray(np.zeros((4, 2)), dims=["points", "space"]),
            (4,),
            ("points",),
            id="points (4)",
        ),
        pytest.param(
            xr.DataArray(
                np.zeros((2, 2, 2)), dims=["time", "points", "space"]
            ),
            (2, 2),
            ("time", "points"),
            id="time (2), points (2)",
        ),
        pytest.param(
            xr.DataArray(
                np.zeros((2, 2, 2, 5)),
                dims=["time", "points", "space", "individuals"],
            ),
            (2, 2, 5),
            ("time", "points", "individuals"),
            id="time (2), points (2), individuals (5)",
        ),
    ],
)
def test_shape_dims(
    unit_square_with_hole, points, expected_shape, expected_dims
) -> None:
    """Check the shape and dims of the containment result.

    The containment check consumes the space dimension, so it should be
    absent from the output.
    """
    containment = unit_square_with_hole.contains_point(points)
    assert containment.shape == expected_shape
    assert containment.dims == expected_dims
151 |
--------------------------------------------------------------------------------
/tests/test_unit/test_roi/test_conditions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import xarray as xr
4 |
5 | from movement.roi import compute_region_occupancy
6 |
7 |
@pytest.mark.parametrize(
    "region_fixtures, data, expected_output",
    [
        pytest.param(
            ["triangle", "unit_square", "unit_square_with_hole"],
            np.array([[0.15, 0.15], [0.85, 0.85], [0.5, 0.5], [1.5, 1.5]]),
            {
                "data": np.array(
                    [
                        [True, False, True, False],
                        [True, True, True, False],
                        [True, True, False, False],
                    ]
                ),
                "coords": ["triangle", "Unit square", "Unit square with hole"],
            },
            id="triangle, unit_square, unit_square_with_hole",
        ),
        pytest.param(
            ["triangle", "triangle", "triangle"],
            np.array([[0.15, 0.15], [0.85, 0.85], [0.5, 0.5], [1.5, 1.5]]),
            {
                "data": np.array([[True, False, True, False]] * 3),
                "coords": ["triangle_0", "triangle_1", "triangle_2"],
            },
            id="3 superimposed triangles with same name",
        ),
        pytest.param(
            ["triangle", "triangle_different_name"],
            np.array([[0.5, 0.5]]),
            {
                "data": np.array([True] * 2),
                "coords": ["triangle", "pizza_slice"],
            },
            id="2 superimposed triangles with different names",
        ),
        pytest.param(
            ["triangle", "triangle_moved_01", "triangle_moved_100"],
            np.array([[0.5, 0.5]]),
            {
                "data": np.array([[True], [True], [False]]),
                "coords": ["triangle_0", "triangle_1", "triangle_2"],
            },
            id="3 different triangles with same name",
        ),
        pytest.param(
            ["triangle_different_name"],
            np.array([[0.5, 0.5]]),
            {
                "data": np.array([[True]]),
                "coords": ["pizza_slice"],
            },
            id="1 pizza slice triangle",
        ),
    ],
)
def test_region_occupancy(
    request: pytest.FixtureRequest,
    region_fixtures: list[str],
    data,
    expected_output: dict,
) -> None:
    """Check region occupancy computed for several RoIs.

    Verifies the dimensions, values, and region coordinates of the
    computed occupancy array.
    """
    rois = [request.getfixturevalue(name) for name in region_fixtures]
    positions = xr.DataArray(
        data=data,
        dims=["time", "space"],
        coords={"space": ["x", "y"]},
    )
    occupancies = compute_region_occupancy(positions, rois)

    assert occupancies.dims == ("region", "time")
    assert (occupancies.data == expected_output["data"]).all()
    assert occupancies.region.values.tolist() == expected_output["coords"]
86 |
87 |
def test_region_occupancy_many_regions(
    triangle, unit_square, unit_square_with_hole, triangle_different_name
):
    """Check occupancy for many RoIs sharing identical names.

    Ensures correct data and coordinate names for:
    - 1000 triangles suffixed _000 to _999
    - 100 unit squares suffixed _00 to _99
    - 10 unit squares with holes suffixed _0 to _9
    - 1 triangle named "pizza_slice" without suffix

    This exercises unique naming of the region coordinates when up to
    1000 identically-named regions are passed, which is not covered by
    the other tests.
    """
    regions = (
        1000 * [triangle]
        + 100 * [unit_square]
        + 10 * [unit_square_with_hole]
        + [triangle_different_name]
    )

    data = xr.DataArray(
        data=np.array([[0.5, 0.5]]),
        dims=["time", "space"],
        coords={"space": ["x", "y"]},
    )
    # Suffix width grows with the number of same-named regions.
    region_names = (
        [f"triangle_{i:03d}" for i in range(1000)]
        + [f"Unit square_{i:02d}" for i in range(100)]
        + [f"Unit square with hole_{i:01d}" for i in range(10)]
        + ["pizza_slice"]
    )
    expected_output = xr.DataArray(
        data=np.array(1100 * [[True]] + 10 * [[False]] + [[True]]),
        dims=["region", "time"],
        coords={"region": region_names},
    )
    occupancies = compute_region_occupancy(data, regions)
    xr.testing.assert_identical(occupancies, expected_output)
127 |
128 |
def test_region_occupancy_multiple_dims(triangle, two_individuals):
    """Check region occupancy for data with extra dimensions.

    The 'space' dimension should be replaced by a 'region' dimension,
    while all other dimensions ('time', 'keypoints', 'individuals')
    are preserved.
    """
    regions = [triangle] * 3
    occupancies = compute_region_occupancy(two_individuals, regions)

    input_dims = set(two_individuals.dims)
    output_dims = set(occupancies.dims)

    # Dimensions common to input and output are preserved unchanged.
    assert input_dims & output_dims == {"time", "keypoints", "individuals"}
    # 'space' is consumed by the occupancy computation...
    assert input_dims - output_dims == {"space"}
    # ...and 'region' is introduced, with one entry per RoI.
    assert output_dims - input_dims == {"region"}
    assert occupancies.region.shape == (len(regions),)
147 |
--------------------------------------------------------------------------------
/tests/test_unit/test_reports.py:
--------------------------------------------------------------------------------
1 | from contextlib import nullcontext as does_not_raise
2 |
3 | import numpy as np
4 | import pytest
5 | import xarray as xr
6 |
7 | from movement.utils.reports import report_nan_values
8 |
9 |
def assert_components_in_report(components, report_str):
    """Assert that the expected components are in the report string.

    Parameters
    ----------
    components : dict
        May contain the keys "expected" (substrings that must appear in
        the report) and "not_expected" (substrings that must be absent).
        Either key may be omitted.
    report_str : str
        The report string to check.

    Raises
    ------
    AssertionError
        If any expected component is missing from the report, or any
        not-expected component is present. The error message names the
        offending component, unlike a single compound assertion.

    """
    # Assert per component so a failure pinpoints the culprit.
    for component in components.get("expected", []):
        assert component in report_str, (
            f"Expected component {component!r} missing from report"
        )
    for component in components.get("not_expected", []):
        assert component not in report_str, (
            f"Unexpected component {component!r} found in report"
        )
18 |
19 |
@pytest.mark.parametrize(
    "data, expectations",
    [
        ("valid_poses_dataset", {"expected": ["No missing points"]}),
        ("valid_bboxes_dataset", {"expected": ["No missing points"]}),
        (
            "valid_poses_dataset_with_nan",
            {
                "expected": [
                    "position",
                    "keypoints",
                    "centroid",
                    "left",
                    "right",
                    "individuals",
                    "id_0",
                    "3/10",
                    "1/10",
                    "10/10",
                ],
                "not_expected": ["id_1"],
            },
        ),
        (
            "valid_bboxes_dataset_with_nan",
            {
                "expected": ["position", "individuals", "id_0", "3/10"],
                "not_expected": ["id_1"],
            },
        ),
    ],
)
def test_report_nan_values_full_dataset(data, expectations, request):
    """Check NaN reporting on full and valid datasets.

    The report should contain the correct NaN counts, keypoints, and
    individuals, whether or not the data contain NaN values.
    """
    position = request.getfixturevalue(data).position
    report_str = report_nan_values(position)
    assert_components_in_report(expectations, report_str)
60 |
61 |
@pytest.mark.parametrize(
    "data, selection_fn, expectations",
    [
        (
            "valid_poses_dataset_with_nan",
            lambda ds: ds.isel(individuals=0),
            {
                "expected": [
                    "centroid",
                    "left",
                    "right",
                    "3/10",
                    "1/10",
                    "10/10",
                ],
                "not_expected": ["id_0"],
            },
        ),
        (
            "valid_bboxes_dataset_with_nan",
            lambda ds: ds.isel(individuals=0),
            {
                "expected": ["3/10"],
                "not_expected": ["id_0"],
            },
        ),
        (
            "valid_poses_dataset_with_nan",
            lambda ds: ds.isel(keypoints=0),
            {
                "expected": ["id_0", "3/10"],
                "not_expected": ["centroid"],
            },
        ),
        (
            "valid_poses_dataset_with_nan",
            lambda ds: ds.isel(individuals=0, keypoints=0),
            {
                "expected": ["3/10"],
                "not_expected": ["centroid", "id_0"],
            },
        ),
    ],
    ids=[
        "ind_dim_with_ndim_0-poses",  # individuals dim is scalar
        "ind_dim_with_ndim_0-bboxes",  # individuals dim is scalar
        "kp_dim_with_ndim_0-poses",  # keypoints dim is scalar
        "both_dims_with_ndim_0-poses",  # both dims are scalar
    ],
)  # If ndim=0, the dim coords are not explicitly reported
def test_report_nan_values_scalar_dims(
    data, selection_fn, expectations, request
):
    """Check NaN reporting when some dimensions are scalar.

    Selecting a single individual or keypoint via ``isel()`` or
    ``sel()`` yields scalar dimensions (dimension.ndim == 0), which the
    reporting function should handle gracefully.
    """
    position = selection_fn(request.getfixturevalue(data)).position
    report_str = report_nan_values(position)
    assert_components_in_report(expectations, report_str)
123 |
124 |
@pytest.mark.parametrize(
    "data, fetch_data, expectations",
    [
        (
            "valid_poses_dataset_with_nan",
            lambda ds: ds.rename({"space": "other"}),
            does_not_raise({"expected": ["position", "other", "x", "y"]}),
        ),  # count NaNs separately for x and y
        (
            "simple_data_array_with_nan",
            lambda: xr.DataArray([1, np.nan, 3], dims="time"),
            does_not_raise({"expected": ["data", "1/3"]}),
        ),  # generic data array with required time dim
        (
            "invalid_data_array_with_nan",
            lambda: xr.DataArray([1, np.nan, 3], dims="dim1"),
            pytest.raises(ValueError, match=".*must contain.*time.*"),
        ),  # invalid data array without required time dim
    ],
    ids=["separate_x_y_dims", "simple_data_array", "missing_time_dim"],
)
def test_report_nan_values_arbitrary_dims(
    data, fetch_data, expectations, request
):
    """Test that the nan-value reporting function handles data with
    arbitrary dimensions as long as the required `time` dimension is
    present.
    """
    # Only the first case starts from a dataset fixture; the other two
    # build a standalone DataArray, so their lambdas take no argument.
    da = (
        fetch_data(request.getfixturevalue(data).position)
        if data == "valid_poses_dataset_with_nan"
        else fetch_data()
    )
    # ``does_not_raise`` is ``contextlib.nullcontext``, so entering it
    # yields the expectations dict passed above as ``e``. In the
    # ``pytest.raises`` case the report call raises, and the assertion
    # line is never reached.
    with expectations as e:
        report_str = report_nan_values(da)
        assert_components_in_report(e, report_str)
161 |
--------------------------------------------------------------------------------
/movement/napari/convert.py:
--------------------------------------------------------------------------------
1 | """Conversion functions from ``movement`` datasets to napari layers."""
2 |
3 | import numpy as np
4 | import pandas as pd
5 | import xarray as xr
6 |
7 |
def _construct_properties_dataframe(ds: xr.Dataset) -> pd.DataFrame:
    """Build a napari properties DataFrame from a ``movement`` dataset.

    The frame has columns ``individual``, ``time`` and ``confidence``;
    when the dataset carries a ``keypoints`` coordinate, a ``keypoint``
    column is inserted right after ``individual``.
    """
    columns = {
        "individual": ds.coords["individuals"].values,
        "time": ds.coords["time"].values,
        "confidence": ds["confidence"].values.flatten(),
    }
    column_order = list(columns)
    if "keypoints" in ds.coords:
        columns["keypoint"] = ds.coords["keypoints"].values
        column_order.insert(1, "keypoint")

    # reindex enforces the desired column order
    return pd.DataFrame(columns).reindex(columns=column_order)
22 |
23 |
def _construct_track_and_time_cols(
    ds: xr.Dataset,
) -> tuple[np.ndarray, np.ndarray]:
    """Build the napari track-id and time columns for a ``movement`` dataset.

    Each (individual, keypoint) pair is a separate track, so there are
    ``n_individuals * n_keypoints`` tracks of ``n_frames`` frames each.
    Both columns are returned as ``(n_tracks * n_frames, 1)`` arrays.
    """
    n_frames = ds.sizes["time"]
    # Datasets without a "keypoints" dimension (e.g. bboxes) count as 1
    n_tracks = ds.sizes["individuals"] * ds.sizes.get("keypoints", 1)

    track_ids = np.arange(n_tracks)
    frame_indices = np.arange(n_frames)

    # Track ids repeat each value n_frames times; frame indices cycle
    track_id_col = np.repeat(track_ids, n_frames)[:, np.newaxis]
    time_col = np.tile(frame_indices, n_tracks)[:, np.newaxis]

    return track_id_col, time_col
38 |
39 |
def ds_to_napari_layers(
    ds: xr.Dataset,
) -> tuple[np.ndarray, np.ndarray | None, pd.DataFrame]:
    """Convert ``movement`` dataset to napari Tracks array and properties.

    Parameters
    ----------
    ds : xr.Dataset
        ``movement`` dataset containing pose or bounding box tracks,
        confidence scores, and associated metadata.

    Returns
    -------
    points_as_napari : np.ndarray
        position data as a napari Tracks array with shape (N, 4),
        where N is n_keypoints * n_individuals * n_frames
        and the 4 columns are (track_id, frame_idx, y, x).
    bboxes_as_napari : np.ndarray | None
        bounding box data as a napari Shapes array with shape (N, 4, 4),
        where N is n_individuals * n_frames and each (4, 4) entry is
        a matrix of 4 rows (1 per corner vertex, starting from upper left
        and progressing in counterclockwise order) with the columns
        (track_id, frame, y, x). Returns None when the input dataset doesn't
        have a "shape" variable.
    properties : pd.DataFrame
        DataFrame with properties (individual, keypoint, time, confidence)
        for use with napari layers.

    Notes
    -----
    A corresponding napari Points array can be derived from the Tracks array
    by taking its last 3 columns: (frame_idx, y, x). See the documentation
    on the napari Tracks [1]_ and Points [2]_ layers.

    References
    ----------
    .. [1] https://napari.org/stable/howtos/layers/tracks.html
    .. [2] https://napari.org/stable/howtos/layers/points.html

    """
    # Construct the track_ID and time columns for the napari Tracks array
    track_id_col, time_col = _construct_track_and_time_cols(ds)

    # Reorder axes to (individuals, keypoints, frames, xy) so that the
    # flattened rows line up with the track_id/time columns built above
    axes_reordering: tuple[int, ...] = (2, 0, 1)
    if "keypoints" in ds.coords:
        axes_reordering = (3,) + axes_reordering
    yx_cols = np.transpose(
        ds.position.values,  # from: frames, xy, keypoints, individuals
        axes_reordering,  # to: individuals, keypoints, frames, xy
    ).reshape(-1, 2)[:, [1, 0]]  # swap x and y columns (napari is y-first)

    points_as_napari = np.hstack((track_id_col, time_col, yx_cols))
    bboxes_as_napari = None

    # Construct the napari Shapes array if the input dataset is a
    # bounding boxes one
    if ds.ds_type == "bboxes":
        # Compute bbox corners from the box centre (position) and its
        # full extents (shape): centre +/- half-extent
        xmin_ymin = ds.position - (ds.shape / 2)
        xmax_ymax = ds.position + (ds.shape / 2)

        # initialise xmax, ymin corner as xmin, ymin
        xmax_ymin = xmin_ymin.copy()
        # overwrite its x coordinate to xmax
        xmax_ymin.loc[{"space": "x"}] = xmax_ymax.loc[{"space": "x"}]

        # initialise xmin, ymax corner as xmin, ymin
        xmin_ymax = xmin_ymin.copy()
        # overwrite its y coordinate to ymax
        xmin_ymax.loc[{"space": "y"}] = xmax_ymax.loc[{"space": "y"}]

        # Add track_id and time columns to each corner array,
        # reusing the same axis reordering as for the Tracks array
        corner_arrays_with_track_id_and_time = [
            np.c_[
                track_id_col,
                time_col,
                np.transpose(corner.values, axes_reordering).reshape(-1, 2),
            ]
            for corner in [xmin_ymin, xmin_ymax, xmax_ymax, xmax_ymin]
        ]

        # Concatenate corner arrays along columns
        # (each row: 4 corners x 4 values)
        corners_array = np.concatenate(
            corner_arrays_with_track_id_and_time, axis=1
        )

        # Reshape to napari expected format
        # goes through corners counterclockwise from xmin_ymin
        # in image coordinates
        corners_array = corners_array.reshape(
            -1, 4, 4
        )  # last dimension: track_id, time, x, y
        bboxes_as_napari = corners_array[
            :, :, [0, 1, 3, 2]
        ]  # swap x and y columns

    # Construct the properties DataFrame
    # Stack individuals, time and keypoints (if present) dimensions
    # into a new single dimension named "tracks"
    dimensions_to_stack: tuple[str, ...] = ("individuals", "time")
    if "keypoints" in ds.coords:
        dimensions_to_stack += ("keypoints",)  # add last
    # sorted() stacks the dims in alphabetical order:
    # (individuals, keypoints, time)
    ds_ = ds.stack(tracks=sorted(dimensions_to_stack))

    properties = _construct_properties_dataframe(ds_)

    return points_as_napari, bboxes_as_napari, properties
148 |
--------------------------------------------------------------------------------
/movement/utils/logging.py:
--------------------------------------------------------------------------------
1 | """Logging utilities for the ``movement`` package."""
2 |
3 | import inspect
4 | import json
5 | import sys
6 | import warnings
7 | from datetime import datetime
8 | from functools import wraps
9 | from pathlib import Path
10 |
11 | from loguru import logger as loguru_logger
12 |
# Default location for movement's rotating log files
DEFAULT_LOG_DIRECTORY = Path.home() / ".movement"
14 |
15 |
class MovementLogger:
    """A custom logger wrapping the :mod:`loguru` logger.

    Attribute access for names not defined here is forwarded to the
    underlying loguru logger, so instances can be used as drop-in
    replacements for it.
    """

    def __init__(self):
        """Initialise with the shared loguru logger instance."""
        self.logger = loguru_logger

    def configure(
        self,
        log_file_name: str = "movement",
        log_directory: Path = DEFAULT_LOG_DIRECTORY,
        console: bool = True,
    ):
        """Set up a rotating file handler and, optionally, a console handler.

        The file handler logs at the DEBUG level, rotates at 5 MB, and
        retains only the most recent rotated file. The optional console
        handler writes WARNING-level messages to :data:`sys.stderr`.
        Alerts raised via the :mod:`warnings` module are also redirected
        to the logger.

        Parameters
        ----------
        log_file_name : str, optional
            Base name of the log file. Defaults to ``"movement"``.
        log_directory : pathlib.Path, optional
            Directory the log file is written to. Defaults to
            ``"~/.movement"``; a different directory can be supplied,
            for example, for testing purposes.
        console : bool, optional
            Whether to add a console logger. Defaults to ``True``.

        Returns
        -------
        str
            POSIX path of the configured log file.

        """
        log_directory.mkdir(parents=True, exist_ok=True)
        log_file = (log_directory / f"{log_file_name}.log").as_posix()
        # Drop any previously configured handlers before adding new ones
        self.remove()
        if console:
            self.add(sys.stderr, level="WARNING")
        self.add(log_file, level="DEBUG", rotation="5 MB", retention=1)
        # Redirect warnings to the logger
        warnings.showwarning = showwarning
        return log_file

    def _log_and_return_exception(self, log_method, message, *args, **kwargs):
        """Log ``message`` and hand it back when it is an Exception."""
        log_method(message, *args, **kwargs)
        return message if isinstance(message, Exception) else None

    def error(self, message, *args, **kwargs):
        """Log an error message, returning it if it is an Exception.

        Overrides loguru's ``logger.error()`` so that callers can write
        ``raise logger.error(SomeError(...))``.
        """
        return self._log_and_return_exception(
            self.logger.error, message, *args, **kwargs
        )

    def exception(self, message, *args, **kwargs):
        """Log an error with traceback, returning it if it is an Exception.

        Overrides loguru's ``logger.exception()`` analogously to
        :meth:`error`.
        """
        return self._log_and_return_exception(
            self.logger.exception, message, *args, **kwargs
        )

    def __getattr__(self, name):
        """Delegate unknown attribute access to the wrapped loguru logger."""
        return getattr(self.logger, name)

    def __repr__(self):
        """Mirror the wrapped loguru logger's representation."""
        return repr(self.logger)
96 |
97 |
# Shared, package-wide logger instance
logger = MovementLogger()
99 |
100 |
def showwarning(message, category, filename, lineno, file=None, line=None):
    """Forward alerts from the :mod:`warnings` module to the logger.

    A stand-in for :func:`logging.captureWarnings`: it is installed as
    ``warnings.showwarning`` so that warnings end up in the log instead
    of on stderr.
    """
    formatted = warnings.formatwarning(
        message, category, filename, lineno, line
    )
    # depth=2 makes loguru attribute the record to the warning's origin
    # rather than to this shim
    logger.opt(depth=2).warning(formatted)
111 |
112 |
def log_to_attrs(func):
    """Record the wrapped function's invocation in the data's ``log`` attr.

    Each call appends a JSON entry (operation name, timestamp, and the
    repr of every argument except the first) to the ``log`` attribute of
    the returned object. The wrapped function must take an
    :class:`xarray.Dataset` or :class:`xarray.DataArray` as its first
    argument and return an object of the same type.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)

        entry = {
            "operation": func.__name__,
            "datetime": str(datetime.now()),
        }

        # Resolve every argument (including defaults) by name
        bound = inspect.signature(func).bind(*args, **kwargs)
        bound.apply_defaults()

        # Record all arguments except the first (the Dataset/DataArray)
        for name, value in list(bound.arguments.items())[1:]:
            if name == "kwargs" and not value:
                continue  # Skip empty kwargs
            entry[name] = repr(value)

        # Nothing to annotate if the result carries no attrs
        if result is None or not hasattr(result, "attrs"):
            return result

        log_str = result.attrs.get("log", "[]")
        try:
            history = json.loads(log_str)
        except json.JSONDecodeError:
            history = []
            logger.warning(
                f"Failed to decode existing log in attributes: {log_str}. "
                f"Overwriting with an empty list."
            )

        history.append(entry)
        result.attrs["log"] = json.dumps(history, indent=2)

        return result

    return wrapper
160 |
--------------------------------------------------------------------------------
/movement/roi/polygon.py:
--------------------------------------------------------------------------------
1 | """2-dimensional regions of interest."""
2 |
3 | from __future__ import annotations
4 |
5 | from collections.abc import Sequence
6 | from typing import Any
7 |
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 | from matplotlib.patches import PathPatch as PltPatch
11 | from matplotlib.path import Path as PltPath
12 |
13 | from movement.roi.base import BaseRegionOfInterest, PointLikeList
14 | from movement.roi.line import LineOfInterest
15 |
16 |
class PolygonOfInterest(BaseRegionOfInterest):
    """Representation of a two-dimensional region in the x-y plane.

    This class can be used to represent polygonal regions or subregions
    of the area in which the experimental data was gathered. These might
    include the arms of a maze, a nesting area, a food source, or other
    similar areas of the experimental enclosure that have some significance.

    An instance of this class represents such regions of interest (RoIs)
    in an analysis. The basic usage is to construct an instance by passing
    in a list of points, which are joined (in sequence) by straight lines
    between consecutive pairs of points to form the exterior boundary of
    the RoI. Note that the exterior boundary (accessible via the
    ``.exterior_boundary`` property) is a (closed)
    :class:`LineOfInterest`, and may be treated accordingly.

    The class also supports holes - subregions properly contained inside
    the region that are not part of the region itself. These can be
    specified by the ``holes`` argument, and define the interior boundaries
    of the region. The interior boundaries are accessible via the
    ``.interior_boundaries`` property, and the polygonal regions that make
    up the holes via the ``holes`` property.
    """

    def __init__(
        self,
        exterior_boundary: PointLikeList,
        holes: Sequence[PointLikeList] | None = None,
        name: str | None = None,
    ) -> None:
        """Create a new region of interest (RoI).

        Parameters
        ----------
        exterior_boundary : tuple of (x, y) pairs
            The points (in sequence) that make up the boundary of the
            region. At least three points must be provided.
        holes : sequence of sequences of (x, y) pairs, default None
            A sequence of items, where each item will be interpreted as the
            ``exterior_boundary`` of an internal hole within the region.
            See the ``holes`` argument to ``shapely.Polygon`` for details.
        name : str, optional
            Name of the RoI that is to be created. A default name will be
            inherited from the base class if not provided.

        See Also
        --------
        movement.roi.BaseRegionOfInterest : The base class that
            constructor arguments are passed to, and defaults are inherited
            from.

        """
        super().__init__(
            points=exterior_boundary, dimensions=2, holes=holes, name=name
        )

    @property
    def _default_plot_args(self) -> dict[str, Any]:
        # Polygons default to a light-blue fill with a black outline
        defaults = dict(super()._default_plot_args)
        defaults["facecolor"] = "lightblue"
        defaults["edgecolor"] = "black"
        return defaults

    @property
    def exterior_boundary(self) -> LineOfInterest:
        """The exterior boundary of this RoI."""
        return LineOfInterest(
            self.region.exterior.coords,
            loop=True,
            name=f"Exterior boundary of {self.name}",
        )

    @property
    def holes(self) -> tuple[PolygonOfInterest, ...]:
        """The interior holes of this RoI.

        Holes are regions properly contained within the exterior boundary
        of the RoI that are not part of the RoI itself (like the centre of
        a doughnut, for example). A region with no holes returns the empty
        tuple.
        """
        return tuple(
            PolygonOfInterest(ring.coords, name=f"Hole {i} of {self.name}")
            for i, ring in enumerate(self.region.interiors)
        )

    @property
    def interior_boundaries(self) -> tuple[LineOfInterest, ...]:
        """The interior boundaries of this RoI.

        Interior boundaries are the boundaries of holes contained within
        the polygon. A region with no holes returns the empty tuple.
        """
        return tuple(
            LineOfInterest(
                ring.coords,
                loop=True,
                name=f"Interior boundary {i} of {self.name}",
            )
            for i, ring in enumerate(self.region.interiors)
        )

    def _plot(
        self, fig: plt.Figure, ax: plt.Axes, **matplotlib_kwargs
    ) -> tuple[plt.Figure, plt.Axes]:
        """Draw the polygon onto ``ax`` as a compound-path patch.

        ``matplotlib`` requires hole coordinates to be listed in the
        reverse orientation to the exterior boundary. Since our geometry
        is normalised upon creation, flipping the exterior coordinates
        with :func:`numpy.flip` is a cheap way to adhere to this
        convention (it simply reverses the order of the coordinates).
        """
        exterior_path = PltPath(
            np.flip(np.asarray(self.exterior_boundary.coords), axis=0)
        )
        hole_paths = [
            PltPath(np.asarray(boundary.coords))
            for boundary in self.interior_boundaries
        ]
        compound_path = PltPath.make_compound_path(
            exterior_path, *hole_paths
        )

        ax.add_patch(PltPatch(compound_path, **matplotlib_kwargs))
        ax.autoscale_view(tight=True)
        return fig, ax
149 |
--------------------------------------------------------------------------------
/movement/roi/line.py:
--------------------------------------------------------------------------------
1 | """1-dimensional lines of interest."""
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import xarray as xr
6 | from numpy.typing import ArrayLike
7 |
8 | from movement.roi.base import BaseRegionOfInterest, PointLikeList
9 | from movement.utils.broadcasting import broadcastable_method
10 |
11 |
class LineOfInterest(BaseRegionOfInterest):
    """Representation of boundaries or other lines of interest.

    This class can be used to represent boundaries or other internal divisions
    of the area in which the experimental data was gathered. These might
    include segments of a wall that are removed partway through a behavioural
    study, or coloured marking on the floor of the experimental enclosure that
    have some significance. Instances of this class also constitute the
    boundary of two-dimensional regions (polygons) of interest.

    An instance of this class can be used to represent these "one dimensional
    regions" (lines of interest, LoIs) in an analysis. The basic usage is to
    construct an instance of this class by passing in a list of points, which
    will then be joined (in sequence) by straight lines between consecutive
    pairs of points, to form the LoI that is to be studied.
    """

    def __init__(
        self,
        points: PointLikeList,
        loop: bool = False,
        name: str | None = None,
    ) -> None:
        """Create a new line of interest (LoI).

        Parameters
        ----------
        points : tuple of (x, y) pairs
            The points (in sequence) that make up the line segment. At least
            two points must be provided.
        loop : bool, default False
            If True, the final point in ``points`` will be connected by an
            additional line segment to the first, creating a closed loop.
            (See Notes).
        name : str, optional
            Name of the LoI that is to be created. A default name will be
            inherited from the base class if not provided.

        Notes
        -----
        The constructor supports 'rings' or 'closed loops' via the ``loop``
        argument. However, if you want to define an enclosed region for your
        analysis, we recommend you create a
        :class:`PolygonOfInterest`
        and use its ``boundary`` property instead.

        See Also
        --------
        movement.roi.BaseRegionOfInterest
            The base class that constructor arguments are passed to.

        """
        # A LoI is a 1-dimensional region; ``loop`` maps onto the base
        # class's ``closed`` flag.
        super().__init__(points, dimensions=1, closed=loop, name=name)

    def _plot(
        self, fig: plt.Figure, ax: plt.Axes, **matplotlib_kwargs
    ) -> tuple[plt.Figure, plt.Axes]:
        """LinesOfInterest can simply be plotted as lines."""
        # Unzip the (x, y) coordinate pairs into separate x and y sequences
        ax.plot(
            [c[0] for c in self.coords],
            [c[1] for c in self.coords],
            **matplotlib_kwargs,
        )
        return fig, ax

    @broadcastable_method(
        only_broadcastable_along="space", new_dimension_name="normal"
    )
    def normal(self, on_same_side_as: ArrayLike = (0.0, 0.0)) -> np.ndarray:
        """Compute the unit normal to this line.

        The unit normal is a vector perpendicular to the input line
        whose norm is equal to 1. The direction of the normal vector
        is not fully defined: the line divides the 2D plane in two
        halves, and the normal could be pointing to either of the half-planes.
        For example, a horizontal line divides the 2D plane in a
        bottom and a top half-plane, and we can choose whether
        the normal points "upwards" or "downwards". We use a sample
        point to define the half-plane the normal vector points to.

        If this is a multi-segment line, the method raises an error.

        Parameters
        ----------
        on_same_side_as : ArrayLike
            A sample point in the (x,y) plane the normal is in. If multiple
            points are given, one normal vector is returned for each point
            given. By default, the origin is used.

        Raises
        ------
        ValueError : When the normal is requested for a multi-segment geometry.

        """
        # A multi-segment geometry always has at least 3 coordinates.
        if len(self.coords) > 2:
            raise ValueError(
                "Normal is not defined for multi-segment geometries."
            )

        on_same_side_as = np.array(on_same_side_as)

        # Direction vector along the (single) segment
        parallel_to_line = np.array(self.region.coords[1]) - np.array(
            self.region.coords[0]
        )
        # (x, y) -> (y, -x) is perpendicular to the segment direction
        normal = np.array([parallel_to_line[1], -parallel_to_line[0]])
        # Normalise to unit length
        normal /= np.sqrt(np.sum(normal**2))

        # Flip the normal if it points away from the half-plane
        # containing the sample point
        if np.dot((on_same_side_as - self.region.coords[0]), normal) < 0:
            normal *= -1.0
        return normal

    def compute_angle_to_normal(
        self,
        direction: xr.DataArray,
        position: xr.DataArray,
        in_degrees: bool = False,
    ) -> xr.DataArray:
        """Compute the angle between the normal to the segment and a direction.

        The returned angle is the signed angle between the normal to the
        segment and the ``direction`` vector(s) provided.

        Parameters
        ----------
        direction : xarray.DataArray
            An array of vectors representing a given direction,
            e.g., the forward vector(s).
        position : xr.DataArray
            Spatial positions, considered the origin of the ``direction``.
        in_degrees : bool
            If ``True``, angles are returned in degrees. Otherwise angles are
            returned in radians. Default ``False``.

        See Also
        --------
        movement.utils.vector.compute_signed_angle_2d :
            For the definition of the signed angle between two vectors.

        """
        # The normal is negated before being handed to the base-class
        # helper -- NOTE(review): presumably so the vector points from the
        # query positions towards the segment; confirm against
        # movement.roi.base._boundary_angle_computation.
        return self._boundary_angle_computation(
            position=position,
            reference_vector=direction,
            how_to_compute_vector_to_region=lambda p: self._reassign_space_dim(
                -1.0 * self.normal(p), "normal"
            ),
            in_degrees=in_degrees,
        )
161 |
--------------------------------------------------------------------------------
|