├── .github
└── workflows
│ ├── build-readme-pypi.yml
│ ├── ci.yml
│ └── docs.yml
├── .gitignore
├── .pylintrc
├── .zenodo.json
├── CITATION.cff
├── LICENSE
├── README-pypi.md
├── README.md
├── assets
│   ├── DeepTrack2-logo.svg
├── DeepTrack2-logo.png
├── MPT-packed.gif
├── SPT-ideal.gif
├── SPT-noisy.gif
├── Tracing.gif
├── interface_v2.mp4
└── strip_gh_only_blocks.py
├── deeptrack
├── __init__.py
├── aberrations.py
├── augmentations.py
├── backend
│ ├── __init__.py
│ ├── _config.py
│ ├── array_api_compat_ext
│ │ ├── __init__.py
│ │ └── torch
│ │ │ ├── __init__.py
│ │ │ └── random.py
│ ├── core.py
│ ├── mie.py
│ ├── pint_definition.py
│ ├── polynomials.py
│ └── units.py
├── deeplay
│ └── __init__.py
├── elementwise.py
├── extras
│ ├── __init__.py
│ └── radialcenter.py
├── features.py
├── holography.py
├── image.py
├── math.py
├── noises.py
├── optics.py
├── properties.py
├── pytorch
│ ├── __init__.py
│ ├── data.py
│ └── features.py
├── scatterers.py
├── sequences.py
├── sources
│ ├── __init__.py
│ ├── base.py
│ ├── folder.py
│ └── rng.py
├── statistics.py
├── tests
│ ├── __init__.py
│ ├── backend
│ │ ├── __init__.py
│ │ ├── test__config.py
│ │ ├── test_core.py
│ │ ├── test_mie.py
│ │ └── test_polynomials.py
│ ├── extras
│ │ ├── __init__.py
│ │ └── test_radialcenter.py
│ ├── test_aberrations.py
│ ├── test_augmentations.py
│ ├── test_elementwise.py
│ ├── test_features.py
│ ├── test_holography.py
│ ├── test_image.py
│ ├── test_math.py
│ ├── test_noises.py
│ ├── test_optics.py
│ ├── test_properties.py
│ ├── test_scatterers.py
│ ├── test_sequences.py
│ ├── test_statistics.py
│ └── test_utils.py
├── types.py
└── utils.py
├── package.json
├── pyproject.toml
├── requirements.txt
├── setup.cfg
├── setup.py
└── tutorials
├── 1-getting-started
├── DTGS101_intro.ipynb
├── DTGS106_particle_image_modalities.ipynb
├── DTGS111_datafiles.ipynb
├── DTGS121_tracking_particle_cnn.ipynb
├── DTGS126_characterizing_aberrations_cnn.ipynb
├── DTGS127_characterizing_aberrations_optuna.ipynb
├── DTGS131_tracking_multiple_particles_unet.ipynb
├── DTGS141_distinguishing_particles_in_brightfield.ipynb
├── DTGS151_unsupervised_object_detection_with_lodestar.ipynb
└── DTGS161_torch_fitting.ipynb
├── 2-examples
├── DTEx201_MNIST.ipynb
├── DTEx202_single_particle_tracking.ipynb
├── DTEx203_particle_sizing.ipynb
├── DTEx204_multi_molecule_tracking.ipynb
├── DTEx205_inline_holography_3d_tracking.ipynb
├── DTEx206_cell_counting.ipynb
├── DTEx207_GAN_image_generation.ipynb
├── DTEx231A_LodeSTAR_autotracker_template.ipynb
├── DTEx231B_LodeSTAR_tracking_particles_of_various_shapes.ipynb
├── DTEx231C_LodeSTAR_measure_mass_experimental.ipynb
├── DTEx231D_LodeSTAR_track_BF-C2DL-HSC.ipynb
├── DTEx231E_LodeSTAR_track_Fluo-C2DL-Huh7.ipynb
├── DTEx231F_LodeSTAR_track_PhC-C2DL-PSC.ipynb
├── DTEx231G_LodeSTAR_track_plankton.ipynb
├── DTEx231H_LodeSTAR_track_3D_holography.ipynb
├── DTEx231I_LodeSTAR_measure_mass_simulated.ipynb
├── DTEx231J_LodeSTAR_measure_mass_cell.ipynb
├── DTEx241A_MAGIK_cell_migration_analysis.ipynb
├── DTEx241B_MAGIK_tracking_hela_cells.ipynb
├── assets
│ └── overview.png
└── readme_to_incorporate into MAGIK examples.md
├── 3-advanced-topics
├── DTAT301_features.ipynb
├── DTAT306_properties.ipynb
├── DTAT311_image.ipynb
├── DTAT321_scatterers.ipynb
├── DTAT323_optics.ipynb
├── DTAT324_holography.ipynb
├── DTAT325_aberrations.ipynb
├── DTAT327_noises.ipynb
├── DTAT329_augmentations.ipynb
├── DTAT341_sequences.ipynb
├── DTAT381_math.ipynb
├── DTAT383_utils.ipynb
├── DTAT385_statistics.ipynb
├── DTAT387_types.ipynb
├── DTAT389_elementwise.ipynb
├── DTAT391A_sources.base.ipynb
├── DTAT391B_sources.folder.ipynb
├── DTAT391C_sources.rng.ipynb
├── DTAT393A_pytorch.features.ipynb
├── DTAT393B_pytorch.data.ipynb
├── DTAT395_extras.radialcenter.ipynb
├── DTAT399A_backend.core.ipynb
├── DTAT399B_backend.pint_definition.ipynb
├── DTAT399C_backend.units.ipynb
├── DTAT399D_backend.polynomials.ipynb
├── DTAT399E_backend.mie.ipynb
└── DTAT399F_backend._config.ipynb
├── 4-developers
├── DTDV401_overview.ipynb
├── DTDV411_style.ipynb
└── DTDV421_backends.ipynb
└── test_notebooks.py
/.github/workflows/build-readme-pypi.yml:
--------------------------------------------------------------------------------
1 | name: Build PyPI README
2 |
3 | # Required so the Actions token can push code back to the repo
4 | permissions:
5 | contents: write
6 |
7 | on:
8 | push:
9 | branches: ["develop"]
10 | pull_request:
11 | branches: ["develop"]
12 |
13 | jobs:
14 | build-readme:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: Check out code
18 | uses: actions/checkout@v3
19 | with:
20 | persist-credentials: true # Ensures we can push changes
21 |
22 | - name: Set up Python
23 | uses: actions/setup-python@v4
24 | with:
25 | python-version: 3.9
26 |
27 | - name: Install dependencies
28 | run: |
29 | pip install --upgrade pip
30 | # (Optional) pip install -r requirements.txt
31 |
32 | - name: Generate PyPI README
33 | run: |
34 | python assets/strip_gh_only_blocks.py
35 | ls -lah README-pypi.md # Confirm file is generated
36 |
37 | - name: Commit and push changes
38 | run: |
39 | # Configure git
40 | git config user.name "github-actions"
41 | git config user.email "github-actions@github.com"
42 |
43 | # Stage the new/updated README-pypi.md
44 | git add README-pypi.md
45 |
46 | # Commit only if there are changes
47 | if ! git diff --cached --exit-code; then
48 | git commit -m "Auto-update README-pypi.md"
49 | git push origin HEAD:develop
50 | else
51 | echo "No changes to commit."
52 | fi
53 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python

name: Python package

on:
  push:
    branches: ["develop"]
  pull_request:
    branches: ["develop"]

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"]
        os: [ubuntu-latest, macos-latest, windows-latest]
        # Run the suite both with and without the optional deeplay extra.
        install-deeplay: ["", "deeplay"]

    # NOTE: a dangling empty `if:` key previously sat here; it is not a
    # valid job condition and has been removed.
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install flake8
          python -m pip install -e .
      - name: Install deeplay
        if: ${{ matrix.install-deeplay == 'deeplay' }}
        run: |
          python -m pip install deeplay
      - name: Lint with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
      - name: Test with unittest
        run: |
          python -m unittest discover -v deeptrack.tests
47 |
48 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
name: Update Documentation

# NOTE(review): the final step pushes commits back to the repository, so the
# workflow token needs write access to contents (the sibling
# build-readme-pypi.yml workflow documents the same requirement). Without
# this, `git push` fails with the default read-only token.
permissions:
  contents: write

on:
  release:
    types:
      - published
  workflow_dispatch:
    inputs:
      test_tag:
        description: "Release tag to simulate"
        required: true

jobs:
  update-docs:
    name: Update Documentation
    runs-on: ubuntu-latest

    steps:
      # Step 1: Check out the docs branch
      - name: Checkout docs branch
        uses: actions/checkout@v3
        with:
          ref: docs

      # Step 2: Set up Python
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.9"

      # Step 3: Install dependencies
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r doc_requirements.txt

      # Step 4: Pull the release code into a separate directory
      - name: Checkout release code
        uses: actions/checkout@v3
        with:
          path: release-code
          # Use the test tag from workflow_dispatch or the actual release tag
          ref: ${{ github.event.inputs.test_tag || github.event.release.tag_name }}

      - name: Install the package
        run: |
          cd release-code
          pip install -e .

      - name: Create the markdown files
        run: |
          python generate_doc_markdown.py deeptrack --exclude=tests,test,pytorch,deeplay

      # Step 5: Set version variable
      - name: Set version variable
        run: |
          VERSION=${{ github.event.inputs.test_tag || github.event.release.tag_name }}
          echo "VERSION=$VERSION" >> $GITHUB_ENV

      # Step 6: Update switcher.json
      - name: Update switcher.json
        run: |
          SWITCHER_FILE=_static/switcher.json
          jq --arg version "$VERSION" \
            '. |= [{"name": $version, "version": $version, "url": "https://DeepTrackAI.github.io/DeepTrack2/\($version)/"}] + .' \
            $SWITCHER_FILE > temp.json && mv temp.json $SWITCHER_FILE

      # Step 7: Build documentation using Sphinx into html
      - name: Build documentation
        env:
          SPHINX_APIDOC_DIR: release-code
        run: make html

      # Step 8: Copy built HTML to `docs/latest` and `docs/{version}`
      - name: Copy built HTML
        run: |
          mkdir -p docs/latest
          mkdir -p docs/$VERSION
          cp -r _build/html/* docs/latest/
          cp -r _build/html/* docs/$VERSION/

      # Step 9: Clean up `release-code` directory
      - name: Remove release-code directory
        run: rm -rf release-code

      # Step 10: Commit and push changes
      - name: Commit and push changes
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git add docs/latest docs/$VERSION _static/switcher.json
          git commit -m "Update docs for release $VERSION"
          git push
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | trial*.ipynb
2 |
3 | **/.ipynb_checkpoints
4 | **/__pycache__
5 | deeptrack-app/*
6 |
7 | lightning_logs
8 |
9 | paper-examples/models/*
10 | tutorials/3-advanced-topics/dummy_directory/
11 |
12 | build/*
13 | dist/*
14 | *.egg-info/
15 | */theory
16 | _src/build/**/*
17 |
18 | ParticleSizing
19 | 3DTracking
20 | CellData
21 | ParticleTracking
22 | data/
23 | examples/**/*/models/
24 | **/node_modules/
25 |
26 | *_dataset/
27 |
28 | .DS_Store
29 |
--------------------------------------------------------------------------------
/.zenodo.json:
--------------------------------------------------------------------------------
1 | {
2 | "creators": [
3 | {
4 | "orcid": "0000-0001-9386-4753",
5 | "affiliation": "Gothenburg University",
6 | "name": "Midtvedt, Benjamin"
7 | },
8 | {
9 | "orcid": "0000-0002-9197-3451",
10 | "affiliation": "Gothenburg University",
11 | "name": "Pineda, Jesus"
12 | },
13 | {
14 | "orcid": "0000-0001-7275-6921",
15 | "affiliation": "Chalmers University of Technology",
      "name": "Klein Moberg, Henrik"
17 | },
18 | {
19 | "orcid": "0000-0002-8625-0996",
20 | "affiliation": "University of Vic",
21 | "name": "Manzo, Carlo"
22 | },
23 | {
24 | "orcid": "0000-0001-5057-1846",
25 | "affiliation": "Gothenburg University",
26 | "name": "Volpe, Giovanni"
27 | }
28 | ],
29 |
30 | "title": "DeepTrack2",
31 |
32 | "related_identifiers": [
33 | {
34 | "scheme": "doi",
35 | "identifier": "10.1063/5.0034891",
36 | "relation": "isDocumentedBy",
37 | "resource_type": "publication-article"
38 | }
39 | ],
40 |
41 | "description": "A Python software platform for microscopy enhanced by deep learning." ,
42 |
43 | "keywords": ["Deep Learning", "Software", "Microscopy", "Particle Tracking", "Python"],
44 |
45 | "upload_type": "software",
46 |
47 | "communities": [
48 | {"identifier": "www.deeptrack.org"},
49 | {"identifier": "https://github.com/softmatterlab/DeepTrack2"}
50 | ]
51 | }
52 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | # This CITATION.cff file was generated with cffinit.
2 | # Visit https://bit.ly/cffinit to generate yours today!
3 |
4 | cff-version: 1.2.0
5 | title: DeepTrack2
6 | message: >-
7 | If you use this software, please cite it through
8 | this publication: Benjamin Midtvedt, Saga
9 | Helgadottir, Aykut Argun, Jesús Pineda, Daniel
10 | Midtvedt, Giovanni Volpe. "Quantitative Digital
11 | Microscopy with Deep Learning." Applied Physics
12 | Reviews 8 (2021), 011310.
13 | https://doi.org/10.1063/5.0034891
14 | type: software
15 | authors:
16 | - given-names: Benjamin
17 | family-names: Midtvedt
18 | orcid: 'https://orcid.org/0000-0001-9386-4753'
19 | - given-names: Jesus
20 | family-names: Pineda
21 | orcid: 'https://orcid.org/0000-0002-9197-3451'
22 | - given-names: Henrik
23 | family-names: Klein Moberg
24 | orcid: 'https://orcid.org/0000-0001-7275-6921'
25 | - given-names: Harshith
26 | family-names: Bachimanchi
27 | orcid: 'https://orcid.org/0000-0001-9497-8410'
28 | - given-names: Carlo
29 | family-names: Manzo
30 | orcid: 'https://orcid.org/0000-0002-8625-0996'
31 | - given-names: Giovanni
32 | family-names: Volpe
33 | orcid: 'https://orcid.org/0000-0001-5057-1846'
34 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Soft Matter Lab
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/assets/DeepTrack2-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepTrackAI/DeepTrack2/c1ad44b7a0a2e3f38e054d14e13332a5609a953b/assets/DeepTrack2-logo.png
--------------------------------------------------------------------------------
/assets/MPT-packed.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepTrackAI/DeepTrack2/c1ad44b7a0a2e3f38e054d14e13332a5609a953b/assets/MPT-packed.gif
--------------------------------------------------------------------------------
/assets/SPT-ideal.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepTrackAI/DeepTrack2/c1ad44b7a0a2e3f38e054d14e13332a5609a953b/assets/SPT-ideal.gif
--------------------------------------------------------------------------------
/assets/SPT-noisy.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepTrackAI/DeepTrack2/c1ad44b7a0a2e3f38e054d14e13332a5609a953b/assets/SPT-noisy.gif
--------------------------------------------------------------------------------
/assets/Tracing.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepTrackAI/DeepTrack2/c1ad44b7a0a2e3f38e054d14e13332a5609a953b/assets/Tracing.gif
--------------------------------------------------------------------------------
/assets/interface_v2.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepTrackAI/DeepTrack2/c1ad44b7a0a2e3f38e054d14e13332a5609a953b/assets/interface_v2.mp4
--------------------------------------------------------------------------------
/assets/strip_gh_only_blocks.py:
--------------------------------------------------------------------------------
1 | import re
2 |
def main():
    """Create README-pypi.md from README.md.

    Reads README.md, removes every block delimited by the GitHub-only
    markers, and writes the result to README-pypi.md.
    """

    with open("README.md", "r", encoding="utf-8") as f:
        text = f.read()

    # NOTE(review): the previous pattern was r".*?", which matches only
    # empty strings and therefore removed nothing -- the HTML-comment
    # markers were evidently lost. Restored to strip blocks delimited by
    # <!-- gh-only-start --> ... <!-- gh-only-end -->; confirm the exact
    # marker names against README.md.
    pattern = r"<!--\s*gh-only-start\s*-->.*?<!--\s*gh-only-end\s*-->"
    cleaned_text = re.sub(pattern, "", text, flags=re.DOTALL)

    with open("README-pypi.md", "w", encoding="utf-8") as f:
        f.write(cleaned_text)

    print("README-pypi.md has been generated successfully!")

if __name__ == "__main__":
    main()
20 |
--------------------------------------------------------------------------------
/deeptrack/__init__.py:
--------------------------------------------------------------------------------
# flake8: noqa
"""DeepTrack2 package initialization.

Sets up the shared pint unit registry, warns when an unsupported
TensorFlow installation is detected, and re-exports the public API of the
core submodules. Torch-dependent submodules are loaded lazily so that
importing ``deeptrack`` does not require PyTorch.
"""
import lazy_import
from typing import TYPE_CHECKING

from pint import UnitRegistry
from deeptrack.backend.pint_definition import pint_definitions

import warnings
import importlib.util

# Check if TensorFlow is installed and issue a compatibility warning:
# DeepTrack2 >= 2.0 no longer supports TensorFlow.
tensorflow_installed = importlib.util.find_spec("tensorflow") is not None
if tensorflow_installed:
    warnings.warn(
        (
            "TensorFlow is detected in your environment. "
            "DeepTrack2 version 2.0++ no longer supports TensorFlow. "
            "If you need TensorFlow support, "
            "please install the legacy version 1.7 of DeepTrack2:\n\n"
            "    pip install deeptrack==1.7\n\n"
            "For more details, refer to the DeepTrack documentation."
        ),
        UserWarning,
    )

# Create a unit registry with custom pixel-related units.
units = UnitRegistry(pint_definitions.split("\n"))

# Re-export the public API of the core submodules (star imports are
# intentional; see the flake8 suppression at the top of the file).
from deeptrack.backend import *

from deeptrack.features import *
from deeptrack.aberrations import *
from deeptrack.augmentations import *

from deeptrack.math import *
from deeptrack.noises import *
from deeptrack.optics import *
from deeptrack.scatterers import *
from deeptrack.sequences import *
from deeptrack.elementwise import *
from deeptrack.statistics import *
from deeptrack.holography import *

from deeptrack.image import strip


# Torch-dependent submodules are imported lazily so importing deeptrack
# does not require torch/deeplay to be installed.
# if not HAS_TORCH:
pytorch = lazy_import.lazy_module("deeptrack.pytorch")
deeplay = lazy_import.lazy_module("deeptrack.deeplay")


if TYPE_CHECKING:
    from . import generators
    from . import models
    from . import datasets
    from . import losses
    from . import layers
    from . import visualization
    from . import pytorch
    from . import deeplay

from deeptrack import tests

from deeptrack import (
    image,
    utils,
    backend,
    # Fake imports for IDE autocomplete
    # Does not actually import anything
)
71 |
--------------------------------------------------------------------------------
/deeptrack/backend/__init__.py:
--------------------------------------------------------------------------------
"""Public entry point for the ``deeptrack.backend`` subpackage.

Re-exports the configuration helpers from ``_config`` and the data/graph
primitives from ``core``; ``__all__`` below pins the public API.
"""

from deeptrack.backend._config import *
from deeptrack.backend.core import *

__all__ = [
    "config",  # deeptrack.backend._config
    "OPENCV_AVAILABLE",  # deeptrack.backend._config
    "TORCH_AVAILABLE",  # deeptrack.backend._config
    "xp",  # deeptrack.backend._config
    "DeepTrackDataDict",  # deeptrack.backend.core
    "DeepTrackDataObject",  # deeptrack.backend.core
    "DeepTrackNode",  # deeptrack.backend.core
]
13 |
--------------------------------------------------------------------------------
/deeptrack/backend/array_api_compat_ext/__init__.py:
--------------------------------------------------------------------------------
"""DeepTrack2 extensions to ``array_api_compat``'s torch namespace."""

from array_api_compat import torch as apctorch
from deeptrack.backend.array_api_compat_ext.torch import random

# NumPy and PyTorch random functions are incompatible with each other.
# The current array_api_compat module does not fix this incompatibility.
# So we implement our own patch, which implements a numpy-compatible
# interface for the torch random functions, and graft it onto the
# array_api_compat torch namespace here.
apctorch.random = random
9 |
--------------------------------------------------------------------------------
/deeptrack/backend/array_api_compat_ext/torch/__init__.py:
--------------------------------------------------------------------------------
"""Exposes the NumPy-compatible ``random`` submodule for torch."""

from deeptrack.backend.array_api_compat_ext.torch import random


__all__ = ["random"]
5 |
--------------------------------------------------------------------------------
/deeptrack/backend/array_api_compat_ext/torch/random.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import torch
4 |
5 | __all__ = [
6 | "rand",
7 | "random",
8 | "random_sample",
9 | "randn",
10 | "beta",
11 | "binomial",
12 | "choice",
13 | "multinomial",
14 | "randint",
15 | "shuffle",
16 | "uniform",
17 | "normal",
18 | "poisson",
19 | ]
20 |
21 |
def rand(*args: int) -> torch.Tensor:
    """Uniform samples in [0, 1), mimicking ``np.random.rand``.

    Parameters
    ----------
    *args : int
        Dimensions of the output tensor.

    Returns
    -------
    torch.Tensor
        Tensor of shape ``args`` filled with U[0, 1) samples.

    """
    shape = args
    return torch.rand(*shape)
24 |
25 |
def random(size: tuple[int, ...] | int | None = None) -> torch.Tensor:
    """Uniform samples in [0, 1), mimicking ``np.random.random``.

    Parameters
    ----------
    size : tuple of int, int, or None, optional
        Output shape. ``None`` (default) yields a 0-dim tensor, matching
        NumPy's scalar return. An int is treated as a 1-D length.

    Returns
    -------
    torch.Tensor
        Tensor of the requested shape with values drawn from U[0, 1).

    """
    # Bug fix: ``torch.rand()`` with no arguments raises a TypeError, and
    # an int size cannot be star-unpacked, so both cases are handled
    # explicitly. Passing the tuple whole also makes size=() valid.
    if size is None:
        return torch.rand(())
    if isinstance(size, int):
        return torch.rand(size)
    return torch.rand(tuple(size))
28 |
29 |
def random_sample(size: tuple[int, ...] | int | None = None) -> torch.Tensor:
    """Uniform samples in [0, 1), mimicking ``np.random.random_sample``.

    Parameters
    ----------
    size : tuple of int, int, or None, optional
        Output shape. ``None`` (default) yields a 0-dim tensor, matching
        NumPy's scalar return. An int is treated as a 1-D length.

    Returns
    -------
    torch.Tensor
        Tensor of the requested shape with values drawn from U[0, 1).

    """
    # Bug fix: same as ``random`` -- ``torch.rand()`` with no arguments
    # raises a TypeError and an int size cannot be star-unpacked.
    if size is None:
        return torch.rand(())
    if isinstance(size, int):
        return torch.rand(size)
    return torch.rand(tuple(size))
32 |
33 |
def randn(*args: int) -> torch.Tensor:
    """Standard-normal samples, mimicking ``np.random.randn``.

    Parameters
    ----------
    *args : int
        Dimensions of the output tensor.

    Returns
    -------
    torch.Tensor
        Tensor of shape ``args`` drawn from N(0, 1).

    """
    shape = args
    return torch.randn(*shape)
36 |
37 |
def beta(
    a: float,
    b: float,
    size: tuple[int, ...] | None = None,
) -> torch.Tensor:
    """Placeholder for ``np.random.beta``; no torch equivalent is wired up.

    Raises
    ------
    NotImplementedError
        Always.

    """
    raise NotImplementedError("the beta distribution is not implemented in torch")
44 |
45 |
def binomial(
    n: int,
    p: float,
    size: tuple[int, ...] | int | None = None,
) -> torch.Tensor:
    """Binomial samples, mimicking ``np.random.binomial``.

    Parameters
    ----------
    n : int
        Number of trials (>= 0).
    p : float
        Success probability in [0, 1].
    size : tuple of int, int, or None, optional
        Output shape. ``None`` (default) yields a 0-dim tensor.

    Returns
    -------
    torch.Tensor
        Number of successes out of ``n`` trials for each sample.

    """
    # Bug fix: the previous implementation used torch.bernoulli, which
    # ignored ``n`` (it always simulated a single trial) and crashed for
    # size=None. torch.distributions.Binomial implements the full
    # n-trial distribution.
    if size is None:
        shape = ()
    elif isinstance(size, int):
        shape = (size,)
    else:
        shape = tuple(size)
    dist = torch.distributions.Binomial(
        total_count=n, probs=torch.tensor(float(p))
    )
    return dist.sample(torch.Size(shape))
52 |
53 |
def choice(
    a: torch.Tensor,
    size: tuple[int, ...] | None = None,
    replace: bool = True,
    p: torch.Tensor | None = None,
) -> torch.Tensor:
    """Placeholder for ``np.random.choice``; no torch equivalent is wired up.

    Raises
    ------
    NotImplementedError
        Always.

    """
    raise NotImplementedError(
        "the choice function is not implemented in torch"
    )
63 |
64 |
def multinomial(
    n: int,
    pvals: torch.Tensor,
    size: tuple[int, ...] | int | None = None,
) -> torch.Tensor:
    """Multinomial counts, mimicking ``np.random.multinomial``.

    Parameters
    ----------
    n : int
        Number of experiments (total count distributed over categories).
    pvals : torch.Tensor
        Probabilities of each category (normalized internally).
    size : tuple of int, int, or None, optional
        Batch shape of independent draws. ``None`` (default) yields a
        single draw of shape ``(len(pvals),)``.

    Returns
    -------
    torch.Tensor
        Per-category counts; the last dimension sums to ``n``.

    """
    # Bug fix: the previous call ``torch.multinomial(pvals, n, size)``
    # passed ``size`` into torch.multinomial's ``replacement`` slot and
    # returned sampled category *indices*, not per-category counts as
    # NumPy does. torch.distributions.Multinomial has NumPy's semantics.
    if size is None:
        shape = ()
    elif isinstance(size, int):
        shape = (size,)
    else:
        shape = tuple(size)
    probs = torch.as_tensor(pvals)
    dist = torch.distributions.Multinomial(total_count=n, probs=probs)
    return dist.sample(torch.Size(shape))
71 |
72 |
def randint(
    low: int,
    high: int,
    size: tuple[int, ...] | int | None = None,
) -> torch.Tensor:
    """Integer samples in [low, high), mimicking ``np.random.randint``.

    Parameters
    ----------
    low : int
        Lowest value (inclusive).
    high : int
        Highest value (exclusive), as in both NumPy and torch.
    size : tuple of int, int, or None, optional
        Output shape. ``None`` (default) yields a 0-dim tensor.

    Returns
    -------
    torch.Tensor
        Integer tensor of the requested shape.

    """
    # Bug fix: torch.randint requires an explicit size argument, so
    # size=None previously raised. None maps to a 0-dim tensor to match
    # NumPy's scalar return; an int becomes a 1-D shape.
    if size is None:
        shape = ()
    elif isinstance(size, int):
        shape = (size,)
    else:
        shape = tuple(size)
    return torch.randint(low, high, shape)
79 |
80 |
def shuffle(x: torch.Tensor) -> torch.Tensor:
    """Return ``x`` with its first axis randomly permuted.

    Note: unlike ``np.random.shuffle``, this does not mutate ``x`` in
    place; it returns a permuted view/copy.

    Parameters
    ----------
    x : torch.Tensor
        Tensor whose leading dimension is to be shuffled.

    Returns
    -------
    torch.Tensor
        Tensor with rows of ``x`` in random order.

    """
    permutation = torch.randperm(x.shape[0])
    return x[permutation]
83 |
84 |
def uniform(
    low: float,
    high: float,
    size: tuple[int, ...] | int | None = None,
) -> torch.Tensor:
    """Uniform samples in [low, high), mimicking ``np.random.uniform``.

    Parameters
    ----------
    low : float
        Lower bound (inclusive).
    high : float
        Upper bound (exclusive).
    size : tuple of int, int, or None, optional
        Output shape. ``None`` (default) yields a 0-dim tensor.

    Returns
    -------
    torch.Tensor
        Samples scaled and shifted from U[0, 1) into [low, high).

    """
    # Bug fix: ``torch.rand(*size)`` crashed for size=None and for an int
    # size; normalize the shape first and pass it whole.
    if size is None:
        shape = ()
    elif isinstance(size, int):
        shape = (size,)
    else:
        shape = tuple(size)
    return torch.rand(shape) * (high - low) + low
91 |
92 |
def normal(
    loc: float,
    scale: float,
    size: tuple[int, ...] | int | None = None,
) -> torch.Tensor:
    """Gaussian samples, mimicking ``np.random.normal``.

    Parameters
    ----------
    loc : float
        Mean of the distribution.
    scale : float
        Standard deviation of the distribution.
    size : tuple of int, int, or None, optional
        Output shape. ``None`` (default) yields a 0-dim tensor.

    Returns
    -------
    torch.Tensor
        Samples from N(loc, scale**2).

    """
    # Bug fix: ``torch.randn(*size)`` crashed for size=None and for an int
    # size; normalize the shape first and pass it whole.
    if size is None:
        shape = ()
    elif isinstance(size, int):
        shape = (size,)
    else:
        shape = tuple(size)
    return torch.randn(shape) * scale + loc
99 |
100 |
def poisson(
    lam: float,
    size: tuple[int, ...] | int | None = None,
) -> torch.Tensor:
    """Poisson samples, mimicking ``np.random.poisson``.

    Parameters
    ----------
    lam : float
        Expected number of events (rate, >= 0).
    size : tuple of int, int, or None, optional
        Output shape. ``None`` (default) yields a 0-dim tensor.

    Returns
    -------
    torch.Tensor
        Samples from Poisson(lam).

    """
    # Bug fix: ``torch.full(None, lam)`` crashed for size=None; normalize
    # the shape first so scalar and int sizes work like NumPy's.
    if size is None:
        shape = ()
    elif isinstance(size, int):
        shape = (size,)
    else:
        shape = tuple(size)
    return torch.poisson(torch.full(shape, float(lam)))
106 |
107 |
108 | # TODO: implement the rest of the functions as they are needed
109 |
--------------------------------------------------------------------------------
/deeptrack/backend/mie.py:
--------------------------------------------------------------------------------
1 | """Mie scattering calculations.
2 |
3 | This module provides functions to perform Mie scattering calculations,
4 | including computation of spherical harmonics coefficients and related
5 | operations.
6 |
7 | Module Structure
8 | -----------------
9 | Functions:
10 |
11 | - `coefficients`: Coefficients for spherical harmonics.
12 | - `stratified_coefficients`: Coefficients for stratified spherical harmonics.
13 | - `harmonics`: Evaluates spherical harmonics of the Mie field.
14 |
15 | Example
16 | -------
17 | Define the parameters of the particle and the Mie scattering:
18 |
19 | >>> relative_refract_index = 1.5 + 0.01j
20 | >>> particle_radius = 0.5
21 | >>> max_order = 5
22 |
23 | Calculate Mie coefficients for a solid particle:
24 |
25 | >>> from deeptrack.backend import mie
26 | >>> A, B = mie.coefficients(relative_refract_index, particle_radius, max_order)
27 |
28 | Print them:
29 |
30 | >>> print("A coefficients:", A)
31 | >>> print("B coefficients:", B)
32 |
33 | """
34 |
35 | from typing import List, Tuple, Union
36 |
37 | import numpy as np
38 |
39 | from .polynomials import (
40 | ricbesh, ricbesy, ricbesj, dricbesh, dricbesj, dricbesy
41 | )
42 |
43 |
def coefficients(
    m: Union[float, complex],
    a: float,
    L: int,
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute the Mie scattering coefficients of a homogeneous sphere.

    The coefficients describe the scattering and absorption of light by
    the particle. All terms up to (and including) order L are evaluated
    from Riccati-Bessel polynomials.

    Parameters
    ----------
    m : float or complex
        The relative refractive index of the particle n_particle / n_medium.
    a : float
        The radius of the particle (> 0).
    L : int
        The maximum order of the spherical harmonics to be calculated.

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        Two complex arrays, A and B, holding the Mie scattering
        coefficients up to (and including) order L.

    """
    A = np.zeros(L, dtype=np.complex128)
    B = np.zeros(L, dtype=np.complex128)

    # Orders are 1-based; store order `order` at index `order - 1`.
    for order in range(1, L + 1):
        S_a = ricbesj(order, a)
        dS_a = dricbesj(order, a)
        S_ma = ricbesj(order, m * a)
        dS_ma = dricbesj(order, m * a)
        xi_a = ricbesh(order, a)
        dxi_a = dricbesh(order, a)

        A[order - 1] = (
            (m * S_ma * dS_a - S_a * dS_ma)
            /
            (m * S_ma * dxi_a - xi_a * dS_ma)
        )
        B[order - 1] = (
            (S_ma * dS_a - m * S_a * dS_ma)
            /
            (S_ma * dxi_a - m * xi_a * dS_ma)
        )

    return A, B
95 |
96 |
def stratified_coefficients(
    m: List[complex],
    a: List[float],
    L: int,
) -> Tuple[np.ndarray, np.ndarray]:
    """Calculate the Mie scattering coefficients for stratified spherical
    particles.

    This function calculates the terms up to (and including) order L using
    Riccati-Bessel polynomials. For each order it assembles a linear
    boundary-condition system over the layers and obtains the
    coefficients as ratios of determinants.

    Parameters
    ----------
    m : List[float or complex]
        The relative refractive indices of the particle layers
        (n_particle / n_medium).
    a : List[float]
        The radii of the particle layers (> 0).
    L : int
        The maximum order of the spherical harmonics to be calculated.

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        A tuple containing arrays of coefficients an and bn, up to (and
        including) order L.

    """
    n_layers = len(a)

    # A single layer is just a homogeneous sphere.
    if n_layers == 1:
        return coefficients(m[0], a[0], L)

    an = np.zeros((L,), dtype=np.complex128)
    bn = np.zeros((L,), dtype=np.complex128)

    # One system per harmonic order; n is 0-based, so the polynomial
    # order passed to the Riccati-Bessel helpers is n + 1.
    for n in range(L):
        # A and C accumulate the matrix entries whose determinant ratios
        # yield an and bn, respectively.
        A = np.zeros((2 * n_layers, 2 * n_layers), dtype=np.complex128)
        C = np.zeros((2 * n_layers, 2 * n_layers), dtype=np.complex128)

        for i in range(2 * n_layers):
            for j in range(2 * n_layers):
                # p selects the layer refractive index and q the layer
                # radius relevant to entry (i, j).
                p = np.floor((j + 1) / 2).astype(np.int32)
                q = np.floor((i / 2)).astype(np.int32)

                # Entries are non-zero only when a column couples a layer
                # to itself or to the adjacent layer (banded system).
                if not ((p - q == 0) or (p - q == 1)):
                    continue

                if np.mod(i, 2) == 0:
                    # Even rows hold derivative terms.
                    if (j < 2 * n_layers - 1) and ((j == 0) or
                        (np.mod(j, 2) == 1)):
                        A[i, j] = dricbesj(n + 1, m[p] * a[q])
                    elif np.mod(j, 2) == 0:
                        A[i, j] = dricbesy(n + 1, m[p] * a[q])
                    else:
                        A[i, j] = dricbesj(n + 1, a[q])

                    # The last column lives in the surrounding medium, so
                    # it is not scaled by a relative refractive index.
                    C[i, j] = (
                        m[p] * A[i, j]
                        if j != 2 * n_layers - 1
                        else A[i, j]
                    )
                else:
                    # Odd rows hold function-value terms.
                    if (j < 2 * n_layers - 1) and ((j == 0) or
                        (np.mod(j, 2) == 1)):
                        C[i, j] = ricbesj(n + 1, m[p] * a[q])
                    elif np.mod(j, 2) == 0:
                        C[i, j] = ricbesy(n + 1, m[p] * a[q])
                    else:
                        C[i, j] = ricbesj(n + 1, a[q])

                    A[i, j] = (
                        m[p] * C[i, j]
                        if j != 2 * n_layers - 1
                        else C[i, j]
                    )

        # Swap the last column for third-kind (Hankel-type) terms to form
        # the denominator determinants.
        B = A.copy()
        B[-2, -1] = dricbesh(n + 1, a[-1])
        B[-1, -1] = ricbesh(n + 1, a[-1])
        an[n] = np.linalg.det(A) / np.linalg.det(B)

        D = C.copy()
        D[-2, -1] = dricbesh(n + 1, a[-1])
        D[-1, -1] = ricbesh(n + 1, a[-1])
        bn[n] = np.linalg.det(C) / np.linalg.det(D)

    return an, bn
185 |
186 |
def harmonics(
    x: np.ndarray,
    L: int,
) -> Tuple[np.ndarray, np.ndarray]:
    """Calculate the spherical harmonics of the Mie field.

    The harmonics are calculated up to order L using an iterative method.

    Parameters
    ----------
    x : np.ndarray
        An array representing the cosine of the polar angle (theta) for each
        evaluation point relative to the scattering particle's center
        (the origin). Values should lie in [-1, 1]: `x = 1` is theta = 0°
        (directly forward along the z-axis), `x = -1` is theta = 180°, and
        `x = 0` is theta = 90°.
    L : int
        The order up to which to evaluate the harmonics.

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        A tuple containing arrays of harmonics PI and TAU of
        shape (L, *x.shape).

    """

    PI = np.zeros((L, *x.shape))
    TAU = np.zeros((L, *x.shape))

    # Bug fix: guard the seed assignments so that L == 1 (or L == 0) no
    # longer raises an IndexError when writing the order-2 entries.
    if L >= 1:
        PI[0, :] = 1
        TAU[0, :] = x
    if L >= 2:
        PI[1, :] = 3 * x
        TAU[1, :] = 6 * x * x - 3

    # Upward recurrence for the remaining orders (i is the 1-based order):
    #   pi_i  = (2i - 1)/(i - 1) * x * pi_{i-1} - i/(i - 1) * pi_{i-2}
    #   tau_i = i * x * pi_i - (i + 1) * pi_{i-1}
    for i in range(3, L + 1):
        PI[i - 1] = (
            (2 * i - 1) / (i - 1) * x * PI[i - 2] - i / (i - 1) * PI[i - 3]
        )
        TAU[i - 1] = i * x * PI[i - 1] - (i + 1) * PI[i - 2]

    return PI, TAU
237 |
--------------------------------------------------------------------------------
/deeptrack/backend/polynomials.py:
--------------------------------------------------------------------------------
1 | """Bessel and Riccati-Bessel polynomials.
2 |
3 | This module defines a set of functions for computing Bessel and Riccati-Bessel
4 | polynomials and their derivatives. It expands the corresponding capabilities of
5 | `scipy`.
6 |
7 | Module Structure
8 | -----------------
9 | Functions:
10 |
11 | - `besselj`: Bessel polynomial of the 1st kind.
12 | - `dbesselj`: First derivative of the Bessel polynomial of the 1st kind.
13 | - `bessely`: Bessel polynomial of the 2nd kind.
14 | - `dbessely`: First derivative of the Bessel polynomial of the 2nd kind.
15 | - `ricbesj`: Riccati-Bessel polynomial of the 1st kind.
16 | - `dricbesj`: First derivative of the Riccati-Bessel polynomial of the 1st kind.
17 | - `ricbesy`: Riccati-Bessel polynomial of the 2nd kind.
18 | - `dricbesy`: First derivative of the Riccati-Bessel polynomial of the 2nd kind.
19 | - `ricbesh`: Riccati-Bessel polynomial of the 3rd kind.
20 | - `dricbesh`: First derivative of the Riccati-Bessel polynomial of the 3rd kind.
21 |
22 | """
23 |
24 | from typing import Union
25 |
26 | import numpy as np
27 | from scipy.special import jv, h1vp, yv
28 |
29 |
def besselj(
    l: Union[int, float],
    x: Union[int, float, np.ndarray],
) -> Union[float, np.ndarray]:
    """Bessel function of the 1st kind.

    Thin wrapper around `scipy.special.jv`.

    Parameters
    ----------
    l : int or float
        Order of the Bessel function.
    x : int or float or np.ndarray
        Point(s) at which the function is evaluated.

    Returns
    -------
    float or np.ndarray
        The value of J_l at `x`.

    """

    return jv(l, x)
51 |
52 |
def dbesselj(
    l: Union[int, float],
    x: Union[int, float, np.ndarray],
) -> Union[float, np.ndarray]:
    """First derivative of the Bessel function of the 1st kind.

    Uses the recurrence J_l'(x) = (J_{l-1}(x) - J_{l+1}(x)) / 2.

    Parameters
    ----------
    l : int or float
        Order of the Bessel function.
    x : int or float or np.ndarray
        Point(s) at which the derivative is evaluated.

    Returns
    -------
    float or np.ndarray
        The value of J_l' at `x`.

    """

    return (jv(l - 1, x) - jv(l + 1, x)) / 2
74 |
75 |
def bessely(
    l: Union[int, float],
    x: Union[int, float, np.ndarray],
) -> Union[float, np.ndarray]:
    """Bessel function of the 2nd kind.

    Thin wrapper around `scipy.special.yv`.

    Parameters
    ----------
    l : int or float
        Order of the Bessel function.
    x : int or float or np.ndarray
        Point(s) at which the function is evaluated.

    Returns
    -------
    float or np.ndarray
        The value of Y_l at `x`.

    """

    return yv(l, x)
97 |
98 |
def dbessely(
    l: Union[int, float],
    x: Union[int, float, np.ndarray],
) -> Union[float, np.ndarray]:
    """First derivative of the Bessel function of the 2nd kind.

    Uses the recurrence Y_l'(x) = (Y_{l-1}(x) - Y_{l+1}(x)) / 2.

    Parameters
    ----------
    l : int or float
        Order of the Bessel function.
    x : int or float or np.ndarray
        Point(s) at which the derivative is evaluated.

    Returns
    -------
    float or np.ndarray
        The value of Y_l' at `x`.

    """

    return (yv(l - 1, x) - yv(l + 1, x)) / 2
120 |
121 |
def ricbesj(
    l: Union[int, float],
    x: Union[int, float, np.ndarray],
) -> Union[float, np.ndarray]:
    """Riccati-Bessel function of the 1st kind.

    Defined as psi_l(x) = sqrt(pi * x / 2) * J_{l + 1/2}(x).

    Parameters
    ----------
    l : int or float
        Order of the function.
    x : int or float or np.ndarray
        Point(s) at which the function is evaluated.

    Returns
    -------
    float or np.ndarray
        The value of psi_l at `x`.

    """

    prefactor = np.sqrt(np.pi * x / 2)
    return prefactor * jv(l + 0.5, x)
143 |
144 |
def dricbesj(
    l: Union[int, float],
    x: Union[int, float, np.ndarray],
) -> Union[float, np.ndarray]:
    """First derivative of the Riccati-Bessel function of the 1st kind.

    Obtained by the product rule on sqrt(pi * x / 2) * J_{l + 1/2}(x),
    with J' expanded via the standard recurrence.

    Parameters
    ----------
    l : int or float
        Order of the function.
    x : int or float or np.ndarray
        Point(s) at which the derivative is evaluated.

    Returns
    -------
    float or np.ndarray
        The value of psi_l' at `x`.

    """

    half_order = l + 0.5
    # Derivative of the sqrt prefactor times J.
    value_term = 0.5 * np.sqrt(np.pi / x / 2) * jv(half_order, x)
    # Prefactor times J' = (J_{nu-1} - J_{nu+1}) / 2.
    slope_term = (
        np.sqrt(np.pi * x / 2)
        * (jv(half_order - 1, x) - jv(half_order + 1, x)) / 2
    )
    return value_term + slope_term
168 |
169 |
def ricbesy(
    l: Union[int, float],
    x: Union[int, float, np.ndarray],
) -> Union[float, np.ndarray]:
    """Riccati-Bessel function of the 2nd kind.

    Defined as chi_l(x) = -sqrt(pi * x / 2) * Y_{l + 1/2}(x).

    Parameters
    ----------
    l : int or float
        Order of the function.
    x : int or float or np.ndarray
        Point(s) at which the function is evaluated.

    Returns
    -------
    float or np.ndarray
        The value of chi_l at `x`.

    """

    prefactor = -np.sqrt(np.pi * x / 2)
    return prefactor * yv(l + 0.5, x)
191 |
192 |
def dricbesy(
    l: Union[int, float],
    x: Union[int, float, np.ndarray],
) -> Union[float, np.ndarray]:
    """First derivative of the Riccati-Bessel function of the 2nd kind.

    Obtained by the product rule on -sqrt(pi * x / 2) * Y_{l + 1/2}(x),
    with Y' expanded via the standard recurrence.

    Parameters
    ----------
    l : int or float
        Order of the function.
    x : int or float or np.ndarray
        Point(s) at which the derivative is evaluated.

    Returns
    -------
    float or np.ndarray
        The value of chi_l' at `x`.

    """

    half_order = l + 0.5
    # Derivative of the sqrt prefactor times Y.
    value_term = -0.5 * np.sqrt(np.pi / 2 / x) * yv(half_order, x)
    # Prefactor times Y' = (Y_{nu-1} - Y_{nu+1}) / 2.
    slope_term = (
        np.sqrt(np.pi * x / 2)
        * (yv(half_order - 1, x) - yv(half_order + 1, x)) / 2
    )
    return value_term - slope_term
216 |
217 |
def ricbesh(
    l: Union[int, float],
    x: Union[int, float, np.ndarray],
) -> Union[float, np.ndarray]:
    """Riccati-Bessel function of the 3rd kind.

    Defined as xi_l(x) = sqrt(pi * x / 2) * H1_{l + 1/2}(x), where H1 is
    the Hankel function of the first kind.

    Parameters
    ----------
    l : int or float
        Order of the function.
    x : int or float or np.ndarray
        Point(s) at which the function is evaluated.

    Returns
    -------
    float or np.ndarray
        The (complex) value of xi_l at `x`.

    """

    # h1vp(..., False) returns the Hankel function value itself.
    hankel_value = h1vp(l + 0.5, x, False)
    return np.sqrt(np.pi * x / 2) * hankel_value
239 |
240 |
def dricbesh(
    l: Union[int, float],
    x: Union[int, float, np.ndarray],
) -> Union[float, np.ndarray]:
    """First derivative of the Riccati-Bessel function of the 3rd kind.

    Obtained by the product rule on sqrt(pi * x / 2) * H1_{l + 1/2}(x).

    Parameters
    ----------
    l : int or float
        Order of the function.
    x : int or float or np.ndarray
        Point(s) at which the derivative is evaluated.

    Returns
    -------
    float or np.ndarray
        The (complex) value of xi_l' at `x`.

    """

    half_order = l + 0.5
    # Derivative of the sqrt prefactor times the Hankel value.
    value_term = 0.5 * np.sqrt(np.pi / 2 / x) * h1vp(half_order, x, False)
    # Prefactor times the Hankel derivative.
    slope_term = np.sqrt(np.pi * x / 2) * h1vp(half_order, x, True)
    return value_term + slope_term
265 |
--------------------------------------------------------------------------------
/deeptrack/backend/units.py:
--------------------------------------------------------------------------------
1 | from numpy import ndarray
2 | from pint import Quantity, Unit, Context
3 | from .. import units as u
4 |
5 |
def get_active_voxel_size():
    """Return the active simulation voxel size as (x, y, z), in meters."""
    return tuple(
        (1 * pixel_unit).to(u.m).magnitude
        for pixel_unit in (u.sxpx, u.sypx, u.szpx)
    )
12 |
13 |
def get_active_scale():
    """Return the (x, y, z) scale factors between optical and simulation units.

    A zero ratio falls back to 1 so downstream divisions stay valid.
    """
    scales = []
    for optical, simulated in ((u.xpx, u.sxpx), (u.ypx, u.sypx), (u.zpx, u.szpx)):
        scales.append((1 * optical / simulated).to_base_units().magnitude or 1)
    return tuple(scales)
20 |
21 |
def create_context(
    xpixel=None,
    ypixel=None,
    zpixel=None,
    xscale=None,
    yscale=None,
    zscale=None,
):
    """Create a new pint context for unit conversions.

    Any argument that is None falls back to the currently active value.
    Non-None (x/y/z)scale values are multiplied with the active scale.

    Parameters
    ----------
    xpixel, ypixel, zpixel : float
        The size of pixels in each direction, in meters.
    xscale, yscale, zscale : int
        The upscale factor for internal simulations.
    """

    # Currently active pixel sizes (meters) and scale factors.
    default_xpixel = (1 * u.xpx).to(u.meter).magnitude
    default_ypixel = (1 * u.ypx).to(u.meter).magnitude
    default_zpixel = (1 * u.zpx).to(u.meter).magnitude
    active_xscale, active_yscale, active_zscale = get_active_scale()

    xpixel = xpixel or default_xpixel
    ypixel = ypixel or default_ypixel
    zpixel = zpixel or default_zpixel
    xscale = int(xscale * active_xscale) if xscale else int(active_xscale)
    yscale = int(yscale * active_yscale) if yscale else int(active_yscale)
    zscale = int(zscale * active_zscale) if zscale else int(active_zscale)

    ctx = Context()
    # The generic "pixel" unit follows the x direction.
    ctx.redefine(f"pixel = {xpixel} meter")
    for axis, pixel, scale in (
        ("x", xpixel, xscale),
        ("y", ypixel, yscale),
        ("z", zpixel, zscale),
    ):
        ctx.redefine(f"{axis}pixel = {pixel} meter")
        ctx.redefine(f"simulation_{axis}pixel = {pixel / scale} meter")
    return ctx
64 |
65 |
class ConversionTable:
    """Convert a dictionary of values to desired units.

    Each keyword argument to the constructor names a property and maps it
    to a tuple of two units: the default unit (assumed when a plain value
    is given) and the desired unit (what `convert` converts to).

    Calling `convert` with keyword arguments returns the same mapping
    where every known, convertible value has been turned into a quantity
    in its desired unit. Values without an entry in `self.conversions`
    pass through unchanged.

    Parameters
    ----------
    conversions : dict
        Mapping from property name to a (default_unit, desired_unit)
        tuple of pint units.
    """

    def __init__(self, **conversions):

        # Validate every entry up front so misuse fails at construction.
        for value in conversions.values():
            assert isinstance(
                value, tuple
            ), "Each element in the conversion table needs to be a tuple of two units"
            assert (
                len(value) == 2
            ), "Each element in the conversion table needs to be a tuple of two units"
            assert isinstance(value[0], Unit) and isinstance(
                value[1], Unit
            ), "Each element in the conversion table needs to be a tuple of two units"
        self.conversions = conversions

    def convert(self, **kwargs):
        """Convert the given keyword arguments to their desired units."""

        for key, units in self.conversions.items():

            # Properties not supplied by the caller are skipped.
            if key not in kwargs:
                continue

            value = kwargs[key]

            # Only numeric-like values can be attached to a unit.
            if not isinstance(value, (int, float, list, tuple, ndarray, Quantity)):
                continue

            default_unit, desired_unit = units

            # Plain values are assumed to be in the default unit.
            if not isinstance(value, Quantity):
                value = value * default_unit
            kwargs[key] = value.to(desired_unit).to_reduced_units()

        return kwargs
121 |
--------------------------------------------------------------------------------
/deeptrack/deeplay/__init__.py:
--------------------------------------------------------------------------------
1 | from deeplay import *
--------------------------------------------------------------------------------
/deeptrack/extras/__init__.py:
--------------------------------------------------------------------------------
1 | from .radialcenter import *
2 |
--------------------------------------------------------------------------------
/deeptrack/extras/radialcenter.py:
--------------------------------------------------------------------------------
1 | """Radial center calculation function
2 |
3 | This module provides a function to calculate the center location
4 | of a given intensity distribution.
5 |
6 | Key Features
7 | ------------
8 |
9 | - **Gradient-based analysis with least-squares method.**
10 |
11 | Uses intensity gradients to determine the
12 | radial symmetry of 2D intensity distributions.
13 |
14 |
15 | - **Flexible output**
16 |
17 | Allows inversion of the axis based on user preference.
18 |
19 |
20 | Module Structure
21 | ----------------
22 | Functions:
23 |
24 | - `radialcenter`: Calculates the center of a 2D intensity distribution.
25 |
26 | Example
27 | -------
28 | Calculate center of an image containing randomly generated Gaussian blur.
29 |
>>> import numpy as np
>>> from deeptrack.extras import radialcenter as rc

>>> linspace = np.linspace(-10, 10, 100)
>>> gaussian = np.exp(-0.5 * (
...     linspace[:, None] ** 2 + linspace[None, :] ** 2)
... )
>>> noise = np.random.normal(0, 0.005, (100, 100))
>>> x, y = rc.radialcenter(gaussian + noise)
>>> print(f"Center of distribution = {x}, {y}")
39 |
40 |
41 | Python implementation by Benjamin Midtvedt, University of Gothenburg, 2020
42 | Copyright 2011-2012, Raghuveer Parthasarathy, The University of Oregon
43 |
44 | Disclaimer / License
45 | This program is free software: you can redistribute it and/or
46 | modify it under the terms of the GNU General Public License as
47 | published by the Free Software Foundation, either version 3 of the
48 | License, or (at your option) any later version.
49 | This set of programs is distributed in the hope that it will be useful,
50 | but WITHOUT ANY WARRANTY; without even the implied warranty of
51 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
52 | General Public License for more details.
53 | You should have received a copy of the GNU General Public License
54 | (gpl.txt) along with this program.
If not, see <https://www.gnu.org/licenses/>.
56 |
57 | Raghuveer Parthasarathy
58 | The University of Oregon
59 | August 21, 2011 (begun)
60 | last modified Apr. 6, 2012 (minor change)
61 | Copyright 2011-2012, Raghuveer Parthasarathy
62 | """
63 |
64 | from typing import Tuple
65 |
66 | import numpy as np
67 | import scipy.signal
68 |
def radialcenter(
    I,
    invert_xy=False
) -> Tuple[float, float]:
    """Locate the center of radial symmetry of a 2D intensity map.

    For every half-pixel grid midpoint, consider the line through that
    midpoint with slope parallel to the local intensity gradient. The
    center is the point minimizing the weighted sum of squared distances
    of closest approach to all of these lines, solved analytically by
    weighted least squares.

    Parameters
    ----------
    I : np.ndarray
        2D intensity distribution (a grayscale image). The size need not
        be an odd number of pixels along each dimension.
    invert_xy : bool
        If True, the coordinates are returned as (y, x) instead of (x, y).

    Returns
    -------
    float, float
        Coordinates x, y of the center of radial symmetry, in pixels,
        from the left/topmost pixel. A shape centered in the middle of a
        (2N+1) x (2N+1) square returns x0 = y0 = N + 1. Note that y
        increases with increasing row number (i.e. "downward").

    """
    image = np.squeeze(I)
    n_rows, n_cols = image.shape[:2]

    # Half-pixel midpoint grids. For width 2n+1 the pixel coordinates run
    # -n..n, so midpoints run -n+0.5..n-0.5. Broadcasting a single
    # row/column is much faster than repmat-style tiling.
    mid_x = np.arange(-(n_cols - 1) / 2.0 + 0.5, (n_cols - 1) / 2.0 + 0.5)
    mid_x = np.reshape(mid_x, (1, mid_x.size))
    xm = mid_x[(0,) * (n_rows - 1), :]

    mid_y = np.arange(-(n_rows - 1) / 2.0 + 0.5, (n_rows - 1) / 2.0 + 0.5)
    mid_y = np.reshape(mid_y, (mid_y.size, 1))
    ym = mid_y[:, (0,) * (n_cols - 1)]

    # Intensity derivatives along the two 45-degree diagonals (u, v).
    # y increases "downward"; the rotation back to x-y handles the sign.
    dIdu = image[: n_rows - 1, 1:n_cols] - image[1:n_rows, : n_cols - 1]
    dIdv = image[: n_rows - 1, : n_cols - 1] - image[1:n_rows, 1:n_cols]

    # 3x3 box filter to smooth the derivatives.
    kernel = np.ones((3, 3)) / 9
    fdu = scipy.signal.convolve2d(dIdu, kernel, "same")
    fdv = scipy.signal.convolve2d(dIdv, kernel, "same")

    # Squared gradient magnitude.
    grad_mag2 = fdu * fdu + fdv * fdv

    # Gradient slope in x-y coordinates: a 45-degree rotation of the u-v
    # components, negated because y grows downward.
    m = -(fdv + fdu) / (fdu - fdv)
    m[np.isnan(m)] = 0

    # Replace infinite slopes (vertical gradients) with a large number.
    m[np.isinf(m)] = 1000000

    # y intercept of the line with slope m through each midpoint.
    b = ym - m * xm

    # Weights: squared gradient magnitude divided by the distance to the
    # gradient-intensity centroid.
    total = np.sum(grad_mag2)
    x_centroid = np.sum(grad_mag2 * xm) / total
    y_centroid = np.sum(grad_mag2 * ym) / total
    w = grad_mag2 / np.sqrt(
        (xm - x_centroid) * (xm - x_centroid) + (ym - y_centroid) * (ym - y_centroid)
    )

    # Closed-form weighted least squares for the symmetry center
    # (coordinates relative to the image center).
    wm2p1 = w / (m * m + 1)
    sw = np.sum(wm2p1)
    mwm2pl = m * wm2p1
    smmw = np.sum(m * mwm2pl)
    smw = np.sum(mwm2pl)
    smbw = np.sum(b * mwm2pl)
    sbw = np.sum(b * wm2p1)
    det = smw * smw - smmw * sw
    xc = (smbw * sw - smw * sbw) / det
    yc = (smbw * smw - smmw * sbw) / det

    # Shift from image-center-relative to pixel coordinates.
    xc = xc + (n_cols + 1) / 2.0 - 1
    yc = yc + (n_rows + 1) / 2.0 - 1

    return (yc, xc) if invert_xy else (xc, yc)
183 |
--------------------------------------------------------------------------------
/deeptrack/noises.py:
--------------------------------------------------------------------------------
1 | """
2 | Features for introducing noise to images.
3 |
4 | This module provides classes to add various types of noise to images,
5 | including constant offsets, Gaussian noise, and Poisson-distributed noise.
6 |
7 | Module Structure
8 | ----------------
9 | Classes:
10 |
11 | - `Noise`: Abstract base class for noise models.
12 | - `Background` / `Offset`: Adds a constant value to an image.
13 | - `Gaussian`: Adds IID Gaussian noise.
14 | - `ComplexGaussian`: Adds complex-valued Gaussian noise.
15 | - `Poisson`: Adds Poisson-distributed noise based on signal-to-noise ratio.
16 |
17 | Example
18 | -------
19 | Add Gaussian noise to an image:
20 |
>>> import numpy as np
>>> from deeptrack import noises
>>> image = np.ones((100, 100))
>>> gaussian_noise = noises.Gaussian(mu=0, sigma=0.1)
24 | >>> noisy_image = gaussian_noise.resolve(image)
25 |
26 | Add Poisson noise with a specified signal-to-noise ratio:
27 |
28 | >>> poisson_noise = noises.Poisson(snr=0.5)
29 | >>> noisy_image = poisson_noise.resolve(image)
30 |
31 | """
32 |
33 | import numpy as np
34 |
35 | from .features import Feature
36 | from .image import Image
37 | from .types import PropertyLike
38 |
39 |
class Noise(Feature):
    """Base abstract noise class.

    Common parent type for the noise features in this module; it defines
    no behavior of its own.
    """
42 |
43 |
class Background(Noise):
    """Add a constant offset to an image.

    Parameters
    ----------
    offset : float
        The constant value added to every pixel of the image.
    """

    def __init__(self, offset: PropertyLike[float], **kwargs):
        super().__init__(offset=offset, **kwargs)

    def get(self, image, offset, **kwargs):
        shifted = image + offset
        return shifted
58 |
59 |
# ALIASES
# `Offset` is a backward-compatible alias for `Background`.
Offset = Background
62 |
63 |
class Gaussian(Noise):
    """Add pixel-wise independent (IID) Gaussian noise to an image.

    Parameters
    ----------
    mu : float
        Mean of the noise distribution.
    sigma : float
        Standard deviation of the noise distribution.
    """

    def __init__(
        self,
        mu: PropertyLike[float] = 0,
        sigma: PropertyLike[float] = 1,
        **kwargs
    ):
        super().__init__(mu=mu, sigma=sigma, **kwargs)

    def get(self, image, mu, sigma, **kwargs):
        noise = np.random.randn(*image.shape) * sigma
        return mu + image + noise
86 |
87 |
class ComplexGaussian(Noise):
    """Add complex-valued IID Gaussian noise to an image.

    Independent Gaussian noise is drawn for the real and imaginary
    parts, scaled by `sigma`, and added to the image.

    Parameters
    ----------
    mu : float
        Mean of the noise distribution.
    sigma : float
        Standard deviation of the noise distribution.
    """

    def __init__(
        self,
        mu: PropertyLike[float] = 0,
        sigma: PropertyLike[float] = 1,
        **kwargs
    ):
        super().__init__(mu=mu, sigma=sigma, **kwargs)

    def get(self, image, mu, sigma, **kwargs):
        # Draw the real part first, then the imaginary part, so the
        # global RNG is consumed in a fixed order.
        real_part = np.random.randn(*image.shape)
        imag_part = np.random.randn(*image.shape) * 1j
        return mu + image + (real_part + imag_part) * sigma
112 |
113 |
class Poisson(Noise):
    """Adds Poisson-distributed noise to an image.

    Parameters
    ----------
    snr : float
        Signal-to-noise ratio of the final image. The signal is determined
        by the peak value of the image.
    background : float
        Value to be used as the background. This is used to calculate the
        signal of the image.
    max_val : float, optional
        Maximum allowable value to prevent overflow in noise computation.
        Default is 1e8.
    """

    def __init__(
        self,
        *args,
        snr: PropertyLike[float] = 100,
        background: PropertyLike[float] = 0,
        max_val=1e8,
        **kwargs
    ):
        super().__init__(
            *args, snr=snr, background=background, max_val=max_val, **kwargs
        )

    def get(self, image, snr, background, max_val, **kwargs):
        # Clip negative values to zero WITHOUT mutating the caller's
        # array (the previous implementation wrote into `image` in place).
        clipped = np.clip(image, 0, None)
        immax = np.max(clipped)
        peak = np.abs(immax - background)

        # Scale the image so the Poisson variance matches the requested
        # SNR, clamped to avoid overflow in np.random.poisson.
        # NOTE(review): a peak (or immax) of exactly 0 still divides by
        # zero here, as in the original implementation.
        rescale = snr ** 2 / peak ** 2
        rescale = np.clip(rescale, 1e-10, max_val / np.abs(immax))
        try:
            noisy_image = Image(np.random.poisson(clipped * rescale) / rescale)
            noisy_image.merge_properties_from(image)
            return noisy_image
        except ValueError as error:
            # Chain the original exception for easier debugging.
            raise ValueError(
                "Numpy poisson function errored due to too large value. "
                "Set max_val in dt.Poisson to a lower value to fix."
            ) from error
158 |
--------------------------------------------------------------------------------
/deeptrack/pytorch/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | import torch
3 | from .data import Dataset
4 | from .features import ToTensor
--------------------------------------------------------------------------------
/deeptrack/pytorch/data.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | from typing import Union, Optional
5 | from deeptrack.image import Image
6 |
class Dataset(torch.utils.data.Dataset):
    """Torch dataset that lazily evaluates a DeepTrack pipeline.

    Items are generated on first access and cached; `replace` controls
    whether a cached item is regenerated on subsequent accesses.

    Parameters
    ----------
    pipeline
        The DeepTrack pipeline used to generate each item.
    inputs : list, optional
        One pipeline input per item. If None, `length` must be given and
        an empty input is used for every item.
    length : int, optional
        Number of items when `inputs` is not provided.
    replace : bool, float or callable
        If a bool, whether cached items are always regenerated. If a
        float in [0, 1], the probability of regeneration per access. If a
        callable, it is called (with the index if it accepts one) and
        must return a bool.
    float_dtype : torch.dtype or "default", optional
        dtype floating-point tensors are converted to; "default" uses
        `torch.get_default_dtype()`.
    """

    def __init__(self,
                 pipeline,
                 inputs=None,
                 length=None,
                 replace: Union[bool, float] = False,
                 float_dtype: Optional[Union[torch.dtype, str]] = "default"):
        self.pipeline = pipeline
        self.replace = replace
        if inputs is None:
            if length is None:
                raise ValueError("Either inputs or length must be specified.")
            # NOTE(review): all items share the same (empty) input list
            # object; fine as long as the pipeline does not mutate it.
            inputs = [[]] * length
        self.inputs = inputs
        # Cache of generated items; None marks "not yet generated".
        self.data = [None for _ in inputs]

        if float_dtype == "default":
            float_dtype = torch.get_default_dtype()
        self.float_dtype = float_dtype

    def __getitem__(self, index):
        if self._should_replace(index):
            self.pipeline.update()
            results = self.pipeline(self.inputs[index])
            if not isinstance(results, (tuple, list)):
                results = (results,)
            # Unwrap deeptrack Images, then convert everything to tensors.
            results = tuple(
                r._value if isinstance(r, Image) else r for r in results
            )
            results = tuple(self._as_tensor(r) for r in results)

            self.data[index] = results

        return self.data[index]

    def _as_tensor(self, x):
        """Convert a number, ndarray, Image or tensor to a torch tensor."""
        if isinstance(x, (int, float, bool)):
            x = torch.from_numpy(np.array([x]))
        if isinstance(x, np.ndarray):
            # Check the dtype on the numpy array BEFORE conversion: a
            # torch dtype never compares equal to a numpy dtype, so the
            # original post-conversion check could never skip the permute.
            is_unsigned = x.dtype in (np.uint8, np.uint16, np.uint32, np.uint64)
            x = torch.from_numpy(x)
            # Channel-last arrays become channel-first, except unsigned
            # integer arrays (e.g. label masks), which keep their layout.
            if x.ndim > 2 and not is_unsigned:
                x = x.permute(-1, *range(x.ndim - 1))
        if isinstance(x, Image):
            # Fix: the converted value was previously computed and
            # discarded, leaving x an unconverted Image.
            x = self._as_tensor(x._value)
        elif not isinstance(x, torch.Tensor):
            # Fix: guard the fallback so existing tensors are not
            # re-wrapped by torch.Tensor(...), which silently casts to
            # the default float dtype.
            x = torch.Tensor(x)

        # Harmonize floating point and integer dtypes.
        if self.float_dtype and x.dtype in [torch.float16, torch.float32, torch.float64]:
            x = x.to(self.float_dtype)
        if x.dtype in [torch.int8, torch.int16, torch.int32, torch.int64]:
            x = x.to(torch.long)

        return x

    def _should_replace(self, index):
        """Decide whether the item at `index` must be (re)generated."""
        if self.data[index] is None:
            return True

        if isinstance(self.replace, bool):
            return self.replace
        if callable(self.replace):
            try:
                return self.replace()
            except TypeError:
                return self.replace(index)
        if isinstance(self.replace, float) and 0 <= self.replace <= 1:
            return np.random.rand() < self.replace
        raise TypeError(
            "replace must be a boolean, a float between 0 and 1, or a callable."
        )

    def __len__(self):
        return len(self.inputs)
83 |
--------------------------------------------------------------------------------
/deeptrack/pytorch/features.py:
--------------------------------------------------------------------------------
1 | from deeptrack.features import Feature
2 | from deeptrack.backend import config
3 | import torch
4 | import numpy as np
5 | from typing import Literal
6 |
class ToTensor(Feature):
    """Feature that converts its input to a `torch.Tensor`."""

    def __init__(self,
                 dtype=None,
                 device=None,
                 add_dim_to_number=False,
                 permute_mode: Literal["always", "never", "numpy", "numpy_and_not_int"] = "never",
                 **kwargs):
        """Converts the input to a torch tensor.

        Parameters
        ----------
        dtype : torch.dtype, optional
            The dtype of the resulting tensor. If None, the dtype is inferred from the input.
        device : torch.device, optional
            The device of the resulting tensor. If None, the device is inferred from the input.
        add_dim_to_number : bool, optional
            If True, a dimension is added to single numbers. This is useful when the input is a
            single number, but the output should be a tensor with a single dimension.
            Default value is False.
        permute_mode : {"always", "never", "numpy", "numpy_and_not_int"}, optional
            Whether to permute the input to channel first. If "always", the input is always permuted.
            If "never", the input is never permuted. If "numpy", the input is permuted if it is a numpy
            array. If "numpy_and_not_int", the input is permuted if it is a numpy array and the dtype
            is not an integer.
        """
        super().__init__(dtype=dtype, device=device, add_dim_to_number=add_dim_to_number, permute_mode=permute_mode, **kwargs)

    def get(self, x, dtype, device, add_dim_to_number, permute_mode, **kwargs):

        # Remember the ORIGINAL type of x; the permute_mode checks below
        # refer to whether the input started as a numpy array.
        is_numpy = isinstance(x, np.ndarray)

        # NOTE(review): when x is a numpy array and dtype is None, `dtype`
        # becomes a numpy dtype here. The comparison against torch dtypes
        # in the "numpy_and_not_int" branch below then presumably never
        # matches, and `x.to(dtype)` receives a numpy dtype -- confirm
        # both behaviors against torch's conversion rules.
        dtype = dtype or x.dtype
        if isinstance(x, torch.Tensor):
            # Already a tensor; nothing to convert.
            ...
        elif isinstance(x, np.ndarray):
            # torch cannot share memory with negatively-strided arrays
            # (e.g. flipped views); copy to normalize the strides.
            if any(stride < 0 for stride in x.strides):
                x = x.copy()
            x = torch.from_numpy(x)
        elif isinstance(x, (int, float, bool, complex)):
            if add_dim_to_number:
                x = torch.tensor([x])
            else:
                # Plain numbers pass through untouched.
                return x
        else:
            # Fallback for lists and other sequence-like inputs.
            x = torch.Tensor(x)

        # Optionally move the last (channel) axis to the front.
        if (
            permute_mode == "always"
            or (permute_mode == "numpy" and is_numpy)
            or (permute_mode == "numpy_and_not_int" and is_numpy and dtype not in [torch.int8, torch.int16, torch.int32, torch.int64])
        ):
            x = x.permute(-1, *range(x.dim() - 1))
        if dtype:
            x = x.to(dtype)
        if device:
            x = x.to(device)


        return x
--------------------------------------------------------------------------------
/deeptrack/sequences.py:
--------------------------------------------------------------------------------
1 | """Features and tools for resolving sequences of images.
2 |
3 | Classes
4 | -------
5 | Sequence
6 | Resolves a feature as a sequence.
7 |
8 | Functions
9 | ---------
10 | Sequential
11 | Converts a feature to be resolved as a sequence.
12 | """
13 |
14 | from .features import Feature
15 | from .properties import SequentialProperty
16 | from .types import PropertyLike
17 | import random
18 | import numpy as np
19 |
class Sequence(Feature):
    """Resolves a feature as a sequence.

    The wrapped feature is resolved `sequence_length` times. At every
    step the keyword arguments `sequence_length` and `sequence_step` are
    propagated to all sequential properties of the feature set.

    Parameters
    ----------
    feature : Feature
        The feature to resolve as a sequence.
    sequence_length : int
        The number of times to resolve the feature.

    Attributes
    ----------
    feature : Feature
        The feature to resolve as a sequence.
    """

    __distributed__ = False

    def __init__(
        self, feature: Feature, sequence_length: PropertyLike[int] = 1, **kwargs
    ):
        super().__init__(sequence_length=sequence_length, **kwargs)
        self.feature = self.add_feature(feature)

    def get(self, input_list, sequence_length=None, **kwargs):

        outputs = input_list or []
        for step in range(sequence_length):
            # Reseed numpy so every step draws fresh random numbers even
            # if something seeded the global generator deterministically.
            np.random.seed(random.randint(0, 1000000))

            # Push the current step/length into all sequential properties
            # before resolving.
            propagate_sequential_data(
                self.feature,
                sequence_step=step,
                sequence_length=sequence_length,
            )

            outputs.append(self.feature())

        # When each step yields a tuple/list, transpose so that every
        # output stream becomes its own sequence.
        if isinstance(outputs[0], (tuple, list)):
            outputs = tuple(zip(*outputs))

        return outputs
70 |
71 |
def Sequential(feature: Feature, **kwargs):
    """Converts a feature to be resolved as a sequence.

    Should be called on individual features, not combinations of features. All
    keyword arguments will be treated as sequential properties and will be
    passed to the parent feature.

    If a property from the keyword argument already exists on the feature, the
    existing property will be used to initialize the passed property (that is,
    it will be used for the first timestep).

    Parameters
    ----------
    feature : Feature
        Feature to make sequential.
    kwargs
        Keyword arguments to pass on as sequential properties of `feature`.

    """

    # First pass: wrap (or create) each named property as a
    # SequentialProperty and wire it into the feature's dependency graph.
    for property_name in kwargs.keys():

        if property_name in feature.properties:
            # Insert property with initialized value
            feature.properties[property_name] = SequentialProperty(
                feature.properties[property_name], **feature.properties
            )
        else:
            # insert empty property
            feature.properties[property_name] = SequentialProperty()

        feature.properties.add_dependency(feature.properties[property_name])
        feature.properties[property_name].add_child(feature.properties)

    # Second pass: build the sampling actions, now that every sequential
    # property exists and can be referenced by the others.
    for property_name, sampling_rule in kwargs.items():

        prop = feature.properties[property_name]

        # Arguments that every sampling rule may request.
        all_kwargs = dict(
            previous_value=prop.previous_value,
            previous_values=prop.previous_values,
            sequence_length=prop.sequence_length,
            sequence_step=prop.sequence_step,
        )

        # Expose sibling properties to the sampling rule; sequential
        # siblings additionally expose their history as "previous_<name>".
        for key, val in feature.properties.items():
            if key == property_name:
                continue

            if isinstance(val, SequentialProperty):
                all_kwargs[key] = val
                all_kwargs["previous_" + key] = val.previous_values
            else:
                all_kwargs[key] = val
        # The initialization action runs at the first timestep, where no
        # previous value exists, so previous_value is excluded from it.
        if not prop.initialization:
            prop.initialization = prop.create_action(sampling_rule, **{k:all_kwargs[k] for k in all_kwargs if k != "previous_value"})

        prop.current = prop.create_action(sampling_rule, **all_kwargs)

    return feature
132 |
133 |
def propagate_sequential_data(X, **kwargs):
    """Push values onto every sequential property that `X` depends on.

    For each dependency of `X` that is a `SequentialProperty`, every keyword
    argument whose name matches an attribute of that property has its value
    set on the corresponding attribute node.
    """
    sequential_dependencies = (
        dependency
        for dependency in X.recurse_dependencies()
        if isinstance(dependency, SequentialProperty)
    )
    for dependency in sequential_dependencies:
        for attribute_name, new_value in kwargs.items():
            if hasattr(dependency, attribute_name):
                getattr(dependency, attribute_name).set_value(new_value)
140 |
--------------------------------------------------------------------------------
/deeptrack/sources/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import Source, SourceItem, Product, Subset, Sources, Join, random_split
2 | from .folder import ImageFolder
3 | from .rng import NumpyRNG, PythonRNG
4 |
--------------------------------------------------------------------------------
/deeptrack/sources/folder.py:
--------------------------------------------------------------------------------
1 | """Utility class for data sources in a directory structure.
2 |
This module provides the `ImageFolder` DeepTrack2 class
which enables control of image sources organized
in a directory structure.
6 |
7 | The primary usage is to facilitate naming and
8 | organizing of data sources.
9 |
10 | Key Features
11 | ------------
12 | - **Attribute Access**
13 |
14 | Enables accessing attributes tied to a data source such as
15 | paths, directory structure, length etc.
16 |
17 | - **Labeling**
18 |
19 | Allows converting category names of images to integers,
20 | which is more flexible and easy to process in a data pipeline.
21 |
22 | - **Category Splitting**
23 |
24 | The sources of images can be split into subcategories of which the
25 | user specifies the name of.
26 |
27 |
28 | Module Structure
29 | ----------------
30 | `ImageFolder`: Data source for images organized in a directory structure.
31 |
    Allows for processing of image sources with `Dict` data structures,
    splitting, naming and labeling functions.
34 |
35 | Examples
36 | --------
37 | Print some information about a source of data:
38 |
39 | >>> from deeptrack.sources import folder
40 |
>>> root = "data/train"
>>> data_source = folder.ImageFolder(root)

>>> print(f"Total images in training data: {len(data_source)}")
>>> print(f"Classes: {data_source.classes}")
46 |
47 | """
48 |
49 | import glob
50 | import os
51 | from typing import List, Tuple
52 |
53 | from deeptrack.sources.base import Source
54 |
55 | known_extensions = ["png", "jpg", "jpeg", "tif", "tiff", "bmp", "gif"]
56 |
class ImageFolder(Source):
    """Data source for images organized in a directory structure.

    This class assumes that the images are organized in a
    directory structure where:

    ```bash
    root/dog/xxx.png
    root/dog/xxy.png
    root/[...]/xxz.png

    root/cat/123.png
    root/cat/nsdf3.png
    root/[...]/asd932_.png
    ```

    The first level of directories (e.g., `dog`, `cat`) is used as labels
    for the images, and the images are expected to have file extensions
    included in `known_extensions` (matched case-insensitively).

    Attributes
    ----------
    path: list
        List of paths to the image files.

    label: list
        List of corresponding integer labels for each image.

    label_name: list
        List of category names corresponding to each label.

    Methods
    -------
    classes: list
        Returns the unique class names (category names), in label order.

    __init__(root: str)
        Initializes the `ImageFolder` instance by scanning
        the directory structure.

    __len__()
        Returns the total number of images in the dataset.

    get_category_name(path: str, directory_level: int)
        Retrieves the category name (directory name) for the given image path
        at a specific directory level.

    label_to_name(label: int)
        Converts a label index to the corresponding category name.

    name_to_label(name: str)
        Converts a category name to the corresponding label index.

    split(*splits: str)
        Splits the dataset into subsets based on the folder structure.
        The first folder name in the path will be used to define the split.

    """

    path: str
    label: int
    label_name: str

    @property
    def classes(
        self
    ) -> List:
        """Unique category names, ordered by their integer label."""
        return list(self._category_to_int.keys())

    def __init__(
        self,
        root: str
    ) -> None:

        self._root = root

        # Collect every file below root whose extension looks like an image.
        # The extension is lower-cased so e.g. ".PNG" is also accepted.
        self._paths = glob.glob(f"{root}/**/*", recursive=True)
        self._paths = [
            path for path in self._paths if os.path.isfile(path)
            and path.split(".")[-1].lower() in known_extensions
        ]
        self._paths.sort()
        self._length = len(self._paths)

        # Get category name as 1 directory down from root.
        category_per_path = [self.get_category_name(path, 0)
                             for path in self._paths]

        # Sort the categories so the category -> label mapping is
        # deterministic across runs (iteration order of a raw set of
        # strings is not, due to hash randomization).
        unique_categories = sorted(set(category_per_path))

        # Create a dictionary mapping category name to integer.
        self._category_to_int = {category: i for i, category
                                 in enumerate(unique_categories)}
        self._int_to_category = {i: category for category, i
                                 in self._category_to_int.items()}

        # Create a list of integers corresponding to the category of each path.
        categories = [self._category_to_int[category]
                      for category in category_per_path]

        super().__init__(
            path=self._paths,
            label=categories,
            label_name=category_per_path
        )

    def __len__(
        self
    ) -> int:
        """Total number of images found under the root."""
        return self._length

    def get_category_name(
        self,
        path: str,
        directory_level: int
    ) -> str:
        """Return the directory name at `directory_level` below the root.

        An empty string is returned when the path has no component below
        the root.
        """
        relative_path = path.replace(self._root, "", 1).lstrip(os.sep)
        folder = relative_path.split(os.sep)[directory_level] \
            if relative_path else ""
        return folder

    def label_to_name(
        self,
        label: int
    ) -> str:
        """Gets the category corresponding to a label"""
        return self._int_to_category[label]

    def name_to_label(
        self,
        name: str
    ) -> int:
        """Gets the label corresponding to a category"""
        return self._category_to_int[name]

    def split(
        self,
        *splits: str
    ) -> Tuple["ImageFolder", ...]:
        """Split the dataset into subsets.

        The splits are defined by the names of the first folder
        in the path of each image. For example, if the dataset
        contains images in the following structure:

        ```bash
        root/A/dog/xxx.png
        root/A/dog/xxy.png
        root/A/[...]/xxz.png

        root/B/cat/123.png
        root/B/cat/nsdf3.png
        root/B/[...]/asd932_.png
        ```

        Then the dataset can be split into two subsets, one containing
        all images in the `A` folder and one containing all images
        in the `B` folder.

        Parameters
        ----------
        splits: str
            The names of the categories to split into. If omitted, one
            subset is created per top-level folder.

        Raises
        ------
        ValueError
            If a requested split name does not exist, or there is nothing
            to split into.

        """

        all_splits = set([self.get_category_name(path, 0)
                          for path in self._paths])

        # No explicit splits requested: split on every top-level folder.
        if len(splits) == 0:

            if len(all_splits) == 0:
                raise ValueError("No categories to split into")
            return self.split(*all_splits)

        if not all(split in all_splits for split in splits):
            raise ValueError(
                f"Unknown split. Available splits are {all_splits}"
            )

        output = []

        def update_root_source(
            item
        ) -> None:
            """Inner function which updates attributes of root source."""
            for key in item:
                getattr(self, key).invalidate()
                getattr(self, key).set_value(item[key])

        for split in splits:
            subfolder = ImageFolder(os.path.join(self._root, split))
            # Keep the parent source in sync whenever a subset is activated.
            subfolder.on_activate(update_root_source)
            output.append(subfolder)

        return tuple(output)
265 |
266 |
--------------------------------------------------------------------------------
/deeptrack/sources/rng.py:
--------------------------------------------------------------------------------
1 | """Classes that extend Numpy and Python rng generators.
2 |
3 | This utility package extends the random number generator objects for both
4 | Python and Numpy by adding functions to generate several instances as well as
5 | dependency tracking with DeepTrackNode objects.
6 |
7 | Key Features
8 | ------------
- **Extends Random Number Generators**
    Lets the user instantiate as many rng's as desired, with either
    Numpy or the Python standard library.
12 |
13 | Module Structure
14 | ----------------
15 |
16 | - `NumpyRNG`: Class that generates multiple numpy random number generators.
17 |
18 | - `PythonRNG`: Class that generates multiple python random number generators.
19 |
20 |
21 | Examples
22 | --------
23 | Generate 3 rng's with different seeds, and get a random number from them:
24 |
25 | >>> from deeptrack.sources import rng
26 |
27 | >>> python_rng = rng.PythonRNG(n_states=3, seed=123)
28 | >>> for i, generator in enumerate(python_rng._generate_states()):
29 | >>> print(f"RNG {i}: Random Number -> {generator.randint(0, 100)}")
30 |
31 | """
32 |
33 | import random
34 | from typing import Any, List, Callable
35 |
36 | import numpy as np
37 |
38 | from deeptrack.sources.base import Source
39 | from deeptrack.backend.core import DeepTrackNode
40 |
41 |
class NumpyRNG(Source, np.random.RandomState):
    """Class that generates multiple numpy random number generators.

    It is used for creating multiple rng's with different seeds.

    Parameters
    ----------
    n_states: int
        The number of random number generators to create.

    seed: int, optional
        The seed used to initialize the first random generator.
        If not provided, a random seed will be generated automatically using
        `np.random.randint()`.

    Attributes
    ----------
    rng: list of numpy.random.RandomState
        A list of `numpy.random.RandomState` objects, each seeded with a
        unique value.

    Methods
    -------
    _generate_states(): list[np.random.RandomState]
        Generates and returns a list of independent
        `numpy.random.RandomState` objects.

    reset(): None
        Resets the list of random number generators with new seeds.

    __getattribute__(__name): Any
        Custom attribute access to allow lazy evaluation
        of random number generator methods.

    _create_lazy_callback(__name): callable
        Creates a lazy callback for accessing methods
        from the `numpy.random.RandomState` objects.

    set_index(index): self
        Sets the current index and resets the random number generators.
    """

    # Per-index generators, stored in the Source property dict under "rng".
    rng: list

    def __init__(
        self,
        n_states,
        seed=None
    ) -> None:
        self._n_states = n_states

        # Draw a master seed when none is given.
        if seed is None:
            seed = np.random.randint(0, 2**31)
        self._seed = seed

        states = self._generate_states()

        super().__init__(rng=states)

    def _generate_states(
        self
    ) -> List[np.random.RandomState]:
        # Seed a dedicated generator with the master seed, then derive one
        # independent RandomState per requested state from it. Using the
        # same master seed therefore reproduces the same list of states.

        n_states = self._n_states
        seed = self._seed

        seed_generator = np.random.RandomState(seed)
        return [np.random.RandomState(
            seed_generator.randint(0, 2**31)
        ) for _ in range(n_states)]

    def reset(
        self
    ) -> None:
        # Rebuild all generators from the (unchanged) master seed, which
        # restores their original sequences.
        self._dict["rng"] = self._generate_states()


    def __getattribute__(
        self,
        __name: str
    ) -> Any:
        # Intercept public RandomState method names and return a lazy
        # wrapper instead. The underscore check leaves internal attributes
        # (including `_create_lazy_callback` itself) on the normal lookup
        # path, which prevents infinite recursion.
        if hasattr(
            np.random.RandomState, __name) and not __name.startswith("_"):
            return self._create_lazy_callback(__name)
        return super().__getattribute__(__name)

    def _create_lazy_callback(
        self,
        __name: str
    ) -> Callable[[DeepTrackNode], DeepTrackNode]:
        # Build a callable that defers the RNG method call into a
        # DeepTrackNode evaluated against the generator selected by the
        # current index. The node depends on the index so it is refreshed
        # whenever the index changes.
        def lazy_callback(
            *args,
            **kwargs
        ) -> DeepTrackNode:
            node = DeepTrackNode(
                lambda: getattr(
                    self._dict["rng"][self._current_index()], __name)\
                    (*args, **kwargs))
            node.add_dependency(self._current_index)
            self._current_index.add_child(node)
            return node
        return lazy_callback


    def set_index(
        self,
        index
    ) -> Callable:
        # NOTE(review): despite the `Callable` annotation, this returns
        # whatever Source.set_index returns (documented above as `self`)
        # — confirm against the base class.
        self.reset()
        return super().set_index(index)
150 |
151 |
class PythonRNG(Source, random.Random):
    """Class that generates multiple random.Random number generators.

    It is used for creating multiple rng's with different seeds.

    Parameters
    ----------
    n_states: int
        The number of random number generators to create.

    seed: int, optional
        The seed used to initialize the first random generator.
        If not provided, a random seed will be generated automatically
        using `np.random.randint()`.

    Attributes
    ----------
    rng: list of random.Random
        A list of `random.Random` objects, each seeded with a unique value.

    Methods
    -------
    _generate_states(): list[random.Random]
        Generates and returns a list of independent `random.Random` objects.

    reset(): None
        Resets the list of random number generators with new seeds.

    __getattribute__(__name): Any
        Custom attribute access to allow lazy evaluation
        of random number generator methods.

    _create_lazy_callback(__name): callable
        Creates a lazy callback for accessing methods
        from the `random.Random` objects.

    set_index(index): self
        Sets the current index and resets the random number generators.
    """

    # Per-index generators, stored in the Source property dict under "rng".
    rng: list

    def __init__(
        self,
        n_states,
        seed=None
    ) -> None:
        self._n_states = n_states

        # Draw a master seed when none is given.
        if seed is None:
            seed = np.random.randint(0, 2**31)
        self._seed = seed

        states = self._generate_states()

        super().__init__(rng=states)

    def _generate_states(
        self
    ) -> List[random.Random]:
        # Seed a dedicated generator with the master seed, then derive one
        # independent random.Random per requested state from it.

        n_states = self._n_states
        seed = self._seed

        seed_generator = random.Random(seed)
        return [random.Random(
            seed_generator.randint(0, 2**31)
        ) for _ in range(n_states)]

    def reset(
        self
    ) -> None:
        # Rebuild all generators from the (unchanged) master seed, which
        # restores their original sequences.
        self._dict["rng"] = self._generate_states()


    def __getattribute__(
        self,
        __name: str
    ) -> Any:
        # Intercept public random.Random method names and return a lazy
        # wrapper instead. The underscore check leaves internal attributes
        # on the normal lookup path, preventing infinite recursion.
        #
        # BUG FIX: this previously tested `np.random.RandomState` (copied
        # from NumpyRNG), so methods unique to `random.Random` (e.g.
        # `randrange`, `choices`, `getrandbits`) were never wrapped lazily,
        # while RandomState-only names were wrapped and then failed at
        # evaluation time.
        if hasattr(
            random.Random, __name) and not __name.startswith("_"):
            return self._create_lazy_callback(__name)
        return super().__getattribute__(__name)

    def _create_lazy_callback(
        self,
        __name: str
    ) -> Callable[[DeepTrackNode], DeepTrackNode]:
        # Build a callable that defers the RNG method call into a
        # DeepTrackNode evaluated against the generator selected by the
        # current index; the node is invalidated when the index changes.
        def lazy_callback(
            *args,
            **kwargs
        ) -> DeepTrackNode:
            node = DeepTrackNode(
                lambda: getattr(
                    self._dict["rng"][self._current_index()], __name)\
                    (*args, **kwargs)
            )
            node.add_dependency(self._current_index)
            self._current_index.add_child(node)
            return node
        return lazy_callback


    def set_index(
        self,
        index
    ) -> Callable:
        # NOTE(review): despite the `Callable` annotation, this returns
        # whatever Source.set_index returns (documented above as `self`).
        self.reset()
        return super().set_index(index)
262 |
--------------------------------------------------------------------------------
/deeptrack/tests/__init__.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import unittest
3 |
4 | from deeptrack.backend import config
5 |
6 |
7 | __all__ = ["BackendTestBase"]
8 |
9 |
class BackendTestBase(unittest.TestCase):
    """Base class for backend-parameterized test cases.

    Subclasses must override ``BACKEND`` with a backend identifier; the
    backend is then activated once per test class in ``setUpClass``.
    """

    # Backend identifier to activate; subclasses must override this.
    BACKEND = None

    @classmethod
    def setUpClass(cls):
        """Activate the configured backend, failing fast if none is set."""
        backend = cls.BACKEND
        if backend is None:
            raise ValueError("BACKEND not set")
        config.set_backend(backend)
18 |
--------------------------------------------------------------------------------
/deeptrack/tests/backend/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/deeptrack/tests/backend/test_mie.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=C0115:missing-class-docstring
2 | # pylint: disable=C0116:missing-function-docstring
3 | # pylint: disable=C0103:invalid-name
4 |
5 | # Use this only when running the test locally.
6 | # import sys
7 | # sys.path.append(".") # Adds the module to path.
8 |
9 | import unittest
10 |
11 | import numpy as np
12 |
13 | from deeptrack.backend import mie
14 |
15 |
class TestMie(unittest.TestCase):
    """Unit tests for the Mie scattering helpers in deeptrack.backend.mie."""

    def test_coefficients(self):
        # Shape and type of the returned coefficient arrays.
        num_terms = 5
        A, B = mie.coefficients(1.5 + 0.01j, 0.5, num_terms)
        self.assertEqual(A.shape, (num_terms,))
        self.assertEqual(B.shape, (num_terms,))
        self.assertIsInstance(A, np.ndarray)
        self.assertIsInstance(B, np.ndarray)

        # Check against test values from Sergio Aragon's Mie Scattering
        # in Mathematica.
        A, B = mie.coefficients(4.0 / 3.0, 50, 1)
        for computed, expected in (
            (A.real, 0.5311058892948411929),
            (A.imag, -0.4990314856310943073),
            (B.real, 0.7919244759352004773),
            (B.imag, -0.4059311522289938238),
        ):
            self.assertAlmostEqual(computed, expected, delta=1e-8)

        A, B = mie.coefficients(1.5 + 1j, 2, 1)
        for computed, expected in (
            (A.real, 0.5465202033970914511),
            (A.imag, -0.1523738572575972279),
            (B.real, 0.3897147278879423235),
            (B.imag, 0.2278960752564908264),
        ):
            self.assertAlmostEqual(computed, expected, delta=1e-8)

        A, B = mie.coefficients(1.1 + 25j, 2, 2)
        for computed, expected in (
            (A[1].real, 0.324433578437),
            (A[1].imag, -0.465627763266),
            (B[1].real, 0.060464399088),
            (B[1].imag, 0.236805417045),
        ):
            self.assertAlmostEqual(computed, expected, delta=1e-8)


    def test_stratified_coefficients(self):
        refractive_indices = [1.5 + 0.01j, 1.2 + 0.02j]
        radii = [0.5, 0.3]
        num_terms = 5
        an, bn = mie.stratified_coefficients(
            refractive_indices, radii, num_terms
        )

        # Shape and type of the returned coefficient arrays.
        self.assertEqual(an.shape, (num_terms,))
        self.assertEqual(bn.shape, (num_terms,))
        self.assertIsInstance(an, np.ndarray)
        self.assertIsInstance(bn, np.ndarray)


    def test_harmonics(self):
        # Shape and type of the angular harmonics.
        angles = np.linspace(-1, 1, 100)
        PI, TAU = mie.harmonics(angles, 5)
        self.assertEqual(PI.shape, (5, 100))
        self.assertEqual(TAU.shape, (5, 100))
        self.assertIsInstance(PI, np.ndarray)
        self.assertIsInstance(TAU, np.ndarray)

        # Reference values for single-angle inputs at several orders.
        reference_cases = [
            (0.4, 4,
             [[1], [1.2], [-0.3], [-1.88]],
             [[0.4], [-2.04], [-5.16], [-1.508]]),
            (0, 5,
             [[1], [0], [-1.5], [0], [1.875]],
             [[0], [-3], [0], [7.5], [0]]),
            (-0.5, 3,
             [[1], [-1.5], [0.375]],
             [[-0.5], [-1.5], [5.4375]]),
        ]
        for x_value, order, pi_expected, tau_expected in reference_cases:
            PI, TAU = mie.harmonics(np.array([x_value]), order)
            self.assertTrue(np.allclose(PI, np.array(pi_expected)))
            self.assertTrue(np.allclose(TAU, np.array(tau_expected)))
114 |
115 |
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
118 |
--------------------------------------------------------------------------------
/deeptrack/tests/backend/test_polynomials.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=C0115:missing-class-docstring
2 | # pylint: disable=C0116:missing-function-docstring
3 | # pylint: disable=C0103:invalid-name
4 |
5 | # Use this only when running the test locally.
6 | # import sys
7 | # sys.path.append(".") # Adds the module to path.
8 |
9 | import unittest
10 |
11 | from deeptrack.backend import polynomials
12 |
13 |
class TestPolynomials(unittest.TestCase):
    """Sanity checks for the Bessel and Riccati-Bessel helpers in
    deeptrack.backend.polynomials."""

    def test_Besselj(self):
        # Known values at the origin.
        self.assertEqual(polynomials.besselj(0, 0), 1)
        self.assertEqual(polynomials.besselj(1, 0), 0)

        # Odd-order symmetry: J_{-1}(x) = -J_1(x).
        self.assertEqual(
            polynomials.besselj(1, 5), -polynomials.besselj(-1, 5)
        )
        self.assertTrue(polynomials.besselj(1, 5) < 0)

    def test_dBesselj(self):
        self.assertEqual(polynomials.dbesselj(1, 0), 0.5)

    def test_Bessely(self):
        self.assertEqual(
            polynomials.bessely(1, 3), -polynomials.bessely(-1, 3)
        )
        self.assertTrue(polynomials.bessely(1, 3) > 0)

    def test_dBessely(self):
        self.assertEqual(
            polynomials.dbessely(1, 3), -polynomials.dbessely(-1, 3)
        )
        self.assertTrue(polynomials.dbessely(1, 3) > 0)

    def test_RicBesj(self):
        self.assertEqual(polynomials.ricbesj(4, 0), 0)
        self.assertTrue(abs(polynomials.ricbesj(1, 8) - 0.2691) < 1e-3)

    def test_dRicBesj(self):
        self.assertTrue(abs(polynomials.dricbesj(1, 1) - 0.5403) < 1e-3)

    def test_RicBesy(self):
        self.assertTrue(abs(polynomials.ricbesy(2, 3) - 0.8011) < 1e-3)

    def test_dRicBesy(self):
        self.assertTrue(abs(polynomials.dricbesy(1, 1) + 0.8414) < 1e-3)

    def test_RicBesh(self):
        self.assertTrue(
            abs(polynomials.ricbesh(3, 2) - (0.1214 - 2.968j)) < 1e-3
        )

    def test_dRicBesh(self):
        self.assertTrue(
            abs(polynomials.dricbesh(2, 6) - (-0.9321 - 0.2206j)) < 1e-3
        )
72 |
--------------------------------------------------------------------------------
/deeptrack/tests/extras/__init__.py:
--------------------------------------------------------------------------------
1 | #from .test_radialcenter import *
2 |
--------------------------------------------------------------------------------
/deeptrack/tests/extras/test_radialcenter.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import numpy as np
4 |
5 | from deeptrack.extras import radialcenter
6 |
7 |
class TestRadialCenter(unittest.TestCase):
    """Tests for deeptrack.extras.radialcenter."""

    def test_noise(self):
        # A pure-noise 100x100 image has no dominant structure, so the
        # estimated center should land near the geometric center.
        noise_image = np.random.normal(0, 0.005, (100, 100))
        center_x, center_y = radialcenter(noise_image)
        self.assertIsInstance(center_x, float)
        self.assertIsInstance(center_y, float)
        self.assertAlmostEqual(center_x, 50.0, delta=5)
        self.assertAlmostEqual(center_y, 50.0, delta=5)
17 |
18 |
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
21 |
--------------------------------------------------------------------------------
/deeptrack/tests/test_augmentations.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | # sys.path.append(".") # Adds the module to path
4 |
5 | import unittest
6 |
7 | import numpy as np
8 |
9 | from deeptrack import augmentations, optics, scatterers
10 | from deeptrack.features import Feature
11 |
12 |
class TestAugmentations(unittest.TestCase):
    """Tests for the geometric augmentation features."""

    class DummyFeature(Feature):
        # Produce the whole output in one call rather than per-image.
        __distributed__ = False

        def get(self, image, **kwargs):
            # Fixed 2x2 test pattern; asymmetric so flips are detectable.
            output = np.array([[1, 2],
                               [0, 0]])
            return output

    def test_FlipLR(self):
        # Left-right flip swaps the columns.
        feature = self.DummyFeature()
        augmented_feature = feature >> augmentations.FlipLR(p=1.0)
        output = augmented_feature.resolve()
        self.assertTrue(np.all(output == np.array([[2, 1], [0, 0]])))

    def test_FlipUD(self):
        # Up-down flip swaps the rows.
        feature = self.DummyFeature()
        augmented_feature = feature >> augmentations.FlipUD(p=1.0)
        output = augmented_feature.resolve()
        self.assertTrue(np.all(output == np.array([[0, 0], [1, 2]])))

    def test_FlipDiagonal(self):
        # Diagonal flip transposes the array.
        feature = self.DummyFeature()
        augmented_feature = feature >> augmentations.FlipDiagonal(p=1.0)
        output = augmented_feature.resolve()
        self.assertTrue(np.all(output == np.array([[1, 0], [2, 0]])))

    def test_Affine(self):
        # Render a point particle at a random position, apply a random
        # affine transform, and check that the stored "position" property
        # still tracks the brightest pixel of the transformed image.
        opt = optics.Fluorescence(magnification=10)
        particle = scatterers.PointParticle(
            position=lambda image_size: np.random.rand(2) * image_size[-2:],
            image_size=opt.output_region,
        )

        augmentation = augmentations.Affine(
            scale=lambda: 0.25 + np.random.rand(2) * 0.25,
            rotation=lambda: np.random.rand() * np.pi * 2,
            shear=lambda: np.random.rand() * np.pi / 2 - np.pi / 4,
            translate=lambda: np.random.rand(2) * 20 - 10,
            mode="constant",
        )

        pipe = opt(particle) >> augmentation
        pipe.store_properties(True)

        for _ in range(10):
            image = pipe.update().resolve()
            # Locate the brightest pixel in the first channel.
            pmax = np.unravel_index(
                np.argmax(image[:, :, 0], axis=None),
                shape=image[:, :, 0].shape
            )

            # L1 distance between the stored position and the peak.
            dist = np.sum(
                np.abs(np.array(image.get_property("position"))- pmax)
            )

            self.assertLess(dist, 3)

    def test_ElasticTransformation(self):
        # Seed both RNGs so the random displacement fields are reproducible.
        np.random.seed(1000)
        import random
        random.seed(1000)
        # 3D input

        im = np.zeros((10, 8, 2))
        transformer = augmentations.ElasticTransformation(
            alpha=20,
            sigma=2,
            ignore_last_dim=True,
            order=1,
            mode="reflect",
        )

        im[:, :, 0] = 1

        # A constant plane is invariant under the displacement field.
        out_1 = transformer.update().resolve(im)
        self.assertIsNone(np.testing.assert_allclose(out_1, im))

        im[:, :, :] = 0
        im[0, :, :] = 1
        # With ignore_last_dim=True both channels receive the same
        # displacement, so they must remain identical.
        out_2 = transformer.update().resolve(im)
        self.assertIsNone(
            np.testing.assert_allclose(out_2[:, :, 0], out_2[:, :, 1])
        )

        # With ignore_last_dim=False the channels are displaced
        # independently, so they should differ.
        transformer.ignore_last_dim.set_value(False)
        out_3 = transformer.resolve(im)
        self.assertRaises(
            AssertionError,
            lambda: np.testing.assert_allclose(out_3[:, :, 0], out_3[:, :, 1]),
        )

        # 2D input
        im = np.zeros((10, 8))
        transformer = augmentations.ElasticTransformation(
            alpha=20,
            sigma=2,
            ignore_last_dim=False,
            order=1,
            mode="reflect",
        )

        # Smoke test: a 2D input should resolve without raising.
        out_1 = transformer.update().resolve(im)

    def test_Crop(self):
        image = np.ones((10, 10, 10))

        # "remove" subtracts the crop amount from each axis.
        cropper = augmentations.Crop(crop=(3, 2, 1), crop_mode="remove")
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (7, 8, 9))

        # "retain" keeps exactly the crop amount on each axis.
        cropper = augmentations.Crop(crop=(3, 2, 1), crop_mode="retain")
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (3, 2, 1))

        # A scalar crop applies to every axis.
        cropper = augmentations.Crop(crop=2, crop_mode="remove")
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (8, 8, 8))

        cropper = augmentations.Crop(crop=2, crop_mode="retain")
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (2, 2, 2))

        # Degenerate crops leave at least one pixel per axis.
        cropper = augmentations.Crop(crop=12, crop_mode="remove")
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (1, 1, 1))

        cropper = augmentations.Crop(crop=0, crop_mode="retain")
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (1, 1, 1))

    def test_CropToMultiple(self):
        image = np.ones((11, 11, 11))

        cropper = augmentations.CropToMultiplesOf(multiple=2)
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (10, 10, 10))

        # -1 (and None below) leave the corresponding axis untouched.
        cropper = augmentations.CropToMultiplesOf(multiple=-1)
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (11, 11, 11))

        cropper = augmentations.CropToMultiplesOf(multiple=(2, 3, 5))
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (10, 9, 10))

        cropper = augmentations.CropToMultiplesOf(multiple=(2, -1, 7))
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (10, 11, 7))

        cropper = augmentations.CropToMultiplesOf(multiple=(2, 3, None))
        out = cropper.update().resolve(image)
        self.assertSequenceEqual(out.shape, (10, 9, 11))

    def test_Pad(self):
        image = np.ones((10, 10, 10))

        # px gives per-side padding, two entries per axis; the expected
        # shapes below pin down the exact convention.
        padder = augmentations.Pad(px=(2, 0, 2, 0, 0, 0), mode="constant")
        out = padder.update().resolve(image)
        self.assertSequenceEqual(out.shape, (12, 12, 10))

        padder = augmentations.Pad(px=(2, 2, 2, 0, 0, 0), mode="constant")
        out = padder.update().resolve(image)
        self.assertSequenceEqual(out.shape, (14, 12, 10))

        padder = augmentations.Pad(px=(2, 2, 2, 2, 0, 0), mode="constant")
        out = padder.update().resolve(image)
        self.assertSequenceEqual(out.shape, (14, 14, 10))

        padder = augmentations.Pad(px=(2, 2, 2, 2, 2, 0), mode="constant")
        out = padder.update().resolve(image)
        self.assertSequenceEqual(out.shape, (14, 14, 12))

        padder = augmentations.Pad(px=(2, 2, 2, 2, 2, 2), mode="constant")
        out = padder.update().resolve(image)
        self.assertSequenceEqual(out.shape, (14, 14, 14))
189 |
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/deeptrack/tests/test_elementwise.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | # sys.path.append(".") # Adds the module to path
4 |
5 | import unittest
6 | import operator
7 | import itertools
8 | from numpy.core.numeric import array_equal
9 |
10 | from numpy.testing._private.utils import assert_almost_equal
11 |
12 | from deeptrack import elementwise, features, Image
13 |
14 | import numpy as np
15 |
16 | import numpy.testing
17 | import inspect
18 |
19 |
def grid_test_features(
    tester,
    feature,
    feature_inputs,
    expected_result_function,
):
    """Check `feature` against a reference function on a grid of inputs.

    For every input, the feature is evaluated both by direct construction
    (``feature(inp)``) and by piping (``inp >> feature()``); both must agree
    with the expected result. If evaluating the feature raises, the
    reference function must raise the same exception type for that input.

    Parameters
    ----------
    tester : unittest.TestCase
        Test case used for assertions.
    feature : type
        Feature class under test.
    feature_inputs : iterable
        Concrete input values to evaluate the feature on.
    expected_result_function : callable
        Reference implementation producing the expected output.

    """

    for f_a_input in feature_inputs:

        inp = features.Value(f_a_input)

        f_a = feature(inp)
        f_b = inp >> feature()

        for f in [f_a, f_b]:
            try:
                output = f()
            except Exception as e:
                # The feature failed; the reference must fail the same way.
                tester.assertRaises(
                    type(e),
                    lambda: expected_result_function(f_a_input),
                )
                continue

            expected_result = expected_result_function(f_a_input)
            output = np.array(output)
            try:
                expected_result = np.array(expected_result)
            except TypeError:
                # Some results wrap their payload; unwrap before comparing.
                expected_result = expected_result.get()

            # NOTE(review): `output` is always an ndarray here (converted
            # above), so the original isinstance(output, list) branch was
            # dead code and has been removed; the double negative
            # assertFalse(not is_equal) is replaced by assertTrue.
            is_equal = np.allclose(
                np.array(output), np.array(expected_result), equal_nan=True
            )

            tester.assertTrue(
                is_equal,
                "Feature output {} is not equal to expect result {}.\n Using arguments {}".format(
                    output, expected_result, f_a_input
                ),
            )
68 |
69 |
def create_test(cl):
    """Build a (name, function) pair that tests `cl` against its numpy
    namesake (e.g. class `Abs` against `np.abs`)."""
    testname = "test_{}".format(cl.__name__)

    def test(self):
        scalar_inputs = [-1, 0, 1]
        array_input = (np.random.rand(50, 500) - 0.5) * 100
        grid_test_features(
            self,
            cl,
            scalar_inputs + [array_input],
            np.__dict__[cl.__name__.lower()],
        )

    test.__name__ = testname

    return testname, test
89 |
90 |
class TestFeatures(unittest.TestCase):
    # Empty shell: test methods are generated and attached dynamically at
    # import time, one per concrete ElementwiseFeature subclass.
    pass
93 |
94 |
classes = inspect.getmembers(elementwise, inspect.isclass)

# Generate one test method per concrete elementwise feature class and attach
# it to TestFeatures.
for clname, cl in classes:

    # Skip the abstract base class itself and any class that is not an
    # ElementwiseFeature subclass (e.g. classes merely imported into the
    # module namespace).
    if not issubclass(cl, elementwise.ElementwiseFeature) or (
        cl is elementwise.ElementwiseFeature
    ):
        continue

    testname, test_method = create_test(cl)
    setattr(TestFeatures, testname, test_method)


if __name__ == "__main__":
    unittest.main()
110 |
--------------------------------------------------------------------------------
/deeptrack/tests/test_holography.py:
--------------------------------------------------------------------------------
1 | # Use this only when running the test locally.
2 | # import sys
3 | # sys.path.append(".") # Adds the module to path.
4 |
5 | import unittest
6 |
7 | import numpy as np
8 |
9 | from deeptrack import holography
10 |
class TestOpticalFieldFunctions(unittest.TestCase):
    """Tests for the propagation and Fourier-domain helpers in holography."""

    def test_get_propagation_matrix(self):
        matrix = holography.get_propagation_matrix(
            shape=(128, 128),
            to_z=1.0,
            pixel_size=0.1,
            wavelength=0.65e-6,
            dx=0,
            dy=0
        )
        self.assertEqual(matrix.shape, (128, 128))
        self.assertTrue(np.iscomplexobj(matrix))

    def test_rescale(self):
        factor = 0.5
        field = np.random.rand(128, 128, 2)
        rescaled = holography.Rescale(rescale=factor)(field)
        # Channel 0 is rescaled around 1, channel 1 around 0.
        expected_mean = (field[..., 0].mean() - 1) * factor + 1
        self.assertAlmostEqual(rescaled[..., 0].mean(), expected_mean)
        self.assertTrue(
            np.allclose(rescaled[..., 1], field[..., 1] * factor)
        )

    def test_fourier_transform(self):
        field = np.random.rand(128, 128, 2)
        transformed = holography.FourierTransform()(field)
        self.assertTrue(np.iscomplexobj(transformed))
        # The transform pads the input by 32 pixels per side: 128 + 2*32.
        self.assertEqual(transformed.shape, (192, 192))

    def test_inverse_fourier_transform(self):
        field = np.random.rand(128, 128, 2)
        transformed = holography.FourierTransform()(field)
        roundtrip = holography.InverseFourierTransform()(transformed)
        # Forward + inverse transform should reproduce the input field.
        self.assertTrue(np.allclose(field, roundtrip, atol=1e-5))
49 |
50 |
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
53 |
--------------------------------------------------------------------------------
/deeptrack/tests/test_math.py:
--------------------------------------------------------------------------------
1 | # Use this only when running the test locally.
2 | # import sys
3 | # sys.path.append(".") # Adds the module to path.
4 |
5 | import unittest
6 |
7 | import numpy as np
8 | from scipy.ndimage import uniform_filter
9 |
10 | from deeptrack import math
11 | from deeptrack.backend import OPENCV_AVAILABLE, TORCH_AVAILABLE, xp
12 | from deeptrack.tests import BackendTestBase
13 |
14 |
class TestMathNumpy(BackendTestBase):
    """Tests for deeptrack.math run against the NumPy backend.

    Subclasses may override ``BACKEND`` (see ``TestMathTorch``) to run the
    same tests against another array backend through the ``xp`` namespace.
    """

    BACKEND = "numpy"

    def test_Average(self):
        expected_shape = (10, 30, 20)
        input_image0 = xp.ones((10, 30, 20)) * 2
        input_image1 = xp.ones((10, 30, 20)) * 4
        feature = math.Average(axis=0)
        average = feature.resolve([input_image0, input_image1])
        # Averaging 2 and 4 elementwise yields 3 everywhere.
        self.assertTrue(xp.all(average == 3))
        self.assertEqual(average.shape, expected_shape)

    def test_Clip(self):
        input_image = xp.asarray(np.array([[10, 4], [4, -10]]))
        expected_output_1 = xp.asarray(np.array([[5, 4], [4, -5]]))
        expected_output_2 = xp.asarray(np.array([[5, 6], [7, 8]]))

        feature = math.Clip(min=-5, max=5)
        clipped_feature = feature.resolve(input_image)
        self.assertTrue(xp.all(clipped_feature == expected_output_1))

        # Values already inside the clipping range are left untouched.
        input_image = xp.asarray(np.array([[5, 6], [7, 8]]))
        feature = math.Clip(min=0, max=10)
        clipped_feature = feature.resolve(input_image)
        self.assertTrue(xp.all(clipped_feature == expected_output_2))

    def test_NormalizeMinMax(self):
        input_image = xp.asarray(np.array([[10, 4], [4, -10]]))
        expected_output = xp.asarray(np.array([[5, 2], [2, -5]]))

        feature = math.NormalizeMinMax(min=-5, max=5)
        normalized_image = feature.resolve(input_image)
        self.assertTrue(xp.all(normalized_image == expected_output))

    def test_NormalizeStandard(self):
        feature = math.NormalizeStandard()
        input_image = xp.asarray(np.array([[1, 2], [3, 4]], dtype=float))
        normalized_image = feature.resolve(input_image)
        # Compare with a tolerance: the mean/std of the normalized image are
        # only exact up to floating-point rounding. The float() cast also
        # makes the comparison backend-agnostic (NumPy scalar / torch tensor).
        self.assertAlmostEqual(float(xp.mean(normalized_image)), 0.0)
        self.assertAlmostEqual(float(xp.std(normalized_image)), 1.0)

    def test_Blur(self):
        # TODO: check this test with torch
        pass
        # input_image = xp.asarray(np.array([[1, 2], [3, 4]], dtype=float))
        # expected_output = xp.asarray(np.array([[1, 1.5], [2, 2.5]]))

        # feature = math.Blur(filter_function=uniform_filter, size=2)
        # blurred_image = feature.resolve(input_image)
        # self.assertTrue(xp.all(blurred_image == expected_output))
65 |
66 |
# Re-run all TestMathNumpy tests with the backend switched to torch.
@unittest.skipUnless(TORCH_AVAILABLE, "PyTorch is not installed.")
class TestMathTorch(TestMathNumpy):
    """Tests for deeptrack.math run against the PyTorch backend."""

    BACKEND = "torch"
72 |
73 |
class TestMath(unittest.TestCase):
    """Tests for NumPy- and OpenCV-backed features in deeptrack.math."""

    def test_GaussianBlur(self):
        # sigma=0 must leave the image unchanged.
        input_image = np.array([[1, 2], [3, 4]], dtype=float)
        feature = math.GaussianBlur(sigma=0)
        blurred_image = feature.resolve(input_image)
        self.assertTrue(np.all(blurred_image == [[1, 2], [3, 4]]))

        # A very large sigma averages the image towards its mean (2.5).
        # Use the absolute difference so deviations in either direction fail.
        input_image = np.array([[1, 2], [3, 4]], dtype=float)
        feature = math.GaussianBlur(sigma=1000)
        blurred_image = feature.resolve(input_image)
        self.assertTrue(
            np.all(np.abs(blurred_image - [[2.5, 2.5], [2.5, 2.5]]) <= 0.01)
        )

    def test_AveragePooling(self):
        input_image = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=float)
        feature = math.AveragePooling(ksize=2)
        pooled_image = feature.resolve(input_image)
        self.assertTrue(np.all(pooled_image == [[3.5, 5.5]]))

    def test_MaxPooling(self):
        input_image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        feature = math.MaxPooling(ksize=2)
        pooled_image = feature.resolve(input_image)
        self.assertTrue(np.all(pooled_image == [[5, 6], [8, 9]]))

    def test_MinPooling(self):
        input_image = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        feature = math.MinPooling(ksize=2)
        pooled_image = feature.resolve(input_image)
        self.assertTrue(np.all(pooled_image == [[1, 3]]))

    def test_NormalizeQuantile(self):
        input_image = np.array([[1, 2], [3, 100]], dtype=float)
        feature = math.NormalizeQuantile(quantiles=(0.25, 0.75))
        output = feature.resolve(input_image)
        # Quantile normalization centers the median at 0.
        self.assertAlmostEqual(np.median(output), 0, places=5)

    def test_MedianBlur(self):
        input_image = np.random.rand(32, 32)
        feature = math.MedianBlur(ksize=3)
        output = feature.resolve(input_image)
        self.assertEqual(output.shape, input_image.shape)

    def test_MedianPooling(self):
        input_image = np.array([[1, 3, 2, 4], [5, 7, 6, 8]], dtype=float)
        feature = math.MedianPooling(ksize=2)
        pooled = feature.resolve(input_image)
        self.assertEqual(pooled.shape, (1, 2))

    @unittest.skipUnless(OPENCV_AVAILABLE, "OpenCV is not installed.")
    def test_Resize(self):
        input_image = np.random.rand(16, 16)
        feature = math.Resize(dsize=(8, 8))
        resized = feature.resolve(input_image)
        self.assertEqual(resized.shape, (8, 8))

    @unittest.skipUnless(OPENCV_AVAILABLE, "OpenCV is not installed.")
    def test_BlurCV2_GaussianBlur(self):
        import cv2

        input_image = np.random.rand(32, 32).astype(np.float32)
        expected_output = cv2.GaussianBlur(
            input_image, ksize=(5, 5), sigmaX=1, borderType=cv2.BORDER_REFLECT
        )
        feature = math.BlurCV2(
            filter_function=cv2.GaussianBlur, ksize=(5, 5), sigmaX=1, mode="reflect"
        )
        output_image = feature.resolve(input_image)
        self.assertEqual(output_image.shape, expected_output.shape)
        # assert_allclose raises AssertionError on mismatch; no extra
        # wrapping (e.g. assertIsNone) is needed.
        np.testing.assert_allclose(
            output_image,
            expected_output,
            rtol=1e-5,
            atol=1e-6,
        )

    @unittest.skipUnless(OPENCV_AVAILABLE, "OpenCV is not installed.")
    def test_BlurCV2_bilateralFilter(self):
        import cv2

        input_image = np.random.rand(32, 32).astype(np.float32)
        expected_output = cv2.bilateralFilter(
            input_image,
            d=9,
            sigmaColor=75,
            sigmaSpace=75,
            borderType=cv2.BORDER_REFLECT,
        )
        feature = math.BlurCV2(
            filter_function=cv2.bilateralFilter,
            d=9,
            sigmaColor=75,
            sigmaSpace=75,
            mode="reflect",
        )
        output_image = feature.resolve(input_image)
        self.assertEqual(output_image.shape, expected_output.shape)
        np.testing.assert_allclose(
            output_image,
            expected_output,
            rtol=1e-5,
            atol=1e-6,
        )

    @unittest.skipUnless(OPENCV_AVAILABLE, "OpenCV is not installed.")
    def test_BilateralBlur(self):
        import cv2

        input_image = np.random.rand(32, 32).astype(np.float32)
        expected_output = cv2.bilateralFilter(
            input_image,
            d=9,
            sigmaColor=75,
            sigmaSpace=75,
            borderType=cv2.BORDER_REFLECT,
        )
        feature = math.BilateralBlur(
            d=9, sigma_color=75, sigma_space=75, mode="reflect"
        )
        output_image = feature.resolve(input_image)
        self.assertEqual(output_image.shape, expected_output.shape)
        np.testing.assert_allclose(
            output_image,
            expected_output,
            rtol=1e-5,
            atol=1e-6,
        )
207 |
208 |
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
211 |
--------------------------------------------------------------------------------
/deeptrack/tests/test_noises.py:
--------------------------------------------------------------------------------
1 | # Use this only when running the test locally.
2 | # import sys
3 | # sys.path.append(".") # Adds the module to path.
4 |
5 | import unittest
6 |
7 | import numpy as np
8 |
9 | from deeptrack.image import Image
10 | from deeptrack import noises
11 |
12 |
class TestNoises(unittest.TestCase):
    """Tests for the noise features in deeptrack.noises."""

    def _resolve_on_constant(self, noise, value=0.0):
        # Resolve the noise feature on a constant 256x256 input image and
        # verify the common output contract (ndarray type and shape),
        # returning the result for feature-specific assertions.
        input_image = Image(np.ones((256, 256)) * value)
        output_image = noise.resolve(input_image)
        self.assertIsInstance(output_image, np.ndarray)
        self.assertEqual(output_image.shape, (256, 256))
        return output_image

    def test_Offset(self):
        output = self._resolve_on_constant(noises.Offset(offset=0.5))
        self.assertTrue(np.all(np.array(output) == 0.5))

    def test_Background(self):
        output = self._resolve_on_constant(noises.Background(offset=0.5))
        self.assertTrue(np.all(np.array(output) == 0.5))

    def test_Gaussian(self):
        self._resolve_on_constant(noises.Gaussian(mu=0.1, sigma=0.05))

    def test_ComplexGaussian(self):
        output = self._resolve_on_constant(
            noises.ComplexGaussian(mu=0.1, sigma=0.05)
        )
        self.assertTrue(np.iscomplexobj(output))

    def test_Poisson(self):
        self._resolve_on_constant(noises.Poisson(snr=20), value=0.1)
52 |
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
--------------------------------------------------------------------------------
/deeptrack/tests/test_optics.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | # sys.path.append(".") # Adds the module to path
4 |
5 | import unittest
6 |
7 | from deeptrack import features
8 | from deeptrack import units as u
9 | from deeptrack import optics
10 |
11 | from deeptrack.scatterers import PointParticle, Sphere
12 | from deeptrack.image import Image
13 |
14 |
15 | import numpy as np
16 |
17 |
class TestOptics(unittest.TestCase):
    """Tests for the imaging modalities in deeptrack.optics.

    Note: the two upscale tests were previously named after the wrong
    modality (the "fluorescence" test built a Brightfield microscope and
    vice versa); the names have been swapped to match their bodies.
    """

    def test_Microscope(self):
        microscope_type = optics.Fluorescence()
        scatterer = PointParticle()
        microscope = optics.Microscope(
            sample=scatterer, objective=microscope_type,
        )
        output_image = microscope.get(None)
        self.assertIsInstance(output_image, np.ndarray)
        self.assertEqual(output_image.shape, (128, 128, 1))

    def test_Optics(self):
        microscope = optics.Optics()
        scatterer = PointParticle()
        image = microscope(scatterer)
        # Calling an optics device on a sample wraps it in a Microscope.
        self.assertIsInstance(image, optics.Microscope)

    def test_Fluorescence(self):
        microscope = optics.Fluorescence(
            NA=0.7,
            wavelength=660e-9,
            resolution=1e-6,
            magnification=10,
            refractive_index_medium=1.33,
            upscale=2,
            padding=(10, 10, 10, 10),
            output_region=(0, 0, 64, 64),
            aberration=None,
        )
        scatterer = PointParticle(
            intensity=100,  # Squared magnitude of the field.
            position_unit="pixel",  # Units of position (default meter)
            position=(32, 32),  # Position of the particle
        )
        imaged_scatterer = microscope(scatterer)
        output_image = imaged_scatterer.resolve()
        self.assertIsInstance(output_image, np.ndarray)
        self.assertEqual(microscope.NA(), 0.7)
        self.assertEqual(output_image.shape, (64, 64, 1))

    def test_Brightfield(self):
        microscope = optics.Brightfield(
            NA=0.7,
            wavelength=660e-9,
            resolution=1e-6,
            magnification=10,
            refractive_index_medium=1.33,
            upscale=2,
            output_region=(0, 0, 64, 64),
            padding=(10, 10, 10, 10),
            aberration=None,
        )
        scatterer = PointParticle(
            refractive_index=1.45 + 0.1j,
            position_unit="pixel",
            position=(32, 32),
        )
        imaged_scatterer = microscope(scatterer)
        output_image = imaged_scatterer.resolve()
        self.assertIsInstance(output_image, np.ndarray)
        self.assertEqual(output_image.shape, (64, 64, 1))

    def test_Holography(self):
        microscope = optics.Holography(
            NA=0.7,
            wavelength=660e-9,
            resolution=1e-6,
            magnification=10,
            refractive_index_medium=1.33,
            upscale=2,
            output_region=(0, 0, 64, 64),
            padding=(10, 10, 10, 10),
            aberration=None,
        )
        scatterer = PointParticle(
            refractive_index=1.45 + 0.1j,
            position_unit="pixel",
            position=(32, 32),
        )
        imaged_scatterer = microscope(scatterer)
        output_image = imaged_scatterer.resolve()
        self.assertIsInstance(output_image, np.ndarray)
        self.assertEqual(output_image.shape, (64, 64, 1))

    def test_ISCAT(self):
        microscope = optics.ISCAT(
            NA=0.7,
            wavelength=660e-9,
            resolution=1e-6,
            magnification=10,
            refractive_index_medium=1.33,
            upscale=2,
            output_region=(0, 0, 64, 64),
            padding=(10, 10, 10, 10),
            aberration=None,
        )
        scatterer = PointParticle(
            refractive_index=1.45 + 0.1j,
            position_unit="pixel",
            position=(32, 32),
        )
        imaged_scatterer = microscope(scatterer)
        output_image = imaged_scatterer.resolve()
        # ISCAT illuminates from behind: angle pi.
        self.assertEqual(microscope.illumination_angle(), 3.141592653589793)
        self.assertIsInstance(output_image, np.ndarray)
        self.assertEqual(output_image.shape, (64, 64, 1))

    def test_Darkfield(self):
        microscope = optics.Darkfield(
            NA=0.7,
            wavelength=660e-9,
            resolution=1e-6,
            magnification=10,
            refractive_index_medium=1.33,
            upscale=2,
            output_region=(0, 0, 64, 64),
            padding=(10, 10, 10, 10),
            aberration=None,
        )
        scatterer = PointParticle(
            refractive_index=1.45 + 0.1j,
            position_unit="pixel",
            position=(32, 32),
        )
        imaged_scatterer = microscope(scatterer)
        output_image = imaged_scatterer.resolve()
        # Darkfield illuminates from the side: angle pi/2.
        self.assertEqual(microscope.illumination_angle(), 1.5707963267948966)
        self.assertIsInstance(output_image, np.ndarray)
        self.assertEqual(output_image.shape, (64, 64, 1))

    def test_IlluminationGradient(self):
        illumination_gradient = optics.IlluminationGradient(gradient=(5e-5, 5e-5))
        microscope = optics.Brightfield(
            NA=0.7,
            wavelength=660e-9,
            resolution=1e-6,
            magnification=10,
            refractive_index_medium=1.33,
            upscale=2,
            output_region=(0, 0, 64, 64),
            padding=(10, 10, 10, 10),
            aberration=None,
            illumination=illumination_gradient,
        )
        scatterer = PointParticle(
            refractive_index=1.45 + 0.1j,
            position_unit="pixel",
            position=(32, 32),
        )
        imaged_scatterer = microscope(scatterer)
        output_image = imaged_scatterer.resolve()
        self.assertIsInstance(output_image, np.ndarray)
        self.assertEqual(output_image.shape, (64, 64, 1))

    def test_upscale_brightfield(self):
        microscope = optics.Brightfield(
            NA=0.7,
            wavelength=660e-9,
            resolution=1e-6,
            magnification=5,
            refractive_index_medium=1.33,
            upscale=2,
            output_region=(0, 0, 64, 64),
            padding=(10, 10, 10, 10),
            aberration=None,
        )
        scatterer = Sphere(
            refractive_index=1.45,
            radius=1e-6,
            z=2 * u.um,
            position_unit="pixel",
            position=(32, 32),
        )

        imaged_scatterer = microscope(scatterer)
        output_image_no_upscale = imaged_scatterer.update()(upscale=1)

        output_image_2x_upscale = imaged_scatterer.update()(upscale=(2, 2, 2))

        self.assertEqual(output_image_no_upscale.shape, (64, 64, 1))
        self.assertEqual(output_image_2x_upscale.shape, (64, 64, 1))
        # Ensure the upscaled image is almost the same as the original image

        error = np.abs(
            output_image_2x_upscale - output_image_no_upscale
        ).mean()  # Mean absolute error
        self.assertLess(error, 0.01)

    def test_upscale_fluorescence(self):
        microscope = optics.Fluorescence(
            NA=0.5,
            wavelength=660e-9,
            resolution=1e-6,
            magnification=10,
            refractive_index_medium=1.33,
            upscale=2,
            output_region=(0, 0, 64, 64),
            padding=(10, 10, 10, 10),
            aberration=None,
        )
        scatterer = Sphere(
            intensity=100,
            radius=1e-6,
            z=2 * u.um,
            position_unit="pixel",
            position=(32, 32),
        )

        imaged_scatterer = microscope(scatterer)
        output_image_no_upscale = imaged_scatterer.update()(upscale=1)

        output_image_2x_upscale = imaged_scatterer.update()(upscale=(2, 2, 1))

        self.assertEqual(output_image_no_upscale.shape, (64, 64, 1))
        self.assertEqual(output_image_2x_upscale.shape, (64, 64, 1))
        # Ensure the upscaled image is almost the same as the original image

        error = np.abs(
            output_image_2x_upscale - output_image_no_upscale
        ).mean()  # Mean absolute error
        self.assertLess(error, 0.01)
239 |
240 |
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
--------------------------------------------------------------------------------
/deeptrack/tests/test_properties.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=C0115:missing-class-docstring
2 | # pylint: disable=C0116:missing-function-docstring
3 | # pylint: disable=C0103:invalid-name
4 |
5 | # Use this only when running the test locally.
6 | # import sys
7 | # sys.path.append(".") # Adds the module to path.
8 |
9 | import unittest
10 |
11 | from deeptrack.backend.core import DeepTrackNode
12 | from deeptrack.utils import get_kwarg_names
13 | import numpy as np
14 |
15 | from deeptrack import properties
16 |
17 |
18 | class TestProperties(unittest.TestCase):
19 |
20 | def test_Property_constant_list_nparray(self):
21 | P = properties.Property(42)
22 | self.assertEqual(P(), 42)
23 | P.update()
24 | self.assertEqual(P(), 42)
25 |
26 | P = properties.Property((1, 2, 3))
27 | self.assertEqual(P(), (1, 2, 3))
28 | P.update()
29 | self.assertEqual(P(), (1, 2, 3))
30 |
31 | P = properties.Property(np.array([1, 2, 3]))
32 | np.testing.assert_array_equal(P(), np.array([1, 2, 3]))
33 | P.update()
34 | np.testing.assert_array_equal(P(), np.array([1, 2, 3]))
35 |
36 |
37 | def test_Property_function(self):
38 |
39 | # Lambda function.
40 | P = properties.Property(lambda x: x * 2, x=properties.Property(10))
41 | self.assertEqual(P(), 20)
42 | P.update()
43 | self.assertEqual(P(), 20)
44 |
45 | # Function.
46 | def func1(x):
47 | return 2 * x
48 |
49 | P = properties.Property(func1, x=properties.Property(10))
50 | self.assertEqual(P(), 20)
51 | P.update()
52 | self.assertEqual(P(), 20)
53 |
54 | # Lambda function with randomness.
55 | P = properties.Property(lambda: np.random.rand())
56 | for _ in range(10):
57 | P.update()
58 | self.assertEqual(P(), P())
59 | self.assertTrue(P() >= 0 and P() <= 1)
60 |
61 | # Function with randomness.
62 | def func2(x):
63 | return 2 * x
64 |
65 | P = properties.Property(
66 | func2,
67 | x=properties.Property(lambda: np.random.rand()),
68 | )
69 | for _ in range(10):
70 | P.update()
71 | self.assertEqual(P(), P())
72 | self.assertTrue(P() >= 0 and P() <= 2)
73 |
74 |
75 | def test_Property_slice(self):
76 | P = properties.Property(slice(1, lambda: 10, properties.Property(2)))
77 | result = P()
78 | self.assertEqual(result.start, 1)
79 | self.assertEqual(result.stop, 10)
80 | self.assertEqual(result.step, 2)
81 | P.update()
82 | self.assertEqual(result.start, 1)
83 | self.assertEqual(result.stop, 10)
84 | self.assertEqual(result.step, 2)
85 |
86 |
87 | def test_Property_iterable(self):
88 | P = properties.Property(iter([1, 2, 3]))
89 |
90 | self.assertEqual(P(), 1)
91 | P.update()
92 | self.assertEqual(P(), 2)
93 | P.update()
94 | self.assertEqual(P(), 3)
95 | P.update()
96 | self.assertEqual(P(), 3) # Last value repeats indefinitely
97 |
98 |
99 | def test_Property_list(self):
100 | P = properties.Property([1, lambda: 2, properties.Property(3)])
101 | self.assertEqual(P(), [1, 2, 3])
102 | P.update()
103 | self.assertEqual(P(), [1, 2, 3])
104 |
105 | P = properties.Property(
106 | [
107 | lambda _ID=(): 1 * np.random.rand(),
108 | lambda: 2 * np.random.rand(),
109 | properties.Property(lambda _ID=(): 3 * np.random.rand()),
110 | ]
111 | )
112 | for _ in range(10):
113 | P.update()
114 | self.assertEqual(P(), P())
115 | self.assertTrue(P()[0] >= 0 and P()[0] <= 1)
116 | self.assertTrue(P()[1] >= 0 and P()[1] <= 2)
117 | self.assertTrue(P()[2] >= 0 and P()[2] <= 3)
118 |
119 |
120 | def test_Property_dict(self):
121 | P = properties.Property(
122 | {
123 | "a": 1,
124 | "b": lambda: 2,
125 | "c": properties.Property(3),
126 | }
127 | )
128 | self.assertEqual(P(), {"a": 1, "b": 2, "c": 3})
129 | P.update()
130 | self.assertEqual(P(), {"a": 1, "b": 2, "c": 3})
131 |
132 | P = properties.Property(
133 | {
134 | "a": lambda _ID=(): 1 * np.random.rand(),
135 | "b": lambda: 2 * np.random.rand(),
136 | "c": properties.Property(lambda _ID=(): 3 * np.random.rand()),
137 | }
138 | )
139 | for _ in range(10):
140 | P.update()
141 | self.assertEqual(P(), P())
142 | self.assertTrue(P()["a"] >= 0 and P()["a"] <= 1)
143 | self.assertTrue(P()["b"] >= 0 and P()["b"] <= 2)
144 | self.assertTrue(P()["c"] >= 0 and P()["c"] <= 3)
145 |
146 |
147 | def test_Property_DeepTrackNode(self):
148 | node = DeepTrackNode(100)
149 | P = properties.Property(node)
150 | self.assertEqual(P(), 100)
151 | P.update()
152 | self.assertEqual(P(), 100)
153 |
154 | node = DeepTrackNode(lambda _ID=(): np.random.rand())
155 | P = properties.Property(node)
156 | for _ in range(10):
157 | P.update()
158 | self.assertEqual(P(), P())
159 | self.assertTrue(P() >= 0 and P() <= 1)
160 |
161 |
162 | def test_Property_ID(self):
163 | P = properties.Property(lambda _ID: _ID)
164 | self.assertEqual(P(), ())
165 |
166 | P = properties.Property(lambda _ID: _ID)
167 | self.assertEqual(P((1,)), (1,))
168 |
169 | P = properties.Property(lambda _ID: _ID)
170 | self.assertEqual(P((1, 2, 3)), (1, 2, 3))
171 |
172 |
173 | def test_Property_combined(self):
174 | P = properties.Property(
175 | {
176 | "constant": 42,
177 | "list": [1, lambda: 2, properties.Property(3)],
178 | "dict": {"a": properties.Property(1), "b": lambda: 2},
179 | "function": lambda x, y: x * y,
180 | "slice": slice(1, lambda: 10, properties.Property(2)),
181 | },
182 | x=properties.Property(5),
183 | y=properties.Property(3),
184 | )
185 |
186 | result = P()
187 | self.assertEqual(result["constant"], 42)
188 | self.assertEqual(result["list"], [1, 2, 3])
189 | self.assertEqual(result["dict"], {"a": 1, "b": 2})
190 | self.assertEqual(result["function"], 15)
191 | self.assertEqual(result["slice"].start, 1)
192 | self.assertEqual(result["slice"].stop, 10)
193 | self.assertEqual(result["slice"].step, 2)
194 |
195 |
196 | def test_PropertyDict(self):
197 |
198 | PD = properties.PropertyDict(
199 | constant=42,
200 | random=lambda: np.random.rand(),
201 | dependent=lambda constant: constant + 1,
202 | )
203 |
204 | self.assertIn("constant", PD)
205 | self.assertIn("constant", PD())
206 | self.assertIn("random", PD)
207 | self.assertIn("random", PD())
208 | self.assertIn("dependent", PD)
209 | self.assertIn("dependent", PD())
210 |
211 | self.assertIsInstance(PD["constant"], properties.Property)
212 | self.assertEqual(PD["constant"](), 42)
213 | self.assertEqual(PD()["constant"], 42)
214 |
215 | self.assertIsInstance(PD["random"], properties.Property)
216 | self.assertTrue(0 <= PD["random"]() <= 1)
217 | self.assertTrue(0 <= PD()["random"] <= 1)
218 |
219 | self.assertIsInstance(PD["dependent"], properties.Property)
220 | self.assertEqual(PD["dependent"](), 43)
221 | self.assertEqual(PD()["dependent"], 43)
222 |
223 |
224 | def test_SequentialProperty(self):
225 | SP = properties.SequentialProperty()
226 | SP.sequence_length.store(5)
227 | SP.current = lambda _ID=(): SP.sequence_step() + 1
228 |
229 | for step in range(SP.sequence_length()):
230 | SP.sequence_step.store(step)
231 | current_value = SP.current()
232 | SP.store(current_value)
233 |
234 | self.assertEqual(SP.data[()].current_value(),
235 | list(range(1, step + 2)))
236 |
237 | SP.previous_value.invalidate()
238 | # print(SP.previous_value())
239 |
240 | SP.previous_values.invalidate()
241 | # print(SP.previous_values())
242 |
243 | self.assertEqual(SP.previous_value(), 4)
244 | self.assertEqual(SP.previous_values(),
245 | list(range(1, SP.sequence_length() - 1)))
246 |
247 |
248 | if __name__ == "__main__":
249 | unittest.main()
250 |
--------------------------------------------------------------------------------
/deeptrack/tests/test_sequences.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | # sys.path.append(".") # Adds the module to path
4 |
5 | import unittest
6 |
7 | from matplotlib import pyplot
8 |
9 | from deeptrack import sequences
10 |
11 | from deeptrack.optics import Fluorescence
12 | from deeptrack.scatterers import Ellipse
13 | import numpy as np
14 |
15 |
class TestSequences(unittest.TestCase):
    """Tests for time-sequenced features in deeptrack.sequences."""

    def test_Sequence(self):
        optics = Fluorescence(
            output_region=(0, 0, 32, 32),
        )
        ellipse = Ellipse(
            position_unit="pixel",
            position=(16, 16),
            intensity=1,
            radius=(1.5e-6, 1e-6),
            rotation=0,  # This will be the value at time 0.
            upsample=2,
        )

        def get_rotation(sequence_length, previous_value):
            return previous_value + 2 * np.pi / sequence_length

        rotating_ellipse = sequences.Sequential(ellipse, rotation=get_rotation)
        imaged_rotating_ellipse = optics(rotating_ellipse)
        imaged_rotating_ellipse_sequence = sequences.Sequence(
            imaged_rotating_ellipse, sequence_length=5
        )
        imaged_rotating_ellipse_sequence.store_properties()

        self.assertIsInstance(imaged_rotating_ellipse_sequence, sequences.Sequence)

        outputs = imaged_rotating_ellipse_sequence()

        # The rotation advances by 2*pi/5 per frame.
        for i, out in enumerate(outputs):

            self.assertAlmostEqual(out.get_property("rotation"), 2 * i * np.pi / 5)

    def test_Dependent_Sequential(self):

        optics = Fluorescence(
            output_region=(0, 0, 32, 32),
        )
        ellipse = Ellipse(
            position_unit="pixel",
            position=(16, 16),
            radius=(1.5e-6, 1e-6),
            rotation=0,  # This will be the value at time 0.
            upsample=2,
        )

        def get_rotation(sequence_length, previous_value):
            return previous_value + 2 * np.pi / sequence_length

        # Intensity is defined in terms of rotation to test dependent
        # sequential properties.
        def get_intensity(rotation):
            return rotation * 2

        rotating_ellipse = sequences.Sequential(
            ellipse, rotation=get_rotation, intensity=get_intensity
        )
        imaged_rotating_ellipse = optics(rotating_ellipse)
        imaged_rotating_ellipse_sequence = sequences.Sequence(
            imaged_rotating_ellipse, sequence_length=5
        )
        imaged_rotating_ellipse_sequence.store_properties()

        self.assertIsInstance(imaged_rotating_ellipse_sequence, sequences.Sequence)

        outputs = imaged_rotating_ellipse_sequence()

        for i, out in enumerate(outputs):
            self.assertAlmostEqual(out.get_property("rotation"), 2 * i * np.pi / 5)
            self.assertAlmostEqual(out.get_property("intensity"), 4 * i * np.pi / 5)

    def test_RepeatedParticle(self):

        optics = Fluorescence(
            output_region=(0, 0, 32, 32),
        )
        ellipse = Ellipse(
            position_unit="pixel",
            position=lambda: np.random.randn(2) * 4 + (16, 16),
            radius=(1.5e-6, 1e-6),
            rotation=0,  # This will be the value at time 0.
            upsample=2,
        )

        def get_rotation(sequence_length, previous_value):
            return previous_value + 2 * np.pi / sequence_length

        def get_intensity(rotation):
            return rotation * 2

        rotating_ellipse = sequences.Sequential(
            ellipse, rotation=get_rotation, intensity=get_intensity
        )
        # ``^ 2`` duplicates the particle; each copy must get its own
        # random position but share the sequential rotation/intensity.
        imaged_rotating_ellipse = optics(rotating_ellipse ^ 2)
        imaged_rotating_ellipse_sequence = sequences.Sequence(
            imaged_rotating_ellipse, sequence_length=5
        )
        imaged_rotating_ellipse_sequence.store_properties()

        self.assertIsInstance(imaged_rotating_ellipse_sequence, sequences.Sequence)
        imaged_rotating_ellipse_sequence.update()
        outputs = imaged_rotating_ellipse_sequence()

        for i, out in enumerate(outputs):
            rotations = out.get_property("rotation", get_one=False)
            intensity = out.get_property("intensity", get_one=False)
            positions = out.get_property("position", get_one=False)
            self.assertEqual(len(rotations), 2)
            self.assertEqual(len(intensity), 2)
            self.assertEqual(len(positions), 2)
            self.assertAlmostEqual(rotations[0], 2 * i * np.pi / 5)
            self.assertAlmostEqual(rotations[1], 2 * i * np.pi / 5)
            self.assertAlmostEqual(intensity[0], 4 * i * np.pi / 5)
            self.assertAlmostEqual(intensity[1], 4 * i * np.pi / 5)

            self.assertNotEqual(positions[0][0], positions[1][0])
            self.assertNotEqual(positions[0][1], positions[1][1])

    def test_DistributedRepeatedParticle(self):

        positions = [(16, 25), (15, 24)]
        optics = Fluorescence(
            output_region=(0, 0, 32, 32),
        )
        ellipse = Ellipse(
            position_unit="pixel",
            # Each repeated particle picks its position by replica index.
            position=lambda _ID: positions[_ID[-1]],
            radius=(1.5e-6, 1e-6),
            rotation=0,  # This will be the value at time 0.
            upsample=2,
        )

        def get_rotation(sequence_length, previous_value):
            return previous_value + 2 * np.pi / sequence_length

        def get_intensity(rotation):
            return rotation * 2

        rotating_ellipse = sequences.Sequential(
            ellipse, rotation=get_rotation, intensity=get_intensity
        )
        imaged_rotating_ellipse = optics(rotating_ellipse ^ 2)
        imaged_rotating_ellipse_sequence = sequences.Sequence(
            imaged_rotating_ellipse, sequence_length=5
        )
        imaged_rotating_ellipse_sequence.store_properties()

        self.assertIsInstance(imaged_rotating_ellipse_sequence, sequences.Sequence)
        imaged_rotating_ellipse_sequence.update()
        outputs = imaged_rotating_ellipse_sequence()

        for i, out in enumerate(outputs):
            rotations = out.get_property("rotation", get_one=False)
            intensity = out.get_property("intensity", get_one=False)
            p_positions = out.get_property("position", get_one=False)
            self.assertEqual(len(rotations), 2)
            self.assertEqual(len(intensity), 2)
            # Fixed: previously checked the module-level ``positions``
            # fixture (trivially of length 2) instead of the retrieved
            # ``p_positions`` property list.
            self.assertEqual(len(p_positions), 2)
            self.assertAlmostEqual(rotations[0], 2 * i * np.pi / 5)
            self.assertAlmostEqual(rotations[1], 2 * i * np.pi / 5)
            self.assertAlmostEqual(intensity[0], 4 * i * np.pi / 5)
            self.assertAlmostEqual(intensity[1], 4 * i * np.pi / 5)

            self.assertSequenceEqual(list(p_positions[0]), list(positions[0]))
            self.assertSequenceEqual(list(p_positions[1]), list(positions[1]))
178 |
179 |
# Run the tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
182 |
--------------------------------------------------------------------------------
/deeptrack/tests/test_statistics.py:
--------------------------------------------------------------------------------
1 | # Use this only when running the test locally.
2 | # import sys
3 | # sys.path.append(".") # Adds the module to path.
4 |
5 | import unittest
6 |
7 | import numpy as np
8 |
9 | from deeptrack import statistics, features
10 |
11 |
class TestFeatures(unittest.TestCase):
    """Tests for the statistical reducers in deeptrack.statistics."""

    def test_sum(self):
        # Two identical vectors summed together along axis 0.
        data = [np.ones((2,)), np.ones((2,))]
        reducer = statistics.Sum(axis=0, distributed=False)
        self.assertTrue(np.all(reducer(data) == np.array([2., 2.])))

        # Zero matrices reduced along axis 1 stay zero.
        data = [np.zeros((2, 3)), np.zeros((2, 3))]
        reducer = statistics.Sum(axis=1, distributed=False)
        expected = np.array([[0., 0., 0.], [0., 0., 0.]])
        self.assertTrue(np.all(reducer(data) == expected))

    def test_mean(self):
        data = [np.ones((2,)), np.ones((2,))]
        reducer = statistics.Mean(axis=0, distributed=False)
        self.assertTrue(np.all(reducer(data) == np.array([1., 1.])))

        data = [np.array([1., 2.]), np.array([3., 4.])]
        reducer = statistics.Mean(axis=0, distributed=False)
        self.assertTrue(np.all(reducer(data) == np.array([2., 3.])))

    def test_std(self):
        data = [np.array([1., 2.]), np.array([1., 3.])]
        reducer = statistics.Std(axis=0, distributed=False)
        self.assertTrue(np.all(reducer(data) == np.array([0., 0.5])))

    def test_variance(self):
        data = [np.array([1., 2.]), np.array([1., 3.])]
        reducer = statistics.Variance(axis=0, distributed=False)
        self.assertTrue(np.all(reducer(data) == np.array([0., 0.25])))

    def test_peak_to_peak(self):
        data = [np.array([1., 2.]), np.array([1.5, 3.])]
        reducer = statistics.PeakToPeak(axis=0, distributed=False)
        self.assertTrue(np.all(reducer(data) == np.array([0.5, 1.])))

    def test_quantile(self):
        data = [np.array([1., 2., 3., 1., 10.])]
        # The 0.5 quantile is the median.
        reducer = statistics.Quantile(q=0.5, axis=1, distributed=False)
        self.assertTrue(np.all(reducer(data) == np.array([2.])))

    def test_percentile(self):
        data = [np.array([1., 2., 3., 4., 10.])]
        reducer = statistics.Percentile(q=75, axis=1, distributed=False)
        self.assertTrue(np.all(reducer(data) == np.array([4.])))

    def test_prod(self):
        data = [np.array([1., 2.]), np.array([3., 4.])]
        reducer = statistics.Prod(axis=0, distributed=False)
        self.assertTrue(np.all(reducer(data) == np.array([3., 8.])))

    def test_median(self):
        data = [np.array([10., 3., 1., 4., 2.])]
        reducer = statistics.Median(axis=1, distributed=False)
        self.assertTrue(np.all(reducer(data) == np.array([3.])))

    def test_cumsum(self):
        data = [np.array([1., 2., 3.]), np.array([1., 1., 1.])]
        reducer = statistics.Cumsum(axis=1, distributed=False)
        expected = np.array([[1., 3., 6.], [1., 2., 3.]])
        self.assertTrue(np.all(reducer(data) == expected))

    def test_nan(self):
        # NaN propagates through both Mean and Prod.
        data = [np.array([1., 2., np.nan]), np.array([np.nan, 1., 1.])]

        mean = statistics.Mean(axis=0, distributed=False)(data)
        self.assertTrue(np.isnan(mean[0]))
        self.assertTrue(mean[1] == 1.5)
        self.assertTrue(np.isnan(mean[2]))

        prod = statistics.Prod(axis=0, distributed=False)(data)
        self.assertTrue(np.isnan(prod[0]))
        self.assertTrue(prod[1] == 2)
        self.assertTrue(np.isnan(prod[2]))

    def test_inf(self):
        # Infinity propagates through Mean.
        data = [np.array([1., 2., np.inf]), np.array([np.inf, 1., 1.])]
        mean = statistics.Mean(axis=0, distributed=False)(data)
        self.assertTrue(np.isinf(mean[0]))
        self.assertTrue(mean[1] == 1.5)
        self.assertTrue(np.isinf(mean[2]))

    def test_edge_cases(self):
        cases = [
            -1,
            0,
            1,
            (np.random.rand(3, 5) - 0.5) * 100,
            np.inf,
            np.nan,
            [np.zeros((3, 4)), np.ones((3, 4))],
            np.random.rand(2, 3, 2, 3),
        ]

        every_reducer = [
            statistics.Sum,
            statistics.Mean,
            statistics.Prod,
            statistics.Median,
            statistics.Std,
            statistics.Variance,
            statistics.PeakToPeak,
            statistics.Quantile,
            statistics.Percentile,
        ]

        # Only these reducers are exercised on an infinite scalar input.
        inf_safe_reducers = [
            statistics.Sum,
            statistics.Mean,
            statistics.Prod,
            statistics.Median,
        ]

        for case in cases:
            reducers = inf_safe_reducers if case is np.inf else every_reducer
            for reducer_class in reducers:
                self._test_single_case(case, reducer_class)

    def _test_single_case(self, case, feature_class):
        # Each reducer must produce some (non-None) result for the input.
        result = feature_class(axis=0, distributed=False)([case])
        self.assertIsNotNone(result)

    def test_broadcast_list(self):
        source = features.Value([1, 0])
        # Subtracting the mean, built either way, yields all zeros.
        self.assertListEqual((source - statistics.Mean(source))(), [0, 0])
        self.assertListEqual((source - (source >> statistics.Mean()))(), [0, 0])
159 |
160 |
# Run the tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/deeptrack/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=C0115:missing-class-docstring
2 | # pylint: disable=C0116:missing-function-docstring
3 | # pylint: disable=C0103:invalid-name
4 |
5 | # Use this only when running the test locally.
6 | # import sys
7 | # sys.path.append(".") # Adds the module to path.
8 |
9 | import unittest
10 |
11 | import deeptrack as dt
12 | import numpy as np
13 |
14 | from deeptrack import utils
15 |
16 |
class DummyClass:
    """Minimal fixture: exposes one no-op method and a fixed length."""

    def method(self):
        pass

    def __len__(self):
        return 42
20 |
21 |
class TestUtils(unittest.TestCase):
    """Tests for the helper functions in deeptrack.utils."""

    def test_hasmethod(self):
        # (target, attribute, expected) triples covering modules,
        # classes, instances, ints, and built-in containers.
        cases = [
            (utils, "hasmethod", True),
            (utils, "not_a_method", False),
            (DummyClass, "method", True),
            (DummyClass, "not_real", False),
            (DummyClass(), "method", True),
            (DummyClass(), "__len__", True),
            (123, "foo", False),  # int has no foo
            ([], "append", True),
            ([], "not_a_real_method", False),
        ]
        for target, attr, expected in cases:
            self.assertIs(bool(utils.hasmethod(target, attr)), expected)

    def test_as_list(self):
        # Scalars are wrapped in a singleton list.
        for scalar in (1, None, 3.14):
            self.assertEqual(utils.as_list(scalar), [scalar])

        # Containers are converted to plain lists.
        self.assertEqual(utils.as_list([1, 2]), [1, 2])
        self.assertEqual(utils.as_list((1, 2)), [1, 2])
        self.assertEqual(sorted(utils.as_list({1, 2})), [1, 2])

        # Generators are exhausted into a list.
        self.assertEqual(utils.as_list(i for i in range(2)), [0, 1])

        # Strings and bytes count as scalars, not iterables.
        self.assertEqual(utils.as_list("abc"), ["abc"])
        self.assertEqual(utils.as_list(b"123"), [b"123"])

        # A numpy array becomes a list of its elements.
        elements = utils.as_list(np.array([1, 2, 3]))
        self.assertIsInstance(elements, list)
        self.assertTrue(
            all(isinstance(item, (int, np.generic)) for item in elements)
        )

        if dt.TORCH_AVAILABLE:
            import torch

            rows = utils.as_list(torch.tensor([[1, 2], [3, 4]]))

            # By default a 2D tensor splits into its row tensors.
            self.assertEqual(len(rows), 2)
            self.assertTrue(all(isinstance(r, torch.Tensor) for r in rows))

    def test_get_kwarg_names(self):
        def no_args(): pass
        self.assertEqual(utils.get_kwarg_names(no_args), [])

        def one_positional(a): pass
        self.assertEqual(utils.get_kwarg_names(one_positional), ["a"])

        def positional_and_default(a, b=1): pass
        self.assertEqual(utils.get_kwarg_names(positional_and_default), ["a", "b"])

        def star_then_kwonly(a, *args, b=2): pass
        self.assertEqual(utils.get_kwarg_names(star_then_kwonly), ["b"])

        def kwonly_pair(*args, b, c=2): pass
        self.assertEqual(utils.get_kwarg_names(kwonly_pair), ["b", "c"])

        def trailing_star(a, b, *args): pass
        self.assertEqual(utils.get_kwarg_names(trailing_star), [])

        def with_var_kwargs(a, b=1, c=3, **kwargs): pass
        self.assertEqual(utils.get_kwarg_names(with_var_kwargs), ["a", "b", "c"])

        # Built-in callables must not raise.
        self.assertIsInstance(utils.get_kwarg_names(len), list)

        # Lambdas behave like plain functions.
        add = lambda a, b=2: a + b
        self.assertEqual(utils.get_kwarg_names(add), ["a", "b"])

        # Unbound methods include "self".
        self.assertIn("self", utils.get_kwarg_names(DummyClass.method))

    def test_kwarg_has_default(self):
        def sample(a, b=2): pass

        self.assertFalse(utils.kwarg_has_default(sample, "a"))
        self.assertTrue(utils.kwarg_has_default(sample, "b"))
        # A name missing from the signature reports no default.
        self.assertFalse(utils.kwarg_has_default(sample, "c"))

    def test_safe_call(self):
        def add3(a, b=2, c=3):
            return a + b + c

        # Positional plus full / partial / absent keyword arguments.
        self.assertEqual(utils.safe_call(add3, positional_args=[1], b=2, c=3), 6)
        self.assertEqual(utils.safe_call(add3, positional_args=[1], b=4), 8)
        self.assertEqual(utils.safe_call(add3, positional_args=[1]), 6)
        # Unknown keyword arguments are dropped silently.
        self.assertEqual(utils.safe_call(add3, positional_args=[1], b=5, x=10), 9)
        # Keyword-only invocation.
        self.assertEqual(utils.safe_call(add3, a=1, b=2, c=3), 6)

        # Keywords not in the signature are ignored.
        def identity(a):
            return a
        self.assertEqual(utils.safe_call(identity, a=42, extrakw=1), 42)

        # A missing required argument raises TypeError.
        with self.assertRaises(TypeError):
            utils.safe_call(identity)

        def kwonly_sum(a, *, b):
            return a + b
        with self.assertRaises(TypeError):
            utils.safe_call(kwonly_sum, a=1)  # Missing b
137 |
138 |
# Run the tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
141 |
--------------------------------------------------------------------------------
/deeptrack/types.py:
--------------------------------------------------------------------------------
1 | """Type declarations for internal use.
2 |
3 | This module defines type aliases and utility types to standardize the type
4 | annotations used throughout the codebase. It enhances code readability,
5 | maintainability, and reduces redundancy in type annotations. These types are
6 | particularly useful for properties and array-like structures used within the
7 | library.
8 |
9 | Defined Types
10 | -------------
11 | - `PropertyLike`
12 | A type alias representing a value of type `T` or a callable returning `T`.
13 | - `ArrayLike`
14 | A type alias for array-like structures (e.g., tuples, lists, numpy arrays).
15 | - `NumberLike`
16 | A type alias for numeric types, including scalars and arrays (e.g., numpy
17 | arrays, GPU tensors).
18 |
19 | Examples
20 | --------
21 | Using `PropertyLike`:
22 |
23 | >>> def scale(value: PropertyLike[float]) -> float:
24 | ... if callable(value):
25 | ... return value()
26 | ... return value
27 | >>> scale(3.14) # 3.14
28 | >>> scale(lambda: 2.71) # 2.71
29 |
30 | Using `ArrayLike`:
31 |
32 | >>> import numpy as np
33 | >>> def compute_mean(array: ArrayLike[float]) -> float:
34 | ... return np.mean(array)
35 | >>> compute_mean([1.0, 2.0, 3.0]) # 2.0
36 | >>> compute_mean((4.0, 5.0, 6.0)) # 5.0
37 | >>> compute_mean(np.array([7.0, 8.0, 9.0])) # 8.0
38 |
39 | Using `NumberLike`:
40 |
41 | >>> def add_numbers(a: NumberLike, b: NumberLike) -> NumberLike:
42 | ... return a + b
43 | >>> add_numbers(5, 3.2) # 8.2
44 | >>> add_numbers(np.array([1, 2, 3]), 4) # array([5, 6, 7])
45 |
46 | """
47 |
48 | from __future__ import annotations
49 |
50 | from typing import Callable, List, Tuple, TypeVar, Union, TYPE_CHECKING
51 |
52 | import numpy as np
53 |
54 |
55 | if TYPE_CHECKING:
56 | import torch
57 | from deeptrack.image import Image
58 |
59 |
# _T is a generic type variable used to parameterize the aliases below.
# BUG FIX: the TypeVar's string argument must match the variable name it
# is bound to ("_T", not "T"), as required by PEP 484 and enforced by
# static type checkers such as mypy and pyright.
_T = TypeVar("_T")

# PropertyLike[T]: either a plain value of type T or a callable that
# returns a value of type T.
PropertyLike = Union[_T, Callable[..., _T]]

# ArrayLike[T]: any array-like structure -- numpy arrays, torch tensors,
# deeptrack Images, or plain lists/tuples of elements of type T.
# "torch.Tensor" and "Image" are forward references so the heavy imports
# only occur under TYPE_CHECKING.
ArrayLike = Union[
    np.ndarray,
    "torch.Tensor",
    "Image",
    List[_T],
    Tuple[_T, ...],
]

# NumberLike: any numeric value, scalar or array.
NumberLike = Union[np.ndarray, "torch.Tensor", int, float, bool, complex]
79 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "scripts": {
3 | "publish": "py setup.py sdist bdist_wheel && py -m twine upload --repository pypi dist/*",
4 | "make-docs-windows": "cd _src && make html && Xcopy /Y /I /E .\\build\\html ..\\docs && type nul > ../docs/.nojekyll",
5 | "full-make-docs-windows": "cd _src && py .\\autodoc_dt.py && make html && Xcopy /Y /I /E .\\build\\html ..\\docs && type nul > ../docs/.nojekyll",
6 | "test": "set TF_CPP_MIN_LOG_LEVEL=2 && py -W ignore -m unittest discover",
7 | "benchmark": "python -m pytest --benchmark-autosave",
8 | "benchmark:fluorescence": "python -m pytest ./deeptrack/benchmarks/test_fluorescence.py --benchmark-autosave --disable-pytest-warnings --benchmark-compare --benchmark-compare-fail=min:5%",
9 | "benchmark:mie": "python -m pytest ./deeptrack/benchmarks/test_simulate_mie.py --benchmark-autosave --disable-pytest-warnings --benchmark-compare --benchmark-compare-fail=min:5%"
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.pytest.ini_options]
2 | minversion = "6.0"
3 | testpaths = [
4 | "deeptrack/benchmarks",
5 | ]
6 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | matplotlib
3 | scipy
4 | scikit-image
5 | more_itertools
6 | pint
7 | pandas
8 | tqdm
9 | lazy_import
10 | rich
11 | gdown
12 | array_api_compat
13 | array_api_strict
14 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file=README-pypi.md
3 | license_files=LICENSE
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
"""Setup script for the deeptrack package."""

from setuptools import setup, find_packages

# The long description shown on PyPI comes from the PyPI-specific README.
with open("README-pypi.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

with open("requirements.txt", "r", encoding="utf-8") as fh:
    required = fh.read().splitlines()

# Remove documentation-only dependencies from the install requirements.
required = [
    line
    for line in required
    if not line.startswith(("Sphinx", "pydata-sphinx-theme"))
]

setup(
    name="deeptrack",
    version="2.0.1",
    license="MIT",
    packages=find_packages(),
    author=(
        "Benjamin Midtvedt, Jesus Pineda, Henrik Klein Moberg, "
        "Harshith Bachimanchi, Mirja Granfors, Alex Lech, Carlo Manzo, "
        "Giovanni Volpe"
    ),
    description=(
        "A deep learning framework to enhance microscopy, "
        "developed by DeepTrackAI."
    ),
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/DeepTrackAI/DeepTrack2",
    install_requires=required,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # BUG FIX: the setuptools keyword is "extras_require"; the previous
    # "extras_requires" was an unknown keyword and was silently ignored,
    # so `pip install deeptrack[tensorflow]` did not work.
    extras_require={
        "tensorflow": [
            "tensorflow<=2.10",
            "tensorflow-probability",
            "tensorflow-datasets",
            "tensorflow_addons",
        ]
    },
    python_requires=">=3.8",
)
39 |
--------------------------------------------------------------------------------
/tutorials/2-examples/assets/overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepTrackAI/DeepTrack2/c1ad44b7a0a2e3f38e054d14e13332a5609a953b/tutorials/2-examples/assets/overview.png
--------------------------------------------------------------------------------
/tutorials/2-examples/readme_to_incorporate into MAGIK examples.md:
--------------------------------------------------------------------------------
1 | # MAGIK
2 |
3 | MAGIK is a geometric deep learning approach for the analysis of dynamical properties from time-lapse microscopy.
4 | Here we provide the code as well as instructions to train models and to analyze experimental data.
5 |
6 | # Getting started
7 |
8 | ## Installation from PyPi
9 |
10 | MAGIK requires at least python 3.6.
11 |
12 | To install MAGIK you must install the [Deeptrack](https://github.com/softmatterlab/DeepTrack-2.0) framework. Open a terminal or command prompt and run:
13 |
14 | pip install deeptrack
15 |
16 | ## Software requirements
17 |
18 | ### OS Requirements
19 |
20 | MAGIK has been tested on the following systems:
21 |
22 | - macOS: Monterey (12.2.1)
23 | - Windows: 10 (64-bit)
24 |
25 | ### Python Dependencies
26 |
27 | ```
28 | tensorflow
29 | numpy
30 | matplotlib
31 | scipy
32 | Sphinx==2.2.0
33 | pydata-sphinx-theme
34 | numpydoc
35 | scikit-image
36 | tensorflow-probability
37 | pint
38 | pandas
39 |
40 | ```
41 |
42 | If you have a very recent version of python, you may need to install numpy _before_ DeepTrack. This is a known issue with scikit-image.
43 |
44 | ## It's a kind of MAGIK...
45 |
46 | To see MAGIK in action, we provide an [example](https://github.com/softmatterlab/DeepTrack-2.0/blob/develop/examples/MAGIK/) based on live-cell migration experiments. Data courtesy of Sergi Masó Orriols, [the QuBI lab](https://mon.uvic.cat/qubilab/).
47 |
48 | ## Cite us!
49 |
50 | If you use MAGIK in your project, please cite our article:
51 |
52 | ```
53 | Jesús Pineda, Benjamin Midtvedt, Harshith Bachimanchi, Sergio Noé, Daniel Midtvedt, Giovanni Volpe, and Carlo Manzo
54 | "Geometric deep learning reveals the spatiotemporal fingerprint of microscopic motion."
55 | arXiv 2202.06355 (2022).
56 | https://arxiv.org/pdf/2202.06355.pdf
57 | ```
58 |
59 | ## Funding
60 |
61 | This work was supported by FEDER/Ministerio de Ciencia, Innovación y Universidades – Agencia Estatal de Investigación
62 | through the “Ramón y Cajal” program 2015 (Grant No. RYC-2015-17896) and the “Programa Estatal de I+D+i Orientada a los Retos de la Sociedad” (Grant No. BFU2017-85693-R); the Generalitat de Catalunya (AGAUR Grant No. 2017SGR940); the ERC Starting Grant ComplexSwimmers (Grant No. 677511); and the ERC Starting Grant MAPEI (101001267); the Knut and Alice Wallenberg Foundation (Grant No. 2019.0079).
63 |
64 | ## License
65 |
66 | This project is covered under the **MIT License**.
67 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT381_math.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.math\n",
8 | "\n",
9 | ""
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "This advanced tutorial introduces the module deeptrack.math."
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": 2,
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "import numpy as np"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {},
40 | "source": [
41 | "## 1. What is included in math?\n",
42 | "\n",
43 | "The module math.py includes features that perform some mathematical operation on the input image. This can be used to normalize images, or merge several features."
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": [
50 | "## 2. Clip\n",
51 | "\n",
52 | "Clips the input within a minimum and a maximum value."
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": 3,
58 | "metadata": {
59 | "execution": {
60 | "iopub.execute_input": "2022-06-29T20:33:12.489396Z",
61 | "iopub.status.busy": "2022-06-29T20:33:12.489396Z",
62 | "iopub.status.idle": "2022-06-29T20:33:12.492396Z",
63 | "shell.execute_reply": "2022-06-29T20:33:12.492396Z"
64 | }
65 | },
66 | "outputs": [
67 | {
68 | "name": "stdout",
69 | "output_type": "stream",
70 | "text": [
71 | "[[ 10 4]\n",
72 | " [ 4 -10]]\n",
73 | "becomes\n",
74 | "[[ 5 4]\n",
75 | " [ 4 -5]]\n"
76 | ]
77 | }
78 | ],
79 | "source": [
80 | "import deeptrack.math as math\n",
81 | "\n",
82 | "input_image = np.array(\n",
83 | " [\n",
84 | " [10, 4],\n",
85 | " [4, -10],\n",
86 | " ]\n",
87 | ")\n",
88 | "\n",
89 | "clip = math.Clip(min=-5, max=5)\n",
90 | "\n",
91 | "print(input_image)\n",
92 | "print(\"becomes\")\n",
93 | "print(clip.resolve(input_image))"
94 | ]
95 | },
96 | {
97 | "cell_type": "markdown",
98 | "metadata": {},
99 | "source": [
100 | "## 3. NormalizeMinMax\n",
101 | "\n",
102 | "Transforms the input to be between a minimum and a maximum value using a linear transformation."
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": 4,
108 | "metadata": {
109 | "execution": {
110 | "iopub.execute_input": "2022-06-29T20:33:12.495896Z",
111 | "iopub.status.busy": "2022-06-29T20:33:12.495397Z",
112 | "iopub.status.idle": "2022-06-29T20:33:12.497897Z",
113 | "shell.execute_reply": "2022-06-29T20:33:12.497897Z"
114 | }
115 | },
116 | "outputs": [
117 | {
118 | "name": "stdout",
119 | "output_type": "stream",
120 | "text": [
121 | "[[ 10 4]\n",
122 | " [ 4 -10]]\n",
123 | "becomes\n",
124 | "[[ 5. 2.]\n",
125 | " [ 2. -5.]]\n"
126 | ]
127 | }
128 | ],
129 | "source": [
130 | "input_image = np.array(\n",
131 | " [\n",
132 | " [10, 4],\n",
133 | " [4, -10],\n",
134 | " ]\n",
135 | ")\n",
136 | "\n",
137 | "feature = math.NormalizeMinMax(min=-5, max=5)\n",
138 | "\n",
139 | "print(input_image)\n",
140 | "print(\"becomes\")\n",
141 | "print(feature.resolve(input_image))"
142 | ]
143 | }
144 | ],
145 | "metadata": {
146 | "kernelspec": {
147 | "display_name": "py_env_book",
148 | "language": "python",
149 | "name": "python3"
150 | },
151 | "language_info": {
152 | "codemirror_mode": {
153 | "name": "ipython",
154 | "version": 3
155 | },
156 | "file_extension": ".py",
157 | "mimetype": "text/x-python",
158 | "name": "python",
159 | "nbconvert_exporter": "python",
160 | "pygments_lexer": "ipython3",
161 | "version": "3.10.15"
162 | }
163 | },
164 | "nbformat": 4,
165 | "nbformat_minor": 2
166 | }
167 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT385_statistics.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.statistics\n",
8 | "\n",
9 | ""
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "This advanced tutorial introduces the module deeptrack.statistics."
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": 2,
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "import numpy as np\n",
35 | "\n",
36 | "from deeptrack import statistics"
37 | ]
38 | },
39 | {
40 | "cell_type": "markdown",
41 | "metadata": {},
42 | "source": [
43 | "## 1. What is the `statistics` module?\n",
44 | "\n",
45 | "The `statistics` module provides a set of features that perform statistical operations on input data to reduce its dimensionality. These operations include common tasks like summing, averaging, and calculating standard deviations along specified axes. The module is built around NumPy functions, so it offers familiar syntax and behavior for users. Additionally, it supports a `distributed` option, which determines whether each image in\n",
46 | "the input list should be handled individually or not."
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "metadata": {},
52 | "source": [
53 | "## 2. The `Reducer` class\n",
54 | "\n",
55 | "The `Reducer` class is the base class for features that reduce the input dimensionality using a statistical function. This class handles most of the core logic for the operations in the statistics module, including specifying the function (e.g., sum, mean) and the axis along which to reduce. Users typically won't interact with `Reducer` directly but will instead use its subclasses (e.g., `Sum`, `Mean`, `Std`) that provide the specific statistical function to apply."
56 | ]
57 | },
58 | {
59 | "cell_type": "markdown",
60 | "metadata": {},
61 | "source": [
62 | "## 3. Statistical Operations"
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "metadata": {},
68 | "source": [
69 | "### Sum\n",
70 | "\n",
71 | "The `Sum` operation calculates the sum of the input values along the specified axis."
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 3,
77 | "metadata": {},
78 | "outputs": [
79 | {
80 | "name": "stdout",
81 | "output_type": "stream",
82 | "text": [
83 | "[array([1., 1.]), array([0., 0.])]\n"
84 | ]
85 | }
86 | ],
87 | "source": [
88 | "# Example data.\n",
89 | "\n",
90 | "input_values = [np.ones((2,)), np.zeros((2,))]\n",
91 | "\n",
92 | "print(input_values)"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": 4,
98 | "metadata": {},
99 | "outputs": [
100 | {
101 | "name": "stdout",
102 | "output_type": "stream",
103 | "text": [
104 | "[2.0, 0.0]\n"
105 | ]
106 | }
107 | ],
108 | "source": [
109 | "sum_operation = statistics.Sum(axis=0, distributed=True)\n",
110 | "\n",
111 | "sum_result = sum_operation(input_values)\n",
112 | "\n",
113 | "print(sum_result)"
114 | ]
115 | },
116 | {
117 | "cell_type": "markdown",
118 | "metadata": {},
119 | "source": [
120 | "Above, the sum operation is performed along axis 0, with `distributed`=True, meaning the input arrays are processed individually. \n",
121 | "By setting `distributed`=False, the inputs are handled together, instead of individually:"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": 5,
127 | "metadata": {},
128 | "outputs": [
129 | {
130 | "name": "stdout",
131 | "output_type": "stream",
132 | "text": [
133 | "[1. 1.]\n"
134 | ]
135 | }
136 | ],
137 | "source": [
138 | "sum_operation = statistics.Sum(axis=0, distributed=False)\n",
139 | "\n",
140 | "sum_result = sum_operation(input_values)\n",
141 | "\n",
142 | "print(sum_result)"
143 | ]
144 | },
145 | {
146 | "cell_type": "markdown",
147 | "metadata": {},
148 | "source": [
149 | "### Product\n",
150 | "\n",
151 | "The `Prod` operation calculates the product of the input values along the specified axis."
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": 6,
157 | "metadata": {},
158 | "outputs": [
159 | {
160 | "name": "stdout",
161 | "output_type": "stream",
162 | "text": [
163 | "[1.0, 0.0]\n"
164 | ]
165 | }
166 | ],
167 | "source": [
168 | "prod_operation = statistics.Prod(axis=0, distributed=True)\n",
169 | "\n",
170 | "prod_result = prod_operation(input_values)\n",
171 | "\n",
172 | "print(prod_result)"
173 | ]
174 | },
175 | {
176 | "cell_type": "markdown",
177 | "metadata": {},
178 | "source": [
179 | "### Other operations\n",
180 | "\n",
181 | "Other statistical operations in the module include:\n",
182 | "\n",
183 | "`Mean`: Computes the arithmetic mean along the specified axis.\n",
184 | "\n",
185 | "`Median`: Computes the median along the specified axis.\n",
186 | "\n",
187 | "`Std`: Computes the standard deviation along the specified axis.\n",
188 | "\n",
189 | "`Variance`: Computes the variance along the specified axis.\n",
190 | "\n",
191 | "`Cumsum`: Computes the cumulative sum along the specified axis.\n",
192 | "\n",
193 | "`Min/Max`: Computes the minimum/maximum values along the specified axis.\n",
194 | "\n",
195 | "`PeakToPeak`: Computes the range (max - min) along the specified axis.\n",
196 | "\n",
197 | "`Quantile`: Computes the q-th quantile along the specified axis.\n",
198 | "\n",
199 | "`Percentile`: Computes the q-th percentile along the specified axis."
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "metadata": {},
205 | "source": [
206 | "## 4. Adding Reducers to the Pipeline\n",
207 | "\n",
208 | "Reducers, such as `Sum`, can be added to a pipeline in two different ways. Both methods allow the reducer to be applied to the output of a preceding feature or sequence of features.\n",
209 | "\n",
210 | "`summed_pipeline = some_pipeline_of_features >> Sum(axis=0)`\n",
211 | "\n",
212 | "`summed_pipeline = Sum(some_pipeline_of_features, axis=0)`\n",
213 | "\n",
214 | "However, combining these two methods is not supported and may lead to unpredictable behavior."
215 | ]
216 | }
217 | ],
218 | "metadata": {
219 | "kernelspec": {
220 | "display_name": "py_env_book",
221 | "language": "python",
222 | "name": "python3"
223 | },
224 | "language_info": {
225 | "codemirror_mode": {
226 | "name": "ipython",
227 | "version": 3
228 | },
229 | "file_extension": ".py",
230 | "mimetype": "text/x-python",
231 | "name": "python",
232 | "nbconvert_exporter": "python",
233 | "pygments_lexer": "ipython3",
234 | "version": "3.10.15"
235 | }
236 | },
237 | "nbformat": 4,
238 | "nbformat_minor": 2
239 | }
240 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT387_types.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.types\n",
8 | "\n",
9 | "
"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "This advanced tutorial introduces the types module."
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "## 1. What is `types`?\n",
33 | "\n",
34 | "The `types` module introduces custom datatype aliases for type hints throughout the DeepTrack2 codebase.\n",
35 |     "Type hints are used when declaring a function or class to improve readability and maintainability by specifying expected parameters and attribute types.\n",
36 | "\n",
37 | "Currently there are three type aliases in DeepTrack2:\n",
38 | "\n",
39 | "- `PropertyLike` for values or callables, can contain a generic datatype or a callable function which returns a generic datatype.\n",
40 | "- `ArrayLike` for array-like structures (e.g., tuples, lists, numpy arrays).\n",
41 | "- `NumberLike` for numeric types, including scalars and arrays (e.g., numpy \n",
42 | " arrays, GPU tensors)."
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "metadata": {},
48 | "source": [
49 | "## 2. Declare a function with type hints\n",
50 | "\n",
51 | "An elementary example of the usage of a type hint can be seen below:"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 2,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "from deeptrack.types import ArrayLike\n",
61 | "\n",
62 | "def matrix_add(\n",
63 | " input: ArrayLike[float]\n",
64 | ") -> ArrayLike[float]:\n",
65 |     "    \"\"\"Function docstring.\"\"\"\n",
66 | " return input + input"
67 | ]
68 | },
69 | {
70 | "cell_type": "markdown",
71 | "metadata": {},
72 | "source": [
73 | "And in the context of DeepTrack2, a larger class with several type hints can look something like the following:"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": 3,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "from __future__ import annotations\n",
83 | "import deeptrack as dt\n",
84 | "from deeptrack.types import PropertyLike\n",
85 | "\n",
86 | "class Augmentation(dt.Feature):\n",
87 | "\n",
88 | " def __init__(\n",
89 | " self: Augmentation,\n",
90 | " time_consistent: bool = False,\n",
91 | " **kwargs\n",
92 | " ) -> None:\n",
93 | " super().__init__(time_consistent=time_consistent, **kwargs)\n",
94 | "\n",
95 | " def _image_wrapped_process_and_get(\n",
96 | " self: Augmentation,\n",
97 | " image_list: list[Image],\n",
98 | " time_consistent: PropertyLike[bool],\n",
99 | " **kwargs\n",
100 | " ) -> list[Image]:\n",
101 | " pass"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "metadata": {},
107 | "source": [
108 | "## 3. Declare a new type hint\n",
109 | "\n",
110 | "We will now declare a new type hint of our own which will represent non-integer numeric datatypes with higher precision than the standard float datatype. We will use `Union` for this, which lets us gather datatypes into a single set."
111 | ]
112 | },
113 | {
114 | "cell_type": "markdown",
115 | "metadata": {},
116 | "source": [
117 | "Declare the new type using Union ..."
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": 4,
123 | "metadata": {},
124 | "outputs": [
125 | {
126 | "name": "stdout",
127 | "output_type": "stream",
128 | "text": [
129 | "typing.Union[numpy.float64, numpy.longdouble, numpy.clongdouble]\n"
130 | ]
131 | }
132 | ],
133 | "source": [
134 | "import numpy as np\n",
135 | "from typing import Union\n",
136 | "\n",
137 |     "DoubleLike = Union[np.float64, np.longdouble, np.clongdouble]\n",
138 | "print(DoubleLike)"
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "metadata": {},
144 | "source": [
145 | "... and declare function with new type hint."
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": 5,
151 | "metadata": {},
152 | "outputs": [],
153 | "source": [
154 | "def double_add(\n",
155 | " input: ArrayLike[DoubleLike],\n",
156 | " number: DoubleLike\n",
157 | ") -> ArrayLike[DoubleLike]:\n",
158 |     "    \"\"\"Function docstring.\"\"\"\n",
159 | " return input + number"
160 | ]
161 | }
162 | ],
163 | "metadata": {
164 | "kernelspec": {
165 | "display_name": "py_env_book",
166 | "language": "python",
167 | "name": "python3"
168 | },
169 | "language_info": {
170 | "codemirror_mode": {
171 | "name": "ipython",
172 | "version": 3
173 | },
174 | "file_extension": ".py",
175 | "mimetype": "text/x-python",
176 | "name": "python",
177 | "nbconvert_exporter": "python",
178 | "pygments_lexer": "ipython3",
179 | "version": "3.10.15"
180 | }
181 | },
182 | "nbformat": 4,
183 | "nbformat_minor": 2
184 | }
185 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT389_elementwise.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.elementwise\n",
8 | "\n",
9 | "
"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "This advanced tutorial introduces the elementwise module."
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "## 1. What is elementwise?\n",
33 | "\n",
34 |     "The elementwise module introduces utility functions that let the user apply Numpy functions to `Feature` objects elementwise, that is element by element.\n",
35 | "\n",
36 | "Some functions included in elementwise are:\n",
37 | "\n",
38 | "- Trigonometric\n",
39 | "- Hyperbolic\n",
40 | "- Rounding \n",
41 | "- Exponents and Logarithms\n",
42 | "- Complex "
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "metadata": {},
48 | "source": [
49 | "## 2. Use example\n",
50 | "\n",
51 | "Create a feature that subtracts values from an image ..."
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 2,
57 | "metadata": {},
58 | "outputs": [
59 | {
60 | "name": "stdout",
61 | "output_type": "stream",
62 | "text": [
63 | "[-9 -8 -7]\n"
64 | ]
65 | }
66 | ],
67 | "source": [
68 | "import numpy as np\n",
69 | "from deeptrack.features import Feature\n",
70 | "\n",
71 | "class Subtract(Feature):\n",
72 | " def get(self, image, value, **kwargs):\n",
73 | " return image - value\n",
74 | "\n",
75 | "subtract_10 = Subtract(value=10)\n",
76 | "\n",
77 | "input_image = np.array([1, 2, 3])\n",
78 | "output_image = subtract_10(input_image)\n",
79 | "print(output_image)"
80 | ]
81 | },
82 | {
83 | "cell_type": "markdown",
84 | "metadata": {},
85 | "source": [
86 | "... and compute the absolute value of the feature (in sequence)."
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": 3,
92 | "metadata": {},
93 | "outputs": [
94 | {
95 | "name": "stdout",
96 | "output_type": "stream",
97 | "text": [
98 | "[9 8 7]\n",
99 | "[9 8 7]\n"
100 | ]
101 | }
102 | ],
103 | "source": [
104 | "from deeptrack.elementwise import Abs\n",
105 | "\n",
106 | "# Sequentially take the absolute value after subtraction.\n",
107 | "pipeline = Abs(subtract_10)\n",
108 | "output_image = pipeline(input_image)\n",
109 | "print(output_image)\n",
110 | "\n",
111 | "# Or equivalently:\n",
112 | "pipeline = subtract_10 >> Abs()\n",
113 | "output_image = pipeline(input_image)\n",
114 | "print(output_image)"
115 | ]
116 | }
117 | ],
118 | "metadata": {
119 | "kernelspec": {
120 | "display_name": "py_env_book",
121 | "language": "python",
122 | "name": "python3"
123 | },
124 | "language_info": {
125 | "codemirror_mode": {
126 | "name": "ipython",
127 | "version": 3
128 | },
129 | "file_extension": ".py",
130 | "mimetype": "text/x-python",
131 | "name": "python",
132 | "nbconvert_exporter": "python",
133 | "pygments_lexer": "ipython3",
134 | "version": "3.10.15"
135 | }
136 | },
137 | "nbformat": 4,
138 | "nbformat_minor": 2
139 | }
140 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT391A_sources.base.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.sources.base\n",
8 | "\n",
9 | "
"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "This advanced tutorial introduces the sources.base module."
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "## 1. What is `base`?\n",
33 | "\n",
34 | "The `base` module provides utilities for manipulating data sources, primarily when data needs to be dynamically manipulated, filtered or combined. This guide explains how to use each component in the module with examples."
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {},
40 | "source": [
41 | "## 2. Dynamically generate child nodes when attributes are accessed with `SourceDeepTrackNode`."
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "metadata": {},
48 | "outputs": [
49 | {
50 | "name": "stdout",
51 | "output_type": "stream",
52 | "text": [
53 | "30\n"
54 | ]
55 | }
56 | ],
57 | "source": [
58 | "from deeptrack.sources.base import SourceDeepTrackNode\n",
59 | "\n",
60 | "# Define parent node.\n",
61 | "node = SourceDeepTrackNode(lambda: {\"a\": 10, \"b\": 20})\n",
62 | "\n",
63 | "# Create child nodes.\n",
64 | "child_a = node.a\n",
65 | "child_b = node.b\n",
66 | "\n",
67 | "# Call child nodes.\n",
68 | "print(child_a() + child_b())"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "## 3. Generate a source item that allows callbacks when accessed"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": 3,
81 | "metadata": {},
82 | "outputs": [
83 | {
84 | "name": "stdout",
85 | "output_type": "stream",
86 | "text": [
87 | "Item accessed: SourceItem({'a': 5, 'b': 10})\n",
88 | "5\n"
89 | ]
90 | }
91 | ],
92 | "source": [
93 | "from deeptrack.sources.base import SourceItem\n",
94 | "\n",
95 | "def callback(item):\n",
96 | " print(\"Item accessed:\", item)\n",
97 | "\n",
98 | "# Create a SourceItem with a callback.\n",
99 | "item = SourceItem([callback], a=5, b=10)\n",
100 | "\n",
101 | "# Call the item to trigger a callback.\n",
102 | "item() \n",
103 | "\n",
104 | "# Access values directly\n",
105 | "print(item[\"a\"])"
106 | ]
107 | },
108 | {
109 | "cell_type": "markdown",
110 | "metadata": {},
111 | "source": [
112 | "## 4. Generate a dataset of multiple `SourceItem` objects."
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": 4,
118 | "metadata": {},
119 | "outputs": [
120 | {
121 | "name": "stdout",
122 | "output_type": "stream",
123 | "text": [
124 | "SourceItem({'a': 1, 'b': 4})\n",
125 | "SourceItem({'a': 2, 'b': 5})\n",
126 | "SourceItem({'a': 1, 'b': 4})\n",
127 | "SourceItem({'a': 2, 'b': 5})\n",
128 | "SourceItem({'a': 3, 'b': 6})\n"
129 | ]
130 | }
131 | ],
132 | "source": [
133 | "from deeptrack.sources.base import Source\n",
134 | "\n",
135 | "# Define a source with multiple attributes.\n",
136 | "dataset = Source(a=[1, 2, 3], b=[4, 5, 6])\n",
137 | "\n",
138 | "# Access elements by index.\n",
139 | "print(dataset[0])\n",
140 | "print(dataset[1]()) \n",
141 | "\n",
142 | "# Print the items in the dataset.\n",
143 | "for item in dataset:\n",
144 | " print(item)"
145 | ]
146 | },
147 | {
148 | "cell_type": "markdown",
149 | "metadata": {},
150 | "source": [
151 | "## 5. Combine existing attributes with `Product`"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": 5,
157 | "metadata": {},
158 | "outputs": [
159 | {
160 | "name": "stdout",
161 | "output_type": "stream",
162 | "text": [
163 | "SourceItem({'c': 5, 'a': 1, 'b': 3})\n",
164 | "SourceItem({'c': 6, 'a': 1, 'b': 3})\n",
165 | "SourceItem({'c': 5, 'a': 2, 'b': 4})\n",
166 | "SourceItem({'c': 6, 'a': 2, 'b': 4})\n"
167 | ]
168 | }
169 | ],
170 | "source": [
171 | "from deeptrack.sources.base import Source\n",
172 | "\n",
173 | "# Create a source\n",
174 | "source = Source(a=[1, 2], b=[3, 4])\n",
175 | "\n",
176 | "# Generate a new source as a product with new attributes.\n",
177 | "new_source = source.product(c=[5, 6])\n",
178 | "\n",
179 | "# Print the combinations.\n",
180 | "for item in new_source:\n",
181 | " print(item)"
182 | ]
183 | },
184 | {
185 | "cell_type": "markdown",
186 | "metadata": {},
187 | "source": [
188 | "## 6. Filter dataset items with `Subset`"
189 | ]
190 | },
191 | {
192 | "cell_type": "code",
193 | "execution_count": 6,
194 | "metadata": {},
195 | "outputs": [
196 | {
197 | "name": "stdout",
198 | "output_type": "stream",
199 | "text": [
200 | "SourceItem({'a': 2, 'b': 2})\n",
201 | "SourceItem({'a': 4, 'b': 4})\n",
202 | "SourceItem({'a': 2, 'b': 7})\n",
203 | "SourceItem({'a': 8, 'b': 9})\n",
204 | "SourceItem({'a': 8, 'b': 11})\n"
205 | ]
206 | }
207 | ],
208 | "source": [
209 | "from deeptrack.sources import Source, Subset\n",
210 | "\n",
211 | "# Define a source.\n",
212 | "source = Source(\n",
213 | " a=[1, 2, 3, 4, 2, 8, 8],\n",
214 | " b=[1, 2, 3, 4, 7, 9, 11 ]\n",
215 | ")\n",
216 | "\n",
217 | "# Create a subset with only even values of 'a'.\n",
218 | "subset = source.filter(lambda a, b: a % 2 == 0)\n",
219 | "\n",
220 | "# Print subset values.\n",
221 | "for item in subset:\n",
222 | " print(item)"
223 | ]
224 | },
225 | {
226 | "cell_type": "markdown",
227 | "metadata": {},
228 | "source": [
229 | "## 7. Random splitting of sources into multiple subsets\n"
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": 7,
235 | "metadata": {},
236 | "outputs": [
237 | {
238 | "name": "stdout",
239 | "output_type": "stream",
240 | "text": [
241 | "Subset 1:\n",
242 | "SourceItem({'a': 4, 'b': 10})\n",
243 | "SourceItem({'a': 3, 'b': 9})\n",
244 | "Subset 2:\n",
245 | "SourceItem({'a': 6, 'b': 12})\n",
246 | "SourceItem({'a': 5, 'b': 11})\n",
247 | "SourceItem({'a': 2, 'b': 8})\n",
248 | "SourceItem({'a': 1, 'b': 7})\n"
249 | ]
250 | }
251 | ],
252 | "source": [
253 | "from deeptrack.sources import Source, random_split\n",
254 | "import numpy as np\n",
255 | "\n",
256 | "# Create a source\n",
257 | "source = Source(\n",
258 | " a=[1, 2, 3, 4, 5, 6],\n",
259 | " b=[7, 8, 9, 10, 11, 12],\n",
260 | ")\n",
261 | "\n",
262 | "# Split into two subsets (proportionally to 30% and 70%)\n",
263 | "# commonly used for validation during training.\n",
264 | "train_subset, test_subset = random_split(\n",
265 | " source,\n",
266 | " lengths=[0.3, 0.7],\n",
267 | " generator=np.random.default_rng(42)\n",
268 | ")\n",
269 | "\n",
270 | "print(\"Subset 1:\")\n",
271 | "for item in train_subset:\n",
272 | " print(item)\n",
273 | "\n",
274 | "print(\"Subset 2:\")\n",
275 | "for item in test_subset:\n",
276 | " print(item)"
277 | ]
278 | }
279 | ],
280 | "metadata": {
281 | "kernelspec": {
282 | "display_name": "py_env_book",
283 | "language": "python",
284 | "name": "python3"
285 | },
286 | "language_info": {
287 | "codemirror_mode": {
288 | "name": "ipython",
289 | "version": 3
290 | },
291 | "file_extension": ".py",
292 | "mimetype": "text/x-python",
293 | "name": "python",
294 | "nbconvert_exporter": "python",
295 | "pygments_lexer": "ipython3",
296 | "version": "3.10.15"
297 | }
298 | },
299 | "nbformat": 4,
300 | "nbformat_minor": 2
301 | }
302 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT391B_sources.folder.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.sources.folder\n",
8 | "\n",
9 | "
"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "This advanced tutorial introduces the sources.folder module."
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "## 1. What is `folder`?\n",
33 | "\n",
34 | "The `folder` module enables the management of image datasets organized in a directory hierarchy. It contains a single class `ImageFolder` that provides utilities to perform structured naming, organization, and retrieval of image data."
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {},
40 | "source": [
41 | "## 2. Creating a Directory Structure\n",
42 | "\n",
43 | "Since the `ImageFolder` class expects images to be stored in directories categorized by class names, we will need to create a dummy directory structure for demonstration purposes."
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": 2,
49 | "metadata": {},
50 | "outputs": [],
51 | "source": [
52 | "import os\n",
53 | "import shutil\n",
54 | "\n",
55 | "from deeptrack.sources import folder\n",
56 | "\n",
57 | "\n",
58 | "# Define root directory.\n",
59 | "dataset_path = \"dummy_dataset\"\n",
60 | "\n",
61 | "# Define class names.\n",
62 | "classes = [\"cat\", \"dog\", \"bird\"]\n",
63 | "\n",
64 | "# Remove existing directory if exists.\n",
65 | "if os.path.exists(dataset_path):\n",
66 | " shutil.rmtree(dataset_path)\n",
67 | "\n",
68 | "# Create directories.\n",
69 | "for class_name in classes:\n",
70 | " os.makedirs(os.path.join(dataset_path, class_name))\n",
71 | "\n",
72 | "# Create some empty dummy files.\n",
73 | "for class_name in classes:\n",
74 | " for i in range(3): \n",
75 | " with open(os.path.join(dataset_path, class_name, f\"image_{i}.jpg\"), \"w\") as f:\n",
76 | " f.write(\"\")\n"
77 | ]
78 | },
79 | {
80 | "cell_type": "markdown",
81 | "metadata": {},
82 | "source": [
83 | "## 3. Initializing an `ImageFolder`.\n",
84 | "Now that the dummy directory is created, we initialize an `ImageFolder` object."
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": 3,
90 | "metadata": {},
91 | "outputs": [
92 | {
93 | "name": "stdout",
94 | "output_type": "stream",
95 | "text": [
96 | "Total images in dataset: 9\n",
97 | "Classes: ['dog', 'cat', 'bird']\n"
98 | ]
99 | }
100 | ],
101 | "source": [
102 | "data_source = folder.ImageFolder(dataset_path)\n",
103 | "\n",
104 | "# Print total number of images.\n",
105 | "print(f\"Total images in dataset: {len(data_source)}\")\n",
106 | "\n",
107 | "# Print class names.\n",
108 | "print(f\"Classes: {data_source.classes}\")"
109 | ]
110 | },
111 | {
112 | "cell_type": "markdown",
113 | "metadata": {},
114 | "source": [
115 | "## 4. Getting Category Names from File Paths\n"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": 4,
121 | "metadata": {},
122 | "outputs": [
123 | {
124 | "name": "stdout",
125 | "output_type": "stream",
126 | "text": [
127 | "Category of dummy_dataset/dog/image_1.jpg: dog\n"
128 | ]
129 | }
130 | ],
131 | "source": [
132 | "example_path = os.path.join(dataset_path, \"dog\", \"image_1.jpg\")\n",
133 | "category = data_source.get_category_name(example_path, directory_level=0)\n",
134 | "print(f\"Category of {example_path}: {category}\")\n"
135 | ]
136 | },
137 | {
138 | "cell_type": "markdown",
139 | "metadata": {},
140 | "source": [
141 | "## 5. Dataset Splitting.\n",
142 | "\n",
143 | "If the dataset has subcategories (e.g., train/dog, train/cat), we can split it according to those subcategories."
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": 5,
149 | "metadata": {},
150 | "outputs": [
151 | {
152 | "name": "stdout",
153 | "output_type": "stream",
154 | "text": [
155 | "Train set classes: ['cat']\n",
156 | "Test set classes: ['dog']\n"
157 | ]
158 | }
159 | ],
160 | "source": [
161 | "# Create directories if they don't exist.\n",
162 | "train_dir = os.path.join(dataset_path, \"train\")\n",
163 | "test_dir = os.path.join(dataset_path, \"test\")\n",
164 | "os.makedirs(train_dir, exist_ok=True)\n",
165 | "os.makedirs(test_dir, exist_ok=True)\n",
166 | "\n",
167 | "\n",
168 | "# Define source and destination paths\n",
169 | "cat_src = os.path.join(dataset_path, \"cat\")\n",
170 | "cat_dest = os.path.join(train_dir, \"cat\")\n",
171 | "\n",
172 | "dog_src = os.path.join(dataset_path, \"dog\")\n",
173 | "dog_dest = os.path.join(test_dir, \"dog\")\n",
174 | "\n",
175 | "\n",
176 | "# Move only if source exists and destination does not.\n",
177 | "if os.path.exists(cat_src) and not os.path.exists(cat_dest):\n",
178 | " shutil.move(cat_src, train_dir)\n",
179 | "\n",
180 | "if os.path.exists(dog_src) and not os.path.exists(dog_dest):\n",
181 | " shutil.move(dog_src, test_dir)\n",
182 | "\n",
183 | "split_data_source = folder.ImageFolder(dataset_path)\n",
184 | "\n",
185 | "# Split into train and test.\n",
186 | "train, test = split_data_source.split(\"train\", \"test\")\n",
187 | "\n",
188 | "print(f\"Train set classes: {train.classes}\")\n",
189 | "print(f\"Test set classes: {test.classes}\")\n"
190 | ]
191 | },
192 | {
193 | "cell_type": "markdown",
194 | "metadata": {},
195 | "source": [
196 | "## 6. Print directory structure\n",
197 | "The resulting directory structure from splitting the dataset can be visualized by running the code cell below."
198 | ]
199 | },
200 | {
201 | "cell_type": "code",
202 | "execution_count": 6,
203 | "metadata": {},
204 | "outputs": [
205 | {
206 | "name": "stdout",
207 | "output_type": "stream",
208 | "text": [
209 | "📂 dummy_dataset\n",
210 | " 📂 test\n",
211 | " 📂 dog\n",
212 | " 📄 image_0.jpg\n",
213 | " 📄 image_1.jpg\n",
214 | " 📄 image_2.jpg\n",
215 | " 📂 bird\n",
216 | " 📄 image_0.jpg\n",
217 | " 📄 image_1.jpg\n",
218 | " 📄 image_2.jpg\n",
219 | " 📂 train\n",
220 | " 📂 cat\n",
221 | " 📄 image_0.jpg\n",
222 | " 📄 image_1.jpg\n",
223 | " 📄 image_2.jpg\n"
224 | ]
225 | }
226 | ],
227 | "source": [
228 | "for root, dirs, files in os.walk(dataset_path):\n",
229 | "\n",
230 | " # Get depth of directory for indenting the print text.\n",
231 | " depth = root.replace(dataset_path, \"\").count(os.sep)\n",
232 | " indent = \" \" * depth\n",
233 | "\n",
234 | " # Directories.\n",
235 | " directory_name = os.path.basename(root)\n",
236 | " print(f\"{indent}📂 {directory_name}\")\n",
237 | " \n",
238 | " # Files.\n",
239 | " for filename in sorted(files):\n",
240 |     "        print(f\"{indent}    📄 {filename}\")"
241 | ]
242 | }
243 | ],
244 | "metadata": {
245 | "kernelspec": {
246 | "display_name": "py_env_book",
247 | "language": "python",
248 | "name": "python3"
249 | },
250 | "language_info": {
251 | "codemirror_mode": {
252 | "name": "ipython",
253 | "version": 3
254 | },
255 | "file_extension": ".py",
256 | "mimetype": "text/x-python",
257 | "name": "python",
258 | "nbconvert_exporter": "python",
259 | "pygments_lexer": "ipython3",
260 | "version": "3.10.15"
261 | }
262 | },
263 | "nbformat": 4,
264 | "nbformat_minor": 2
265 | }
266 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT391C_sources.rng.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.sources.rng\n",
8 | "\n",
9 | "
"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "This advanced tutorial introduces the sources.rng module."
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "## 1. What is `rng`?\n",
33 | "\n",
34 |     "The `rng` module is an extension of both Numpy and Python random number generator objects. It lets the user instantiate several generators with different seeds, returned as lists.\n"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {},
40 | "source": [
41 |     "## 2. Instantiate Python random number generator objects.\n",
42 | "Generate a list of Python rng's and sample some numbers from them, followed by resetting the states and sampling once more."
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": 2,
48 | "metadata": {
49 | "execution": {
50 | "iopub.execute_input": "2022-06-29T20:33:47.187180Z",
51 | "iopub.status.busy": "2022-06-29T20:33:47.186679Z",
52 | "iopub.status.idle": "2022-06-29T20:33:50.691576Z",
53 | "shell.execute_reply": "2022-06-29T20:33:50.691075Z"
54 | }
55 | },
56 | "outputs": [
57 | {
58 | "name": "stdout",
59 | "output_type": "stream",
60 | "text": [
61 | "Python rng #0 yields a Random Number: 36\n",
62 | "Python rng #1 yields a Random Number: 83\n",
63 | "Python rng #2 yields a Random Number: 28\n",
64 | "Python rng #0 yields a Random Number: 36\n",
65 | "Python rng #1 yields a Random Number: 83\n",
66 | "Python rng #2 yields a Random Number: 28\n"
67 | ]
68 | }
69 | ],
70 | "source": [
71 | "from deeptrack.sources.rng import PythonRNG\n",
72 | "\n",
73 | "\n",
74 | "python_rng = PythonRNG(n_states=3, seed=123)\n",
75 | "states = python_rng._generate_states()\n",
76 | "\n",
77 | "for i, rng in enumerate(states):\n",
78 | " print(f\"Python rng #{i} yields a Random Number: {rng.randint(0, 100)}\")\n",
79 | "\n",
80 | "# Reset states to obtain the same numbers.\n",
81 | "python_rng.reset()\n",
82 | "new_states = python_rng._generate_states()\n",
83 | "\n",
84 | "for i, rng in enumerate(new_states):\n",
85 | " print(f\"Python rng #{i} yields a Random Number: {rng.randint(0, 100)}\")"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {},
91 | "source": [
92 |     "## 3. Instantiate Numpy random number generator objects.\n",
93 | "In the same way, we do it for Numpy rng's."
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": 3,
99 | "metadata": {},
100 | "outputs": [
101 | {
102 | "name": "stdout",
103 | "output_type": "stream",
104 | "text": [
105 | "Numpy rng #0 yields a Random Number: 4\n",
106 | "Numpy rng #1 yields a Random Number: 88\n",
107 | "Numpy rng #2 yields a Random Number: 55\n",
108 | "Numpy rng #0 yields a Random Number: 4\n",
109 | "Numpy rng #1 yields a Random Number: 88\n",
110 | "Numpy rng #2 yields a Random Number: 55\n"
111 | ]
112 | }
113 | ],
114 | "source": [
115 | "from deeptrack.sources.rng import NumpyRNG\n",
116 | "\n",
117 | "\n",
118 | "numpy_rng = NumpyRNG(n_states=3, seed=123)\n",
119 | "states = numpy_rng._generate_states()\n",
120 | "\n",
121 | "for i, rng in enumerate(states):\n",
122 | " print(f\"Numpy rng #{i} yields a Random Number: {rng.randint(0, 100)}\")\n",
123 | "\n",
124 | "# Reset states to obtain the same numbers.\n",
125 | "numpy_rng.reset()\n",
126 | "new_states = numpy_rng._generate_states()\n",
127 | "\n",
128 | "for i, rng in enumerate(new_states):\n",
129 | " print(f\"Numpy rng #{i} yields a Random Number: {rng.randint(0, 100)}\")"
130 | ]
131 | }
132 | ],
133 | "metadata": {
134 | "kernelspec": {
135 | "display_name": "py_env_book",
136 | "language": "python",
137 | "name": "python3"
138 | },
139 | "language_info": {
140 | "codemirror_mode": {
141 | "name": "ipython",
142 | "version": 3
143 | },
144 | "file_extension": ".py",
145 | "mimetype": "text/x-python",
146 | "name": "python",
147 | "nbconvert_exporter": "python",
148 | "pygments_lexer": "ipython3",
149 | "version": "3.10.15"
150 | }
151 | },
152 | "nbformat": 4,
153 | "nbformat_minor": 2
154 | }
155 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT393A_pytorch.features.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.pytorch.features\n",
8 | "\n",
9 | "
"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "This advanced tutorial introduces the pytorch.features module."
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": 1,
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "import numpy as np\n",
35 | "import torch"
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "metadata": {},
41 | "source": [
42 | "## 1. What is `pytorch.features`?\n",
43 | "\n",
44 | "The `pytorch.features` module contains a class `ToTensor` which transforms an input to a PyTorch tensor and lets the user specify the torch.device, expand dimensions, and permute to move the input channel first."
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "## 2. Convert a numpy array to a tensor"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [
59 | {
60 | "name": "stdout",
61 | "output_type": "stream",
62 | "text": [
63 | "tensor([1., 2., 3.])\n"
64 | ]
65 | }
66 | ],
67 | "source": [
68 | "from deeptrack.pytorch import ToTensor\n",
69 | "\n",
70 | "numpy_array = np.array([1.0, 2.0, 3.0])\n",
71 | "\n",
72 | "Tensor_converter = ToTensor(\n",
73 | " dtype=torch.float32,\n",
74 | " device=None, \n",
75 | " add_dim_to_number=False,\n",
76 | " permute_mode=\"numpy\",\n",
77 | ")\n",
78 | "\n",
79 | "torch_tensor = Tensor_converter(numpy_array)\n",
80 | "print(torch_tensor)"
81 | ]
82 | }
83 | ],
84 | "metadata": {
85 | "kernelspec": {
86 | "display_name": "py_env_book",
87 | "language": "python",
88 | "name": "python3"
89 | },
90 | "language_info": {
91 | "codemirror_mode": {
92 | "name": "ipython",
93 | "version": 3
94 | },
95 | "file_extension": ".py",
96 | "mimetype": "text/x-python",
97 | "name": "python",
98 | "nbconvert_exporter": "python",
99 | "pygments_lexer": "ipython3",
100 | "version": "3.10.15"
101 | }
102 | },
103 | "nbformat": 4,
104 | "nbformat_minor": 2
105 | }
106 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT399B_backend.pint_definition.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.backend.pint_definition\n",
""
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": 1,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
18 | ]
19 | },
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {},
23 | "source": [
24 | "This advanced tutorial introduces the pint_definition module."
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "## 1. What is `pint_definition`?\n",
32 | "\n",
33 | "The `pint_definition` module consolidates and extends the default definitions provided by Pint's \n",
34 | "`default_en.txt` and `constants_en.txt` files. It defines physical constants, \n",
35 | "unit systems, and project-specific adjustments necessary for DeepTrack2."
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "metadata": {},
41 | "source": [
42 | "## 2. Unit Quantities\n",
43 | "Pint lets us include units when calculating quantities.\n"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": 21,
49 | "metadata": {},
50 | "outputs": [
51 | {
52 | "name": "stdout",
53 | "output_type": "stream",
54 | "text": [
55 | "Wavelength: 550 nanometer\n",
56 | "Frequency: 545.4545454545453 terahertz\n"
57 | ]
58 | }
59 | ],
60 | "source": [
61 | "from pint import UnitRegistry\n",
62 | "from deeptrack.backend.pint_definition import pint_definitions\n",
63 | "\n",
64 | "# Initialize UnitRegistry with microscopy-related units\n",
65 | "units = UnitRegistry(pint_definitions.split(\"\\n\"))\n",
66 | "\n",
67 | "# Define wavelength in nanometers\n",
68 | "wavelength = 550 * units.nanometer # Green light\n",
69 | "\n",
70 | "c = 3e8 * units.meter / units.second\n",
71 | "\n",
72 | "frequency = c / wavelength\n",
73 | "\n",
74 | "print(f\"Wavelength: {wavelength}\")\n",
75 | "print(f\"Frequency: {frequency.to(units.terahertz)}\") "
76 | ]
77 | },
78 | {
79 | "cell_type": "markdown",
80 | "metadata": {},
81 | "source": [
82 | "## 3. Diffraction Limit"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": 22,
88 | "metadata": {},
89 | "outputs": [
90 | {
91 | "name": "stdout",
92 | "output_type": "stream",
93 | "text": [
94 | "Resolution limit: 178.57142857142858 nanometer\n"
95 | ]
96 | }
97 | ],
98 | "source": [
99 | "Numerical_aperture = 1.4 \n",
100 | "wavelength = 500 * units.nanometer\n",
101 | "resolution = wavelength / (2 * Numerical_aperture)\n",
102 | "\n",
103 | "print(f\"Resolution limit: {resolution.to(units.nanometer)}\")"
104 | ]
105 | },
106 | {
107 | "cell_type": "markdown",
108 | "metadata": {},
109 | "source": [
110 | "## 4. Exposure Time and Motion Blur"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "metadata": {},
117 | "outputs": [
118 | {
119 | "name": "stdout",
120 | "output_type": "stream",
121 | "text": [
122 | "Motion blur: 19.999999999999996 nanometer\n"
123 | ]
124 | }
125 | ],
126 | "source": [
127 | "exposure_time = 2 * units.millisecond \n",
128 | "\n",
129 | "sample_velocity = 10 * units.micrometer / units.second \n",
130 | "\n",
131 | "blur_distance = sample_velocity * exposure_time\n",
132 | "\n",
133 | "print(f\"Motion blur: {blur_distance.to(units.nanometer)}\")"
134 | ]
135 | }
136 | ],
137 | "metadata": {
138 | "kernelspec": {
139 | "display_name": "Python 3",
140 | "language": "python",
141 | "name": "python3"
142 | },
143 | "language_info": {
144 | "codemirror_mode": {
145 | "name": "ipython",
146 | "version": 3
147 | },
148 | "file_extension": ".py",
149 | "mimetype": "text/x-python",
150 | "name": "python",
151 | "nbconvert_exporter": "python",
152 | "pygments_lexer": "ipython3",
153 | "version": "3.9.13"
154 | }
155 | },
156 | "nbformat": 4,
157 | "nbformat_minor": 2
158 | }
159 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT399C_backend.units.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.backend.units\n",
""
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": 1,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
18 | ]
19 | },
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {},
23 | "source": [
24 | "This advanced tutorial introduces the backend.units module."
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": 2,
30 | "metadata": {},
31 | "outputs": [],
32 | "source": [
33 | "import pint\n",
34 | "\n",
35 | "from deeptrack.backend import units"
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "metadata": {},
41 | "source": [
42 | "## 1. What is `units`?\n",
43 | "\n",
44 | "The `units` module defines functions and a class to handle unit conversions."
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "## 2. Convert Meters to Centimeters and Millimeters"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 3,
57 | "metadata": {},
58 | "outputs": [
59 | {
60 | "name": "stdout",
61 | "output_type": "stream",
62 | "text": [
"{'length': <Quantity(2500.0, 'millimeter')>, 'width': <Quantity(120.0, 'centimeter')>}\n"
64 | ]
65 | }
66 | ],
67 | "source": [
68 | "ureg = pint.UnitRegistry()\n",
69 | "\n",
70 | "# Create a conversion table\n",
71 | "conversions = units.ConversionTable(\n",
72 | " length=(ureg.meter, ureg.millimeter), \n",
73 | " width=(ureg.meter, ureg.centimeter),\n",
74 | ")\n",
75 | "\n",
76 | "values = {\n",
77 | " \"length\": 2.5, \n",
78 | " \"width\": 1.2, \n",
79 | "}\n",
80 | "\n",
81 | "converted_values = conversions.convert(**values)\n",
82 | "print(converted_values)"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "metadata": {},
88 | "source": [
89 | "## 3. Convert a List of Velocities"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": 4,
95 | "metadata": {},
96 | "outputs": [
97 | {
98 | "name": "stdout",
99 | "output_type": "stream",
100 | "text": [
"{'velocity': <Quantity([18. 36. 54.], 'kilometer / hour')>}\n"
102 | ]
103 | }
104 | ],
105 | "source": [
106 | "ureg = pint.UnitRegistry()\n",
107 | "\n",
108 | "# From m/s to km/h.\n",
109 | "conversions = units.ConversionTable(\n",
110 | " velocity=(ureg.meter / ureg.second, ureg.kilometer / ureg.hour),\n",
111 | ")\n",
112 | "\n",
113 | "values = {\n",
114 | " \"velocity\": [5, 10, 15], \n",
115 | "}\n",
116 | "\n",
117 | "converted_values = conversions.convert(**values)\n",
118 | "print(converted_values)"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "## 4. Convert Temperature"
126 | ]
127 | },
128 | {
129 | "cell_type": "code",
130 | "execution_count": 5,
131 | "metadata": {},
132 | "outputs": [
133 | {
134 | "name": "stdout",
135 | "output_type": "stream",
136 | "text": [
"{'temperature': <Quantity(26.850000000000023, 'degree_Celsius')>}\n"
138 | ]
139 | }
140 | ],
141 | "source": [
142 | "ureg = pint.UnitRegistry()\n",
143 | "\n",
144 | "conversions = units.ConversionTable(\n",
145 | " temperature=(ureg.kelvin, ureg.celsius), \n",
146 | ")\n",
147 | "\n",
148 | "values = {\n",
149 | " \"temperature\": 300,\n",
150 | "}\n",
151 | "\n",
152 | "converted_values = conversions.convert(**values)\n",
153 | "print(converted_values)"
154 | ]
155 | },
156 | {
157 | "cell_type": "markdown",
158 | "metadata": {},
159 | "source": [
160 | "## 5. Converting Multiple Quantities"
161 | ]
162 | },
163 | {
164 | "cell_type": "code",
165 | "execution_count": 6,
166 | "metadata": {},
167 | "outputs": [
168 | {
169 | "name": "stdout",
170 | "output_type": "stream",
171 | "text": [
"{'mass': <Quantity(5.0, 'kilogram')>, 'force': <Quantity(11.240447154985629, 'force_pound')>, 'energy': <Quantity(239.00573613766932, 'calorie')>}\n"
173 | ]
174 | }
175 | ],
176 | "source": [
177 | "ureg = pint.UnitRegistry()\n",
178 | "\n",
179 | "conversions = units.ConversionTable(\n",
180 | " mass=(ureg.gram, ureg.kilogram),\n",
181 | " force=(ureg.newton, ureg.pound_force),\n",
182 | " energy=(ureg.joule, ureg.calorie),\n",
183 | ")\n",
184 | "\n",
185 | "values = {\n",
186 | " \"mass\": 5000,\n",
187 | " \"force\": 50,\n",
188 | " \"energy\": 1000,\n",
189 | "}\n",
190 | "\n",
191 | "converted_values = conversions.convert(**values)\n",
192 | "print(converted_values)"
193 | ]
194 | }
195 | ],
196 | "metadata": {
197 | "kernelspec": {
198 | "display_name": "py_env_book",
199 | "language": "python",
200 | "name": "python3"
201 | },
202 | "language_info": {
203 | "codemirror_mode": {
204 | "name": "ipython",
205 | "version": 3
206 | },
207 | "file_extension": ".py",
208 | "mimetype": "text/x-python",
209 | "name": "python",
210 | "nbconvert_exporter": "python",
211 | "pygments_lexer": "ipython3",
212 | "version": "3.10.15"
213 | }
214 | },
215 | "nbformat": 4,
216 | "nbformat_minor": 2
217 | }
218 |
--------------------------------------------------------------------------------
/tutorials/3-advanced-topics/DTAT399E_backend.mie.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# deeptrack.backend.mie\n",
8 | "\n",
""
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 1,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "# !pip install deeptrack # Uncomment if running on Colab/Kaggle."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "This advanced tutorial introduces the backend.mie module."
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "## 1. What is `mie`?\n",
33 | "\n",
34 | "The `mie` module provides functions to perform Mie scattering calculations, including computation of spherical harmonics coefficients and related operations."
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {},
40 | "source": [
41 | "## 2. Calculate Mie Coefficients for a Solid Particle\n"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": 2,
47 | "metadata": {},
48 | "outputs": [
49 | {
50 | "name": "stdout",
51 | "output_type": "stream",
52 | "text": [
53 | "A coefficients: [1.04581094e-03-2.45891920e-02j 5.61754632e-06-3.39444972e-04j\n",
54 | " 3.54498410e-08-2.25608771e-06j 1.33314410e-10-8.61596363e-09j\n",
55 | " 3.25668489e-13-2.12414217e-11j]\n",
56 | "B coefficients: [2.22095481e-05-8.65285036e-04j 1.48987829e-07-6.11710129e-06j\n",
57 | " 5.86860069e-10-2.42490110e-08j 1.47867923e-12-6.12742871e-11j\n",
58 | " 2.58354155e-15-1.07228248e-13j]\n"
59 | ]
60 | }
61 | ],
62 | "source": [
63 | "from deeptrack.backend import mie\n",
64 | "\n",
65 | "\n",
66 | "particle_radius = 0.5\n",
67 | "relative_refract_index = 1.5 + 0.01j\n",
68 | "max_order = 5\n",
69 | "\n",
70 | "A, B = mie.coefficients(relative_refract_index, particle_radius, max_order)\n",
71 | "\n",
72 | "print(\"A coefficients:\", A)\n",
73 | "print(\"B coefficients:\", B)"
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {},
79 | "source": [
"## 3. Calculate Mie Coefficients for a Stratified Particle\n",
81 | "Here we only need to specify multiple radii for the different shells of the stratified particle."
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": 3,
87 | "metadata": {},
88 | "outputs": [
89 | {
90 | "name": "stdout",
91 | "output_type": "stream",
92 | "text": [
93 | "A coefficients: [4.51815568e-03-4.14667025e-02j 9.00848393e-05-8.90449307e-04j\n",
94 | " 1.30191942e-06-9.70936049e-06j 1.04767670e-08-6.35738175e-08j\n",
95 | " 5.30930040e-11-2.77931029e-10j]\n",
96 | "B coefficients: [2.08384699e-04-1.97056962e-03j 2.98780924e-06-2.15999592e-05j\n",
97 | " 2.41258487e-08-1.40444143e-07j 1.22573034e-10-6.07956361e-10j\n",
98 | " 4.27642496e-13-1.88504769e-12j]\n"
99 | ]
100 | }
101 | ],
102 | "source": [
103 | "particle_radii = [0.5, 0.6, 0.7]\n",
104 | "relative_refract_index = [1.5 + 0.01j, 1.3 + 0.02j, 1.1 + 0.03j]\n",
105 | "max_order = 5\n",
106 | "\n",
107 | "A, B = mie.stratified_coefficients(\n",
108 | " relative_refract_index, particle_radii, max_order,\n",
109 | ")\n",
110 | "\n",
111 | "print(\"A coefficients:\", A)\n",
112 | "print(\"B coefficients:\", B)"
113 | ]
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "metadata": {},
118 | "source": [
"## 4. Calculate Spherical Harmonics of the Mie Field "
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": 4,
125 | "metadata": {},
126 | "outputs": [
127 | {
128 | "name": "stdout",
129 | "output_type": "stream",
130 | "text": [
131 | "(5, 100)\n",
132 | "(5, 100)\n"
133 | ]
134 | },
135 | {
136 | "data": {
137 | "text/plain": [
138 | "(-0.5, 99.5, 4.5, -0.5)"
139 | ]
140 | },
141 | "execution_count": 4,
142 | "metadata": {},
143 | "output_type": "execute_result"
144 | },
145 | {
146 | "data": {
147 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAxoAAAA7CAYAAAD8WPxcAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAALn0lEQVR4nO3dT2wc1R3A8d/szO56nRjHJARBSuIQFENy6KEUqSekBBBIgCpxKVIOOQSCIBWqoKKngnzrxTkhI3IIAnHjAqlcWT4AEZGRqkpFQv2jKlgcmpAqFDtZb9a7OzO97bz3G+/bXfM222y+HynSvP29mXme2RnPy/u9cZCmaSoAAAAA4FFh2A0AAAAAMHroaAAAAADwjo4GAAAAAO/oaAAAAADwjo4GAAAAAO/oaAAAAADwjo4GAAAAAO/oaAAAAADwjo4GAAAAAO/oaAAAAAzZ8ePHZXp6etjNALyio4FbShAEPf377LPPht1UAABEROS9996zfkeNjY3JwYMH5dSpU3LlypVhNw8YmGjYDQD68cEHH1jl999/X5aWlnKfP/TQQzezWQAAdDU7Oyv79++Xer0uX3zxhczPz8vCwoJ8/fXXcubMGUmSZNhNBLyio4FbyrFjx6zyl19+KUtLS7nPAQD4f/PUU0/Jww8/LCIiJ06ckJ07d8rc3Jx8/PHH8vzzzw+5dYB/pE5h5Jw9e1aOHDkiu3fvlnK5LIcOHZL5+flcvSAI5K233sp9Pj09LcePHx98QwEAt7UjR46IiMjKygpzNDCSGNHAyJmfn5fDhw/Ls88+K1EUyblz5+Tll1+WJEnklVdeGXbzAAAQEZGLFy+KiMjOnTuH3BJgMOhoYOR8/vnnUqlU2uVTp07Jk08+KXNzc3Q0AABDs7a2JlevXpV6vS4XLlyQ2dlZqVQq8vTTT8vy8vKwmwd4R0cDI8fsZKytrUmz2ZRHH31UFhcXZW1tTSYnJ4fYOgDA7eqxxx6zyvv27ZMPP/xQ9uzZM6QWAYNFRwMj58KFC/Lmm2/K8vKy1Go1K0ZHAwAwLG+//bYcPHhQoiiSu+++W2ZmZqRQYLosRhcdDYyUixcvytGjR+XBBx+Uubk5ue+++6RUKsnCwoKcPn26p1cHxnF8E1oKALjdPPLII+23TgG3AzoaGCnnzp2TjY0N+eSTT2Tv3r3tzz/99NNc3ampKVldXbU+azQacvny5UE3EwAAYOQxXoeREoahiIikadr+bG1tTc6ePZure+DAATl//rz12bvvvsuIBgAAgAeMaGCkPPHEE1IqleSZZ56RkydPSrValTNnzsju3btzIxUnTpyQl156SZ577jl5/PHH5auvvpLFxUXZtWvXkFoPAAAwOhjRwEiZmZmRjz76SIIgkNdff13eeecdefHFF+XVV1/N1X3hhRfkjTfekPPnz8trr70mKysrsrS0JNu2bRtCywEAAEZLkJo5JgAAAADgASMaAAAAALyjowEAAADAOzoaAAAAALyjowEAAADAOzoaAAAAALyjowEAAADAu57/YN8Df5jb2h6Cra2mDeQdvJ7aNuhtpgNpp6cjesv8vAPY5oC26+3n93WOrW363+Stss10yNfMtyd/62f/I2Zm9vSwmwAAt61//v43zjgjGgAAAAC8o6MBAAAAwDs6GgAAAAC863mORrrvhl02EsnTRNVNgk2XRUTELOtY2qGejqlU6cDaph0zE96DXEwcMXv/Vnq2qmvVdO1Dp3gnjpjZbldqeG491zbNguP49rvPHm05xX0gE3S2aIv59Vued+FYL7dN1z706Q46x3repzqhVkz9F0av+8udamM7+Z9X7d/cp/55rZjai7mPgr4QOm9TzLqumNpmYJSDXMxYHsQcmxE09Q/7puu81szvWl/Xi6Nyz9dLj/W6tqX3uj72eTOOU8/766fuoI5pp/Vu9v66GeZx6tJO93bSHuv1HvPRbufd+Mdcd677/E3/7vXYlj6f
ZxjRAAAAAOAdHQ0AAAAA3vWcOnXnnypWuWUU4zF7HCUeM5ZL9nbicjY0kxTtWFIyhsyKKi3CLBdVfpJRLkT2eoVCFgsje70wTDatJyISqXJopjuo4aXQqFtQMdcIk67bSeIY68plY5kpbWo9Mxar1DRdN06yPqjef2KsmySFzrHYjpn7SOLOKXWpjpnlXCpethio9cxyEEvHmIhKndOpcca6hdw+HOslmy/r9XIx13qudqZp57qu1Dhnip1jPYetpnekjpSrtKC+s6EqmylQodqOcadLdMwo63tSGjnuV8Y9KS3p+1V28AN134lK2ckvjzWt2LZyo708Wa4Luqv+as0qx8Z9J1b3IPOepO9dZgpwqu9drnuQFbNDrrRe6/6kU26TzjFnCq7zXuK4d/axzU7bz20nl+LsiPVxPzLrdj02nbbjvMc5grdSNqMz7abHFLctpu52Tet11jWeBfR/hTvWy9XtFHOl9eqY8dyX+93k2qZz/51TftOwj7Rec5u59YznVRULjOfeQLXbfA4uhOoZ2CiHoeOmsAlGNAAAAAB4R0cDAAAAgHd0NAAAAAB41/McjWO/W7DKy6v3t5f/9cNdVuzaf7dnhVU7sbl4LevblFftJLtiNSuHN+y8sqielaMNe71Cs2Asq3y0ON10uSudD26Udf53EgWbLouIJEUzZq8Xl4xY0RHT81yMsmuei17Pjul5Luq4mfNeSvYEhyjKysWiHStFLWPZjpVDIzc9bFmxohGLVOJtyairY+Y8l4IrmbiLJO08J6VlxBpx1DHWjO0vxoZRd0PFGq3QWLa32WhmsVbLXi9pZOW0qf6foKmui4ZxXTR0TDrHjGkDYUPsWMOMqflQxnrRhoq1zHo6lm66LKLnsqjrV5cdzPkcem6HdY0W1bkvZ+WWnotWMc79diskze1G7A77PLV2ZMuTE/Zrw++f/L69/Isd3wi6u+eXf7fKhYmJbHnbuF15PJtcmIyPWaFkPLuZJhX7mozLobGsvgdGOS52/m7FuTk+2XKqf28Yu8//vrHLVo63zhU319Uxc/5RH7nwrhz+QUxhcL5Jc6uvd3fOEQk6x1xzQvqZr+J8tb6q65rb0mPMdSzy99Xe1tOc82wGwfXa1n5i5rNdbh6GUVnFzLl+uTmBuWvNWE9d69Z16Iqp57nU8Qp185ktUs9v5XL2i3q8ZM8RnChvtJenyjUrNlXKyneVqtIPRjQAAAAAeEdHAwAAAIB3PadO/fHwlFVuHc1SpzZ+WrYrH8iGaib2XrNCh+/6rr18aOKyFXugfKW9fG/xByt2R5AN6RTVWGNsjIXVVC7RapINnX8f2/kNl5o72svfbUxasX/Xd1jlK7VsOP6HdXs4fn09G4KPq/YhDavZmFpYU6li61k5WrdCEtWyobDi953TyMK6egXZhpHy1LBjQct4rVmsYv2koZhDjZF9vNNi9l1IIrsfG5ezcq2sY51TVFrm65J1zHrNst3OuJIay+qvB1fs4cSwYr5y1M4XGjeGGrcbQ4siIpOl7BWkd47bJ/GOaMNYtlNkxo2cpPGCvb+ykYNU1O/ldWiq8VuzXIvta7RqlK+17FdXrzaz8mpDxepZ+Vrd3matlpXjqv29KNSytkRV+9xHxnVQum6FpFjNzmGpap/D4vVY1c3ys8KqfZ6CG9kxDjZUPljDGD7Wr7YMjWNasn+mZCI7Fq0p+zjd2JWNc6/fY5+X6k+yn79asYeup/dkqVO/nvpW0N3ipb9a5b8Y5/fPN/Zbsb/V7m0vf1PdZcX+cz27x19X39HmenYOA3UfD29kdcO6ihlfw4J9qu30RTuTVCIjdTj/am5V3mKqy1b/6rAzVcsZ6y0NxRlT8f7233kf9mtMHa8f7SONzNpG51BXPaeOudLBHGldztcee3r1ekH/Gkt6i+nnEtf+C45XxjtT6sxgH9eL/ep1FXO9Qj2XJtk5Td5KvVSP2fGY+Xyj/5SBsW/1enUzTX2Xema5f/vV9vKh
8UtW7OeVlfbyz8qqoV0wogEAAADAOzoaAAAAALyjowEAAADAuyBNdVIyAAAAAPw4jGgAAAAA8I6OBgAAAADv6GgAAAAA8I6OBgAAAADv6GgAAAAA8I6OBgAAAADv6GgAAAAA8I6OBgAAAADv6GgAAAAA8O5/ttnMCeDpufcAAAAASUVORK5CYII=",
148 | "text/plain": [
149 | ""
150 | ]
151 | },
152 | "metadata": {},
153 | "output_type": "display_data"
154 | }
155 | ],
156 | "source": [
157 | "import numpy as np\n",
158 | "from matplotlib import pyplot as plt\n",
159 | "\n",
160 | "\n",
161 | "x = np.linspace(-1, 1, 100)\n",
162 | "L = 5\n",
163 | "pi, tau = mie.harmonics(x, L)\n",
164 | "\n",
165 | "print(pi.shape)\n",
166 | "print(tau.shape)\n",
167 | "\n",
168 | "fig, axes = plt.subplots(1, 2, figsize=(10, 10))\n",
169 | "\n",
170 | "axes[0].imshow(tau, cmap='viridis') \n",
171 | "axes[0].set_title('Tau')\n",
172 | "axes[0].axis('off') \n",
173 | "\n",
174 | "axes[1].imshow(pi, cmap='viridis') \n",
175 | "axes[1].set_title('Pi')\n",
176 | "axes[1].axis('off') "
177 | ]
178 | }
179 | ],
180 | "metadata": {
181 | "kernelspec": {
182 | "display_name": "py_env_book",
183 | "language": "python",
184 | "name": "python3"
185 | },
186 | "language_info": {
187 | "codemirror_mode": {
188 | "name": "ipython",
189 | "version": 3
190 | },
191 | "file_extension": ".py",
192 | "mimetype": "text/x-python",
193 | "name": "python",
194 | "nbconvert_exporter": "python",
195 | "pygments_lexer": "ipython3",
196 | "version": "3.10.15"
197 | }
198 | },
199 | "nbformat": 4,
200 | "nbformat_minor": 2
201 | }
202 |
--------------------------------------------------------------------------------
/tutorials/test_notebooks.py:
--------------------------------------------------------------------------------
import glob
import os
import subprocess
3 |
# HOW TO:
# go to root and run pip install -e .
# Run python tutorials/test_notebooks.py
7 |
8 |
def test_notebooks():
    """Execute every notebook under the search root and report failures.

    Notebooks with uncommitted changes (per ``git diff``) are assumed to
    have been executed since the last commit and are skipped. Each
    remaining notebook is executed in place with ``jupyter nbconvert``;
    notebooks whose execution exits non-zero are collected and reported.

    Raises
    ------
    ValueError
        If one or more notebooks failed to execute.
    """
    # NOTE(review): this script lives in tutorials/ but globs "examples" —
    # confirm whether the search root should be "tutorials" instead.
    notebooks = glob.glob(
        os.path.join("examples", "**", "*.ipynb"), recursive=True
    )
    failed_runs = []
    for notebook in notebooks:
        print(f"Testing notebook: {notebook}...")

        # Skip notebooks modified since the last git commit: they were
        # presumably already executed, so re-running them is redundant.
        # List-form argv (shell=False) avoids quoting issues with spaces
        # or shell metacharacters in notebook paths.
        diff = subprocess.run(
            ["git", "diff", "--name-only", notebook],
            capture_output=True,
            text=True,
        )
        if diff.stdout.strip():
            print("Notebook already ran since last git commit... skipping")
            continue

        # Execute the notebook in place with no cell timeout.
        result = subprocess.run(
            [
                "jupyter", "nbconvert",
                "--to", "notebook",
                "--execute",
                "--inplace",
                "--ExecutePreprocessor.timeout=None",
                notebook,
            ]
        )

        # Collect notebooks whose execution failed.
        if result.returncode != 0:
            failed_runs.append(notebook)

    if failed_runs:
        print("Failed runs:")
        for notebook in failed_runs:
            print(f"\t{notebook}")
        raise ValueError("Some notebooks failed to run.")
    else:
        print("All notebooks ran successfully.")
39 |
40 |
if __name__ == "__main__":
    # Script entry point: execute all notebooks and raise on any failure.
    print("Testing notebooks...")
    test_notebooks()
44 |
--------------------------------------------------------------------------------