├── .gitattributes ├── .github └── workflows │ ├── test-conda.yml │ └── test-pip.yml ├── .gitignore ├── .travis.yml ├── .zenodo.json ├── LICENSE ├── MANIFEST.in ├── README.md ├── benchmarks ├── benchmarks.db ├── benchmarks.py ├── maxima_benchmarks.ipynb ├── numba_benchmarks.ipy ├── simple_benchmarks.ipy ├── source │ └── vbench │ │ ├── figures │ │ └── locate_artificial_sparse.png │ │ └── locate_artificial_sparse.txt ├── suite.py └── test_perf.py ├── doc ├── Makefile ├── _static │ └── .gitignore ├── _templates │ └── autosummary │ │ └── class.rst ├── api.rst ├── conf.py ├── featured-thumbnails │ ├── fluorescent-particles-in-cfs.png │ ├── foam.png │ ├── interfacial-particles.png │ ├── large-particle-in-liquid-crystal.png │ ├── rearrangements-and-strain.png │ ├── tracking-sphere.png │ └── trajectories-in-water.png ├── index.rst ├── installation.rst ├── introduction.rst ├── releases │ ├── v0.3.0.txt │ ├── v0.3.1.txt │ ├── v0.4.txt │ ├── v0.5.txt │ └── v0.6.txt ├── tutorial.rst ├── tutorial │ ├── Makefile │ └── tools │ │ ├── nb_to_doc.py │ │ └── nbstripout └── whatsnew.rst ├── doi.png ├── examples └── README.md ├── reproducibility_data.py ├── setup.cfg ├── setup.py ├── soft-matter-docs-deploy.enc ├── test_perf.sh ├── trackpy ├── __init__.py ├── _version.py ├── api.py ├── artificial.py ├── diag.py ├── feature.py ├── filtering.py ├── find.py ├── framewise_data.py ├── linking │ ├── __init__.py │ ├── find_link.py │ ├── legacy.py │ ├── linking.py │ ├── partial.py │ ├── subnet.py │ ├── subnetlinker.py │ └── utils.py ├── locate_functions │ ├── __init__.py │ └── brightfield_ring.py ├── masks.py ├── motion.py ├── plots.py ├── predict.py ├── preprocessing.py ├── refine │ ├── __init__.py │ ├── brightfield_ring.py │ ├── center_of_mass.py │ └── least_squares.py ├── static.py ├── tests │ ├── README.rst │ ├── __init__.py │ ├── common.py │ ├── data │ │ ├── reproduce_duplicate_track_assignment.npy │ │ ├── reproducibility_v0.4.npz │ │ └── sparse_trajectories.npy │ ├── locate │ │ └── 
test_brightfield_ring.py │ ├── test_correlations.py │ ├── test_feature.py │ ├── test_feature_saving.py │ ├── test_find.py │ ├── test_find_link.py │ ├── test_leastsq.py │ ├── test_legacy_linking.py │ ├── test_linking.py │ ├── test_mask.py │ ├── test_misc.py │ ├── test_motion.py │ ├── test_plot_traj_labeling.py │ ├── test_plots.py │ ├── test_predict.py │ ├── test_preprocessing.py │ ├── test_reproducibility.py │ ├── test_static.py │ ├── video │ │ ├── bulk-water_frame0.npy │ │ ├── bulk-water_frame1.npy │ │ ├── image_sequence │ │ │ ├── T76S3F00001.png │ │ │ ├── T76S3F00002.png │ │ │ ├── T76S3F00003.png │ │ │ ├── T76S3F00004.png │ │ │ └── T76S3F00005.png │ │ ├── seq_frame0.npy │ │ ├── seq_frame1.npy │ │ ├── stuck.tif │ │ ├── stuck_frame0.npy │ │ └── stuck_frame1.npy │ ├── water │ │ └── bulk-water.mov │ └── wire │ │ ├── horizontal expected result.png │ │ ├── horizontal_frame.npy │ │ ├── oblique expected result.png │ │ ├── oblique_frame.npy │ │ ├── vertical expected result.png │ │ └── vertical_frame.npy ├── tracking.py ├── try_numba.py ├── uncertainty.py └── utils.py └── versioneer.py /.gitattributes: -------------------------------------------------------------------------------- 1 | trackpy/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.github/workflows/test-conda.yml: -------------------------------------------------------------------------------- 1 | name: Conda 2 | 3 | on: [push, pull_request, workflow_dispatch] 4 | 5 | jobs: 6 | TestConda: 7 | name: ${{ matrix.os }} - Python ${{ matrix.python }} 8 | runs-on: ${{ matrix.os }} 9 | strategy: 10 | fail-fast: false 11 | matrix: 12 | os: [ubuntu-latest, macos-latest, windows-latest] 13 | python: ["3.9", "3.11", "3.13"] 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | 18 | - uses: conda-incubator/setup-miniconda@v3 19 | with: 20 | auto-update-conda: true 21 | python-version: ${{ matrix.python }} 22 | channels: conda-forge 23 | 24 | - name: Initialize 
conda 25 | shell: bash -el {0} 26 | run: | 27 | conda init 28 | 29 | - name: Install packages 30 | shell: bash -el {0} # Reads .profile so conda will work 31 | run: | 32 | conda create --name test python=${{ matrix.python }} pytest numpy scipy matplotlib pandas pytables numba scikit-learn pyyaml looseversion pip 33 | conda activate test 34 | pip install -v --no-deps -e . 35 | python -V 36 | conda info 37 | conda list 38 | 39 | - name: Run tests 40 | shell: bash -el {0} # Reads .profile so conda will work 41 | run: | 42 | conda activate test 43 | pytest trackpy 44 | -------------------------------------------------------------------------------- /.github/workflows/test-pip.yml: -------------------------------------------------------------------------------- 1 | name: Pip 2 | 3 | on: [push, pull_request, workflow_dispatch] 4 | 5 | jobs: 6 | TestLinux: 7 | name: Python ${{ matrix.python }} ${{ matrix.display_name }} 8 | runs-on: ubuntu-latest 9 | strategy: 10 | fail-fast: false 11 | matrix: 12 | include: 13 | - python: "3.9" 14 | display_name: "2020" 15 | deps: "numpy==1.19.* scipy==1.5.* matplotlib==3.3.* pandas==1.1.* tables==3.6.* scikit-learn==0.24.* numba==0.53.* llvmlite==0.36.*" 16 | - python: "3.10" 17 | display_name: "2021" 18 | deps: "numpy==1.22.* scipy==1.7.* matplotlib==3.5.* pandas==1.3.* tables==3.7.* scikit-learn==1.0.* numba==0.55.* llvmlite==0.38.*" 19 | - python: "3.11" 20 | display_name: "2022" 21 | deps: "numpy==1.24.* scipy==1.9.* matplotlib==3.6.* pandas==2.0.* tables==3.8.* scikit-learn==1.1.* numba==0.57.* llvmlite==0.40.*" 22 | - python: "3.12" 23 | display_name: "2023" 24 | deps: "numpy==1.26.* scipy==1.11.* matplotlib==3.8.* pandas==2.1.* tables==3.9.* scikit-learn==1.3.* numba==0.59.* llvmlite==0.42.*" 25 | - python: "3.13" 26 | display_name: "2024" 27 | deps: "numpy==2.1.* scipy==1.14.* matplotlib==3.9.* pandas==2.2.* tables==3.10.* scikit-learn==1.5.* numba==0.61.* llvmlite==0.44.*" 28 | - python: "3.13" 29 | display_name: "latest (no 
numba)" 30 | deps: "numpy scipy matplotlib pandas tables scikit-learn" 31 | 32 | 33 | steps: 34 | - uses: actions/checkout@v4 35 | 36 | - name: Set up Python ${{ matrix.python }} 37 | uses: actions/setup-python@v5 38 | with: 39 | python-version: ${{ matrix.python }} 40 | 41 | - name: Install HDF5 library 42 | run: sudo apt-get install libhdf5-dev 43 | 44 | - name: Install python dependencies 45 | shell: bash 46 | run: | 47 | pip install --disable-pip-version-check --upgrade pip setuptools wheel 48 | pip install -v -e .[test] ${{ matrix.deps }}; 49 | pip list 50 | 51 | - name: Run tests 52 | shell: bash 53 | run: | 54 | pytest trackpy 55 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[co] 2 | 3 | # Packages 4 | *.egg 5 | *.egg-info 6 | dist 7 | build 8 | eggs 9 | parts 10 | bin 11 | var 12 | sdist 13 | develop-eggs 14 | .installed.cfg 15 | 16 | # Installer logs 17 | pip-log.txt 18 | 19 | # Unit test / coverage reports 20 | .coverage 21 | .tox 22 | 23 | #Translations 24 | *.mo 25 | 26 | #Mr Developer 27 | .mr.developer.cfg 28 | 29 | *.pro 30 | *.log 31 | *.pyc 32 | *.so 33 | *~ 34 | *.swp 35 | 36 | MANIFEST 37 | trackpy/version.py 38 | examples/.ipynb_checkpoints 39 | examples/feature_log* 40 | doc/_build 41 | doc/generated 42 | doc/tutorial/*.html 43 | doc/tutorial/*.rst 44 | doc/tutorial/*_files 45 | .idea 46 | doc/examples/index.rst 47 | soft-matter-docs-deploy 48 | 49 | # VS Code 50 | .vscode 51 | .venv -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | install: 4 | - conda update --yes conda 5 | - conda config --append channels conda-forge 6 | - conda create -n testenv --yes python=3.8 numpy=1.19.2 scipy=1.6.2 matplotlib=3.3.4 pillow=8.2.0 pandas=1.2.3 scikit-image=0.17.2 
pytables=3.6.1 numba=0.53.1 scikit-learn=0.24.1 pyyaml=5.4.1 ipython=7.22 sphinx=3.5.3 numpydoc=1.1.0 nbconvert=6.0.7 sphinx_bootstrap_theme=0.8.0 7 | - source activate testenv 8 | 9 | # for debugging... 10 | - echo $PATH 11 | - which python 12 | - conda info 13 | - conda list 14 | 15 | before_install: 16 | - wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh 17 | - chmod +x miniconda.sh 18 | - ./miniconda.sh -b -p /home/travis/mc 19 | - export PATH=/home/travis/mc/bin:$PATH 20 | 21 | script: 22 | - | 23 | cd $TRAVIS_BUILD_DIR/.. 24 | git clone https://github.com/soft-matter/trackpy-examples.git 25 | cd trackpy/doc 26 | # this expects the notebooks to be in ../../../trackpy-examples/notebooks/ 27 | make notebooks 28 | make html 29 | 30 | after_success: 31 | - | 32 | if [[ $TRAVIS_PULL_REQUEST == false && $TRAVIS_REPO_SLUG == 'soft-matter/trackpy' && $TRAVIS_BRANCH == 'master' ]]; then 33 | echo "Uploading documentation" 34 | cd $TRAVIS_BUILD_DIR 35 | openssl aes-256-cbc -K $encrypted_23ba5de49536_key -iv $encrypted_23ba5de49536_iv -in soft-matter-docs-deploy.enc -out soft-matter-docs-deploy -d 36 | eval `ssh-agent -s` 37 | chmod 600 soft-matter-docs-deploy 38 | ssh-add soft-matter-docs-deploy 39 | git config --global user.email "Travis@nomail" 40 | git config --global user.name "Travis" 41 | git config --global push.default simple 42 | cd .. 
43 | git clone git@github.com:soft-matter/soft-matter.github.io.git ./doc-repo 44 | cd doc-repo/trackpy 45 | git checkout --orphan temp_branch 46 | git rm -rf ./dev 47 | mv $TRAVIS_BUILD_DIR/doc/_build/html ./dev 48 | if [ -n "$TRAVIS_TAG" ]; then 49 | cp -R dev $TRAVIS_TAG; 50 | fi 51 | git add -A 52 | git commit -m "Docs build of trackpy commit $TRAVIS_COMMIT" 53 | git branch -D master 54 | git branch -m master 55 | git push --set-upstream origin master --force 56 | fi 57 | -------------------------------------------------------------------------------- /.zenodo.json: -------------------------------------------------------------------------------- 1 | { 2 | "license": "BSD-3-Clause", 3 | "upload_type": "software", 4 | "creators": [ 5 | { 6 | "orcid": "0000-0002-5947-6017", 7 | "name": "Allan, Daniel B." 8 | }, 9 | { 10 | "orcid": "0000-0003-4692-608X", 11 | "name": "Caswell, Thomas" 12 | }, 13 | { 14 | "orcid": "0000-0003-0746-0547", 15 | "name": "Keim, Nathan C." 16 | }, 17 | { 18 | "orcid": "0000-0002-0488-2237", 19 | "name": "van der Wel, Casper M." 20 | }, 21 | { 22 | "orcid": "0000-0003-3925-5732", 23 | "name": "Verweij, Ruben W." 24 | } 25 | ], 26 | "access_right": "open" 27 | } 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Notice and Statement for the trackpy Project 2 | =================================================== 3 | 4 | Copyright (c) 2013-2014 trackpy contributors 5 | https://github.com/soft-matter/trackpy 6 | All rights reserved 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 
12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of the soft-matter organization nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include MANIFEST.in 2 | include setup.py 3 | include versioneer.py 4 | include trackpy/_version.py 5 | include README.md 6 | include LICENSE 7 | include doi.png 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | trackpy 2 | ======= 3 | 4 | [![Build status](https://github.com/soft-matter/trackpy/workflows/Pip/badge.svg)](https://github.com/soft-matter/trackpy/actions/) 5 | [![Build status](https://github.com/soft-matter/trackpy/workflows/Conda/badge.svg)](https://github.com/soft-matter/trackpy/actions/) 6 | [![DOI](https://zenodo.org/badge/4744355.svg)](https://zenodo.org/badge/latestdoi/4744355) 7 | 8 | What is it? 9 | ----------- 10 | 11 | **trackpy** is a Python package for particle tracking in 2D, 3D, and higher dimensions. 12 | [**Read the walkthrough**](http://soft-matter.github.io/trackpy/dev/tutorial/walkthrough.html) to skim or study an example project from start to finish. 13 | 14 | Documentation 15 | ------------- 16 | 17 | [**Read the documentation**](http://soft-matter.github.io/trackpy/) for 18 | 19 | - an introduction 20 | - tutorials on the basics, 3D tracking, and much, much more 21 | - easy [installation instructions](http://soft-matter.github.io/trackpy/dev/installation.html) 22 | - the reference guide 23 | 24 | If you use trackpy for published research, please 25 | [cite the release](http://soft-matter.github.io/trackpy/dev/introduction.html#citing-trackpy) 26 | both to credit the contributors, and to direct your readers to the exact 27 | version of trackpy they could use to reproduce your results. 
28 | -------------------------------------------------------------------------------- /benchmarks/benchmarks.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/benchmarks/benchmarks.db -------------------------------------------------------------------------------- /benchmarks/benchmarks.py: -------------------------------------------------------------------------------- 1 | from vbench.api import Benchmark, BenchmarkRunner 2 | from datetime import datetime 3 | 4 | common_setup = """ 5 | import mr 6 | import numpy as np 7 | import pandas as pd 8 | from pandas import DataFrame, Series 9 | 10 | def random_walk(N): 11 | return np.cumsum(np.random.randn(N)) 12 | """ 13 | 14 | setup = common_setup + """ 15 | 16 | def draw_gaussian_spot(image, pos, r): 17 | assert image.shape[0] != image.shape[1], \ 18 | "For stupid numpy broadcasting reasons, don't make the image square." 
19 | x, y = np.meshgrid(*np.array(map(np.arange, image.shape)) - pos) 20 | max_value = np.iinfo(image.dtype).max 21 | spot = max_value*np.exp(-(x**2 + y**2)/r).T 22 | image += spot 23 | 24 | def gen_random_locations(shape, count): 25 | np.random.seed(0) 26 | return np.array([map(np.random.randint, shape) for _ in xrange(count)]) 27 | 28 | def draw_spots(shape, locations, r): 29 | image = np.zeros(shape, dtype='uint8') 30 | for x in locations: 31 | draw_gaussian_spot(image, x, r) 32 | return image 33 | 34 | SHAPE = (1200, 1000) 35 | COUNT = 10 36 | R = 7 37 | locations = gen_random_locations(SHAPE, COUNT) 38 | img = draw_spots(SHAPE, locations, R) 39 | for module in ['mr', 'mr.feature', 'mr.core.feature']: 40 | try: 41 | locate = __import__(module).locate 42 | except ImportError: 43 | continue 44 | except AttributeError: 45 | continue 46 | else: 47 | break 48 | """ 49 | 50 | locate_artificial_sparse = Benchmark("locate(img, 7)", setup, ncalls=10, 51 | name='locate_artificial_sparse') 52 | 53 | setup = common_setup + """ 54 | # One 1D stepper 55 | N = 500 56 | f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)}) 57 | 58 | """ 59 | 60 | link_one_continuous_stepper = Benchmark("mr.link(f, 5)", setup, ncalls=5, 61 | name='link_one_continuous_stepper') 62 | 63 | link_trackpy_one_continuous_stepper = Benchmark("mr.link_trackpy(f, 5)", 64 | setup, ncalls=5, name='link_trackpy_one_continuous_stepper') 65 | 66 | setup = common_setup + """ 67 | N = 500 68 | Y = 2 69 | # Begin second feature one frame later than the first, so the probe labeling (0, 1) is 70 | # established and not arbitrary. 
71 | a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)}) 72 | b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 73 | 'frame': np.arange(1, N)}) 74 | f = pd.concat([a, b]).sort('frame') 75 | """ 76 | 77 | link_two_nearby_steppers = Benchmark("mr.link(f, 5)", setup, ncalls=5, 78 | name='link_two_nearby_steppers') 79 | 80 | link_trackpy_two_nearby_steppers = Benchmark("mr.link_trackpy(f, 5)", 81 | setup, ncalls=5, name='link_trackpy_two_nearby_steppers') 82 | 83 | setup = common_setup + """ 84 | np.random.seed(0) 85 | N = 100 86 | Y = 250 87 | M = 50 # margin, because negative values raise OutOfHash 88 | initial_positions = [(10, 11), (10, 18), (14, 15), (20, 21), (13, 13), 89 | (10, 10), (17, 19)] 90 | import itertools 91 | c = itertools.count() 92 | def walk(x, y): 93 | i = next(c) 94 | return DataFrame({'x': M + x + random_walk(N - i), 95 | 'y': M + y + random_walk(N - i), 96 | 'frame': np.arange(i, N)}) 97 | f = pd.concat([walk(*pos) for pos in initial_positions]) 98 | """ 99 | 100 | link_nearby_continuous_random_walks = Benchmark("mr.link(f, 5)", setup, 101 | ncalls=5, name='link_nearby_continuous_random_walks') 102 | 103 | link_trackpy_nearby_continuous_random_walks = \ 104 | Benchmark("mr.link_trackpy(f, 5)", setup, 105 | ncalls=5, name='link_nearby_continuous_random_walks') 106 | -------------------------------------------------------------------------------- /benchmarks/maxima_benchmarks.ipynb: -------------------------------------------------------------------------------- 1 | # must be run in ipython 2 | 3 | import numpy as np 4 | import trackpy as tp 5 | from trackpy.preprocessing import scale_to_gamut 6 | 7 | 8 | def b(command): 9 | get_ipython().magic(command) 10 | 11 | 12 | dummy_noise_image = scale_to_gamut(np.random.randint(0, 100, (100, 100)), np.uint8) 13 | real_image_raw = tp.ImageSequence('../trackpy/tests/video/image_sequence')[0] 14 | real_image = scale_to_gamut(tp.bandpass(real_image_raw, 1, 10, threshold=1), 
np.uint8) 15 | big_image = scale_to_gamut(tp.bandpass(np.tile(real_image_raw, (2, 5)), 1, 10, threshold=1), np.uint8) 16 | very_small_image = scale_to_gamut(tp.bandpass(real_image_raw[:200, :200], 1, 10, threshold=1), np.uint8) 17 | 18 | #print('Locate using Python Engine with Default Settings (Accurate)') 19 | #b(u"timeit tp.locate(real_image, 9, engine='python', preprocess=False)") 20 | 21 | print('1x: Find local_maxima only') 22 | b(u"timeit tp.feature.local_maxima(real_image, 9, 10)") 23 | 24 | print('10x: Find local_maxima only') 25 | b(u"timeit tp.feature.local_maxima(big_image, 9, 10)") 26 | 27 | print('~0.1x: Find local_maxima only') 28 | b(u"timeit tp.feature.local_maxima(very_small_image, 9, 10)") 29 | -------------------------------------------------------------------------------- /benchmarks/numba_benchmarks.ipy: -------------------------------------------------------------------------------- 1 | # must be run in ipython 2 | 3 | import numpy as np 4 | import trackpy as tp 5 | 6 | 7 | def b(command): 8 | get_ipython().magic(command) 9 | 10 | 11 | dummy_noise_image = np.random.randint(0, 100, (100, 100)) 12 | real_image_raw = tp.ImageSequence('../trackpy/tests/video/image_sequence')[0] 13 | real_image = tp.bandpass(real_image_raw, 1, 10, threshold=1) 14 | big_image = tp.bandpass(np.tile(real_image_raw, (2, 5)), 1, 10, threshold=1) 15 | 16 | print('Compiling Numba...') 17 | tp.locate(dummy_noise_image, 9, engine='numba') 18 | 19 | #print('Locate using Python Engine with Default Settings (Accurate)') 20 | #b(u"timeit tp.locate(real_image, 9, engine='python', preprocess=False)") 21 | 22 | print('10x: Locate using Python Engine with Default Settings (Accurate)') 23 | b("timeit tp.locate(big_image, 9, engine='python', preprocess=False)") 24 | 25 | print('10x: Locate using Python Engine with Fast Settings (Sloppy)') 26 | b("timeit tp.locate(big_image, 9, engine='python', preprocess=False, filter_before=False, filter_after=False, max_iterations=0, 
characterize=False)") 27 | 28 | print('1x: Locate using Numba Engine with Default Settings (Accurate)') 29 | b("timeit tp.locate(real_image, 9, engine='numba', preprocess=False)") 30 | 31 | print('10x: Locate using Numba Engine with Default Settings (Accurate)') 32 | b("timeit tp.locate(big_image, 9, engine='numba', preprocess=False)") 33 | 34 | print('10x: Locate using Numba Engine with Fast Settings (Sloppy)') 35 | b("timeit tp.locate(big_image, 9, engine='numba', preprocess=False, filter_before=False, filter_after=False, max_iterations=0, characterize=False)") 36 | 37 | -------------------------------------------------------------------------------- /benchmarks/simple_benchmarks.ipy: -------------------------------------------------------------------------------- 1 | # must be run in ipython 2 | 3 | import numpy as np 4 | import trackpy as tp 5 | 6 | 7 | def b(command): 8 | get_ipython().magic(command) 9 | 10 | 11 | dummy_noise_image = np.random.randint(0, 100, (100, 100)) 12 | real_image_raw = tp.ImageSequence('../trackpy/tests/video/image_sequence')[0] 13 | real_image = tp.bandpass(real_image_raw, 1, 10, threshold=1) 14 | 15 | print('Compiling Numba...') 16 | tp.locate(dummy_noise_image, 9, engine='numba') 17 | 18 | print('Locate using Python Engine with Default Settings (Accurate)') 19 | b("timeit tp.locate(real_image, 9, engine='python', preprocess=False)") 20 | 21 | print('Locate using Python Engine with Fast Settings (Sloppy)') 22 | b("timeit tp.locate(real_image, 9, engine='python', preprocess=False, filter_before=False, filter_after=False, max_iterations=0, characterize=False)") 23 | 24 | print('Locate using Numba Engine with Default Settings (Accurate)') 25 | b("timeit tp.locate(real_image, 9, engine='numba', preprocess=False)") 26 | 27 | print('Locate using Numba Engine with Fast Settings (Sloppy)') 28 | b("timeit tp.locate(real_image, 9, engine='numba', preprocess=False, filter_before=False, filter_after=False, max_iterations=0, characterize=False)") 
29 | 30 | -------------------------------------------------------------------------------- /benchmarks/source/vbench/figures/locate_artificial_sparse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/benchmarks/source/vbench/figures/locate_artificial_sparse.png -------------------------------------------------------------------------------- /benchmarks/source/vbench/locate_artificial_sparse.txt: -------------------------------------------------------------------------------- 1 | **Benchmark setup** 2 | 3 | .. code-block:: python 4 | 5 | 6 | import mr 7 | import numpy as np 8 | 9 | 10 | def draw_gaussian_spot(image, pos, r): 11 | assert image.shape[0] != image.shape[1], "For stupid numpy broadcasting reasons, don't make the image square." 12 | x, y = np.meshgrid(*np.array(map(np.arange, image.shape)) - pos) 13 | max_value = np.iinfo(image.dtype).max 14 | spot = max_value*np.exp(-(x**2 + y**2)/r).T 15 | image += spot 16 | 17 | def gen_random_locations(shape, count): 18 | np.random.seed(0) 19 | return np.array([map(np.random.randint, shape) for _ in xrange(count)]) 20 | 21 | def draw_spots(shape, locations, r): 22 | image = np.zeros(shape, dtype='uint8') 23 | for x in locations: 24 | draw_gaussian_spot(image, x, r) 25 | return image 26 | 27 | SHAPE = (1200, 1000) 28 | COUNT = 10 29 | R = 7 30 | locations = gen_random_locations(SHAPE, COUNT) 31 | img = draw_spots(SHAPE, locations, R) 32 | for module in ['mr', 'mr.feature', 'mr.core.feature']: 33 | try: 34 | locate = __import__(module).locate 35 | except ImportError: 36 | continue 37 | except AttributeError: 38 | continue 39 | else: 40 | break 41 | 42 | 43 | **Benchmark statement** 44 | 45 | .. code-block:: python 46 | 47 | locate(img, 7) 48 | 49 | **Performance graph** 50 | 51 | .. 
image:: vbench/figures/locate_artificial_sparse.png 52 | :width: 6in -------------------------------------------------------------------------------- /benchmarks/suite.py: -------------------------------------------------------------------------------- 1 | import getpass 2 | import sys 3 | import os 4 | from vbench.api import Benchmark, BenchmarkRunner 5 | from datetime import datetime 6 | 7 | USERNAME = getpass.getuser() 8 | 9 | if sys.platform == 'darwin': 10 | HOME = '/Users/%s' % USERNAME 11 | else: 12 | HOME = '/home/%s' % USERNAME 13 | 14 | try: 15 | import ConfigParser 16 | 17 | config = ConfigParser.ConfigParser() 18 | config.readfp(open(os.path.expanduser('~/.vbenchcfg'))) 19 | 20 | REPO_PATH = config.get('setup', 'repo_path') 21 | REPO_URL = config.get('setup', 'repo_url') 22 | DB_PATH = config.get('setup', 'db_path') 23 | TMP_DIR = config.get('setup', 'tmp_dir') 24 | except: 25 | REPO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) 26 | REPO_URL = 'git@github.com:danielballan/mr.git' 27 | DB_PATH = os.path.join(REPO_PATH, 'vb_suite/benchmarks.db') 28 | TMP_DIR = os.path.join(HOME, 'tmp/vb_mr') 29 | 30 | PREPARE = """ 31 | python setup.py clean 32 | """ 33 | BUILD = """ 34 | python setup.py build_ext --inplace 35 | """ 36 | dependencies = [] 37 | 38 | START_DATE = datetime(2012, 9, 19) # first full day when setup.py existed 39 | 40 | # repo = GitRepo(REPO_PATH) 41 | 42 | RST_BASE = 'source' 43 | 44 | def generate_rst_files(benchmarks): 45 | import matplotlib as mpl 46 | mpl.use('Agg') 47 | import matplotlib.pyplot as plt 48 | 49 | vb_path = os.path.join(RST_BASE, 'vbench') 50 | fig_base_path = os.path.join(vb_path, 'figures') 51 | 52 | if not os.path.exists(vb_path): 53 | print('creating %s' % vb_path) 54 | os.makedirs(vb_path) 55 | 56 | if not os.path.exists(fig_base_path): 57 | print('creating %s' % fig_base_path) 58 | os.makedirs(fig_base_path) 59 | 60 | for bmk in benchmarks: 61 | print('Generating rst file for %s' % bmk.name) 
62 | rst_path = os.path.join(RST_BASE, 'vbench/%s.txt' % bmk.name) 63 | 64 | fig_full_path = os.path.join(fig_base_path, '%s.png' % bmk.name) 65 | 66 | # make the figure 67 | plt.figure(figsize=(10, 6)) 68 | ax = plt.gca() 69 | bmk.plot(DB_PATH, ax=ax) 70 | 71 | start, end = ax.get_xlim() 72 | 73 | plt.xlim([start - 30, end + 30]) 74 | plt.savefig(fig_full_path, bbox_inches='tight') 75 | plt.close('all') 76 | 77 | fig_rel_path = 'vbench/figures/%s.png' % bmk.name 78 | rst_text = bmk.to_rst(image_path=fig_rel_path) 79 | with open(rst_path, 'w') as f: 80 | f.write(rst_text) 81 | 82 | ref = __import__('benchmarks') 83 | benchmarks = [v for v in ref.__dict__.values() if isinstance(v, Benchmark)] 84 | 85 | runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, 86 | BUILD, DB_PATH, TMP_DIR, PREPARE, 87 | always_clean=True, 88 | run_option='eod', start_date=START_DATE, 89 | module_dependencies=dependencies) 90 | 91 | if __name__ == '__main__': 92 | runner.run() 93 | generate_rst_files(benchmarks) 94 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 
16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | rm -rf tutorial/*_files/ 52 | rm -rf tutorial/*.rst 53 | 54 | notebooks: 55 | make -C tutorial 
notebooks 56 | 57 | html: 58 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 59 | 60 | dirhtml: 61 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 62 | @echo 63 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 64 | 65 | singlehtml: 66 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 67 | @echo 68 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 69 | 70 | pickle: 71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 72 | @echo 73 | @echo "Build finished; now you can process the pickle files." 74 | 75 | json: 76 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 77 | @echo 78 | @echo "Build finished; now you can process the JSON files." 79 | 80 | htmlhelp: 81 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 82 | @echo 83 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 84 | ".hhp project file in $(BUILDDIR)/htmlhelp." 85 | 86 | qthelp: 87 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 88 | @echo 89 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 90 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 91 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/trackpy.qhcp" 92 | @echo "To view the help file:" 93 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/trackpy.qhc" 94 | 95 | devhelp: 96 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 97 | @echo 98 | @echo "Build finished." 99 | @echo "To view the help file:" 100 | @echo "# mkdir -p $$HOME/.local/share/devhelp/trackpy" 101 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/trackpy" 102 | @echo "# devhelp" 103 | 104 | epub: 105 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 106 | @echo 107 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
108 | 109 | latex: 110 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 111 | @echo 112 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 113 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 114 | "(use \`make latexpdf' here to do that automatically)." 115 | 116 | latexpdf: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo "Running LaTeX files through pdflatex..." 119 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 120 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 121 | 122 | latexpdfja: 123 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 124 | @echo "Running LaTeX files through platex and dvipdfmx..." 125 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 126 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 127 | 128 | text: 129 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 130 | @echo 131 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 132 | 133 | man: 134 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 135 | @echo 136 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 137 | 138 | texinfo: 139 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 140 | @echo 141 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 142 | @echo "Run \`make' in that directory to run these through makeinfo" \ 143 | "(use \`make info' here to do that automatically)." 144 | 145 | info: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo "Running Texinfo files through makeinfo..." 148 | make -C $(BUILDDIR)/texinfo info 149 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 150 | 151 | gettext: 152 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 153 | @echo 154 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 
155 | 156 | changes: 157 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 158 | @echo 159 | @echo "The overview file is in $(BUILDDIR)/changes." 160 | 161 | linkcheck: 162 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 163 | @echo 164 | @echo "Link check complete; look for any errors in the above output " \ 165 | "or in $(BUILDDIR)/linkcheck/output.txt." 166 | 167 | doctest: 168 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 169 | @echo "Testing of doctests in the sources finished, look at the " \ 170 | "results in $(BUILDDIR)/doctest/output.txt." 171 | 172 | xml: 173 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 174 | @echo 175 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 176 | 177 | pseudoxml: 178 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 179 | @echo 180 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 181 | 182 | gh-pages: 183 | cd .. && git checkout gh-pages && git rm -rf . && touch .nojekyll && mv doc/_build/html/* . && rm -rf doc && git add -A && git commit -a --amend -m "Makefile is force-commiting new build." && git push upstream gh-pages -f 184 | -------------------------------------------------------------------------------- /doc/_static/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/doc/_static/.gitignore -------------------------------------------------------------------------------- /doc/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {% extends "!autosummary/class.rst" %} 2 | 3 | {% block methods %} 4 | {% if methods %} 5 | 6 | .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. 7 | .. 
autosummary:: 8 | :toctree: 9 | {% for item in all_methods %} 10 | {%- if not item.startswith('_') or item in ['__call__'] %} 11 | {{ name }}.{{ item }} 12 | {%- endif -%} 13 | {%- endfor %} 14 | 15 | {% endif %} 16 | {% endblock %} 17 | 18 | {% block attributes %} 19 | {% if attributes %} 20 | 21 | .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. 22 | .. autosummary:: 23 | :toctree: 24 | {% for item in all_attributes %} 25 | {%- if not item.startswith('_') %} 26 | {{ name }}.{{ item }} 27 | {%- endif -%} 28 | {%- endfor %} 29 | 30 | {% endif %} 31 | {% endblock %} 32 | -------------------------------------------------------------------------------- /doc/api.rst: -------------------------------------------------------------------------------- 1 | .. _api_ref: 2 | 3 | API reference 4 | ============= 5 | The core functionality of trackpy is grouped into three separate steps: 6 | 7 | 1. Locating features in an image 8 | 2. Refining feature coordinates to obtain subpixel precision 9 | 3. Identifying features through time, linking them into trajectories. 10 | 11 | Convenience functions for feature finding, refinement, and linking are readily available: 12 | 13 | .. autosummary:: 14 | :toctree: generated/ 15 | 16 | trackpy.locate 17 | trackpy.batch 18 | trackpy.link 19 | 20 | For more control on your tracking "pipeline", the following core functions are provided: 21 | 22 | 23 | Feature finding 24 | --------------- 25 | .. autosummary:: 26 | :toctree: generated/ 27 | 28 | trackpy.grey_dilation 29 | trackpy.find_link 30 | 31 | 32 | Coordinate refinement 33 | --------------------- 34 | .. autosummary:: 35 | :toctree: generated/ 36 | 37 | trackpy.refine_com 38 | trackpy.refine_leastsq 39 | 40 | Linking 41 | ------- 42 | .. 
autosummary:: 43 | :toctree: generated/ 44 | 45 | trackpy.link 46 | trackpy.link_iter 47 | trackpy.link_df_iter 48 | trackpy.link_partial 49 | trackpy.reconnect_traj_patch 50 | 51 | 52 | :func:`~trackpy.linking.link` and :func:`~trackpy.linking.link_df_iter` run 53 | the same underlying code. :func:`~trackpy.linking.link` operates on a single 54 | DataFrame containing data for an entire movie. 55 | :func:`~trackpy.linking.link_df_iter` streams through larger data sets, 56 | in the form of one DataFrame for each video frame. 57 | :func:`~trackpy.linking.link_iter` streams through a series of numpy 58 | ndarrays. 59 | :func:`~trackpy.linking.link_partial` can patch a region of trajectories in 60 | an already linked dataset. 61 | 62 | 63 | See the tutorial on large data sets for more. 64 | 65 | Static Analysis 66 | --------------- 67 | 68 | .. autosummary:: 69 | :toctree: generated/ 70 | 71 | trackpy.static.proximity 72 | trackpy.static.pair_correlation_2d 73 | trackpy.static.pair_correlation_3d 74 | trackpy.static.cluster 75 | 76 | Motion Analysis 77 | --------------- 78 | 79 | .. autosummary:: 80 | :toctree: generated/ 81 | 82 | trackpy.motion.msd 83 | trackpy.motion.imsd 84 | trackpy.motion.emsd 85 | trackpy.motion.compute_drift 86 | trackpy.motion.subtract_drift 87 | trackpy.motion.vanhove 88 | trackpy.motion.relate_frames 89 | trackpy.motion.velocity_corr 90 | trackpy.motion.direction_corr 91 | trackpy.motion.is_typical 92 | trackpy.motion.diagonal_size 93 | trackpy.motion.theta_entropy 94 | trackpy.motion.min_rolling_theta_entropy 95 | trackpy.filtering.filter_stubs 96 | trackpy.filtering.filter_clusters 97 | 98 | Prediction Framework 99 | -------------------- 100 | 101 | Trackpy extends the Crocker--Grier algoritm using a prediction framework, described in the prediction tutorial. 102 | 103 | .. 
autosummary:: 104 | :toctree: generated/ 105 | 106 | trackpy.predict.NullPredict 107 | trackpy.predict.ChannelPredict 108 | trackpy.predict.DriftPredict 109 | trackpy.predict.NearestVelocityPredict 110 | trackpy.predict.predictor 111 | trackpy.predict.instrumented 112 | 113 | Plotting Tools 114 | -------------- 115 | 116 | Trackpy includes functions for plotting the data in ways that are commonly useful. If you don't find what you need here, you can plot the data any way you like using matplotlib, seaborn, or any other plotting library. 117 | 118 | .. autosummary:: 119 | :toctree: generated/ 120 | 121 | trackpy.annotate 122 | trackpy.scatter 123 | trackpy.plot_traj 124 | trackpy.annotate3d 125 | trackpy.scatter3d 126 | trackpy.plot_traj3d 127 | trackpy.plot_displacements 128 | trackpy.subpx_bias 129 | trackpy.plot_density_profile 130 | 131 | These two are almost too simple to justify their existence -- just a convenient shorthand for a common plotting task. 132 | 133 | .. autosummary:: 134 | :toctree: generated/ 135 | 136 | trackpy.mass_ecc 137 | trackpy.mass_size 138 | 139 | Image Conversion 140 | ---------------- 141 | 142 | By default, :func:`~trackpy.feature.locate` applies a bandpass and a percentile-based 143 | threshold to the image(s) before finding features. You can turn off this functionality 144 | using ``preprocess=False, percentile=0``.) In many cases, the default bandpass, which 145 | guesses good length scales from the ``diameter`` parameter, "just works." But if you want 146 | to executre these steps manually, you can. 147 | 148 | .. 
autosummary:: 149 | :toctree: generated/ 150 | 151 | trackpy.find.percentile_threshold 152 | trackpy.preprocessing.bandpass 153 | trackpy.preprocessing.lowpass 154 | trackpy.preprocessing.scale_to_gamut 155 | trackpy.preprocessing.invert_image 156 | trackpy.preprocessing.convert_to_int 157 | 158 | Framewise Data Storage & Retrieval Interface 159 | -------------------------------------------- 160 | 161 | Trackpy implements a generic interface that could be used to store and 162 | retrieve particle tracking data in any file format. We hope that it can 163 | make it easier for researchers who use different file formats to exchange data. Any in-house format could be accessed using the same simple interface in trackpy. 164 | 165 | At present, the interface is implemented only for HDF5 files. There are 166 | several different implementations, each with different performance 167 | optimizations. :class:`~trackpy.framewise_data.PandasHDFStoreBig` is a good general-purpose choice. 168 | 169 | .. autosummary:: 170 | :toctree: generated/ 171 | 172 | trackpy.PandasHDFStore 173 | trackpy.PandasHDFStoreBig 174 | trackpy.PandasHDFStoreSingleNode 175 | trackpy.FramewiseData 176 | 177 | That last class cannot be used directly; it is meant to be subclassed 178 | to support other formats. See *Writing Your Own Interface* in the streaming tutorial for 179 | more. 180 | 181 | Logging 182 | ------- 183 | 184 | Trackpy issues log messages. This functionality is mainly used to report the 185 | progress of lengthy jobs, but it may be used in the future to report details of 186 | feature-finding and linking for debugging purposes. 187 | 188 | When trackpy is imported, it automatically calls `handle_logging()`, which sets 189 | the logging level and attaches a logging handler that plays nicely with 190 | IPython notebooks. You can override this by calling `ignore_logging()` and 191 | configuring the logger however you like. 192 | 193 | .. 
autosummary:: 194 | :toctree: generated/ 195 | 196 | trackpy.quiet 197 | trackpy.handle_logging 198 | trackpy.ignore_logging 199 | 200 | Utility functions 201 | ----------------- 202 | 203 | .. autosummary:: 204 | :toctree: generated/ 205 | 206 | trackpy.minmass_v03_change 207 | trackpy.minmass_v04_change 208 | trackpy.utils.fit_powerlaw 209 | 210 | Diagnostic functions 211 | -------------------- 212 | 213 | .. autosummary:: 214 | :toctree: generated/ 215 | 216 | trackpy.diag.performance_report 217 | trackpy.diag.dependencies 218 | 219 | Low-Level API (Advanced) 220 | ------------------------ 221 | 222 | Switching Between Numba and Pure Python 223 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 224 | 225 | Trackpy implements the most intensive (read: slowest) parts of the core feature-finding and linking algorithm in pure Python (with numpy) and also in `numba `_, which accelerates Python code. Numba can offer a major performance boost, but it is still relatively new, and it can be challenging to use. If numba is available, trackpy will use the numba implementation by default; otherwise, it will use pure Python. The following functions allow sophisticated users to manually switch between numba and pure-Python modes. This may be used, for example, to measure the performance of these two implementations on your data. 226 | 227 | .. autosummary:: 228 | :toctree: generated/ 229 | 230 | trackpy.enable_numba 231 | trackpy.disable_numba 232 | 233 | 234 | Low-Level Linking API 235 | ^^^^^^^^^^^^^^^^^^^^^ 236 | 237 | All of the linking functions in trackpy provide the same level of control over the linking algorithm itself. For almost all users, the functions above will be sufficient. But :func:`~trackpy.linking.link_df` and :func:`~trackpy.linking.link_df_iter` above do assume that the data is stored in a pandas DataFrame. For users who want to use some other iterable data structure, the functions below provide direct access to the linking code. 238 | 239 | .. 
autosummary:: 240 | :toctree: generated/ 241 | 242 | trackpy.link_iter 243 | trackpy.link 244 | 245 | And the following classes can be subclassed to implement a customized linking procedure. 246 | 247 | .. autosummary:: 248 | :toctree: generated/ 249 | 250 | trackpy.SubnetOversizeException 251 | 252 | Masks 253 | ^^^^^ 254 | 255 | These functions may also be useful for rolling your own algorithms: 256 | 257 | .. autosummary:: 258 | :toctree: generated/ 259 | 260 | trackpy.masks.binary_mask 261 | trackpy.masks.r_squared_mask 262 | trackpy.masks.x_squared_masks 263 | trackpy.masks.cosmask 264 | trackpy.masks.sinmask 265 | trackpy.masks.theta_mask 266 | trackpy.masks.gaussian_kernel 267 | trackpy.masks.mask_image 268 | trackpy.masks.slice_image 269 | 270 | Full API reference 271 | ------------------ 272 | 273 | A full overview of all modules and functions can be found below: 274 | 275 | .. autosummary:: 276 | :toctree: generated/ 277 | :recursive: 278 | 279 | trackpy 280 | 281 | .. 282 | Note: we excluded trackpy.tests in conf.py (autosummary_mock_imports) 283 | -------------------------------------------------------------------------------- /doc/featured-thumbnails/fluorescent-particles-in-cfs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/doc/featured-thumbnails/fluorescent-particles-in-cfs.png -------------------------------------------------------------------------------- /doc/featured-thumbnails/foam.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/doc/featured-thumbnails/foam.png -------------------------------------------------------------------------------- /doc/featured-thumbnails/interfacial-particles.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/doc/featured-thumbnails/interfacial-particles.png -------------------------------------------------------------------------------- /doc/featured-thumbnails/large-particle-in-liquid-crystal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/doc/featured-thumbnails/large-particle-in-liquid-crystal.png -------------------------------------------------------------------------------- /doc/featured-thumbnails/rearrangements-and-strain.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/doc/featured-thumbnails/rearrangements-and-strain.png -------------------------------------------------------------------------------- /doc/featured-thumbnails/tracking-sphere.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/doc/featured-thumbnails/tracking-sphere.png -------------------------------------------------------------------------------- /doc/featured-thumbnails/trajectories-in-water.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/doc/featured-thumbnails/trajectories-in-water.png -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 | 21 | 22 | 23 | Trackpy: Fast, Flexible Particle-Tracking Toolkit 24 | ================================================= 25 | 26 | .. raw:: html 27 | 28 |
29 | 51 |
52 | 53 | 54 | Trackpy is a Python package for particle tracking in 2D, 3D, and higher dimensions. 55 | 56 | For a brief introduction to the ideas behind the package, you can read the :ref:`introductory notes `. Read the :doc:`walkthrough ` to study an example project from start to finish. 57 | 58 | Much more detail can be found in the trackpy :ref:`tutorial `. You can also browse the :ref:`API reference ` to see available tools for tracking, motion analysis, plotting, and more. 59 | 60 | See the 61 | :doc:`installation instructions ` to obtain the current stable 62 | release or the version in development. 63 | 64 | To check out the code, report a bug, or contribute a new feature, please visit 65 | the `github repository `_. 66 | 67 | Different versions of the documentations are available: consult the documentation 68 | of the current `stable `_ 69 | release or the `developer `_ version. 70 | 71 | .. raw:: html 72 | 73 |
74 |
75 |
76 |

Documentation

77 | 78 | .. toctree:: 79 | :maxdepth: 1 80 | 81 | introduction 82 | installation 83 | api 84 | whatsnew 85 | 86 | .. raw:: html 87 | 88 |
89 |
90 |

Tutorial

91 | 92 | .. toctree:: 93 | :maxdepth: 1 94 | 95 | Walkthrough 96 | Prediction (Linking) 97 | Tracking in 3D 98 | Uncertainty Estimation 99 | Advanced Linking 100 | Adaptive Linking 101 | Streaming 102 | Performance 103 | Parallelized Feature Finding 104 | Tracking Large Features Such As Bubbles 105 | Tracking Particles' Rings in Bright-Field Microscopy 106 | 107 | .. raw:: html 108 | 109 |
110 |
111 |
112 | -------------------------------------------------------------------------------- /doc/installation.rst: -------------------------------------------------------------------------------- 1 | .. _installation: 2 | 3 | Installing Trackpy 4 | ------------------ 5 | 6 | For Python Novices 7 | ^^^^^^^^^^^^^^^^^^ 8 | 9 | Installation is simple on Windows, OSX, and Linux, even for Python novices. 10 | 11 | 1. Get Scientific Python 12 | """""""""""""""""""""""" 13 | 14 | To get started with Python on any platform, download and install 15 | `Anaconda `_. It comes with the 16 | common scientific Python packages built in. 17 | 18 | 2. Install trackpy 19 | """""""""""""""""" 20 | 21 | Open a command prompt. On Windows, you can use the "Anaconda Command Prompt" 22 | installed by Anaconda or Start > Applications > Command Prompt. On a Mac, look 23 | for Applications > Utilities > Terminal. Type these commands: 24 | 25 | .. code-block:: bash 26 | 27 | conda update conda 28 | conda install -c conda-forge trackpy 29 | conda install -c conda-forge pims 30 | 31 | The above installs trackpy and all its requirements, plus the recommended 32 | `PIMS `_ package that simplifies image-reading, 33 | and that is used in the trackpy tutorials. 34 | 35 | 3. Try it out! 36 | """""""""""""" 37 | 38 | Finally, to try it out, type 39 | 40 | .. code-block:: bash 41 | 42 | jupyter notebook 43 | 44 | .. note:: For older Python versions, use ``ipython notebook`` 45 | 46 | This will automatically open a browser tab, ready to interpret Python code. 47 | To get started, check out the links to tutorials at the top of this document. 48 | 49 | Updating Your Installation 50 | """""""""""""""""""""""""" 51 | 52 | Before updating to a new version of trackpy, be sure to read the 53 | :doc:`release notes` for a list of new features and any changes 54 | that may affect your existing analysis code. 55 | 56 | Latest Stable Release 57 | ^^^^^^^^^^^^^^^^^^^^^ 58 | 59 | The code is under active development. 
To update to the latest stable release, 60 | run this in the command prompt: 61 | 62 | .. code-block:: bash 63 | 64 | conda update -c conda-forge trackpy 65 | 66 | Latest Version Under Development 67 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 68 | 69 | The ``master`` branch on GitHub contains the latest tested development code. 70 | Sometimes this branch will be compatible with the very latest versions of 71 | other packages (especially pandas or scipy) weeks or months before a new 72 | stable version is released. Code in this branch has already been thoroughly 73 | tested. 74 | 75 | You can easily install a recent build by downloading the source from 76 | `GitHub `_: 77 | 78 | .. code-block:: bash 79 | 80 | pip install https://github.com/soft-matter/trackpy/archive/master.zip 81 | 82 | If you plan to edit the code yourself, you should use git and pip as 83 | explained below. 84 | 85 | Using environments 86 | """""""""""""""""" 87 | 88 | Some users often like to separate Python instances into environments, so that 89 | each project can have its own set of packages. Create a trackpy-dedicated 90 | environment with the name softmatter as follows: 91 | 92 | .. code-block:: bash 93 | 94 | conda create --name softmatter trackpy nb_conda 95 | 96 | The `nb_conda` is optional, but we added it to ensure that Jupyter sees this 97 | environment as well. You can switch to the environment from within Jupyter in 98 | the Kernels menu. To access the environment on the commandline, type the 99 | following: 100 | 101 | .. code-block:: bash 102 | 103 | source activate softmatter 104 | 105 | On Windows systems, leave out the `source`. You can go back to the root conda 106 | environment by activating `root`. 
107 | 108 | 109 | More Information for Experienced Python Users 110 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 111 | 112 | Archlinux 113 | """"""""" 114 | 115 | Package is available for Archlinux on AUR: `Python 3 `__ 116 | 117 | pip 118 | """ 119 | 120 | We strongly recommend using conda to install trackpy, as described above, 121 | but pip is also supported. In general, you can use the latest version of each dependency. 122 | If you wish to override trackpy's minimum version requirements, you may find that 123 | trackpy still runs your analysis code with older versions of dependencies. 124 | 125 | Essential Dependencies: 126 | 127 | * Python 3 128 | * `setuptools `__ 129 | * `numpy `__ 130 | * `scipy `__ 131 | * `matplotlib `__ 132 | * `pandas `__ 133 | * `pyyaml `__ 134 | * `looseversion `__ 135 | 136 | You will also want to install the `pims `_ 137 | package that simplifies image-reading, and that is used in the trackpy tutorials. 138 | The remaining optional dependencies, listed below, are strongly recommended but 139 | can be challenging to install yourself, hence our recommendation that you 140 | use `Anaconda `_. 141 | 142 | Manual installation 143 | """"""""""""""""""" 144 | 145 | If you want to be able to edit the code yourself, you can install the package 146 | manually. First, make sure you have `git `__ version 147 | management software installed. Go to a folder where you want to have your 148 | source code, then: 149 | 150 | .. code-block:: bash 151 | 152 | git clone https://github.com/soft-matter/trackpy 153 | cd trackpy 154 | python setup.py develop 155 | 156 | We welcome any contribution to the trackpy source code, so feel free to send 157 | in your contributions on Github! To do so, make an account, fork 158 | `trackpy `__ and create a local copy 159 | using: 160 | 161 | .. 
code-block:: bash 162 | 163 | git clone https://github.com//trackpy 164 | 165 | Now you have a local copy of the code which you can edit, but don't start 166 | editing right away as you are currently on the ``master`` branch. We think it 167 | is good practice to keep your ``master`` branch mirroring the upstream 168 | trackpy version, so first create a new branch and push it to the remote as 169 | follows: 170 | 171 | .. code-block:: bash 172 | 173 | git branch fix-something 174 | git push --set-upstream origin fix-something 175 | 176 | Now you can edit your code in any way you like, commit your changes, and push 177 | them again to the remote. 178 | 179 | Before sending in your code, please consult 180 | `our guidelines `__. 181 | Also, see `here `__ for getting 182 | started using git. 183 | 184 | Optional Dependencies 185 | """"""""""""""""""""" 186 | 187 | These are strongly recommended to make using trackpy more convenient and faster: 188 | 189 | * The `PIMS `_ package simplifies image-reading, 190 | and is used in the trackpy tutorials. 191 | * `PyTables `__ for saving results in an HDF5 file. 192 | This is included with Anaconda. 193 | * `numba `__ for accelerated feature-finding and linking. 194 | This is included with Anaconda and Canopy. Installing it any other way is 195 | difficult; we recommend sticking with one of these. 196 | * `Pillow `__ or `PIL `__ for some display routines. 197 | This is included with Anaconda. 198 | 199 | PIMS has its own optional dependencies for reading various formats. You 200 | can read what you need for each format 201 | `here on PIMS' README `__. 202 | -------------------------------------------------------------------------------- /doc/introduction.rst: -------------------------------------------------------------------------------- 1 | .. 
_introduction: 2 | 3 | Introduction to Trackpy 4 | ----------------------- 5 | 6 | Trackpy is a package for tracking blob-like features in video images, following them 7 | through time, and analyzing their trajectories. It started from a Python implementation 8 | of the widely-used Crocker--Grier algorithm and is currently in transition 9 | towards a general-purpose Python tracking library. 10 | 11 | There are many similar projects. (See table below.) 12 | Our implementation is distinguished by succinct and flexible usage, 13 | a thorough testing framework ensuring code stability and accuracy, 14 | scalability, and thorough documentation. 15 | 16 | Several researchers have merged their independent efforts into this code. 17 | We would like to see others in the community adopt it and potentially 18 | contribute code to it. 19 | 20 | Features 21 | ^^^^^^^^ 22 | 23 | Basics 24 | """""" 25 | Following the `widely-used particle tracking algorithm `__, 26 | we separate *tracking* into three separate steps. In the first step, *feature finding* 27 | initial feature coordinates are obtained from the images. Subsequently, sub-pixel precision 28 | is obtained in coordinate *refinement*. Finally, the coordinates are *linked* in time yielding 29 | the feature trajectories. 30 | 31 | * The tracking algorithm originally implemented by John Crocker and Eric Weeks in IDL was 32 | completely reimplemented in Python. 33 | * A `flexible framework for least-squares fitting `__ 34 | allows for sub-pixel refinement using any radial model function in 2D and 3D. 35 | * Trackpy is actively used and tested on **Windows, Mac OSX, and Linux**, 36 | and it uses only **free, open-source** software. 37 | * Frames of video are loaded via the sister project `PIMS `__ 38 | which enables reading of several types of **video files (AVI, MOV, etc.), 39 | specialized formats (LEI, ND2, SEQ, CINE), multi-frame TIFF, or a directory of sequential 40 | images (TIFF, PNG, JPG, etc.)**. 
41 | * Results are given as DataFrames, high-performance spreadsheet-like objects 42 | from `Python pandas `__ 43 | which can easily be saved to a **CSV file, Excel spreadsheet, 44 | SQL database, HDF5 file**, and more. 45 | * Particle trajectories can be 46 | characterized, grouped, and plotted using a suite of convenient functions. 47 | * To verify correctness and stability, a **suite of 500+ tests verifies basic results 48 | on each trackpy update**. 49 | 50 | Special Capabilities 51 | """""""""""""""""""" 52 | 53 | * Both feature-finding and trajectory-linking can be performed on 54 | **arbitrarily long videos** using a fixed, modest amount of memory. (Results 55 | can be read and saved to disk throughout.) 56 | * A **prediction framework** helps track particles in fluid flows, 57 | or other scenarios where velocity is correlated between time steps. 58 | * Feature-finding optionally makes use of the **history of feature coordinates** 59 | in a routine that combines linking and feature-finding. 60 | * Feature-finding and trajectory-linking work on **images with any number of dimensions**, 61 | making possible some creative applications. 62 | * **Uncertainty is estimated** following a method `described in this paper `__ by Savin and Doyle. 63 | * **High-performance** numba acceleration is used only 64 | if available. Since these can be tricky to install on some machines, 65 | the code will automatically fall back on slower pure Python implementations. 66 | * **Adaptive search** can prevent the tracking algorithm from failing 67 | or becoming too slow, by automatically making adjustments when needed. 68 | 69 | Citing Trackpy 70 | ^^^^^^^^^^^^^^ 71 | 72 | Trackpy can be cited using a DOI provided through our Zenodo 73 | `record page `_. To direct your 74 | readers to the specific version of trackpy that they can use to reproduce 75 | your results, cite the release of trackpy that you used for your work 76 | (available from the variable ``trackpy.__version__``).
The 77 | record pages linked below contain author lists, other details, and complete 78 | citations in various formats. If your citation style allows for a URL, 79 | please include a link to the github repository: 80 | `github.com/soft-matter/trackpy`. 81 | 82 | ================= ========================================================================= ====================== 83 | Release (version) Zenodo Record Pages with info and citations DOI 84 | ================= ========================================================================= ====================== 85 | v0.4 and later `Versioned Record Page `__ (see Zenodo) 86 | v0.3.2 `Record Page `__ 10.5281/zenodo.60550 87 | v0.3.1 `Record Page `__ 10.5281/zenodo.55143 88 | v0.3.0 `Record Page `__ 10.5281/zenodo.34028 89 | v0.2.4 `Record Page `__ 10.5281/zenodo.12255 90 | v0.2.3 `Record Page `__ 10.5281/zenodo.11956 91 | v0.2.2 `Record Page `__ 10.5281/zenodo.11132 92 | v0.2 `Record Page `__ 10.5281/zenodo.9971 93 | ================= ========================================================================= ====================== 94 | 95 | Users often also cite this publication describing the core feature-finding 96 | and linking algorithms that trackpy is based on: 97 | 98 | Crocker, J. C., & Grier, D. G. (1996). Methods of Digital Video Microscopy for Colloidal Studies. 99 | J. Colloid Interf. Sci., 179(1), 298–310. http://doi.org/10.1006/jcis.1996.0217 100 | 101 | Related Projects 102 | ^^^^^^^^^^^^^^^^ 103 | 104 | ============================ =================================================== ========================= 105 | Author(s) Project URL Language 106 | ============================ =================================================== ========================= 107 | Crocker and Grier http://physics.nyu.edu/grierlab/software.html IDL 108 | Crocker and Weeks http://www.physics.emory.edu/~weeks/idl/ IDL 109 | Blair and Dufresne http://physics.georgetown.edu/matlab/ MATLAB 110 | Maria Kilfoil et al. 
https://github.com/rmcgorty/ParticleTracking-Python Python 111 | Graham Milne http://zone.ni.com/devzone/cda/epd/p/id/948 LabVIEW 112 | Ryan Smith and Gabe Spalding http://titan.iwu.edu/~gspaldin/rytrack.html stand alone/IDL GUI 113 | Peter J Lu https://github.com/peterlu/PLuTARC_centerfind2D C++ (identification only) 114 | Thomas A Caswell https://github.com/tacaswell/tracking C++ 115 | ============================ =================================================== ========================= 116 | 117 | Core Contributors 118 | ^^^^^^^^^^^^^^^^^ 119 | 120 | * **Casper van der Wel** anisotropic 3D feature-finding, plotting and analyses, framework 121 | for least-squares refinement, combined linking and feature finding 122 | * **Daniel Allan** feature-finding, uncertainty estimation, 123 | motion characterization and discrimination, plotting tools, tests 124 | * **Nathan Keim** alternative trajectory-linking implementations, major 125 | speed-ups, prediction, adaptive search 126 | * **Thomas Caswell** multiple implementations of sophisticated trajectory-linking, tests 127 | 128 | 129 | Support 130 | ^^^^^^^ 131 | 132 | This package was developed in part by Daniel Allan, as part of his 133 | PhD thesis work on microrheology in Robert L. Leheny's group at Johns Hopkins 134 | University in Baltimore, MD, USA. The work was supported by the National Science Foundation 135 | under grant number CBET-1033985. Dan can be reached at dallan@pha.jhu.edu. 136 | 137 | This package was developed in part by Thomas A Caswell as part of his 138 | PhD thesis work in Sidney R Nagel's and Margaret L Gardel's groups at 139 | the University of Chicago, Chicago IL, USA. This work was supported in 140 | part by NSF Grant DMR-1105145 and NSF-MRSEC DMR-0820054. Tom can be 141 | reached at tcaswell@gmail.com. 142 | 143 | This package was developed in part by Nathan C. Keim at Cal Poly, 144 | San Luis Obispo, California, USA and supported by NSF Grant DMR-1708870. 
145 | Portions were also developed at the University of Pennsylvania, 146 | Philadelphia, USA, supported by NSF-MRSEC DMR-1120901. 147 | 148 | This package was developed in part by Casper van der Wel, as part of his 149 | PhD thesis work in Daniela Kraft’s group at the Huygens-Kamerlingh-Onnes laboratory, 150 | Institute of Physics, Leiden University, The Netherlands. This work was 151 | supported by the Netherlands Organisation for Scientific Research (NWO/OCW). 152 | -------------------------------------------------------------------------------- /doc/releases/v0.3.0.txt: -------------------------------------------------------------------------------- 1 | 2 | v0.3.0 3 | ------ 4 | 5 | Overview 6 | ~~~~~~~~ 7 | 8 | Release v0.3.0 adds many important enhancements and fixes. Feature location is extended to anisotropic 3D (and generally N-dimensional) features. The analysis and visualization tools now give first-class status to 3D work. A powerful new "adaptive search" capability gives fine-tuned control and detailed diagnostics of the trajectory-linking process. Several performance improvements make trackpy even faster. 9 | 10 | Enhancements 11 | ~~~~~~~~~~~~ 12 | 13 | - The feature-finding functionality was formerly limited to circular features, but now trackpy can locate anisotropic N-dimensional features. Their size is specified as a tuple of odd integers instead of a single odd integer. See the new 3D tutorial for details. (:issue:`162`, :issue:`239`) 14 | 15 | - The plotting and analysis tools, many of which were limited to 2D, have been extended to 3D. (:issue:`196`) 16 | 17 | - The numba engine for fast feature-finding has been extended to handle 3D images. (:issue:`242`) 18 | 19 | - A new method of calculating feature ``mass`` compensates for bit depth and the details of preprocessing. This means that masses are more consistent under changes in image brightness or format. 
However, to obtain results comparable to those from older trackpy versions, you may have to adjust your ``minmass`` parameter (and other parts of your code that use the mass). The new ``minmass_version_change`` function lets you convert old values of ``minmass`` to the new standard (:issue:`239`). 20 | 21 | - Feature finding returns a new column ``raw_mass`` that is the sum of intensities inside the unprocessed image. For anisotropic feature detection, size and static errors are returned per dimension: ``size_x`` etc. and ``ep_x`` etc. (:issue:`239`). 22 | 23 | - A powerful new adaptive search feature gives much richer control of the tracking (trajectory-linking) process, and it makes formerly intractable scenarios possible to solve in a reasonable time. See the new tutorials, "Advanced Linking: Subnetworks and search_range" and "Adaptive Search: Changing search_range on the Fly," for details. 24 | 25 | - Uncertainty estimation is more accurate. See the docstring of ``trackpy.uncertainty.static_error`` for details. (:issue:`239`, :issue:`259`) 26 | 27 | - The linking functions can now collect data about how each particle was linked. See the tutorial "Obtaining Diagnostic Information from Linking." 28 | 29 | - Nearby local maxima are "merged" -- i.e., interpreted as parts of the same feature -- in a more robust way. This improves accuracy in crowded images. For some data, it may have no effect, but for others it will give significantly different (but, we think, better) results than previous versions of trackpy. (:issue:`143`) 30 | 31 | - The minimum feature separation can be as small as zero. Formerly, it was constrained to be larger than the feature diameter. If ``separation=0``, "merging" of duplicate maxima is effectively turned off. (:issue:`139`) 32 | 33 | - The percentile-based thresholding was moved into a separate function, so it can now be called directly. That will be useful to users who want to inspect what the thresholding is doing to their images. 
It will also be useful for profiling. (:issue:`139`) 34 | 35 | - The performance of feature-finding can now be tested in a custom way using new routines in ``artificial.py``. Users can provide a custom feature shape to test the feature-finding on their own system. 36 | 37 | - Various performance improvements make feature-finding and trajectory-linking faster than ever, in spite of the increased functionality. (:issue:`203`, :issue:`204`) 38 | 39 | - More user-friendly error messages replace cryptic errors. (:issue:`166`). 40 | 41 | Bug Fixes 42 | ~~~~~~~~~ 43 | 44 | - Fixed a bug in v0.2.3 and v0.2.4 that broke the ``circle_size`` parameter in ``annotate()``. (:issue:`169`, :issue:`170`) 45 | - Fixed a bug where subnetwork distances were not added in quadrature in the python linker. (:issue:`212`). (The impact of this bug is not as large as one might expect. See issue link for details.) 46 | - Input images with a float datatype are internally converted to integer datatype for faster computation. They were converted to signed integers; now they are converted to unsigned. (:issue:`188`). When preprocessing is turned off, integer images are not converted at all (:issue:`264`). 47 | - Fixed a memory leak when a track is ended (:issue:`256`). 48 | - Added a warning about slow feature-finding when linking from a Pandas view (:issue:`281`). 49 | - Slow feature-finding with more recent versions of the numba package (0.16 and 0.17 specifically) is addressed. 50 | - The ``ax`` argument of ``subpx_bias`` is removed because the figure is cleared anyway by ``subpx_bias``. 51 | 52 | API changes 53 | ~~~~~~~~~~~ 54 | 55 | - The plot function ``annotate()`` now displays the image with the vertical axis inverted, to be consistent with the ``pims`` display function and ``plot_traj()``. (:issue:`217`) 56 | - Using trackpy to access pims functionality (e.g. ``trackpy.ImageSequence()``) is now deprecated; it still works but will generate a warning message. 
This capability will be removed from future versions of trackpy, in favor of accessing it in the pims package directly (e.g. ``pims.ImageSequence()``). (:issue:`214`) 57 | - When trackpy estimates the error in a feature's position (returned as the ``ep`` column), the procedure can fail and result in a nonsense value. This will now result in the value of ``ep`` being NaN (not a number); previous versions of trackpy returned a negative value in this situation. To restore the previous behavior, you can replace these values with negative numbers by using the Pandas ``fillna()`` method. (:issue:`113`) 58 | - The mass calculation is changed. Before v0.3 the mass was calculated from a rescaled image. From this version, this rescaling is compensated at the end so that the mass reflects the actual intensities in the image. ``minmass_version_change()`` can calculate the different ``minmass`` value to use (:issue:`239`) 59 | -------------------------------------------------------------------------------- /doc/releases/v0.3.1.txt: -------------------------------------------------------------------------------- 1 | 2 | v0.3.1 3 | ------ 4 | 5 | Enhancements 6 | ~~~~~~~~~~~~ 7 | 8 | - Added pair correlation functions in 2D and 3D (:issue:`336`) 9 | 10 | - Mean squared displacement calculations are up to 40 times faster due to a new FFT-based algorithm (:issue:`337`) 11 | 12 | - Number of measurements in MSD calculations is more accurate (:issue:`337`) 13 | 14 | - Increased performance of `compute_drift` (:issue:`345`) 15 | 16 | Bug Fixes 17 | ~~~~~~~~~ 18 | 19 | - Bug in the python refinement code was solved: feature finding with `engine='python'` is now more accurate. 
(:issue:`377`) 20 | 21 | - Error in `subtract_drift` is solved (:issue:`351`) 22 | 23 | - Legends are disabled by default in plotting (:issue:`357`) 24 | 25 | - Plots in (y, x) now always have their y axis inverted (:issue:`357`) 26 | 27 | - Empty frames are not ignored anymore in `link_df` (:issue:`293`) 28 | 29 | - ``characterize`` was not passed to ``refine`` in ``locate`` (:issue:`310`) 30 | 31 | - ``annotate3d`` now works with the notebook backend (:issue:`308`) 32 | 33 | - Trackpy is now fully compatible with pandas 0.17 and 0.18.1 34 | 35 | - Trackpy is compatible with Pillow >0.3.0 (through PIMS v0.3.3) 36 | 37 | - The ``pos_columns`` default argument is not mutable anymore in ``msd``, ``imsd``, ``emsd``, and ``compute_drift``. (:issue:`337`) 38 | 39 | 40 | API changes 41 | ~~~~~~~~~~~ 42 | 43 | - ``max_lagtime`` in ``msd``, ``imsd``, and ``emsd`` is now included in the returned lagtimes (:issue:`337`) 44 | 45 | - `subtract_drift` does not act inplace by default (:issue:`351`) 46 | -------------------------------------------------------------------------------- /doc/releases/v0.4.txt: -------------------------------------------------------------------------------- 1 | v0.4.2 2 | ------ 3 | 4 | This is a minor release with two major new features: parallel processing to 5 | speed up feature-finding, and a new method for locating features in 6 | bright-field microscopy. There is also an overhaul of the walkthrough and other 7 | tutorials, and compatibility with the latest version of Pandas (0.25.1). 8 | 9 | 10 | API Changes 11 | ~~~~~~~~~~~ 12 | 13 | - "trackpy.locate" and associated functions now report "ep" to be NaN when 14 | there is only one usable background pixel to sample. (:issue:`519`) 15 | 16 | - "trackpy.legacy.linking.link_df" does not copy a DataFrame automatically 17 | anymore if the provided DataFrame is a view. 
(:issue:`503`) 18 | 19 | - The "locate" options in "trackpy.batch" are now collected in "**kwargs" 20 | (except for "diameter") and passed to the wrapped "trackpy.locate" function. 21 | This means when using the "meta" option of "trackpy.batch" only explicitly 22 | given options for the wrapped "trackpy.locate" are saved to the file; 23 | unspecified options using default values from "locate" aren't saved. 24 | (:issue:`499`) 25 | 26 | 27 | Enhancements 28 | ~~~~~~~~~~~~ 29 | 30 | - Added support for multiprocessing to "trackpy.batch". (:issue:`499`) 31 | 32 | - "trackpy.locate_brightfield_ring" enables precise tracking of large 33 | particles in bright-field microscopy. There is a new tutorial to demonstrate 34 | it. (:issue:`527`) 35 | 36 | - Added "trackpy.linking.link_partial" to allow linking a movie in separate 37 | pieces. (:issue:`445`) 38 | 39 | - Many updates and improvements to the tutorials. Most notably, the walkthrough 40 | is revised and updated for the latest versions of trackpy and pims, and code 41 | from all tutorials should now run in recent Python environments. 42 | (:issue:`567`, :issue:`524`, :issue:`539`, :issue:`543`, :issue:`541`, 43 | :issue:`532`, :issue:`157`, :issue:`558`, :issue:`505`, :issue:`525`). Most of 44 | these changes were made in the trackpy-examples repository on GitHub. 45 | 46 | - Smaller improvements to other documentation for the linking and 47 | feature-finding APIs. (:issue:`545`, :issue:`551`, :issue:`550`, 48 | :issue:`522`, :issue:`540`, :issue:`542`) 49 | 50 | Bug fixes 51 | ~~~~~~~~~ 52 | 53 | - Fixed incompatibility with recent Pandas (:issue:`529`, :issue:`538`) 54 | - Removed warnings in case Pandas 0.23 is used (:issue:`503`) 55 | - Removed warnings in case Pandas 0.25 is used (:issue:`545`) 56 | 57 | - "trackpy.diag.dependencies" now gives information about a more complete set 58 | of packages. 59 | 60 | v0.4.1 61 | ------ 62 | 63 | This is a minor release with some important fixes. 
Additionally, PIMS is no 64 | longer a required dependency, which makes installation easier. 65 | 66 | 67 | API Changes 68 | ~~~~~~~~~~~ 69 | 70 | - PIMS (Python IMage Sequence) is no longer a required dependency (:issue:`492`) 71 | 72 | 73 | Enhancements 74 | ~~~~~~~~~~~~ 75 | 76 | - Improved performance of adaptive search (:issue:`489`) 77 | - Reduced memory usage during linking (:issue:`489`) 78 | 79 | 80 | Bug fixes 81 | ~~~~~~~~~ 82 | 83 | - Fix enable_numba on Py3.6 and MS Windows systems (:issue:`486`) 84 | 85 | 86 | v0.4.0 87 | ------ 88 | 89 | This is a major release that includes new choices for linking and feature-finding algorithms, as well as many 90 | performance increases and clean-ups. It is recommended for all trackpy users, but may require 91 | minor changes to existing code, as detailed below. 92 | 93 | API Changes 94 | ~~~~~~~~~~~ 95 | 96 | - locate has a more modular structure that makes it easier to customize feature-finding (:issue:`400`, :issue:`406`) 97 | 98 | - The default value of smoothing_size is smaller, and more consistent with 99 | other implementations of this algorithm. In general, particle 100 | mass values will be different. 
The new minmass_v04_change function helps 101 | update minmass values in existing code (:issue:`401`, :issue:`465`) 102 | 103 | - The "minmass" parameter now has a default value of 0 (:issue:`400`) 104 | 105 | - The "filter_before" option is no longer supported (:issue:`400`) 106 | 107 | - Major refactor of the linking code giving linking a better API (:issue:`416`, :issue:`414`) 108 | 109 | - Linking diagnostics are no longer supported (:issue:`414`) 110 | 111 | - link_df always creates a copy of the data; it can no longer work in-place (:issue:`414`) 112 | 113 | 114 | Enhancements 115 | ~~~~~~~~~~~~ 116 | 117 | - Improved performance of feature finding (:issue:`361`) 118 | 119 | - Improved performance of linking (:issue:`400`, :issue:`406`) 120 | 121 | - New refinement method using least squares optimization (:issue:`407`) 122 | 123 | - New linker that combines feature-finding with linking (FindLinker) (:issue:`407`, :issue:`410`, :issue:`411`, :issue:`416`, :issue:`428`) 124 | 125 | - Linking in non-Euclidean coordinate systems and curved metrics (:issue:`448`) 126 | 127 | 128 | Bug fixes 129 | ~~~~~~~~~ 130 | 131 | - Fix compute_drift if dataframes are not sorted (:issue:`409`) 132 | 133 | - Fix double counting of non-linking penalty in non-numba linkers (:issue:`430`) 134 | 135 | - Fix the N column for emsd (:issue:`434`) 136 | -------------------------------------------------------------------------------- /doc/releases/v0.5.txt: -------------------------------------------------------------------------------- 1 | v0.5 2 | ---- 3 | 4 | This is a major release focused on enhancements. Most notably, parallel 5 | processing is now turned on by default for "trackpy.batch" and 6 | "trackpy.locate_brightfield_ring", resulting in a large performance increase 7 | for many users. Another minor speedup changes the way in which particle numbers 8 | are generated, which may cause issues with reproducibility in some existing 9 | workflows (see "API Changes" below). 
10 | 11 | Note that Python 2 reached end-of-life in 2020, and it is no longer officially 12 | supported by trackpy. Also, trackpy dropped support for older NumPy, Scipy and Pandas. 13 | The current minimum requirements are: ``python>=3.6 numpy>=1.14 scipy>=1.1 pandas>=0.22``. 14 | 15 | Enhancements 16 | ~~~~~~~~~~~~ 17 | 18 | - "compute_drift()" now accepts list-like "pos_columns" (e.g. tuples). (:issue:`579`) 19 | 20 | - "trackpy.locate_brightfield_ring" now defaults to parallel execution with all 21 | available CPUs, as controlled by the optional "processes" argument (:issue:`602`) 22 | 23 | - "refine_brightfield_ring()" now uses the 5th percentile for edge detection 24 | instead of the minimum, resulting in a more robust tracking for low contrast 25 | images. (:issue:`602`) 26 | 27 | 28 | API Changes 29 | ~~~~~~~~~~~ 30 | 31 | - "trackpy.batch" now defaults to parallel execution with all available CPUs 32 | ("processes='auto'"). For most users this should be a noticeable speedup. 33 | Revert to the previous behavior with "processes=1". (:issue:`605`) 34 | 35 | - During linking, particles are not sorted before assigning new 36 | trajectories. This greatly speeds up linking under certain conditions. 37 | The actual coordinates and linked trajectories are not affected by this 38 | change, but the unique number assigned to a trajectory (in the "particle" 39 | column) may vary from run to run. The most reproducible way to identify a 40 | specific particle is still the particle's coordinates at an instant in time. 41 | To make the numbering stable again, set the "PYTHONHASHSEED" environment 42 | variable before starting Python. 
43 | (:issue:`597`, :issue:`601`) 44 | -------------------------------------------------------------------------------- /doc/releases/v0.6.txt: -------------------------------------------------------------------------------- 1 | v0.6.4 2 | ------ 3 | 4 | trackpy v0.6.4 is a minor compatibility update for numpy v2.0 5 | 6 | Dependencies 7 | ~~~~~~~~~~~~ 8 | - Adds support for numpy 2.0 (@nkeim, #770). Note that as of this writing, 9 | pytables (an optional dependency for trackpy) does not yet support 10 | numpy 2.0 (see https://github.com/PyTables/PyTables/issues/1172 and 11 | #768). 12 | 13 | 14 | v0.6.3 15 | ------ 16 | 17 | trackpy v0.6.3 is a minor compatibility update for newer Pandas versions. 18 | 19 | Dependencies 20 | ~~~~~~~~~~~~ 21 | - Improves support for latest Pandas in emsd function (@hz-xiaxz, #758) 22 | - Fix deprecated matplotlib usage in 3D plotting (@jacopoabramo, #767) 23 | 24 | 25 | v0.6.2 26 | ------ 27 | 28 | trackpy v0.6.2 includes bug fixes, and compatibility fixes with newer 29 | dependency versions. 30 | 31 | A special thanks to everyone who reported the issues fixed in this release! 32 | 33 | Bug fixes 34 | ~~~~~~~~~ 35 | - Fixed an issue with feature-finding in 4D image sequences (@VeraMTitze, #739) 36 | - Fixed a rare issue in which subtract_drift() with multiple particles 37 | resulted in a Pandas indexing error. (@kevin-duclos, #735) 38 | 39 | Dependencies 40 | ~~~~~~~~~~~~ 41 | - Adds official support for Python 3.12. (@caspervdw, #747) 42 | - Adds support for latest Pandas, NumPy, and SciPy (@marcocaggioni, #740; 43 | @nkeim, #743, @vivarose and @rodrigo-j-goncalves, #742; @caspervdw, #747) 44 | - Drops official support for Python 3.7 and earlier, NumPy 1.17 and earlier, 45 | Pandas 0.x, and SciPy 1.3 and earlier. 46 | 47 | 48 | v0.6.1 49 | ------ 50 | 51 | trackpy v0.6.1 is functionally equivalent to v0.6.0. It is being released 52 | to fix an issue with Zenodo, so that this trackpy release has a citable DOI. 
53 | 54 | v0.6.0 55 | ------ 56 | 57 | This release adds an efficient way to use custom distance metrics during 58 | linking, and fixes bugs and some inconsistencies in the prediction 59 | capabilities. Some existing code that used prediction may now fail to run until 60 | a (minor) update is made. 61 | 62 | This release includes contributions by @magnunor, @freemansw1, @crisp-snakey, 63 | @rbnvrw, @caspervdw, @tacaswell, and @nkeim. 64 | 65 | Enhancements 66 | ~~~~~~~~~~~~ 67 | 68 | - Linking can now use any ``DistanceMetric`` class from sklearn to compute 69 | distances (#692 by @freemansw1) 70 | - Several documentation fixes by @magnunor and @nkeim 71 | 72 | API Changes 73 | ~~~~~~~~~~~ 74 | 75 | - When supplying an initial velocity guess to NearestVelocityPredict or 76 | DriftPredict, you must also supply the pos_columns argument to identify 77 | the names of the columns in your array. (For example, 78 | "pos_columns = ['y', 'x']".) Otherwise, creating the predictor will 79 | raise an exception that explains this change. If you provide pos_columns 80 | when creating the predictor, you do not have to supply it when subsequently 81 | using the link_df() or link_df_iter() methods to link your features. 82 | (@nkeim, #710) 83 | - ``reconnect_traj_patch()`` is now available as ``trackpy.reconnect_traj_patch`` 84 | and is described in the API documentation. (@nkeim, #648) 85 | 86 | Bug Fixes 87 | ~~~~~~~~~ 88 | 89 | - When linking with prediction, the predictor now correctly uses the same 90 | position columns as the linker, and correctly handles the pos_columns 91 | argument if specified. (@nkeim, #710) 92 | - The link_df() method of predictor objects now works correctly. This is 93 | reflected in the updated prediction tutorial. (@nkeim, #710) 94 | 95 | Dependencies 96 | ~~~~~~~~~~~~ 97 | 98 | - trackpy is now being tested in conda environments with Python 3.10. 99 | - trackpy is no longer tested with Python 3.6. 
100 | - trackpy now requires the ``looseversion`` package to work with newer scipy 101 | (@crisp-snakey, #706) 102 | - Much work on testing and packaging infrastructure by @tacaswell and @caspervdw 103 | -------------------------------------------------------------------------------- /doc/tutorial.rst: -------------------------------------------------------------------------------- 1 | .. _tutorial: 2 | 3 | Trackpy tutorial 4 | ================ 5 | 6 | Basic Usage 7 | ----------- 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | Walkthrough 13 | Prediction (Linking) 14 | Tracking in 3D 15 | Uncertainty Estimation 16 | Advanced Linking 17 | Adaptive Linking 18 | 19 | Processing Large Data Sets 20 | -------------------------- 21 | 22 | .. toctree:: 23 | :maxdepth: 2 24 | 25 | Streaming 26 | Performance 27 | Parallelized Feature Finding 28 | 29 | Extending & Customizing Trackpy 30 | ------------------------------- 31 | .. toctree:: 32 | :maxdepth: 2 33 | 34 | Tracking Particles' Rings in Bright-Field Microscopy 35 | Tracking Large Features Such as Bubbles, and Visualizing a Velocity Field 36 | -------------------------------------------------------------------------------- /doc/tutorial/Makefile: -------------------------------------------------------------------------------- 1 | NOTEBOOK_DIR = ../../../trackpy-examples/notebooks 2 | FILES := $(patsubst $(NOTEBOOK_DIR)/%,%, $(wildcard $(NOTEBOOK_DIR)/*.ipynb)) 3 | 4 | notebooks: 5 | # run the nbconvert and output an html to current directory 6 | # write an .rst file so that sphinx will include the .html 7 | # the first line is a title, which is necessary for it to be included in the Sphinx toctree 8 | @- $(foreach FILE, $(FILES), \ 9 | jupyter nbconvert --to html $(NOTEBOOK_DIR)/$(FILE) --output $(CURDIR)/$(FILE:.ipynb=.html); \ 10 | bash -c 'echo -e "$(FILE:.ipynb=)\n------------------------------------------------\n.. 
raw:: html\n :file: "$(FILE:.ipynb=.html)' > $(FILE:.ipynb=.rst); \ 11 | ) 12 | 13 | -------------------------------------------------------------------------------- /doc/tutorial/tools/nb_to_doc.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | """ 3 | Convert empty IPython notebook to a sphinx doc page. 4 | 5 | """ 6 | import os 7 | import sys 8 | 9 | 10 | def convert_nb(nbname): 11 | 12 | os.system("runipy --o %s.ipynb --matplotlib --quiet" % nbname) 13 | os.system("ipython nbconvert --to rst %s.ipynb" % nbname) 14 | os.system("tools/nbstripout %s.ipynb" % nbname) 15 | 16 | 17 | if __name__ == "__main__": 18 | 19 | for nbname in sys.argv[1:]: 20 | convert_nb(nbname) 21 | -------------------------------------------------------------------------------- /doc/tutorial/tools/nbstripout: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """strip outputs from an IPython Notebook 3 | 4 | Opens a notebook, strips its output, and writes the outputless version to the original file. 5 | 6 | Useful mainly as a git pre-commit hook for users who don't want to track output in VCS. 7 | 8 | This does mostly the same thing as the `Clear All Output` command in the notebook UI. 
9 | """ 10 | 11 | import io 12 | import sys 13 | 14 | from IPython.nbformat import current 15 | 16 | def strip_output(nb): 17 | """strip the outputs from a notebook object""" 18 | for cell in nb.worksheets[0].cells: 19 | if 'outputs' in cell: 20 | cell['outputs'] = [] 21 | if 'prompt_number' in cell: 22 | cell['prompt_number'] = None 23 | return nb 24 | 25 | if __name__ == '__main__': 26 | filename = sys.argv[1] 27 | with io.open(filename, 'r', encoding='utf8') as f: 28 | nb = current.read(f, 'json') 29 | nb = strip_output(nb) 30 | with io.open(filename, 'w', encoding='utf8') as f: 31 | current.write(nb, f, 'json') 32 | -------------------------------------------------------------------------------- /doc/whatsnew.rst: -------------------------------------------------------------------------------- 1 | .. _whatsnew: 2 | 3 | .. currentmodule:: trackpy 4 | 5 | What's new in the package 6 | ========================= 7 | 8 | A catalog of new features, improvements, and bug-fixes in each release. Follow links to the relevant GitHub issue or pull request for specific code changes and any related discussion. 9 | 10 | .. include:: releases/v0.6.txt 11 | 12 | .. include:: releases/v0.5.txt 13 | 14 | .. include:: releases/v0.4.txt 15 | -------------------------------------------------------------------------------- /doi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/doi.png -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | All examples and sample data have been moved to [trackpy-examples](https://github.com/soft-matter/trackpy-examples), a separate repository. 
2 | -------------------------------------------------------------------------------- /reproducibility_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script generates the data for the tests in test_reproducibilty. 3 | """ 4 | import os 5 | 6 | import trackpy as tp 7 | import numpy as np 8 | import pims 9 | 10 | version = 'VERSION' # adjust this 11 | 12 | pos_columns = ['y', 'x'] 13 | char_columns = ['mass', 'size', 'ecc', 'signal', 'raw_mass', 'ep'] 14 | testpath = os.path.join(os.path.dirname(tp.__file__), 'tests') 15 | impath = os.path.join(testpath, 'video', 'image_sequence', '*.png') 16 | npzpath = os.path.join(testpath, 'data', 17 | 'reproducibility_v{}.npz'.format(version)) 18 | 19 | v = pims.ImageSequence(impath) 20 | # take reader that provides uint8! 21 | assert np.issubdtype(v.dtype, np.uint8) 22 | v0 = tp.invert_image(v[0]) 23 | v0_bp = tp.bandpass(v0, lshort=1, llong=9) 24 | expected_find = tp.grey_dilation(v0, separation=9) 25 | expected_find_bandpass = tp.grey_dilation(v0_bp, separation=9) 26 | expected_refine = tp.refine_com(v0, v0_bp, radius=4, 27 | coords=expected_find_bandpass) 28 | expected_refine = expected_refine[expected_refine['mass'] >= 140] 29 | expected_refine_coords = expected_refine[pos_columns].values 30 | expected_locate = tp.locate(v0, diameter=9, minmass=140) 31 | expected_locate_coords = expected_locate[pos_columns].values 32 | df = tp.locate(v0, diameter=9) 33 | df = df[(df['x'] < 64) & (df['y'] < 64)] 34 | expected_characterize = df[pos_columns + char_columns].values 35 | 36 | f = tp.batch(tp.invert_image(v), 9, minmass=140) 37 | f_crop = f[(f['x'] < 320) & (f['x'] > 280) & (f['y'] < 280) & (f['x'] > 240)] 38 | f_linked = tp.link(f_crop, search_range=5, memory=0) 39 | f_linked_memory = tp.link(f_crop, search_range=5, memory=2) 40 | link_coords = f_linked[pos_columns + ['frame']].values 41 | expected_linked = f_linked['particle'].values 42 | expected_linked_memory = 
f_linked_memory['particle'].values 43 | 44 | np.savez_compressed(npzpath, expected_find, expected_find_bandpass, 45 | expected_refine_coords, expected_locate_coords, 46 | link_coords, expected_linked, expected_linked_memory, 47 | expected_characterize) 48 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description_file = README.md 3 | 4 | [versioneer] 5 | VCS = git 6 | style = pep440 7 | versionfile_source = trackpy/_version.py 8 | versionfile_build = trackpy/_version.py 9 | tag_prefix = v 10 | #parentdir_prefix = 11 | 12 | [tool:pytest] 13 | testpaths = 14 | trackpy 15 | 16 | [flake8] 17 | ignore = E203, E266, E501, W503 18 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import versioneer 3 | from setuptools import setup 4 | 5 | 6 | try: 7 | descr = open(os.path.join(os.path.dirname(__file__), 'README.md')).read() 8 | except OSError: 9 | descr = '' 10 | 11 | # In some cases, the numpy include path is not present by default. 12 | # Let's try to obtain it. 
13 | try: 14 | import numpy 15 | except ImportError: 16 | ext_include_dirs = [] 17 | else: 18 | ext_include_dirs = [numpy.get_include(),] 19 | 20 | setup_parameters = dict( 21 | name = "trackpy", 22 | version = versioneer.get_version(), 23 | cmdclass = versioneer.get_cmdclass(), 24 | description = "particle-tracking toolkit", 25 | author = "Trackpy Contributors", 26 | author_email = "daniel.b.allan@gmail.com", 27 | url = "https://github.com/soft-matter/trackpy", 28 | install_requires = ['numpy>=1.18', 'scipy>=1.4', 'pandas>=1', 'pyyaml', 'matplotlib', "looseversion>=1.0.1"], 29 | extras_require={"test": "pytest"}, 30 | python_requires=">=3.8", 31 | classifiers=[ 32 | "Programming Language :: Python :: 3", 33 | ], 34 | packages = ['trackpy', 'trackpy.refine', 'trackpy.linking', 'trackpy.locate_functions'], 35 | long_description = descr, 36 | long_description_content_type='text/markdown' 37 | ) 38 | 39 | setup(**setup_parameters) 40 | -------------------------------------------------------------------------------- /soft-matter-docs-deploy.enc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/soft-matter-docs-deploy.enc -------------------------------------------------------------------------------- /test_perf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | CURDIR=$(pwd) 4 | BASEDIR=$(readlink -f $(dirname $0 )) 5 | 6 | python "$BASEDIR"/vb_suite/test_perf.py $@ 7 | -------------------------------------------------------------------------------- /trackpy/__init__.py: -------------------------------------------------------------------------------- 1 | # Configure a logger from trackpy. 2 | # This must be done before utils is imported. 
3 | import logging 4 | logger = logging.getLogger(__name__) 5 | 6 | 7 | from ._version import get_versions 8 | __version__ = get_versions()['version'] 9 | del get_versions 10 | 11 | from trackpy.api import * 12 | 13 | handle_logging() 14 | -------------------------------------------------------------------------------- /trackpy/api.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | from .find import percentile_threshold, grey_dilation 4 | from .motion import msd, imsd, emsd, compute_drift, subtract_drift, \ 5 | proximity, vanhove, relate_frames, velocity_corr, \ 6 | direction_corr, is_typical, diagonal_size 7 | from .static import proximity, pair_correlation_2d, pair_correlation_3d, \ 8 | cluster 9 | from .plots import annotate, annotate3d, plot_traj, ptraj, \ 10 | plot_displacements, subpx_bias, mass_size, mass_ecc, \ 11 | scatter, scatter3d, plot_traj3d, ptraj3d, plot_density_profile 12 | from .linking import (link, link_df, link_iter, link_df_iter, 13 | find_link, find_link_iter, link_partial, 14 | reconnect_traj_patch, 15 | SubnetOversizeException, UnknownLinkingError) 16 | from .filtering import filter_stubs, filter_clusters, filter 17 | from .feature import locate, batch, local_maxima, \ 18 | estimate_mass, estimate_size, minmass_v03_change, minmass_v04_change 19 | from .preprocessing import bandpass, invert_image 20 | from .framewise_data import FramewiseData, PandasHDFStore, PandasHDFStoreBig, \ 21 | PandasHDFStoreSingleNode 22 | from .locate_functions import locate_brightfield_ring 23 | from .refine import refine_com, refine_leastsq 24 | from . import predict 25 | from . import utils 26 | from . 
import artificial 27 | from .utils import handle_logging, ignore_logging, quiet 28 | from .try_numba import try_numba_jit, enable_numba, disable_numba 29 | -------------------------------------------------------------------------------- /trackpy/diag.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import importlib 3 | from collections import OrderedDict 4 | 5 | from . import try_numba 6 | from . import preprocessing 7 | from . import __version__ 8 | 9 | 10 | def performance_report(): 11 | """Display summary of which optional speedups are installed/enabled""" 12 | print("Yes, but could it be faster?") 13 | if try_numba.NUMBA_AVAILABLE: 14 | print("FAST: numba is available and enabled " 15 | "(fast subnets and feature-finding).") 16 | else: 17 | print("SLOW: numba was not found") 18 | 19 | 20 | def dependencies(): 21 | """ 22 | Give the version of each of the dependencies -- useful for bug reports. 23 | 24 | Returns 25 | ------- 26 | result : dict 27 | mapping the name of each package to its version string or, if an 28 | optional dependency is not installed, None 29 | """ 30 | packages = ['numpy', 'scipy', 'matplotlib', 'pandas', 31 | 'sklearn', 'pyyaml', 'tables', 'numba', 'pims'] 32 | result = OrderedDict() 33 | 34 | # trackpy itself comes first 35 | result['trackpy'] = __version__ 36 | 37 | for package_name in packages: 38 | try: 39 | package = importlib.import_module(package_name) 40 | except ImportError: 41 | result[package_name] = None 42 | else: 43 | result[package_name] = package.__version__ 44 | 45 | # Build Python version string 46 | version_info = sys.version_info 47 | version_string = '.'.join(map(str, [version_info[0], version_info[1], 48 | version_info[2]])) 49 | result['python'] = version_string 50 | 51 | return result 52 | -------------------------------------------------------------------------------- /trackpy/filtering.py: 
-------------------------------------------------------------------------------- 1 | """Simple functions that eliminate spurrious trajectories 2 | by wrapping pandas group-by and filter capabilities.""" 3 | 4 | __all__ = ['filter_stubs', 'filter_clusters', 'filter'] 5 | 6 | 7 | def filter_stubs(tracks, threshold=100): 8 | """Filter out trajectories with few points. They are often spurious. 9 | 10 | Parameters 11 | ---------- 12 | tracks : DataFrame 13 | must include columns named 'frame' and 'particle' 14 | threshold : integer, default 100 15 | minimum number of points (video frames) to survive 16 | 17 | Returns 18 | ------- 19 | a subset of tracks 20 | """ 21 | try: 22 | tracks['frame'] 23 | tracks['particle'] 24 | except KeyError: 25 | raise ValueError("Tracks must contain columns 'frame' and 'particle'.") 26 | grouped = tracks.reset_index(drop=True).groupby('particle') 27 | filtered = grouped.filter(lambda x: x.frame.count() >= threshold) 28 | return filtered.set_index('frame', drop=False) 29 | 30 | 31 | def filter_clusters(tracks, quantile=0.8, threshold=None): 32 | """Filter out trajectories with a mean particle size above a given quantile. 33 | 34 | Parameters 35 | ---------- 36 | tracks : DataFrame 37 | must include columns named 'particle' and 'size' 38 | quantile : number between 0 and 1 39 | quantile of particle 'size' above which to cut off 40 | threshold : number 41 | If specified, ignore quantile. 
42 | 43 | Returns 44 | ------- 45 | a subset of tracks 46 | """ 47 | try: 48 | tracks['frame'] 49 | tracks['particle'] 50 | except KeyError: 51 | raise ValueError("Tracks must contain columns 'frame' and 'particle'.") 52 | if threshold is None: 53 | threshold = tracks['size'].quantile(quantile) 54 | 55 | f = lambda x: x['size'].mean() < threshold # filtering function 56 | grouped = tracks.reset_index(drop=True).groupby('particle') 57 | filtered = grouped.filter(f) 58 | return filtered.set_index('frame', drop=False) 59 | 60 | 61 | def filter(tracks, condition_func): 62 | """A workaround for a bug in pandas 0.12 63 | 64 | Parameters 65 | ---------- 66 | tracks : DataFrame 67 | must include column named 'particle' 68 | condition_func : function 69 | The function is applied to each group of data. It must 70 | return True or False. 71 | 72 | Returns 73 | ------- 74 | DataFrame 75 | a subset of tracks 76 | """ 77 | grouped = tracks.reset_index(drop=True).groupby('particle') 78 | filtered = grouped.filter(condition_func) 79 | return filtered.set_index('frame', drop=False) 80 | 81 | 82 | bust_ghosts = filter_stubs 83 | bust_clusters = filter_clusters 84 | -------------------------------------------------------------------------------- /trackpy/find.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | import logging 3 | 4 | import numpy as np 5 | import pandas as pd 6 | from scipy import ndimage 7 | from scipy.spatial import cKDTree 8 | 9 | from .utils import validate_tuple 10 | from .masks import binary_mask 11 | from .preprocessing import convert_to_int 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | def where_close(pos, separation, intensity=None): 17 | """ Returns indices of features that are closer than separation from other 18 | features. 
def where_close(pos, separation, intensity=None):
    """ Returns indices of features that are closer than separation from other
    features. When intensity is given, the one with the lowest intensity is
    returned: else the most topleft is returned (to avoid randomness)"""
    if len(pos) == 0:
        return []
    separation = validate_tuple(separation, pos.shape[1])
    if any([s == 0 for s in separation]):
        return []
    # Rescale positions so that offending pairs lie within distance 1.
    if isinstance(pos, pd.DataFrame):
        pos_rescaled = pos.values / separation
    else:
        pos_rescaled = pos / separation
    pairs = cKDTree(pos_rescaled, 30).query_pairs(1 - 1e-7)
    if len(pairs) == 0:
        return []
    left = np.fromiter((pair[0] for pair in pairs), dtype=int)
    right = np.fromiter((pair[1] for pair in pairs), dtype=int)

    def _farther_from_topleft(i0, i1):
        # Of each pair, choose the index whose rescaled coordinate sum is
        # larger, i.e. the one farther from the top-left corner.
        return np.where(np.sum(pos_rescaled[i0], 1) >
                        np.sum(pos_rescaled[i1], 1),
                        i1, i0)

    if intensity is None:
        to_drop = _farther_from_topleft(left, right)
    else:
        intensity = np.asarray(intensity)
        intensity_left = intensity[left]
        intensity_right = intensity[right]
        # Drop the dimmer of each pair; break ties by position.
        to_drop = np.where(intensity_left > intensity_right, right, left)
        ties = intensity_left == intensity_right
        if np.any(ties):
            to_drop[ties] = _farther_from_topleft(left[ties], right[ties])
    return np.unique(to_drop)


def drop_close(pos, separation, intensity=None):
    """ Removes features that are closer than separation from other features.
    When intensity is given, the one with the lowest intensity is dropped:
    else the most topleft is dropped (to avoid randomness)"""
    return np.delete(pos, where_close(pos, separation, intensity), axis=0)


def percentile_threshold(image, percentile):
    """Find grayscale threshold based on distribution in image.

    Returns the given percentile of the nonzero pixels; NaN when the image
    is completely black."""
    nonzero_pixels = image[np.nonzero(image)]
    if len(nonzero_pixels) == 0:
        return np.nan
    return np.percentile(nonzero_pixels, percentile)
does nothing if image is already of integer type 99 | factor, image = convert_to_int(image, dtype=np.uint8) 100 | 101 | ndim = image.ndim 102 | separation = validate_tuple(separation, ndim) 103 | if margin is None: 104 | margin = tuple([int(s / 2) for s in separation]) 105 | 106 | # Compute a threshold based on percentile. 107 | threshold = percentile_threshold(image, percentile) 108 | if np.isnan(threshold): 109 | warnings.warn("Image is completely black.", UserWarning) 110 | return np.empty((0, ndim)) 111 | 112 | # Find the largest box that fits inside the ellipse given by separation 113 | size = [int(2 * s / np.sqrt(ndim)) for s in separation] 114 | 115 | # The intersection of the image with its dilation gives local maxima. 116 | dilation = ndimage.grey_dilation(image, size, mode='constant') 117 | maxima = (image == dilation) & (image > threshold) 118 | if np.sum(maxima) == 0: 119 | warnings.warn("Image contains no local maxima.", UserWarning) 120 | return np.empty((0, ndim)) 121 | 122 | pos = np.vstack(np.where(maxima)).T 123 | 124 | # Do not accept peaks near the edges. 125 | shape = np.array(image.shape) 126 | near_edge = np.any((pos < margin) | (pos > (shape - margin - 1)), 1) 127 | pos = pos[~near_edge] 128 | 129 | if len(pos) == 0: 130 | warnings.warn("All local maxima were in the margins.", UserWarning) 131 | return np.empty((0, ndim)) 132 | 133 | # Remove local maxima that are too close to each other 134 | if precise: 135 | pos = drop_close(pos, separation, image[maxima][~near_edge]) 136 | 137 | return pos 138 | 139 | 140 | def grey_dilation_legacy(image, separation, percentile=64, margin=None): 141 | """Find local maxima whose brightness is above a given percentile. 142 | 143 | Parameters 144 | ---------- 145 | separation : minimum separation between maxima 146 | percentile : chooses minimum greyscale value for a local maximum 147 | margin : zone of exclusion at edges of image. Defaults to radius. 148 | A smarter value is set by locate(). 
149 | 150 | See Also 151 | -------- 152 | grey_dilation : faster local maxima finding routine 153 | """ 154 | if margin is None: 155 | margin = separation 156 | 157 | ndim = image.ndim 158 | # Compute a threshold based on percentile. 159 | threshold = percentile_threshold(image, percentile) 160 | if np.isnan(threshold): 161 | warnings.warn("Image is completely black.", UserWarning) 162 | return np.empty((0, ndim)) 163 | 164 | if not np.issubdtype(image.dtype, np.integer): 165 | factor = 255 / image.max() 166 | image = (factor * image.clip(min=0.)).astype(np.uint8) 167 | 168 | # The intersection of the image with its dilation gives local maxima 169 | footprint = binary_mask(separation, ndim) 170 | dilation = ndimage.grey_dilation(image, footprint=footprint, 171 | mode='constant') 172 | maxima = np.vstack(np.where((image == dilation) & (image > threshold))).T 173 | if not np.size(maxima) > 0: 174 | warnings.warn("Image contains no local maxima.", UserWarning) 175 | return np.empty((0, ndim)) 176 | 177 | # Do not accept peaks near the edges. 178 | shape = np.array(image.shape) 179 | near_edge = np.any((maxima < margin) | (maxima > (shape - margin - 1)), 1) 180 | maxima = maxima[~near_edge] 181 | if not np.size(maxima) > 0: 182 | warnings.warn("All local maxima were in the margins.", UserWarning) 183 | 184 | # Return coords in as a numpy array shaped so it can be passed directly 185 | # to the DataFrame constructor. 186 | return maxima 187 | -------------------------------------------------------------------------------- /trackpy/framewise_data.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from abc import ABCMeta, abstractmethod, abstractproperty 4 | import warnings 5 | 6 | import pandas as pd 7 | 8 | from .utils import pandas_concat 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | class FramewiseData: 14 | "Abstract base class defining a data container with framewise access." 
class FramewiseData(metaclass=ABCMeta):
    """Abstract base class defining a data container with framewise access.

    Subclasses must implement ``put``, ``get``, ``close`` and the
    ``frames`` and ``t_column`` properties.
    """
    # NOTE: previously this class set ``__metaclass__ = ABCMeta``, a Python 2
    # idiom that is silently ignored in Python 3, so the abstract-method
    # contract was not actually enforced. ``metaclass=ABCMeta`` fixes that.

    @abstractmethod
    def put(self, df):
        """Store a single frame's DataFrame."""
        pass

    @abstractmethod
    def get(self, frame_no):
        """Return the DataFrame for one frame."""
        pass

    @property
    @abstractmethod
    def frames(self):
        """Sorted iterable of the frame numbers present."""
        pass

    @abstractmethod
    def close(self):
        """Release the underlying resource."""
        pass

    @property
    @abstractmethod
    def t_column(self):
        """Name of the column holding the frame number."""
        pass

    def __getitem__(self, frame_no):
        return self.get(frame_no)

    def __len__(self):
        return len(self.frames)

    def dump(self, N=None):
        """Return data from all, or the first N, frames in a single DataFrame

        Parameters
        ----------
        N : integer
            optional; if None, return all frames

        Returns
        -------
        DataFrame
        """
        if N is None:
            return pandas_concat(iter(self))
        else:
            i = iter(self)
            return pandas_concat(next(i) for _ in range(N))

    @property
    def max_frame(self):
        return max(self.frames)

    def _validate(self, df):
        # Each stored DataFrame must carry exactly one frame number.
        if self.t_column not in df.columns:
            raise ValueError("Cannot write frame without a column "
                             "called {}".format(self.t_column))
        if df[self.t_column].nunique() != 1:
            raise ValueError("Found multiple values for 'frame'. "
                             "Write one frame at a time.")

    def __iter__(self):
        return self._build_generator()

    def _build_generator(self):
        for frame_no in self.frames:
            yield self.get(frame_no)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

KEY_PREFIX = 'Frame_'
len_key_prefix = len(KEY_PREFIX)


def code_key(frame_no):
    "Turn the frame_no into a 'natural name' string idiomatic of HDFStore"
    key = '{}{}'.format(KEY_PREFIX, frame_no)
    return key


def decode_key(key):
    "Inverse of code_key: extract the integer frame number from a node name."
    frame_no = int(key[len_key_prefix:])
    return frame_no


class PandasHDFStore(FramewiseData):
    """An interface to an HDF5 file with framewise access, using pandas.

    Save each frame's data to a node in a pandas HDFStore.

    Any additional keyword arguments to the constructor are passed to
    pandas.HDFStore().
    """

    def __init__(self, filename, mode='a', t_column='frame', **kwargs):
        self.filename = os.path.abspath(filename)
        self._t_column = t_column
        self.store = pd.HDFStore(self.filename, mode, **kwargs)

    @property
    def t_column(self):
        return self._t_column

    @property
    def max_frame(self):
        return max(self.frames)

    def put(self, df):
        if len(df) == 0:
            warnings.warn('An empty DataFrame was passed to put(). Continuing.')
            return
        frame_no = df[self.t_column].values[0]  # validated to be all the same
        key = code_key(frame_no)
        # Store data as tabular instead of fixed-format.
        # Make sure to remove any preexisting data, so we don't really 'append'.
        try:
            self.store.remove(key)
        except KeyError:
            pass
        self.store.put(key, df, format='table')

    def get(self, frame_no):
        key = code_key(frame_no)
        frame = self.store.get(key)
        return frame

    @property
    def frames(self):
        """Returns sorted list of integer frame numbers in file"""
        return self._get_frame_nos()

    def _get_frame_nos(self):
        """Returns sorted list of integer frame numbers in file"""
        # Pandas' store.keys() scans the entire file looking for stored Pandas
        # structures. This is very slow for large numbers of frames.
        # Instead, scan the root level of the file for nodes with names
        # matching our scheme; we know they are DataFrames.
        r = [decode_key(key) for key in self.store.root._v_children.keys() if
             key.startswith(KEY_PREFIX)]
        r.sort()
        return r

    def close(self):
        self.store.close()
class PandasHDFStoreBig(PandasHDFStore):
    """Like PandasHDFStore, but keeps a cache of frame numbers.

    This can give a large performance boost when a file contains thousands
    of frames.

    If a file was made in PandasHDFStore, opening it with this class
    and then closing it will add a cache (if mode != 'r').

    Any additional keyword arguments to the constructor are passed to
    pandas.HDFStore().
    """

    def __init__(self, filename, mode='a', t_column='frame', **kwargs):
        self._CACHE_NAME = '_Frames_Cache'
        self._frames_cache = None
        # Whether the in-memory cache still needs to be written to disk.
        self._cache_dirty = False
        super().__init__(filename, mode, t_column,
                         **kwargs)

    @property
    def frames(self):
        # Consult the in-memory cache, then the on-disk cache, and only
        # scan the whole file as a last resort.
        if self._frames_cache is None:
            try:
                cached = self.store[self._CACHE_NAME].index.values
            except KeyError:
                self._frames_cache = self._get_frame_nos()
                self._cache_dirty = True  # In memory, but not in file
            else:
                self._frames_cache = list(cached)
                self._cache_dirty = False
        return self._frames_cache

    def put(self, df):
        # Any write makes the cached frame list stale.
        self._invalidate_cache()
        super().put(df)

    def rebuild_cache(self):
        """Delete cache on disk and rebuild it."""
        self._invalidate_cache()
        _ = self.frames  # Compute cache
        self._flush_cache()

    def _invalidate_cache(self):
        self._frames_cache = None
        try:
            del self.store[self._CACHE_NAME]
        except KeyError:
            pass

    def _flush_cache(self):
        """Writes frame cache if dirty and file is writable."""
        if (self._frames_cache is not None and self._cache_dirty
                and self.store.root._v_file._iswritable()):
            self.store[self._CACHE_NAME] = pd.DataFrame(
                {'dummy': 1}, index=self._frames_cache)
            self._cache_dirty = False

    def close(self):
        """Updates cache, writes if necessary, then closes file."""
        if self.store.root._v_file._iswritable():
            _ = self.frames  # Compute cache
            self._flush_cache()
        super().close()
232 | 233 | This implementation is more complex than PandasHDFStore, 234 | but it simplifies (speeds up?) cross-frame queries, 235 | like queries for a single probe's entire trajectory. 236 | 237 | Any additional keyword arguments to the constructor are passed to 238 | pandas.HDFStore(). 239 | """ 240 | 241 | def __init__(self, filename, key='FrameData', mode='a', t_column='frame', 242 | use_tabular_copy=False, **kwargs): 243 | self.filename = os.path.abspath(filename) 244 | self.key = key 245 | self._t_column = t_column 246 | self.store = pd.HDFStore(self.filename, mode, **kwargs) 247 | 248 | store = pd.HDFStore(self.filename) 249 | try: 250 | store[self.key] 251 | except KeyError: 252 | pass 253 | else: 254 | self._validate_node(use_tabular_copy) 255 | store.close() 256 | 257 | @property 258 | def t_column(self): 259 | return self._t_column 260 | 261 | def put(self, df): 262 | if len(df) == 0: 263 | warnings.warn('An empty DataFrame was passed to put(). Continuing.') 264 | return 265 | self._validate(df) 266 | self.store.append(self.key, df, data_columns=True) 267 | 268 | def get(self, frame_no): 269 | frame = self.store.select(self.key, '{} == {}'.format( 270 | self._t_column, frame_no)) 271 | return frame 272 | 273 | def dump(self, N=None): 274 | """Return data from all, or the first N, frames in a single DataFrame 275 | 276 | Parameters 277 | ---------- 278 | N : integer 279 | optional; if None, return all frames 280 | 281 | Returns 282 | ------- 283 | DataFrame 284 | """ 285 | if N is None: 286 | return self.store.select(self.key) 287 | else: 288 | Nth_frame = self.frames[N - 1] 289 | return self.store.select(self.key, '{} <= {}'.format( 290 | self._t_column, Nth_frame)) 291 | 292 | def close(self): 293 | self.store.close() 294 | 295 | def __del__(self): 296 | if hasattr(self, 'store'): 297 | self.close() 298 | 299 | @property 300 | def frames(self): 301 | """Returns sorted list of integer frame numbers in file""" 302 | # I assume one column can fit in 
memory, which is not ideal. 303 | # Chunking does not seem to be implemented for select_column. 304 | frame_nos = self.store.select_column(self.key, self.t_column).unique() 305 | frame_nos.sort() 306 | return frame_nos 307 | 308 | def _validate_node(self, use_tabular_copy): 309 | # The HDFStore might be non-tabular, which means we cannot select a 310 | # subset, and this whole structure will not work. 311 | # For convenience, this can rewrite the table into a tabular node. 312 | if use_tabular_copy: 313 | self.key = _make_tabular_copy(self.filename, self.key) 314 | 315 | pandas_type = getattr(getattr(getattr( 316 | self.store._handle.root, self.key, None), '_v_attrs', None), 317 | 'pandas_type', None) 318 | if not pandas_type == 'frame_table': 319 | raise ValueError("This node is not tabular. Call with " 320 | "use_tabular_copy=True to proceed.") 321 | 322 | 323 | def _make_tabular_copy(store, key): 324 | """Copy the contents nontabular node in a pandas HDFStore 325 | into a tabular node""" 326 | tabular_key = key + '/tabular' 327 | logger.info("Making a tabular copy of %s at %s", (key, tabular_key)) 328 | store.append(tabular_key, store.get(key), data_columns=True) 329 | return tabular_key 330 | -------------------------------------------------------------------------------- /trackpy/linking/__init__.py: -------------------------------------------------------------------------------- 1 | from .linking import (link, link_df, link_iter, link_df_iter, 2 | logger, Linker, adaptive_link_wrap) 3 | from .partial import link_partial, reconnect_traj_patch 4 | from .find_link import find_link, find_link_iter 5 | from .utils import verify_integrity, SubnetOversizeException, TrackUnstored, \ 6 | Point, UnknownLinkingError 7 | from . 
import legacy, subnet, subnetlinker, find_link, linking, utils 8 | -------------------------------------------------------------------------------- /trackpy/linking/partial.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import warnings 3 | import logging 4 | 5 | import numpy as np 6 | 7 | from ..utils import guess_pos_columns, validate_tuple, pandas_sort 8 | from .linking import link_iter 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | def coords_from_df_partial(df, pos_columns, t_column, link_frame_nos): 14 | for frame_no in link_frame_nos: 15 | yield frame_no, df.loc[df[t_column] == frame_no, pos_columns].values 16 | 17 | 18 | def link_partial(f, search_range, link_range, 19 | pos_columns=None, t_column='frame', **kwargs): 20 | """Patch the trajectories in a DataFrame by linking only a range of frames 21 | 22 | A dataset can be divided into several patches (time intervals) and 23 | linked separately with this function, for e.g. parallel processing. The 24 | results can then be stitched back together with reconnect_traj_patch(). 25 | 26 | Another application is to link a portion of a dataset again with adjusted 27 | parameters. 28 | 29 | Parameters 30 | ---------- 31 | f : DataFrame 32 | The DataFrame must include any number of column(s) for position and a 33 | column of frame numbers. By default, 'x' and 'y' are expected for 34 | position, and 'frame' is expected for frame number. See below for 35 | options to use custom column names. 36 | search_range : float or tuple 37 | the maximum distance features can move between frames, 38 | optionally per dimension 39 | link_range : tuple of 2 ints 40 | Only frames in the range(start, stop) will be analyzed. 41 | memory : integer, optional 42 | the maximum number of frames during which a feature can vanish, 43 | then reappear nearby, and be considered the same particle. 0 by default. 
44 | pos_columns : list of str, optional 45 | Default is ['y', 'x'], or ['z', 'y', 'x'] when 'z' is present in f 46 | t_column : str, optional 47 | Default is 'frame' 48 | predictor : function, optional 49 | Improve performance by guessing where a particle will be in 50 | the next frame. 51 | For examples of how this works, see the "predict" module. 52 | adaptive_stop : float, optional 53 | If not None, when encountering an oversize subnet, retry by progressively 54 | reducing search_range until the subnet is solvable. If search_range 55 | becomes <= adaptive_stop, give up and raise a SubnetOversizeException. 56 | adaptive_step : float, optional 57 | Reduce search_range by multiplying it by this factor. 58 | link_strategy : {'recursive', 'nonrecursive', 'numba', 'drop', 'auto'} 59 | algorithm used to resolve subnetworks of nearby particles 60 | 'auto' uses numba if available 61 | 'drop' causes particles in subnetworks to go unlinked 62 | 63 | Returns 64 | ------- 65 | DataFrame with added column 'particle' containing trajectory labels. 66 | The t_column (by default: 'frame') will be coerced to integer. 67 | 68 | Notes 69 | ----- 70 | The memory option cannot retain particles at the boundaries between 71 | patches. Likewise, if a predictor depends on past trajectories, it may 72 | work poorly at the start of each patch. 73 | 74 | Examples 75 | -------- 76 | We purposely define a DataFrame with erroneously linked particles: 77 | 78 | >>> df = pd.DataFrame({ 79 | ... 'x': [5., 10., 15., 20., 10., 14., 19.], 80 | ... 'y': [0., 0., 0, 0., 2., 4., 6.], 81 | ... 'particle': [5, 5, 8, 8, 8, 5, 5], 82 | ... 'frame': [0, 1, 2, 3, 1, 2, 3] 83 | ... 
}) 84 | 85 | We can fix the missed link at frame 1 by calling link_partial like so: 86 | 87 | >>> link_partial(df, search_range=20., link_range=(1, 2)) 88 | x y particle frame 89 | 0 5.0 0.0 5 0 90 | 1 10.0 0.0 5 1 91 | 4 10.0 2.0 8 1 92 | 2 15.0 0.0 5 2 93 | 5 14.0 4.0 8 2 94 | 3 20.0 0.0 5 3 95 | 6 19.0 6.0 8 3 96 | 97 | Note that this partial link remapped the particle indices after the 98 | link_range. This is done by the function ``reconnect_traj_patch`` 99 | 100 | See also 101 | -------- 102 | reconnect_traj_patch 103 | """ 104 | if pos_columns is None: 105 | pos_columns = guess_pos_columns(f) 106 | ndim = len(pos_columns) 107 | search_range = validate_tuple(search_range, ndim) 108 | if kwargs.get('memory', 0) > 0: 109 | warnings.warn("Particles are not memorized over patch edges.") 110 | 111 | full_range = (int(f[t_column].min()), int(f[t_column].max()) + 1) 112 | start, stop = link_range 113 | assert start < stop 114 | if start is None: 115 | start = full_range[0] 116 | elif start < full_range[0]: 117 | start = full_range[0] 118 | 119 | if stop is None: 120 | stop = full_range[1] 121 | elif stop > full_range[1]: 122 | stop = full_range[1] 123 | 124 | link_frame_nos = range(start, stop) 125 | 126 | # copy the dataframe 127 | f = f.copy() 128 | # coerce t_column to integer type 129 | if not np.issubdtype(f[t_column].dtype, np.integer): 130 | f[t_column] = f[t_column].astype(np.integer) 131 | # sort on the t_column 132 | pandas_sort(f, t_column, inplace=True) 133 | 134 | if 'particle' in f and (start > full_range[0] or stop < full_range[1]): 135 | f['_old_particle'] = f['particle'].copy() 136 | elif not 'particle' in f: 137 | f['particle'] = -1 138 | 139 | coords_iter = coords_from_df_partial(f, pos_columns, t_column, 140 | link_frame_nos) 141 | for i, _ids in link_iter(coords_iter, search_range, **kwargs): 142 | f.loc[f[t_column] == i, 'particle'] = _ids 143 | 144 | if '_old_particle' in f: 145 | reconnect_traj_patch(f, (start, stop), '_old_particle', 
t_column) 146 | f.drop('_old_particle', axis=1, inplace=True) 147 | 148 | return f 149 | 150 | 151 | def reconnect_traj_patch(f, link_range, old_particle_column, t_column='frame'): 152 | """Reconnect the trajectory inside a range of frames to the trajectories 153 | outside the range. 154 | 155 | Requires a column with the original particle indices. Does not work in 156 | combination with memory. Changes the provided DataFrame inplace. 157 | 158 | See also 159 | -------- 160 | link_partial 161 | """ 162 | start, stop = link_range 163 | assert start < stop 164 | mapping_patch = dict() 165 | mapping_after = dict() 166 | 167 | # reconnect at first frame_no 168 | for p_new, p_old in f.loc[f[t_column] == start, 169 | ['particle', old_particle_column]].values: 170 | if p_old < 0: 171 | continue 172 | # renumber the track inside the patch to the number before the patch 173 | mapping_patch[p_new] = p_old 174 | 175 | # reconnect at last frame_no 176 | for p_new, p_old in f.loc[f[t_column] == stop - 1, 177 | ['particle', old_particle_column]].values: 178 | if p_old < 0: 179 | continue 180 | if p_new in mapping_patch: 181 | # already connected to a track before patch: renumber after the 182 | # patch 183 | mapping_after[p_old] = mapping_patch[p_new] 184 | else: 185 | # the track is apparently created inside the patch: use the number 186 | # after the patch 187 | mapping_patch[p_new] = p_old 188 | 189 | # renumber possible doubles inside the patch 190 | # the following ids cannot be used as new ids inside the patch: 191 | in_patch = (f[t_column] >= start) & (f[t_column] < stop) 192 | remaining = set(f.loc[in_patch, 'particle'].values) - set(mapping_patch) 193 | if len(remaining) > 0: 194 | used = set(f.loc[~in_patch, 'particle'].values) 195 | gen_ids = itertools.filterfalse(lambda x: x in used, itertools.count()) 196 | 197 | for p_new, p_mapped in zip(remaining, gen_ids): 198 | mapping_patch[p_new] = p_mapped 199 | 200 | f.loc[in_patch, 'particle'] = f.loc[in_patch, 
def coords_from_df(df, pos_columns, t_column):
    """A generator that returns ndarrays of coords from a DataFrame. Assumes
    t_column to be of integer type. Float-typed integers are also accepted.

    Empty frames will be returned as empty arrays of shape (0, ndim)."""

    # Much faster than DataFrame.groupby for this access pattern.
    n_dims = len(pos_columns)
    frame_nos = df[t_column].values
    coords = df[pos_columns].values

    # Stable sort keeps the original row order within each frame.
    order = np.argsort(frame_nos, kind="mergesort")
    frame_nos = frame_nos[order]
    coords = coords[order]

    present, counts = np.unique(frame_nos, return_counts=True)
    chunks = np.split(coords, np.cumsum(counts)[:-1])
    chunk_iter = iter(zip(present, chunks))

    # Walk the full frame range, emitting an empty array for absent frames.
    next_frame, next_chunk = next(chunk_iter)
    for frame_no in range(present[0], present[-1] + 1):
        if frame_no == next_frame:
            yield frame_no, next_chunk
            next_frame, next_chunk = next(chunk_iter, (None, None))
        else:
            yield frame_no, np.empty((0, n_dims))
    def __init__(self, t, pos, id=None, extra_data=None):
        """Create a point at position ``pos`` in frame ``t``.

        Parameters
        ----------
        t : frame number
        pos : array-like coordinates; stored as an ndarray
        id : optional external index for this feature
        extra_data : dict, optional
            Extra per-feature values carried along during linking.
        """
        self._track = None  # assigned later via add_to_track()
        # NOTE(review): assumes reset_counter() has initialized cls.counter
        self.uuid = next(self.counter)  # unique id for __hash__
        self.t = t
        self.pos = np.asarray(pos)
        self.id = id
        if extra_data is None:
            self.extra_data = dict()
        else:
            self.extra_data = extra_data
        # self.back_cands = []
        self.forward_cands = []  # candidate links toward the next frame
        self.subnet = None
        self.relocate_neighbors = []
class TrackUnstored:
    """
    Base class for objects to represent linked tracks.

    Includes logic for adding features to the track, but does
    not store the track's particles in memory.

    Parameters
    ----------
    point : Point or None, optional
        The first feature in the track
    """
    __slots__ = ['id', 'indx', '__dict__']

    @classmethod
    def reset_counter(cls, c=0):
        """Restart the shared track-id counter at ``c``."""
        cls.counter = itertools.count(c)

    def __init__(self, point=None):
        self.id = next(self.counter)
        self.indx = self.id  # redundant, but like trackpy
        if point is not None:
            self.add_point(point)

    def add_point(self, point):
        """Attach ``point`` to this track."""
        point.add_to_track(self)

    def incr_memory(self):
        """Mark this track as being remembered for one more frame.

        For diagnostic purposes."""
        self._remembered = getattr(self, '_remembered', 0) + 1

    def report_memory(self):
        """Report and reset the memory counter (when a link is made).

        For diagnostic purposes."""
        remembered = getattr(self, '_remembered', 0)
        if remembered:
            del self._remembered
        return remembered

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.indx}>"
def locate_brightfield_ring(raw_image, diameter, separation=None,
                            previous_coords=None, processes='auto', **kwargs):
    """Locate particles imaged in brightfield mode of some approximate size in
    an image.

    Preprocess the image by performing a band pass and a threshold. Locate all
    peaks of brightness, then find the particle position by fitting the ring of
    dark pixels around the bright inner part of the particle.

    Parameters
    ----------
    raw_image : array
        any N-dimensional image
    diameter : odd integer or tuple of odd integers
        This may be a single number or a tuple giving the feature's
        extent in each dimension, useful when the dimensions do not have
        equal resolution (e.g. confocal microscopy). The tuple order is the
        same as the image shape, conventionally (z, y, x) or (y, x). The
        number(s) must be odd integers. When in doubt, round up.
    separation : float or tuple
        Minimum separation between features.
        Default is diameter + 1. May be a tuple, see diameter for details.
    previous_coords : DataFrame([x, y, r])
        Optional previous particle positions from the preceding frame to use as
        starting point for the refinement instead of the intensity peaks.
    processes : integer or "auto"
        The number of processes to use in parallel. If <= 1, multiprocessing is
        disabled. If "auto", the number returned by ``os.cpu_count()`` is used.
    kwargs:
        Passed to the refine function.

    Returns
    -------
    DataFrame([x, y, r])
        where r means the radius of the fitted circle of dark pixels around
        the bright interior of the particle.

    See Also
    --------
    refine_brightfield_ring : performs the refinement of the particle position

    Notes
    -----
    Locate works with a coordinate system that has its origin at the center of
    pixel (0, 0). In almost all cases this will be the topleft pixel: the
    y-axis is pointing downwards.

    This is an implementation of an algorithm described in [1]_

    References
    ----------
    .. [1] M. Rinaldin, R.W. Verweij, I. Chakraborty and D.J. Kraft, Soft
       Matter, 2019, 15, 1345-1360, http://dx.doi.org/10.1039/C8SM01661E

    """
    # Validate parameters and set defaults.
    raw_image = np.squeeze(raw_image)
    shape = raw_image.shape
    ndim = len(shape)

    diameter = validate_tuple(diameter, ndim)
    diameter = tuple([float(x) for x in diameter])
    radius = tuple([x/2.0 for x in diameter])

    is_float_image = not np.issubdtype(raw_image.dtype, np.integer)

    if separation is None:
        separation = tuple([x for x in diameter])
    else:
        separation = validate_tuple(separation, ndim)

    # Check whether the image looks suspiciously like a color image.
    if 3 in shape or 4 in shape:
        dim = raw_image.ndim
        warnings.warn("I am interpreting the image as {}-dimensional. "
                      "If it is actually a {}-dimensional color image, "
                      "convert it to grayscale first.".format(dim, dim-1))

    image = raw_image

    # For optimal performance, coerce the image dtype to integer.
    if is_float_image:  # For float images, assume bitdepth of 8.
        dtype = np.uint8
    else:   # For integer images, take original dtype
        dtype = raw_image.dtype

    # Normalize_to_int does nothing if image is already of integer type.
    _, image = convert_to_int(image, dtype)

    pos_columns = default_pos_columns(image.ndim)

    # Starting positions: either the caller-supplied previous coordinates or
    # a fresh peak detection on the raw image.
    has_user_input = False
    if previous_coords is None or len(previous_coords) == 0:
        coords_df = locate(raw_image, diameter, separation=separation,
                           characterize=False)
        coords_df = coords_df[pos_columns]
    else:
        coords_df = previous_coords
        has_user_input = True

    if len(coords_df) == 0:
        warnings.warn("No particles found in the image before refinement.")
        return coords_df

    # Refine every candidate, possibly in parallel; results of None mean the
    # particle was lost during refinement and are dropped.
    pool, map_func = get_pool(processes)
    refined = []

    try:
        for result in map_func(_get_refined_coords,
                               [(coords, pos_columns, image, radius, kwargs,
                                 has_user_input)
                                for _, coords in coords_df.iterrows()]):
            if result is None:
                continue
            refined.append(result)
    finally:
        if pool:
            # Ensure correct termination of Pool
            pool.terminate()

    columns = np.unique(np.concatenate((pos_columns, ['r'],
                                        coords_df.columns)))
    if len(refined) == 0:
        warnings.warn("No particles found in the image after refinement.")
        return DataFrame(columns=columns)

    refined = DataFrame.from_dict(refined, orient='columns')
    refined.reset_index(drop=True, inplace=True)

    # Flat peaks return multiple nearby maxima. Eliminate duplicates.
    if np.all(np.greater(separation, 0)):
        to_drop = where_close(refined[pos_columns], separation)
        refined.drop(to_drop, axis=0, inplace=True)
        refined.reset_index(drop=True, inplace=True)

    # If this is a pims Frame object, it has a frame number.
    # Tag it on; this is helpful for parallelization.
    if hasattr(raw_image, 'frame_no') and raw_image.frame_no is not None:
        refined['frame'] = int(raw_image.frame_no)

    return refined
@memo
def binary_mask(radius, ndim):
    """Elliptical mask in a rectangular array.

    True on and inside the ellipsoid with the given semi-axes, False
    outside. Cached via ``memo``.
    """
    radius = validate_tuple(radius, ndim)
    axes = [np.arange(-rad, rad + 1) for rad in radius]
    if len(radius) == 1:
        grids = np.array([axes[0]])
    else:
        grids = np.array(np.meshgrid(*axes, indexing="ij"))
    # Sum of per-axis normalized squared coordinates; <= 1 is inside.
    normalized_sq = sum((grid / rad) ** 2 for grid, rad in zip(grids, radius))
    return normalized_sq <= 1
@memo
def x_squared_masks(radius, ndim):
    """Returns ndim masks with values x^2 inside radius and 0 outside.

    Mask i holds the squared coordinate along axis i, zeroed outside the
    ellipsoid defined by ``radius``. Cached via ``memo``.
    """
    radius = validate_tuple(radius, ndim)
    points = [np.arange(-rad, rad + 1) for rad in radius]
    if len(radius) > 1:
        coords = np.array(np.meshgrid(*points, indexing="ij"))
    else:
        coords = np.array([points[0]])
    # Per-axis normalized squared distances; sum(r) > 1 lies outside the
    # ellipsoid and is zeroed below.
    r = [(coord/rad)**2 for (coord, rad) in zip(coords, radius)]
    masks = np.asarray(coords**2, dtype=int)
    masks[:, sum(r) > 1] = 0
    return masks
def get_slice(coords, shape, radius):
    """Returns the slice and origin that belong to ``slice_image``.

    Parameters
    ----------
    coords : array-like
        Feature positions; rounded to integer pixels.
    shape : tuple
        Shape of the image being sliced.
    radius : number or tuple
        Margin around the features to include.

    Returns
    -------
    (slices, origin) where ``slices`` indexes the image and ``origin`` is the
    lower corner of the box. When no feature has pixels inside the image,
    returns empty slices and ``origin=None``.
    """
    # interpret parameters
    ndim = len(shape)
    radius = validate_tuple(radius, ndim)
    coords = np.atleast_2d(np.round(coords).astype(int))
    # drop features that have no pixels inside the image
    in_bounds = np.array([(coords[:, i] >= -r) & (coords[:, i] < sh + r)
                          for i, sh, r in zip(range(ndim), shape, radius)])
    coords = coords[np.all(in_bounds, axis=0)]
    # return if no coordinates are left
    if len(coords) == 0:
        return tuple([slice(None, 0)] * ndim), None
    # calculate the box
    lower = coords.min(axis=0) - radius
    upper = coords.max(axis=0) + radius + 1
    # calculate the slices, truncating the box to the image boundaries
    origin = [None] * ndim
    slices = [None] * ndim
    for i, sh, low, up in zip(range(ndim), shape, lower, upper):
        lower_bound_trunc = max(0, low)
        upper_bound_trunc = min(sh, up)
        slices[i] = slice(int(round(lower_bound_trunc)),
                          int(round(upper_bound_trunc)))
        origin[i] = lower_bound_trunc
    return tuple(slices), origin
def get_mask(pos, shape, radius, include_edge=True, return_masks=False):
    """ Create a binary mask that masks pixels farther than radius to all
    given feature positions.

    Optionally returns the masks that recover the individual feature pixels from
    a masked image, as follows: ``image[mask][masks_single[i]]``

    Parameters
    ----------
    pos : ndarray (N x 2 or N x 3)
        Feature positions
    shape : tuple
        The shape of the image
    radius : number or tuple
        Radius of the individual feature masks
    include_edge : boolean, optional
        Determine whether pixels at exactly one radius from a position are
        included. Default True.
    return_masks : boolean, optional
        Also return masks that recover the single features from a masked image.
        Default False.

    Returns
    -------
    ndarray containing a binary mask
    if return_masks==True, returns a tuple of [masks, masks_singles]
    """
    ndim = len(shape)
    radius = validate_tuple(radius, ndim)
    pos = np.atleast_2d(pos)

    # A pixel is inside a feature when its normalized squared distance to
    # that feature's position is <= 1 (or < 1 when the edge is excluded).
    # The transposes make the subtraction broadcast over the coordinate axis.
    if include_edge:
        in_mask = [np.sum(((np.indices(shape).T - p) / radius)**2, -1) <= 1
                   for p in pos]
    else:
        in_mask = [np.sum(((np.indices(shape).T - p) / radius)**2, -1) < 1
                   for p in pos]
    # Union of all single-feature masks.
    mask_total = np.any(in_mask, axis=0).T
    if return_masks:
        # For each feature, record which pixels of the union belong to it.
        masks_single = np.empty((len(pos), mask_total.sum()), dtype=bool)
        for i, _in_mask in enumerate(in_mask):
            masks_single[i] = _in_mask.T[mask_total]
        return mask_total, masks_single
    else:
        return mask_total
def mask_image(pos, image, radius, origin=None, invert=False,
               include_edge=None):
    """ Masks an image so that pixels farther than radius to all given feature
    positions become 0.

    Parameters
    ----------
    pos : ndarray
        Feature positions (N x 2 or N x 3)
    image : ndarray
    radius : number or tuple
        Radius of the individual feature masks
    origin : tuple, optional
        The topleft coordinate (origin) of the image.
    invert : boolean, optional
        If invert==True, the features instead of the background will become 0.
    include_edge : boolean, optional
        Determine whether pixels at exactly one radius from a position are
        included in the feature mask.
        Defaults to True if invert==False, and to False if invert==True.
    """
    # Edge pixels belong to the features unless we are blanking the
    # features themselves out.
    if include_edge is None:
        include_edge = not invert

    positions = pos
    if origin is not None:
        # Shift positions into the image's local coordinate frame.
        positions = np.atleast_2d(positions) - np.array(origin)[np.newaxis, :]

    keep = get_mask(positions, image.shape, radius, include_edge=include_edge)
    if invert:
        keep = ~keep

    return image * keep.astype(np.uint8)
def sort_positions(actual, expected):
    """Pair each expected position with its nearest actual position.

    Returns the query distances and the actual positions reordered to line
    up with ``expected``. Raises AssertionError when at least one actual
    feature was not matched by any expected position.
    """
    lookup = cKDTree(actual)
    deviations, nearest = lookup.query([expected])
    unmatched = set(range(len(actual))) - set(nearest[0])
    if unmatched:
        raise AssertionError("Position sorting failed. At least one feature is "
                             "very far from where it should be.")
    return deviations, actual[nearest][0]
    def _get_files(self, path_spec):
        """Resolve ``path_spec`` into a sorted list of file paths.

        Accepts an iterable of paths, a directory, or a glob pattern.
        Sets ``self._filepaths`` and ``self._count``.
        """
        # deal with if input is _not_ a string
        if not isinstance(path_spec, str):
            # assume it is iterable and off we go!
            self._filepaths = list(path_spec)
            self._count = len(self._filepaths)
            return

        self.pathname = os.path.abspath(path_spec)  # used by __repr__
        if os.path.isdir(path_spec):
            # A directory: take every file inside it.
            directory = path_spec
            filenames = os.listdir(directory)
            make_full_path = lambda filename: (
                os.path.abspath(os.path.join(directory, filename)))
            filepaths = list(map(make_full_path, filenames))
        else:
            # Otherwise treat the string as a glob pattern.
            filepaths = glob.glob(path_spec)
        self._filepaths = list(sorted(filepaths))
        self._count = len(self._filepaths)

        # If there were no matches, this was probably a user typo.
        if self._count == 0:
            raise OSError("No files were found matching that path.")
class CoordinateReader:
    """Generate a pims.FramesSequence-like object that draws features at
    given coordinates"""
    def __init__(self, f, shape, size, t=None, **kwargs):
        # f : DataFrame with a 'frame' column plus position columns.
        # shape : image shape; its length fixes the dimensionality.
        # size : feature size, passed to SimulatedImage.
        # t : optional explicit sequence of frame indices.
        self._f = f.copy()
        self.pos_columns = ['z', 'y', 'x'][-len(shape):]
        self.shape = shape
        self.size = size
        self.kwargs = kwargs
        self.im = SimulatedImage(shape, size, **self.kwargs)
        if t is None:
            # Default: frames 0 .. max frame number found in f.
            self._len = int(f['frame'].max() + 1)
            self._inds = range(self._len)
        else:
            self._len = len(t)
            self._inds = t

    def __len__(self):
        return self._len

    def __iter__(self):
        # this is actually a hack to get find_link working with float-typed
        # indices
        return (self.get_frame(i) for i in self._inds)

    def __getitem__(self, key):
        return self.get_frame(key)

    def get_frame(self, ind):
        """Draw all features belonging to frame ``ind`` on a blank image."""
        self.im.clear()
        pos = self._f.loc[self._f['frame'] == ind, self.pos_columns].values
        for _pos in pos:
            self.im.draw_feature(_pos)
        return TrackpyFrame(self.im(), frame_no=ind)

    @property
    def pixel_type(self):
        # dtype of the generated frames
        return self.im.dtype

    @property
    def frame_shape(self):
        # shape of the generated frames
        return self.im.shape
class TestCorrelations(StrictTestCase):
    """Statistical check of trackpy.velocity_corr on uncorrelated motion."""

    def setUp(self):
        # Two frames of N particles: frame 1 equals frame 0 plus small
        # independent Gaussian noise, so the velocities are uncorrelated.
        np.random.seed(0)  # deterministic test data
        randn = np.random.randn
        N = 500
        a = DataFrame(randn(N, 2), columns=['x', 'y'])
        b = DataFrame(a[['x', 'y']] + 0.1*randn(N, 2), columns=['x', 'y'])
        a['particle'] = np.arange(N)
        b['particle'] = np.arange(N)
        a['frame'] = 0
        b['frame'] = 1
        self.random_walk = pandas_concat([a, b])

    def test_no_correlations(self):
        # Velocity dot products binned by particle separation should average
        # to (nearly) zero for uncorrelated motion.
        v = tp.velocity_corr(self.random_walk, 0, 1)
        binned = v.groupby(np.digitize(v.r, np.linspace(0, 1, 10))).mean()
        actual = binned['dot_product']
        expected = np.zeros_like(actual)
        assert_allclose(actual, expected, atol=1e-3)
    def prepare(self, batch_params=None):
        """Locate features in two test frames to serve as expected output.

        Parameters
        ----------
        batch_params : dict, optional
            Extra keyword arguments merged into the ``tp.batch`` call.
        """
        directory = os.path.join(path, 'video', 'image_sequence')
        v = TrackpyImageSequence(os.path.join(directory, '*.png'))
        self.v = [tp.invert_image(v[i]) for i in range(2)]
        # mass depends on pixel dtype, which differs per reader
        minmass = self.v[0].max().item() * 2
        self.PARAMS = {'diameter': 11, 'minmass': minmass}
        if batch_params is not None:
            self.PARAMS.update(batch_params)
        self.expected = tp.batch(self.v, engine='python', meta=False,
                                 **self.PARAMS)
self.prepare() 97 | self.storage_class = tp.PandasHDFStoreBig 98 | 99 | def test_cache(self): 100 | """Store some frames, make a cache, then store some more frames.""" 101 | STORE_NAME = 'temp_for_testing_{}.h5'.format(_random_hash()) 102 | if os.path.isfile(STORE_NAME): 103 | os.remove(STORE_NAME) 104 | try: 105 | s = self.storage_class(STORE_NAME) 106 | except OSError: 107 | unittest.SkipTest('Cannot make an HDF5 file. Skipping') 108 | else: 109 | framedata = self.expected[self.expected.frame == 0] 110 | def putfake(store, i): 111 | fdat = framedata.copy() 112 | fdat.frame = i 113 | store.put(fdat) 114 | for i in range(10): putfake(s, i) 115 | assert s._frames_cache is None 116 | s._flush_cache() # Should do nothing 117 | assert set(range(10)) == set(s.frames) # Make cache 118 | assert set(range(10)) == set(s.frames) # Hit memory cache 119 | assert s._frames_cache is not None 120 | assert s._cache_dirty 121 | assert s._CACHE_NAME not in s.store 122 | 123 | s._flush_cache() 124 | assert s._CACHE_NAME in s.store 125 | assert not s._cache_dirty 126 | 127 | # Invalidate cache 128 | for i in range(10, 20): putfake(s, i) 129 | assert s._frames_cache is None 130 | assert s._CACHE_NAME not in s.store 131 | assert set(range(20)) == set(s.frames) 132 | assert s._frames_cache is not None 133 | 134 | s.rebuild_cache() # Just to try it 135 | 136 | s.close() # Write cache 137 | 138 | # Load cache from disk 139 | s = self.storage_class(STORE_NAME, 'r') 140 | assert set(range(20)) == set(s.frames) # Hit cache 141 | assert not s._cache_dirty 142 | 143 | s.close() 144 | os.remove(STORE_NAME) 145 | 146 | 147 | class TestSingleThreaded(FeatureSavingTester, StrictTestCase): 148 | def setUp(self): 149 | _skip_if_no_pytables() 150 | 151 | # Check that the argument is getting passed to utils.get_pool() 152 | with self.assertRaises(TypeError): 153 | self.prepare(batch_params={'processes': 'junk'}) 154 | 155 | self.prepare(batch_params={'processes': 1}) 156 | self.storage_class = 
tp.PandasHDFStoreBig 157 | 158 | 159 | class TestPandasHDFStoreBigCompressed(FeatureSavingTester, StrictTestCase): 160 | def setUp(self): 161 | _skip_if_no_pytables() 162 | self.prepare() 163 | self.storage_class = functools.partial( 164 | tp.PandasHDFStoreBig, complevel=4, complib='zlib', 165 | fletcher32=True) 166 | 167 | 168 | class TestPandasHDFStoreSingleNode(FeatureSavingTester, StrictTestCase): 169 | def setUp(self): 170 | _skip_if_no_pytables() 171 | self.prepare() 172 | self.storage_class = tp.PandasHDFStoreSingleNode 173 | 174 | 175 | class TestPandasHDFStoreSingleNodeCompressed(FeatureSavingTester, 176 | StrictTestCase): 177 | def setUp(self): 178 | _skip_if_no_pytables() 179 | self.prepare() 180 | self.storage_class = functools.partial( 181 | tp.PandasHDFStoreSingleNode, 182 | complevel=4, complib='zlib', fletcher32=True) 183 | 184 | 185 | if __name__ == '__main__': 186 | import unittest 187 | unittest.main() 188 | -------------------------------------------------------------------------------- /trackpy/tests/test_find.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import trackpy as tp 4 | from trackpy.artificial import draw_feature 5 | from trackpy.tests.common import assert_coordinates_close, StrictTestCase 6 | from trackpy.find import grey_dilation, grey_dilation_legacy 7 | 8 | 9 | class TestFindGreyDilation(StrictTestCase): 10 | def test_separation_fast(self): 11 | separation = 20 12 | for angle in np.arange(0, 360, 15): 13 | im = np.zeros((128, 128), dtype=np.uint8) 14 | pos = [[64, 64], [64 + separation * np.sin(angle/180*np.pi), 15 | 64 + separation * np.cos(angle/180*np.pi)]] 16 | 17 | # setup features: features with equal signal will always be 18 | # detected by a grey dilation, so make them unequal 19 | draw_feature(im, pos[0], 3, 240) 20 | draw_feature(im, pos[1], 3, 250) 21 | 22 | # find both of them 23 | f = grey_dilation(im, separation - 1, precise=False) 24 | 
import numpy as np

import trackpy as tp
from trackpy.artificial import draw_feature
from trackpy.tests.common import assert_coordinates_close, StrictTestCase
from trackpy.find import grey_dilation, grey_dilation_legacy


class TestFindGreyDilation(StrictTestCase):
    """Local-maximum finding via grey dilation, at several separations,
    angles, and image dtypes."""

    def test_separation_fast(self):
        separation = 20
        for angle in np.arange(0, 360, 15):
            theta = angle / 180 * np.pi
            image = np.zeros((128, 128), dtype=np.uint8)
            positions = [[64, 64],
                         [64 + separation * np.sin(theta),
                          64 + separation * np.cos(theta)]]

            # Two features of equal signal would both survive a grey
            # dilation no matter what, so give them unequal brightness.
            draw_feature(image, positions[0], 3, 240)
            draw_feature(image, positions[1], 3, 250)

            # a separation below the actual distance finds both maxima
            found = grey_dilation(image, separation - 1, precise=False)
            assert_coordinates_close(found, positions, atol=1)

            # a larger separation keeps only the brightest
            if angle in [45, 135, 225, 315]:
                # for unprecise, a too small square kernel is used, which is
                # perfect for 45-degree angles
                found = grey_dilation(image, separation + 1, precise=False)
                assert_coordinates_close(found, positions[1:], atol=1)
            else:
                # but too small by a factor of sqrt(ndim) for 90-degree angles
                found = grey_dilation(image, separation * np.sqrt(2) + 1,
                                      precise=False)
                assert_coordinates_close(found, positions[1:], atol=1)

    def test_separation(self):
        separation = 20
        for angle in np.arange(0, 360, 15):
            theta = angle / 180 * np.pi
            image = np.zeros((128, 128), dtype=np.uint8)
            positions = [[64, 64],
                         [64 + separation * np.sin(theta),
                          64 + separation * np.cos(theta)]]

            # unequal signals, so that only one survives a large dilation
            draw_feature(image, positions[0], 3, 240)
            draw_feature(image, positions[1], 3, 250)

            # find both of them
            found = grey_dilation(image, separation - 1)
            assert_coordinates_close(found, positions, atol=1)

            # find only the brightest
            found = grey_dilation(image, separation + 1)
            assert_coordinates_close(found, positions[1:], atol=1)

    def test_separation_anisotropic(self):
        separation = (10, 20)
        for angle in np.arange(0, 360, 15):
            theta = angle / 180 * np.pi
            image = np.zeros((128, 128), dtype=np.uint8)
            positions = [[64, 64],
                         [64 + separation[0] * np.sin(theta),
                          64 + separation[1] * np.cos(theta)]]

            # unequal signals, so that only one survives a large dilation
            draw_feature(image, positions[0], 3, 240)
            draw_feature(image, positions[1], 3, 250)

            # find both of them
            found = grey_dilation(image, (9, 19))
            assert_coordinates_close(found, positions, atol=1)

            # find only the brightest
            found = grey_dilation(image, (11, 21))
            assert_coordinates_close(found, positions[1:], atol=1)

    def test_float_image(self):
        separation = 20
        theta = 45 / 180 * np.pi
        image = np.zeros((128, 128), dtype=np.float64)
        positions = [[64, 64],
                     [64 + separation * np.sin(theta),
                      64 + separation * np.cos(theta)]]

        # unequal signals, as above
        draw_feature(image, positions[0], 3, 240)
        draw_feature(image, positions[1], 3, 250)

        # both features are found in a float-valued image as well
        found = grey_dilation(image, separation - 1, precise=False)
        assert_coordinates_close(found, positions, atol=1)


class TestFindGreyDilationLegacy(StrictTestCase):
    def test_separation(self):
        separation = 20
        for angle in np.arange(0, 360, 15):
            theta = angle / 180 * np.pi
            image = np.zeros((128, 128), dtype=np.uint8)
            positions = [[64, 64],
                         [64 + separation * np.sin(theta),
                          64 + separation * np.cos(theta)]]

            # features of equal signal would always be detected by
            # grey_dilation_legacy, so make them unequal
            draw_feature(image, positions[0], 3, 240)
            draw_feature(image, positions[1], 3, 250)

            # find both of them
            found = grey_dilation_legacy(image, separation - 1)
            assert_coordinates_close(found, positions, atol=1)

            # find only the brightest
            found = grey_dilation_legacy(image, separation + 1)
            assert_coordinates_close(found, positions[1:], atol=1)


if __name__ == '__main__':
    import unittest
    unittest.main()
import numpy as np

from numpy.testing import assert_equal

from trackpy.masks import slice_image, mask_image
from trackpy.tests.common import StrictTestCase


class TestSlicing(StrictTestCase):
    """slice_image crops a (2*radius+1)-sized box around the position(s),
    truncated wherever the box extends past the image bounds."""

    def test_slicing_2D(self):
        image = np.empty((9, 9))

        # centered box: full size in both dimensions
        for radius in range(1, 5):
            patch, corner = slice_image([4, 4], image, radius)
            assert_equal(patch.shape, (radius*2+1,) * 2)
            assert_equal(corner, (4 - radius,) * 2)

        # touching one edge
        for radius in range(1, 5):
            patch, corner = slice_image([0, 4], image, radius)
            assert_equal(patch.shape, (radius + 1, radius*2+1))

        # touching the other edge
        for radius in range(1, 5):
            patch, corner = slice_image([4, 0], image, radius)
            assert_equal(patch.shape, (radius*2+1, radius + 1))

        # corner
        for radius in range(1, 5):
            patch, corner = slice_image([0, 0], image, radius)
            assert_equal(patch.shape, (radius+1, radius + 1))

        # center just outside of the image
        for radius in range(2, 5):
            patch, corner = slice_image([-1, 4], image, radius)
            assert_equal(patch.shape, (radius, radius*2+1))

        # center outside of the image in both dimensions
        for radius in range(2, 5):
            patch, corner = slice_image([-1, -1], image, radius)
            assert_equal(patch.shape, (radius, radius))

        # too far away: empty slice
        for radius in range(2, 5):
            patch, corner = slice_image([-10, 20], image, radius)
            assert_equal(patch.shape, (0, 0))

    def test_slicing_3D(self):
        image = np.empty((9, 9, 9))

        # centered box
        for radius in range(1, 5):
            patch, corner = slice_image([4, 4, 4], image, radius)
            assert_equal(patch.shape, (radius*2+1,) * 3)
            assert_equal(corner, (4 - radius,) * 3)

        # face
        for radius in range(1, 5):
            patch, corner = slice_image([0, 4, 4], image, radius)
            assert_equal(patch.shape, (radius + 1, radius*2+1, radius*2+1))

        # edge
        for radius in range(1, 5):
            patch, corner = slice_image([4, 0, 0], image, radius)
            assert_equal(patch.shape, (radius*2+1, radius + 1, radius + 1))

        # corner
        for radius in range(1, 5):
            patch, corner = slice_image([0, 0, 0], image, radius)
            assert_equal(patch.shape, (radius+1, radius + 1, radius + 1))

        # center just outside of the image
        for radius in range(2, 5):
            patch, corner = slice_image([-1, 4, 4], image, radius)
            assert_equal(patch.shape, (radius, radius*2+1, radius*2+1))

        # outside of the image in two dimensions
        for radius in range(2, 5):
            patch, corner = slice_image([-1, -1, 4], image, radius)
            assert_equal(patch.shape, (radius, radius, radius*2+1))

        # too far away: empty slice
        for radius in range(2, 5):
            patch, corner = slice_image([-10, 20, 30], image, radius)
            assert_equal(patch.shape, (0, 0, 0))

    def test_slicing_2D_multiple(self):
        image = np.empty((9, 9))
        radius = 2

        patch, corner = slice_image([[4, 4], [4, 4]], image, radius)
        assert_equal(patch.shape, (5, 5))
        assert_equal(corner, (2, 2))

        patch, corner = slice_image([[4, 2], [4, 6]], image, radius)
        assert_equal(patch.shape, (5, 9))
        assert_equal(corner, (2, 0))

        patch, corner = slice_image([[2, 4], [6, 4]], image, radius)
        assert_equal(patch.shape, (9, 5))
        assert_equal(corner, (0, 2))

        # positions far outside the image do not enlarge the slice
        patch, corner = slice_image([[2, 4], [6, 4], [-10, 20]], image, radius)
        assert_equal(patch.shape, (9, 5))
        assert_equal(corner, (0, 2))

    def test_slicing_3D_multiple(self):
        image = np.empty((9, 9, 9))
        radius = 2

        patch, corner = slice_image([[4, 4, 4], [4, 4, 4]], image, radius)
        assert_equal(patch.shape, (5, 5, 5))
        assert_equal(corner, (2, 2, 2))

        patch, corner = slice_image([[4, 2, 4], [4, 6, 4]], image, radius)
        assert_equal(patch.shape, (5, 9, 5))
        assert_equal(corner, (2, 0, 2))

        patch, corner = slice_image([[4, 2, 6], [4, 6, 2]], image, radius)
        assert_equal(patch.shape, (5, 9, 9))
        assert_equal(corner, (2, 0, 0))

        # positions far outside the image do not enlarge the slice
        patch, corner = slice_image([[4, 2, 4], [4, 6, 4], [-10, 4, 4]],
                                    image, radius)
        assert_equal(patch.shape, (5, 9, 5))
        assert_equal(corner, (2, 0, 2))


class TestMasking(StrictTestCase):
    """mask_image zeroes everything outside the disk(s)/sphere(s) of the
    given radius; the sums below count the surviving unit pixels."""

    def test_masking_single_2D(self):
        image = np.ones((9, 9))
        radius = 1  # N pix is 5

        masked = mask_image([4, 4], image, radius)
        assert_equal(masked.sum(), 5)
        assert_equal(masked.shape, image.shape)

        masked = mask_image([0, 4], image, radius)
        assert_equal(masked.sum(), 4)

        masked = mask_image([4, 0], image, radius)
        assert_equal(masked.sum(), 4)

        masked = mask_image([0, 0], image, radius)
        assert_equal(masked.sum(), 3)

        masked = mask_image([-1, 4], image, radius)
        assert_equal(masked.sum(), 1)

        masked = mask_image([-1, -1], image, radius)
        assert_equal(masked.sum(), 0)

    def test_masking_multiple_2D(self):
        image = np.ones((9, 9))
        radius = 1  # N pix is 5

        masked = mask_image([[4, 2], [4, 6]], image, radius)
        assert_equal(masked.sum(), 10)

        masked = mask_image([[4, 4], [4, 4]], image, radius)
        assert_equal(masked.sum(), 5)

        masked = mask_image([[0, 4], [4, 4]], image, radius)
        assert_equal(masked.sum(), 9)

        masked = mask_image([[-1, 4], [4, 4]], image, radius)
        assert_equal(masked.sum(), 6)

        masked = mask_image([[-20, 40], [4, 4]], image, radius)
        assert_equal(masked.sum(), 5)

    def test_masking_single_3D(self):
        image = np.ones((9, 9, 9))
        radius = 1  # N pix is 7

        masked = mask_image([4, 4, 4], image, radius)
        assert_equal(masked.sum(), 7)
        assert_equal(masked.shape, image.shape)

        masked = mask_image([0, 4, 4], image, radius)
        assert_equal(masked.sum(), 6)

        masked = mask_image([4, 0, 0], image, radius)
        assert_equal(masked.sum(), 5)

        masked = mask_image([0, 0, 0], image, radius)
        assert_equal(masked.sum(), 4)

        masked = mask_image([-1, 4, 4], image, radius)
        assert_equal(masked.sum(), 1)

        masked = mask_image([-1, -1, -1], image, radius)
        assert_equal(masked.sum(), 0)

    def test_masking_multiple_3D(self):
        image = np.ones((9, 9, 9))
        radius = 1  # N pix is 7

        masked = mask_image([[4, 4, 4], [4, 4, 4]], image, radius)
        assert_equal(masked.sum(), 7)
        assert_equal(masked.shape, image.shape)

        masked = mask_image([[4, 4, 6], [4, 4, 2]], image, radius)
        assert_equal(masked.sum(), 14)

        masked = mask_image([[4, 4, 0], [4, 4, 4]], image, radius)
        assert_equal(masked.sum(), 13)


if __name__ == '__main__':
    import unittest
    unittest.main()
import logging
import os
import types
import unittest
import warnings

import trackpy
import trackpy.diag
from trackpy.tests.common import StrictTestCase
from trackpy.try_numba import NUMBA_AVAILABLE

path, _ = os.path.split(os.path.abspath(__file__))


class DiagTests(StrictTestCase):
    """Smoke tests: the diagnostic helpers should simply run."""

    def test_performance_report(self):
        trackpy.diag.performance_report()

    def test_dependencies(self):
        trackpy.diag.dependencies()


class LoggerTests(StrictTestCase):
    def test_heirarchy(self):
        # every subpackage logger hangs off the package root logger
        self.assertIs(trackpy.linking.logger.parent, trackpy.logger)
        self.assertIs(trackpy.feature.logger.parent, trackpy.logger)
        self.assertIs(trackpy.preprocessing.logger.parent, trackpy.logger)

    def test_convenience_funcs(self):
        # quiet() toggles between WARN and INFO
        trackpy.quiet(True)
        self.assertEqual(trackpy.logger.level, logging.WARN)
        trackpy.quiet(False)
        self.assertEqual(trackpy.logger.level, logging.INFO)

        # ignore_logging() detaches handlers and defers to the root logger
        trackpy.ignore_logging()
        self.assertEqual(len(trackpy.logger.handlers), 0)
        self.assertEqual(trackpy.logger.level, logging.NOTSET)
        self.assertTrue(trackpy.logger.propagate)

        # handle_logging() restores trackpy's own handler
        trackpy.handle_logging()
        self.assertEqual(len(trackpy.logger.handlers), 1)
        self.assertEqual(trackpy.logger.level, logging.INFO)
        self.assertEqual(trackpy.logger.propagate, 1)


class NumbaTests(StrictTestCase):
    def setUp(self):
        if not NUMBA_AVAILABLE:
            raise unittest.SkipTest("Numba not installed. Skipping.")
        self.funcs = trackpy.try_numba._registered_functions

    def tearDown(self):
        # leave the global numba switch in its default (enabled) state
        if NUMBA_AVAILABLE:
            trackpy.enable_numba()

    def test_registered_numba_functions(self):
        self.assertGreater(len(self.funcs), 0)

    def test_enabled(self):
        # after enabling, each registered name resolves to the compiled form
        trackpy.enable_numba()
        for entry in self.funcs:
            mod = __import__(entry.module_name, fromlist='.')
            current = getattr(mod, entry.func_name)
            self.assertIs(current, entry.compiled)
            self.assertNotIsInstance(current, types.FunctionType)

    def test_disabled(self):
        # after disabling, each registered name resolves to plain Python
        trackpy.disable_numba()
        for entry in self.funcs:
            mod = __import__(entry.module_name, fromlist='.')
            current = getattr(mod, entry.func_name)
            self.assertIs(current, entry.ordinary)
            self.assertIsInstance(current, types.FunctionType)
import numpy as np
from pandas import DataFrame, Series

from pandas.testing import (
    assert_series_equal,
    assert_frame_equal,
)

from numpy.testing import assert_almost_equal

import trackpy as tp
from trackpy.utils import pandas_sort, pandas_concat, is_pandas_since_220
from trackpy.tests.common import StrictTestCase


def random_walk(N):
    # 1D random walk of N steps: cumulative sum of standard-normal steps.
    return np.cumsum(np.random.randn(N))


def conformity(df):
    """ Organize toy data to look like real data. Be strict about dtypes:
    particle is a float and frame is an integer."""
    df['frame'] = df['frame'].astype(np.int64)
    df['x'] = df['x'].astype(float)
    df['y'] = df['y'].astype(float)
    df.set_index('frame', drop=False, inplace=True)
    if 'particle' in df.columns:
        df['particle'] = df['particle'].astype(float)
        return pandas_sort(df, by=['frame', 'particle'])
    else:
        return pandas_sort(df, by=['frame'])


def assert_traj_equal(t1, t2):
    # Compare two trajectory DataFrames after normalizing dtypes/ordering.
    return assert_frame_equal(conformity(t1), conformity(t2))


def add_drift(df, drift):
    # Shift positions by a per-frame drift (aligned on the 'frame' index);
    # frames missing from `drift` are left unshifted (fill_value=0).
    df = df.copy()
    df['x'] = df['x'].add(drift['x'], fill_value=0)
    df['y'] = df['y'].add(drift['y'], fill_value=0)
    return df


class TestDrift(StrictTestCase):
    """Tests for tp.compute_drift / tp.subtract_drift on toy trajectories."""

    def setUp(self):
        N = 10
        Y = 1
        # two motionless particles, offset by Y, one starting a frame late
        a = DataFrame({'x': np.zeros(N), 'y': np.zeros(N),
                       'frame': np.arange(N), 'particle': np.zeros(N)})
        b = DataFrame({'x': np.zeros(N - 1), 'y': Y + np.zeros(N - 1),
                       'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
        self.dead_still = conformity(pandas_concat([a, b]))
        pandas_sort(self.dead_still, ['frame', 'particle'], inplace=True)

        P = 1000  # particles
        A = 0.00001  # step amplitude
        np.random.seed(0)
        # many tiny random walks; ensemble drift should average to ~zero
        particles = [DataFrame({'x': A*random_walk(N), 'y': A*random_walk(N),
                                'frame': np.arange(N), 'particle': i})
                     for i in range(P)]
        self.many_walks = conformity(pandas_concat(particles))

        # same walks without a 'particle' column
        self.unlabeled_walks = self.many_walks.copy()
        del self.unlabeled_walks['particle']

        # two particles stepping +1 in x per frame
        a = DataFrame({'x': np.arange(N), 'y': np.zeros(N),
                       'frame': np.arange(N), 'particle': np.zeros(N)})
        b = DataFrame({'x': np.arange(1, N), 'y': Y + np.zeros(N - 1),
                       'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
        self.steppers = conformity(pandas_concat([a, b]))

        # Single-particle trajectory with no particle label
        self.single_stepper = conformity(a.copy())
        del self.single_stepper['particle']

    def test_no_drift(self):
        N = 10
        expected = DataFrame({'x': np.zeros(N), 'y': np.zeros(N)}).iloc[1:]
        expected = expected.astype('float')
        expected.index.name = 'frame'
        expected.columns = ['x', 'y']
        # ^ no drift measured for Frame 0

        actual = tp.compute_drift(self.dead_still)
        assert_frame_equal(actual, expected[['y', 'x']])

        # smoothing should not alter a zero-drift result
        actual_rolling = tp.compute_drift(self.dead_still, smoothing=2)
        assert_frame_equal(actual_rolling, expected[['y', 'x']])

    def test_constant_drift(self):
        N = 10
        expected = DataFrame({'x': np.arange(N), 'y': np.zeros(N)}).iloc[1:]
        expected = expected.astype('float')
        expected.index.name = 'frame'
        expected.columns = ['x', 'y']

        actual = tp.compute_drift(self.steppers)
        assert_frame_equal(actual, expected[['y', 'x']])

    def test_subtract_zero_drift(self):
        N = 10
        # subtracting an all-zero drift must leave trajectories unchanged
        drift = DataFrame(np.zeros((N - 1, 2)),
                          np.arange(1, N, dtype=int)).astype('float64')
        drift.columns = ['x', 'y']
        drift.index.name = 'frame'
        actual = tp.subtract_drift(self.dead_still, drift)
        assert_traj_equal(actual, self.dead_still)
        actual = tp.subtract_drift(self.many_walks, drift)
        assert_traj_equal(actual, self.many_walks)
        actual = tp.subtract_drift(self.steppers, drift)
        assert_traj_equal(actual, self.steppers)

    def test_subtract_constant_drift(self):
        N = 10
        # Add a constant drift here, and then use subtract_drift to
        # subtract it.
        drift = DataFrame(np.outer(np.arange(N - 1), [1, 1]),
                          index=np.arange(1, N, dtype=int)).astype('float64')
        drift.columns = ['x', 'y']
        drift.index.name = 'frame'
        actual = tp.subtract_drift(add_drift(self.dead_still, drift), drift)
        assert_traj_equal(actual, self.dead_still)
        actual = tp.subtract_drift(add_drift(self.many_walks, drift), drift)
        assert_traj_equal(actual, self.many_walks)
        actual = tp.subtract_drift(add_drift(self.steppers, drift), drift)
        assert_traj_equal(actual, self.steppers)

        actual = tp.subtract_drift(add_drift(self.single_stepper, drift), drift)
        assert_traj_equal(actual, self.single_stepper)

        # Test that subtract_drift is OK without particle labels.
        # In principle, Series.sub() may raise an error because
        # the 'frame' index is duplicated.
        # Don't check the result since we can't compare unlabeled trajectories!
        actual = tp.subtract_drift(add_drift(self.unlabeled_walks, drift),
                                   drift)


class TestMSD(StrictTestCase):
    """Tests for ensemble mean-squared displacement (tp.emsd)."""

    def setUp(self):
        N = 10
        Y = 1
        # motionless pair: MSD must be exactly zero
        a = DataFrame({'x': np.zeros(N), 'y': np.zeros(N),
                       'frame': np.arange(N), 'particle': np.zeros(N)})
        b = DataFrame({'x': np.zeros(N - 1), 'y': Y + np.zeros(N - 1),
                       'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
        self.dead_still = conformity(pandas_concat([a, b]))

        P = 50  # particles
        A = 1  # step amplitude
        np.random.seed(0)
        # unit-amplitude random walks: expected MSD is 2*A*lagtime per dim
        particles = [DataFrame({'x': A*random_walk(N), 'y': A*random_walk(N),
                                'frame': np.arange(N), 'particle': i})
                     for i in range(P)]
        self.many_walks = conformity(pandas_concat(particles))

        a = DataFrame({'x': np.arange(N), 'y': np.zeros(N),
                       'frame': np.arange(N), 'particle': np.zeros(N)})
        b = DataFrame({'x': np.arange(1, N), 'y': Y + np.zeros(N - 1),
                       'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
        self.steppers = conformity(pandas_concat([a, b]))

    def test_zero_emsd(self):
        N = 10
        actual = tp.emsd(self.dead_still, 1, 1)
        expected = Series(np.zeros(N, dtype=float),
                          index=np.arange(N, dtype=float)).iloc[1:]
        expected.index.name = 'lagt'
        expected.name = 'msd'
        # HACK: Float64Index imprecision ruins index equality.
        # Test them separately. If that works, make them exactly the same.
        assert_almost_equal(actual.index.values, expected.index.values)
        actual.index = expected.index
        assert_series_equal(actual, expected)

    def test_linear_emsd(self):
        A = 1
        EARLY = 7  # only early lag times have good stats
        actual = tp.emsd(self.many_walks, 1, 1, max_lagtime=EARLY)
        a = np.arange(EARLY+1, dtype='float64')
        expected = Series(2*A*a, index=a).iloc[1:]
        expected.name = 'msd'
        expected.index.name = 'lagt'
        # HACK: Float64Index imprecision ruins index equality.
        # Test them separately. If that works, make them exactly the same.
        assert_almost_equal(actual.index.values, expected.index.values)
        actual.index = expected.index
        assert_series_equal(np.round(actual), expected)

    def test_linear_emsd_gaps(self):
        A = 1
        EARLY = 4  # only early lag times have good stats
        # randomly drop 10% of the observations to create gaps
        gapped_walks = self.many_walks.reset_index(drop=True)
        to_drop = np.random.choice(gapped_walks.index,
                                   int(len(gapped_walks) * 0.1), replace=False)
        gapped_walks = gapped_walks.drop(to_drop, axis=0)

        actual = tp.emsd(gapped_walks, 1, 1, max_lagtime=EARLY)
        a = np.arange(EARLY+1, dtype='float64')
        expected = Series(2*A*a, index=a).iloc[1:]
        expected.name = 'msd'
        expected.index.name = 'lagt'
        # HACK: Float64Index imprecision ruins index equality.
        # Test them separately. If that works, make them exactly the same.
        assert_almost_equal(actual.index.values, expected.index.values)
        actual.index = expected.index
        assert_series_equal(np.round(actual), expected)

    def test_direction_corr(self):
        # just a smoke test
        f1, f2 = 2, 6
        df = tp.motion.direction_corr(self.many_walks, f1, f2)
        # one entry per unordered pair of particles
        P = len(self.many_walks.particle.unique())
        assert len(df) == (P * (P - 1)) / 2


class TestSpecial(StrictTestCase):
    """Smoke tests for the more specialized motion analyses."""

    def setUp(self):
        N = 10
        Y = 1
        a = DataFrame({'x': np.arange(N), 'y': np.zeros(N),
                       'frame': np.arange(N), 'particle': np.zeros(N)})
        b = DataFrame({'x': np.arange(1, N), 'y': Y + np.zeros(N - 1),
                       'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
        self.steppers = conformity(pandas_concat([a, b]))

    def test_theta_entropy(self):
        # just a smoke test
        theta_entropy = lambda x: tp.motion.theta_entropy(x, plot=False)
        # pandas >= 2.2 deprecates passing the group keys to apply;
        # opt out explicitly on those versions.
        self.steppers.groupby('particle').apply(
            theta_entropy,
            **({"include_groups": False} if is_pandas_since_220 else {}),
        )

    def test_relate_frames(self):
        # Check completeness of output
        pos_columns = ['x', 'y']
        f1, f2 = 2, 6
        df = tp.motion.relate_frames(self.steppers, f1, f2, pos_columns=pos_columns)
        for c in pos_columns:
            assert c in df
            assert c + '_b' in df
            assert 'd' + c in df
        assert 'dr' in df
        assert 'direction' in df


if __name__ == '__main__':
    import unittest
    unittest.main()
| path, _ = os.path.split(os.path.abspath(__file__)) 12 | 13 | 14 | class TestLabeling(StrictTestCase): 15 | def setUp(self): 16 | self.sparse = pd.DataFrame(np.load( 17 | os.path.join(path, 'data', 'sparse_trajectories.npy'))) 18 | 19 | def test_labeling_sparse_trajectories(self): 20 | suppress_plotting() 21 | ptraj(self.sparse, label=True) # No errors? 22 | 23 | 24 | if __name__ == '__main__': 25 | import unittest 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /trackpy/tests/test_plots.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | import numpy as np 5 | import pandas as pd 6 | from pandas import Series, DataFrame 7 | 8 | import trackpy 9 | from trackpy import plots 10 | from trackpy.utils import suppress_plotting, fit_powerlaw 11 | from trackpy.tests.common import StrictTestCase 12 | 13 | # Quiet warnings about Axes not being compatible with tight_layout 14 | import warnings 15 | warnings.filterwarnings("ignore", message="This figure includes Axes that are not compatible with tight_layout") 16 | 17 | path, _ = os.path.split(os.path.abspath(__file__)) 18 | 19 | try: 20 | import pims 21 | except ImportError: 22 | PIMS_AVAILABLE = False 23 | else: 24 | PIMS_AVAILABLE = True 25 | 26 | 27 | def _skip_if_no_pims(): 28 | if not PIMS_AVAILABLE: 29 | raise unittest.SkipTest('PIMS not installed. 
import os
import unittest

import numpy as np
import pandas as pd
from pandas import Series, DataFrame

import trackpy
from trackpy import plots
from trackpy.utils import suppress_plotting, fit_powerlaw
from trackpy.tests.common import StrictTestCase

# Quiet warnings about Axes not being compatible with tight_layout
import warnings
warnings.filterwarnings("ignore", message="This figure includes Axes that are not compatible with tight_layout")

path, _ = os.path.split(os.path.abspath(__file__))

try:
    import pims
except ImportError:
    PIMS_AVAILABLE = False
else:
    PIMS_AVAILABLE = True


def _skip_if_no_pims():
    """Skip the calling test when the optional PIMS package is absent."""
    if not PIMS_AVAILABLE:
        raise unittest.SkipTest('PIMS not installed. Skipping.')


class TestPlots(StrictTestCase):
    """Mostly smoke tests: the plotting helpers should run without raising,
    and should raise ValueError on clearly invalid arguments."""

    def setUp(self):
        # older matplotlib may raise an invalid error
        np.seterr(invalid='ignore')
        trajectory_file = os.path.join(path, 'data', 'sparse_trajectories.npy')
        self.sparse = pd.DataFrame(np.load(trajectory_file))

    def test_labeling_sparse_trajectories(self):
        suppress_plotting()
        plots.plot_traj(self.sparse, label=True)

    def test_ptraj_empty(self):
        # an empty trajectory table cannot be plotted
        suppress_plotting()
        with self.assertRaises(ValueError):
            plots.plot_traj(DataFrame(columns=self.sparse.columns))

    def test_ptraj_unicode_labels(self):
        # smoke test
        plots.plot_traj(self.sparse, mpp=0.5)

    def test_ptraj_t_column(self):
        # the time column may have any name if passed via t_column
        suppress_plotting()
        renamed = self.sparse.copy()
        columns = list(renamed.columns)
        columns[columns.index('frame')] = 'arbitrary name'
        renamed.columns = columns
        plots.plot_traj(renamed, t_column='arbitrary name')

    def test_ptraj3d(self):
        sparse3d = self.sparse.copy()
        sparse3d['z'] = 0
        # smoke test
        plots.plot_traj3d(sparse3d)

    def test_annotate(self):
        suppress_plotting()
        features = DataFrame({'x': [0, 1], 'y': [0, 1], 'frame': [0, 0],
                              'mass': [10, 20]})
        image = np.random.randint(0, 255, (5, 5))

        # Basic usage
        plots.annotate(features, image)
        plots.annotate(features, image, color='r')

        # Coloring by threshold
        plots.annotate(features, image, split_category='mass',
                       split_thresh=15, color=['r', 'g'])
        plots.annotate(features, image, split_category='mass',
                       split_thresh=[15], color=['r', 'g'])
        plots.annotate(features, image, split_category='mass',
                       split_thresh=[15, 25], color=['r', 'g', 'b'])

        # Check that bad parameters raise an error.

        # Too many colors
        with self.assertRaises(ValueError):
            plots.annotate(features, image, split_category='mass',
                           split_thresh=15, color=['r', 'g', 'b'])

        # Not enough colors
        with self.assertRaises(ValueError):
            plots.annotate(features, image, split_category='mass',
                           split_thresh=15, color=['r'])
        with self.assertRaises(ValueError):
            plots.annotate(features, image, split_category='mass',
                           split_thresh=15, color='r')

        # Nonexistent column name for split_category
        with self.assertRaises(ValueError):
            plots.annotate(features, image, split_category='not a column',
                           split_thresh=15, color='r')

        # 3D image
        with self.assertRaises(ValueError):
            plots.annotate(features, image[np.newaxis, :, :])

    def test_annotate3d(self):
        _skip_if_no_pims()
        suppress_plotting()
        features = DataFrame({'x': [0, 1], 'y': [0, 1], 'z': [0, 1],
                              'frame': [0, 0], 'mass': [10, 20]})
        volume = np.random.randint(0, 255, (5, 5, 5))

        plots.annotate3d(features, volume)
        plots.annotate3d(features, volume, color='r')

        # 2D image
        with self.assertRaises(ValueError):
            plots.annotate3d(features, volume[0])

        # Rest of the functionality is covered by annotate tests

    def test_fit_powerlaw(self):
        # smoke test
        suppress_plotting()
        em = Series([1, 2, 3], index=[1, 2, 3])
        fit_powerlaw(em)
        fit_powerlaw(em, plot=False)


if __name__ == '__main__':
    import unittest
    unittest.main()
class LegacyPreprocessingTests(StrictTestCase):
    """Check that the legacy bandpass implementations agree with the
    current (scipy-based) bandpass on a synthetic noisy image."""

    def setUp(self):
        # Synthetic frame: 200 non-overlapping spots of size 20 plus noise.
        positions = gen_nonoverlapping_locations((512, 512), 200, 20)
        self.frame = draw_spots((512, 512), positions, 20, noise_level=100)
        self.margin = 11
        m = self.margin
        # Reference result from the current implementation, with the
        # image border trimmed off so edge effects are ignored.
        self.bp_scipy = bandpass(self.frame, 2, 11)[m:-m, m:-m]

    def test_legacy_bandpass(self):
        m = self.margin
        result = legacy_bandpass(self.frame, 2, 5)[m:-m, m:-m]
        assert_allclose(result, self.bp_scipy, atol=1.1)

    def test_legacy_bandpass_fftw(self):
        try:
            import pyfftw  # noqa: F401 -- only checking availability
        except ImportError:
            raise unittest.SkipTest("pyfftw not installed. Skipping.")
        m = self.margin
        result = legacy_bandpass_fftw(self.frame, 2, 5)[m:-m, m:-m]
        assert_allclose(result, self.bp_scipy, atol=1.1)
def compare_pos_df(actual, expected, pos_atol=0.001, lost_atol=1):
    """Compare feature positions between two DataFrames, frame by frame.

    For every frame, each expected position is matched to its nearest
    actual position with a KDTree and classified by match distance.

    Parameters
    ----------
    actual, expected : DataFrame
        Feature positions; must have 'y', 'x' and 'frame' columns.
    pos_atol : float
        Distances below this count as equal positions.
    lost_atol : float
        Distances above this count as lost features.

    Returns
    -------
    lost0 : indices of `expected` rows with no close match in `actual`
    appeared1 : indices of `actual` rows not matched to any expected row
    (dev0, dev1) : indices of matched rows whose positions deviate
    (equal0, equal1) : indices of matched rows with equal positions
    """
    lost0 = []
    appeared1 = []
    dev0 = []
    dev1 = []
    equal0 = []
    equal1 = []
    for frame_no, expected_frame in expected.groupby('frame'):
        coords0 = expected_frame[['y', 'x']].values
        actual_frame = actual[actual['frame'] == frame_no]
        coords1 = actual_frame[['y', 'x']].values

        # use a KDTree to find nearest neighbors
        tree = cKDTree(coords1)
        devs, inds = tree.query(coords0)  # find nearest neighbors

        i_lost0 = np.argwhere(devs > lost_atol).ravel()
        # features that are equal
        i_equal0 = np.argwhere(devs < pos_atol).ravel()
        i_equal1 = inds[i_equal0]
        # features that are the same, but deviate in position
        i_dev0 = np.argwhere((devs < lost_atol) & (devs >= pos_atol)).ravel()
        i_dev1 = inds[i_dev0]
        # Features present in `actual` and not in `expected`. FIX: compare
        # against the actual-side match indices (i_equal1/i_dev1); the
        # original used the expected-side indices (i_equal0/i_dev0), which
        # is only correct when the nearest-neighbor mapping happens to be
        # the identity. Also: np.isin replaces the deprecated np.in1d.
        matched1 = np.concatenate([i_equal1, i_dev1])
        i_appeared1 = np.argwhere(~np.isin(np.arange(len(coords1)),
                                           matched1)).ravel()
        lost0.append(expected_frame.iloc[i_lost0].index.values)
        appeared1.append(actual_frame.iloc[i_appeared1].index.values)
        dev0.append(expected_frame.iloc[i_dev0].index.values)
        dev1.append(actual_frame.iloc[i_dev1].index.values)
        equal0.append(expected_frame.iloc[i_equal0].index.values)
        equal1.append(actual_frame.iloc[i_equal1].index.values)

    return np.concatenate(lost0), np.concatenate(appeared1), \
        (np.concatenate(dev0), np.concatenate(dev1)), \
        (np.concatenate(equal0), np.concatenate(equal1))
    def setUp(self):
        """Set the feature-finding / linking parameters that the stored
        reference data (reproducibility_v0.4.npz) was generated with.
        Changing any of these would invalidate the comparisons below."""
        self.diameter = 9
        self.minmass = 140
        self.memory = 2
        self.bandpass_params = dict(lshort=1, llong=self.diameter)
        self.find_params = dict(separation=self.diameter)
        self.refine_params = dict(radius=int(self.diameter // 2))
        self.locate_params = dict(diameter=self.diameter, minmass=self.minmass,
                                  characterize=False)
        self.link_params = dict(search_range=5)
        self.characterize_params = dict(diameter=self.diameter,
                                        characterize=True)
        self.pos_columns = ['y', 'x']
        self.char_columns = ['mass', 'size', 'ecc', 'signal', 'raw_mass', 'ep']

    def test_find_raw(self):
        # Grey-dilation peak finding on the raw (inverted) frame must
        # exactly reproduce the stored coordinates.
        actual = tp.grey_dilation(self.v0_inverted, **self.find_params)
        assert_array_equal(actual, self.expected_find_raw)

    def test_find_bp(self):
        # Same as test_find_raw, but on the bandpassed image.
        image_bp = tp.bandpass(self.v0_inverted, **self.bandpass_params)
        actual = tp.grey_dilation(image_bp, **self.find_params)
        assert_array_equal(actual, self.expected_find_bp)

    def test_refine(self):
        # Center-of-mass refinement starting from the stored raw peak
        # coordinates; compare positions after the minmass filter.
        coords_v0 = self.expected_find_bp
        image_bp = tp.bandpass(self.v0_inverted, **self.bandpass_params)
        df = tp.refine_com(self.v0_inverted, image_bp, coords=coords_v0,
                           **self.refine_params)
        actual = df[df['mass'] >= self.minmass][self.pos_columns].values

        assert_allclose(actual, self.expected_refine)

    def test_locate(self):
        # End-to-end locate (bandpass + find + refine) must reproduce
        # the stored positions.
        df = tp.locate(self.v0_inverted, **self.locate_params)
        actual = df[self.pos_columns].values
        assert_allclose(actual, self.expected_locate)
    def test_link_memory(self):
        # Linking with memory enabled must reproduce the stored particle
        # labels for the stored coordinates.
        expected = pd.DataFrame(self.coords_link,
                                columns=self.pos_columns + ['frame'])
        expected['frame'] = expected['frame'].astype(np.int64)
        actual = tp.link(expected, memory=self.memory, **self.link_params)
        expected['particle'] = self.expected_link_memory

        assert_traj_equal(actual, expected)

    def test_characterize(self):
        # Locate with characterization, restricted to a 64x64 corner of the
        # frame, and compare coordinates plus the characterization columns
        # (mass, size, ecc, signal, raw_mass, ep) against the reference.
        df = tp.locate(self.v0_inverted, diameter=9)
        df = df[(df['x'] < 64) & (df['y'] < 64)]
        actual_coords = df[self.pos_columns].values
        actual_char = df[self.char_columns].values

        # Distinguish a coordinate mismatch (locate changed) from a
        # characterization mismatch, so failures are easier to diagnose.
        try:
            assert_allclose(actual_coords,
                            self.expected_characterize[:, :2])
        except AssertionError:
            raise AssertionError('The characterize tests failed as the coords'
                                 ' found by locate were not reproduced.')
        assert_allclose(actual_char,
                        self.expected_characterize[:, 2:])
https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/video/image_sequence/T76S3F00001.png -------------------------------------------------------------------------------- /trackpy/tests/video/image_sequence/T76S3F00002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/video/image_sequence/T76S3F00002.png -------------------------------------------------------------------------------- /trackpy/tests/video/image_sequence/T76S3F00003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/video/image_sequence/T76S3F00003.png -------------------------------------------------------------------------------- /trackpy/tests/video/image_sequence/T76S3F00004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/video/image_sequence/T76S3F00004.png -------------------------------------------------------------------------------- /trackpy/tests/video/image_sequence/T76S3F00005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/video/image_sequence/T76S3F00005.png -------------------------------------------------------------------------------- /trackpy/tests/video/seq_frame0.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/video/seq_frame0.npy -------------------------------------------------------------------------------- 
/trackpy/tests/video/seq_frame1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/video/seq_frame1.npy -------------------------------------------------------------------------------- /trackpy/tests/video/stuck.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/video/stuck.tif -------------------------------------------------------------------------------- /trackpy/tests/video/stuck_frame0.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/video/stuck_frame0.npy -------------------------------------------------------------------------------- /trackpy/tests/video/stuck_frame1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/video/stuck_frame1.npy -------------------------------------------------------------------------------- /trackpy/tests/water/bulk-water.mov: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/water/bulk-water.mov -------------------------------------------------------------------------------- /trackpy/tests/wire/horizontal expected result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/wire/horizontal expected result.png 
-------------------------------------------------------------------------------- /trackpy/tests/wire/horizontal_frame.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/wire/horizontal_frame.npy -------------------------------------------------------------------------------- /trackpy/tests/wire/oblique expected result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/wire/oblique expected result.png -------------------------------------------------------------------------------- /trackpy/tests/wire/oblique_frame.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/wire/oblique_frame.npy -------------------------------------------------------------------------------- /trackpy/tests/wire/vertical expected result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/wire/vertical expected result.png -------------------------------------------------------------------------------- /trackpy/tests/wire/vertical_frame.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soft-matter/trackpy/2d6f31f63150838109c24a1fa48d4a96f530faa4/trackpy/tests/wire/vertical_frame.npy -------------------------------------------------------------------------------- /trackpy/tracking.py: -------------------------------------------------------------------------------- 1 | from trackpy.linking import * # legacy 2 | 
class RegisteredFunction:
    """Enable toggling between original function and numba-compiled one."""

    def __init__(self, func, fallback=None, jit_kwargs=None):
        """
        Parameters
        ----------
        func : callable
            Function to be numba-compiled on demand.
        fallback : callable, optional
            Plain-Python variant used when numba is disabled; defaults
            to `func` itself.
        jit_kwargs : dict, optional
            Keyword arguments forwarded to numba.jit().
        """
        self.func = func
        self.func_name = func.__name__
        self.module_name = func.__module__
        self.jit_kwargs = jit_kwargs
        # The "ordinary" (uncompiled) variant used when numba is off.
        self.ordinary = fallback if fallback is not None else func

    @property
    def compiled(self):
        """The numba-compiled variant, compiled lazily on first access.

        FIX: the original skipped compilation when numba was unavailable
        and then hit `return self._compiled` on a nonexistent attribute,
        raising an uninformative AttributeError. Raise explicitly instead.
        """
        if not NUMBA_AVAILABLE:
            raise RuntimeError('numba is not available; cannot compile ' +
                               self.func_name)
        if not hasattr(self, '_compiled'):
            # Compile it if this is the first time.
            if self.jit_kwargs is not None:
                self._compiled = numba.jit(**self.jit_kwargs)(self.func)
            else:
                self._compiled = numba.jit(self.func)
        return self._compiled

    def point_to_compiled_func(self):
        # Rebind the module-level name to the compiled variant.
        setattr(sys.modules[self.module_name], self.func_name, self.compiled)

    def point_to_ordinary_func(self):
        # Restore the module-level name to the plain-Python variant.
        setattr(sys.modules[self.module_name], self.func_name, self.ordinary)
def disable_numba():
    """Do not use numba-accelerated functions, even if numba is available."""
    for registered in _registered_functions:
        registered.point_to_ordinary_func()


def enable_numba():
    """Use numba-accelerated variants of core functions."""
    if not NUMBA_AVAILABLE:
        raise ImportError(message)
    for registered in _registered_functions:
        registered.point_to_compiled_func()
@memo
def _root_sum_x_squared(radius, ndim):
    "Returns the root of the sum of all x^2 inside the mask for each dim."
    # x_squared_masks comes from trackpy.masks; per the reduction below it
    # yields one mask per dimension (first axis), with the remaining axes
    # spanning the mask itself.
    masks = x_squared_masks(radius, ndim)
    r2 = np.sum(masks, axis=tuple(range(1, ndim + 1)))  # each ax except first
    return np.sqrt(r2)


def _static_error(mass, noise, radius, noise_size):
    # Core static-error formula (see static_error below): the error is the
    # noise-to-signal ratio times the noise correlation length times the
    # root-sum of squared mask coordinates.
    # `radius` and `noise_size` are assumed to be tuples, one entry per
    # dimension (static_error passes them that way) -- confirm for any
    # other caller.
    coord_moments = _root_sum_x_squared(radius, len(radius))
    N_S = noise / mass
    if np.all(radius[1:] == radius[:-1]) and \
       np.all(noise_size[1:] == noise_size[:-1]):
        # Isotropic case: one error value per feature.
        ep = N_S * noise_size[0] * coord_moments[0]
    else:
        # Anisotropic case: one error value per feature per dimension.
        ep = N_S[:, np.newaxis] * \
            (np.array(noise_size) * np.array(coord_moments))[np.newaxis, :]
    return ep
def static_error(features, noise, diameter, noise_size=1, ndim=2):
    """Compute the uncertainty in particle position ("the static error").

    Parameters
    ----------
    features : DataFrame of features
        The feature dataframe should have a `mass` column that is already
        background corrected.
    noise : number or DataFrame having `noise` column, indexed on `frame`
        standard deviation of the noise
    diameter : number or tuple, feature diameter used to locate centroids
    noise_size : noise correlation length, may be tuple-valued
    ndim : number of image dimensions, default 2
        if diameter is tuple-valued then its length will override ndim

    Returns
    -------
    DataFrame of static error estimates, indexed like the features.
    When either radius or noise_size are anisotropic, the returned DataFrame
    contains one column for each dimension.

    Where uncertainty estimation fails, NaN is returned.

    Note
    ----
    This is an adjusted version of the process described by Thierry Savin and
    Patrick S. Doyle in their paper "Static and Dynamic Errors in Particle
    Tracking Microrheology," Biophysical Journal 88(1) 623-638.

    Instead of measuring the peak intensity of the feature and calculating the
    total intensity (assuming a certain feature shape), the total intensity
    (=mass) is summed directly from the data. This quantity is more robust
    to noise and gives a better estimate of the static error.

    In addition, the sum of squared coordinates is calculated by taking the
    discrete sum instead of taking the continuous limit and integrating. This
    makes it possible to generalize this analysis to anisotropic masks.
    """
    if hasattr(diameter, '__iter__'):
        ndim = len(diameter)
    # Tuples are reversed -- presumably so the first entry corresponds to x,
    # matching the ep_x/ep_y column order below; confirm against callers.
    noise_size = validate_tuple(noise_size, ndim)[::-1]
    diameter = validate_tuple(diameter, ndim)[::-1]
    radius = tuple([d // 2 for d in diameter])

    if np.isscalar(noise):
        ep = _static_error(features['mass'], noise, radius, noise_size)
    else:
        # Per-frame noise: look up each feature's frame in the noise table.
        assert 'noise' in noise
        temp = features.join(noise, on='frame')
        ep = _static_error(temp['mass'], temp['noise'], radius, noise_size)

    # Negative estimates are meaningless; mark them as failed (NaN).
    ep[ep < 0] = np.nan

    if ep.ndim == 1:
        ep.name = 'ep'
    elif ep.ndim == 2:
        if ndim < 4:
            coord_columns = ['ep_x', 'ep_y', 'ep_z'][:ndim]
        else:
            # Idiom fix: list comprehension instead of map(lambda ...).
            # Output is unchanged: ep_x0, ep_x1, ... for >= 4 dimensions.
            # NOTE(review): the 'ep_x' prefix here looks like it may have
            # been meant as a bare 'ep_' -- confirm before changing.
            coord_columns = ['ep_x' + str(i) for i in range(ndim)]
        ep = DataFrame(ep, columns=coord_columns, index=features.index)
    return ep