├── .github
│   └── workflows
│       └── python-publish.yml
├── .gitignore
├── DOWNLOAD_WEIGHTS.md
├── LICENSE
├── README.md
├── docs
│   ├── .buildinfo
│   ├── .doctrees
│   │   ├── environment.pickle
│   │   ├── includeme
│   │   │   ├── apidocuments.doctree
│   │   │   └── readmefile.doctree
│   │   └── index.doctree
│   ├── .nojekyll
│   ├── Makefile
│   ├── README.md
│   ├── _images
│   │   ├── cars.gif
│   │   └── cows.gif
│   ├── _modules
│   │   ├── index.html
│   │   └── motrackers
│   │       ├── centroid_kf_tracker.html
│   │       ├── detectors
│   │       │   ├── caffe.html
│   │       │   ├── detector.html
│   │       │   ├── tf.html
│   │       │   └── yolo.html
│   │       ├── iou_tracker.html
│   │       ├── kalman_tracker.html
│   │       ├── sort_tracker.html
│   │       ├── track.html
│   │       ├── tracker.html
│   │       └── utils
│   │           ├── filechooser_utils.html
│   │           └── misc.html
│   ├── _sources
│   │   ├── includeme
│   │   │   ├── apidocuments.rst.txt
│   │   │   └── readmefile.rst.txt
│   │   └── index.rst.txt
│   ├── _static
│   │   ├── _sphinx_javascript_frameworks_compat.js
│   │   ├── basic.css
│   │   ├── css
│   │   │   ├── badge_only.css
│   │   │   ├── fonts
│   │   │   │   ├── Roboto-Slab-Bold.woff
│   │   │   │   ├── Roboto-Slab-Bold.woff2
│   │   │   │   ├── Roboto-Slab-Regular.woff
│   │   │   │   ├── Roboto-Slab-Regular.woff2
│   │   │   │   ├── fontawesome-webfont.eot
│   │   │   │   ├── fontawesome-webfont.svg
│   │   │   │   ├── fontawesome-webfont.ttf
│   │   │   │   ├── fontawesome-webfont.woff
│   │   │   │   ├── fontawesome-webfont.woff2
│   │   │   │   ├── lato-bold-italic.woff
│   │   │   │   ├── lato-bold-italic.woff2
│   │   │   │   ├── lato-bold.woff
│   │   │   │   ├── lato-bold.woff2
│   │   │   │   ├── lato-normal-italic.woff
│   │   │   │   ├── lato-normal-italic.woff2
│   │   │   │   ├── lato-normal.woff
│   │   │   │   └── lato-normal.woff2
│   │   │   └── theme.css
│   │   ├── doctools.js
│   │   ├── documentation_options.js
│   │   ├── file.png
│   │   ├── jquery-3.6.0.js
│   │   ├── jquery.js
│   │   ├── js
│   │   │   ├── badge_only.js
│   │   │   ├── html5shiv-printshiv.min.js
│   │   │   ├── html5shiv.min.js
│   │   │   └── theme.js
│   │   ├── language_data.js
│   │   ├── minus.png
│   │   ├── plus.png
│   │   ├── pygments.css
│   │   ├── searchtools.js
│   │   ├── sphinx_highlight.js
│   │   ├── underscore-1.13.1.js
│   │   └── underscore.js
│   ├── genindex.html
│   ├── includeme
│   │   ├── apidocuments.html
│   │   └── readmefile.html
│   ├── index.html
│   ├── make.bat
│   ├── objects.inv
│   ├── readme
│   │   ├── CODE_OF_CONDUCT.md
│   │   ├── CONTRIBUTING.md
│   │   └── REFERENCES.md
│   ├── search.html
│   ├── searchindex.js
│   └── source
│       ├── conf.py
│       ├── includeme
│       │   ├── apidocuments.rst
│       │   └── readmefile.rst
│       └── index.rst
├── examples
│   ├── assets
│   │   ├── cars.gif
│   │   └── cows.gif
│   ├── example_notebooks
│   │   ├── detector_Caffe_SSDMobileNet.ipynb
│   │   ├── detector_TF_SSDMobileNetV2.ipynb
│   │   ├── detector_YOLOv3.ipynb
│   │   ├── mot_Caffe_SSDMobileNet.ipynb
│   │   ├── mot_TF_SSDMobileNetV2.ipynb
│   │   └── mot_YOLOv3.ipynb
│   ├── example_scripts
│   │   ├── detector_Caffe_SSDMobileNet.py
│   │   ├── detector_TF_SSDMobileNetV2.py
│   │   ├── detector_YOLOv3.py
│   │   ├── mot_Caffe_SSDMobileNet.py
│   │   ├── mot_TF_SSDMobileNet.py
│   │   ├── mot_YOLOv3.py
│   │   └── readme.md
│   ├── motmetrics_eval
│   │   ├── data
│   │   │   ├── TUD-Campus
│   │   │   │   ├── gt.txt
│   │   │   │   └── test.txt
│   │   │   ├── TUD-Stadtmitte
│   │   │   │   ├── gt.txt
│   │   │   │   └── test.txt
│   │   │   └── iotest
│   │   │       ├── detrac.mat
│   │   │       ├── detrac.xml
│   │   │       ├── motchallenge.txt
│   │   │       └── vatic.txt
│   │   ├── motmeterics.py
│   │   └── readme.md
│   ├── pretrained_models
│   │   ├── caffemodel_weights
│   │   │   ├── get_caffemodel.sh
│   │   │   └── ssd_mobilenet_caffe_names.json
│   │   ├── tensorflow_weights
│   │   │   ├── get_ssd_model.sh
│   │   │   └── ssd_mobilenet_v2_coco_names.json
│   │   └── yolo_weights
│   │       ├── coco_names.json
│   │       └── get_yolo.sh
│   ├── readme.md
│   └── video_data
│       └── readme.md
├── motrackers
│   ├── __init__.py
│   ├── centroid_kf_tracker.py
│   ├── detectors
│   │   ├── __init__.py
│   │   ├── caffe.py
│   │   ├── detector.py
│   │   ├── tf.py
│   │   └── yolo.py
│   ├── iou_tracker.py
│   ├── kalman_tracker.py
│   ├── sort_tracker.py
│   ├── track.py
│   ├── tracker.py
│   ├── tracker_img.py
│   └── utils
│       ├── __init__.py
│       ├── filechooser_utils.py
│       └── misc.py
└── pyproject.toml

--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
 1 | name: Upload motrackers to PyPI
 2 | 
 3 | # Making this workflow only run when a new tag is added to the GitHub repo.
 4 | # As done here:
 5 | # https://stackoverflow.com/questions/18216991/create-a-tag-in-a-github-repository
 6 | on:
 7 |   push:
 8 |     tags:
 9 |       - "v*"
10 | 
11 | permissions:
12 |   contents: read
13 | 
14 | jobs:
15 |   build-n-publish:
16 |     name: Build and publish to PyPI
17 |     runs-on: ubuntu-latest
18 | 
19 |     steps:
20 |       - name: Checkout source
21 |         uses: actions/checkout@v3
22 | 
23 |       - name: Set up Python
24 |         uses: actions/setup-python@v3
25 |         with:
26 |           python-version: '3.x'
27 | 
28 |       - name: Build source and wheel distributions
29 |         run: |
30 |           sudo apt-get update
31 |           pip install --upgrade pip
32 |           python -m pip install --upgrade build twine
33 |           python -m build
34 |           twine check --strict dist/*
35 | 
36 |       - name: Publish distribution to PyPI
37 |         uses: pypa/gh-action-pypi-publish@release/v1
38 |         with:
39 |           user: __token__
40 |           password: ${{ secrets.PYPI_API_TOKEN }}
41 | 
42 |       # https://github.com/actions/first-interaction/issues/10#issuecomment-1475121828
43 |       - name: Create GitHub Release
44 |         id: create_release
45 |         uses: actions/create-release@v1
46 |         env:
47 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}  # This token is provided by Actions, you do not need to create your own token
48 |         with:
49 |           tag_name: ${{ github.ref }}
50 |           release_name: ${{ github.ref }}
51 |           draft: false
52 |           prerelease: false
53 | 
54 |       - name: Get Asset name
55 |         run: |
56 |           export PKG=$(ls dist/ | grep tar)
57 |           set -- $PKG
58 |           echo "name=$1" >> $GITHUB_ENV
59 |       - name: Upload Release Asset (sdist) to GitHub
60 |         id: upload-release-asset
61 |         uses: actions/upload-release-asset@v1
62 |         env:
63 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
64 |         with:
65 |           upload_url: ${{ steps.create_release.outputs.upload_url }}
66 |           asset_path: dist/${{ env.name }}
67 |           asset_name: ${{ env.name }}
68 |           asset_content_type: application/zip
69 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | .ipynb_checkpoints/
 2 | examples/video_data/
 3 | .idea/
 4 | examples/output.avi
 5 | examples/pretrained_models/caffemodel_weights/
 6 | examples/pretrained_models/tensorflow_weights/
 7 | examples/pretrained_models/yolo_weights/
 8 | 
 9 | # Byte-compiled / optimized / DLL files
10 | __pycache__/
11 | *.py[cod]
12 | *$py.class
13 | 
14 | # C extensions
15 | *.so
16 | 
17 | # Distribution / packaging
18 | .Python
19 | build/
20 | develop-eggs/
21 | dist/
22 | downloads/
23 | eggs/
24 | .eggs/
25 | lib/
26 | lib64/
27 | parts/
28 | sdist/
29 | var/
30 | wheels/
31 | pip-wheel-metadata/
32 | share/python-wheels/
33 | *.egg-info/
34 | .installed.cfg
35 | *.egg
36 | MANIFEST
37 | 
38 | # PyInstaller
39 | # Usually these files are written by a python script from a template
40 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
41 | *.manifest
42 | *.spec
43 | 
44 | # Installer logs
45 | pip-log.txt
46 | pip-delete-this-directory.txt
47 | 
48 | # Unit test / coverage reports
49 | htmlcov/
50 | .tox/
51 | .nox/
52 | .coverage
53 | .coverage.*
54 | .cache
55 | nosetests.xml
56 | coverage.xml
57 | *.cover
58 | *.py,cover
59 | .hypothesis/
60 | .pytest_cache/
61 | 
62 | # Translations
63 | *.mo
64 | *.pot
65 | 
66 | # Django stuff:
67 | *.log
68 | local_settings.py
69 | db.sqlite3
70 | db.sqlite3-journal
71 | 
72 | # Flask stuff:
73 | instance/
74 | .webassets-cache
75 | 
76 | # Scrapy stuff:
77 | .scrapy
78 | 
79 | # Sphinx documentation
80 | docs/_build/
81 | 
82 | # PyBuilder
83 | target/
84 | 
85 | # Jupyter Notebook
86 | .ipynb_checkpoints
87 | 
88 | # IPython
89 | profile_default/
90 | ipython_config.py
91 | 
92 | # pyenv
93 | .python-version
94 | 
95 | # pipenv
96 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
97 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
98 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
99 | # install all needed dependencies.
100 | #Pipfile.lock
101 | 
102 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
103 | __pypackages__/
104 | 
105 | # Celery stuff
106 | celerybeat-schedule
107 | celerybeat.pid
108 | 
109 | # SageMath parsed files
110 | *.sage.py
111 | 
112 | # Environments
113 | .env
114 | .venv
115 | env/
116 | venv/
117 | ENV/
118 | env.bak/
119 | venv.bak/
120 | 
121 | # Spyder project settings
122 | .spyderproject
123 | .spyproject
124 | 
125 | # Rope project settings
126 | .ropeproject
127 | 
128 | # mkdocs documentation
129 | /site
130 | 
131 | # mypy
132 | .mypy_cache/
133 | .dmypy.json
134 | dmypy.json
135 | 
136 | # Pyre type checker
137 | .pyre/
--------------------------------------------------------------------------------
/DOWNLOAD_WEIGHTS.md:
--------------------------------------------------------------------------------
 1 | ## Download pretrained neural-network weights
 2 | [[Webpage](https://adipandas.github.io/multi-object-tracker/)]
 3 | [[GitHub](https://github.com/adipandas/multi-object-tracker)]
 4 | 
 5 | ##### YOLOv3
 6 | 
 7 | ```
 8 | cd multi-object-tracker
 9 | cd ./examples/pretrained_models/yolo_weights
10 | sudo chmod +x ./get_yolo.sh
11 | ./get_yolo.sh
12 | ```
13 | 
14 | ##### TensorFlow - MobileNetSSDv2
15 | ```
16 | cd multi-object-tracker
17 | cd ./examples/pretrained_models/tensorflow_weights
18 | sudo chmod +x ./get_ssd_model.sh
19 | ./get_ssd_model.sh
20 | ```
21 | 
22 | ##### Caffemodel - MobileNetSSD
23 | ```
24 | cd multi-object-tracker
25 | cd ./examples/pretrained_models/caffemodel_weights
26 | sudo chmod +x ./get_caffemodel.sh
27 | ./get_caffemodel.sh
28 | ```
29 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2018 Aditya M. Deshpande
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | [cars-yolo-output]: examples/assets/cars.gif "Sample Output with YOLO"
 2 | [cows-tf-ssd-output]: examples/assets/cows.gif "Sample Output with SSD"
 3 | 
 4 | # Multi-object trackers in Python
 5 | Easy-to-use implementations of various multi-object tracking algorithms.
 6 | 
 7 | [![DOI](https://zenodo.org/badge/148338463.svg)](https://zenodo.org/badge/latestdoi/148338463)
 8 | 
 9 | 
10 | 
11 | `YOLOv3 + CentroidTracker` | `TF-MobileNetSSD + CentroidTracker`
12 | :-------------------------:|:-------------------------:
13 | ![Cars with YOLO][cars-yolo-output] | ![Cows with tf-SSD][cows-tf-ssd-output]
14 | Video source: [link](https://flic.kr/p/L6qyxj) | Video source: [link](https://flic.kr/p/26WeEWy)
15 | 
16 | 
17 | ## Available Multi Object Trackers
18 | 
19 | - CentroidTracker
20 | - IOUTracker
21 | - CentroidKF_Tracker
22 | - SORT
23 | 
24 | 
25 | ## Available OpenCV-based object detectors
26 | 
27 | - detector.TF_SSDMobileNetV2
28 | - detector.Caffe_SSDMobileNet
29 | - detector.YOLOv3
30 | 
31 | ## Installation
32 | 
33 | This package requires OpenCV (version 3.4.3 or later; pip-installable builds are available [here](https://pypi.org/project/opencv-python/)). Install `motrackers` from PyPI with the following command:
34 | ```
35 | pip install motrackers
36 | ```
37 | 
38 | Alternatively, you can install the package from GitHub:
39 | ```
40 | git clone https://github.com/adipandas/multi-object-tracker
41 | cd multi-object-tracker
42 | pip install [-e] .
43 | ```
44 | 
45 | **Note: using the neural-network models with a GPU**
46 | To run the OpenCV `dnn`-based object detection modules provided in this repository on a GPU, you may have to compile a CUDA-enabled version of OpenCV from source.
47 | * To build OpenCV from source, refer to the following links:
48 |   [[link-1](https://docs.opencv.org/master/df/d65/tutorial_table_of_content_introduction.html)],
49 |   [[link-2](https://www.pyimagesearch.com/2020/02/03/how-to-use-opencvs-dnn-module-with-nvidia-gpus-cuda-and-cudnn/)]
50 | 
51 | ## How to use: Examples
52 | 
53 | The interface for each tracker is simple and similar. Please refer to the example template below.
54 | 
55 | ```
56 | from motrackers import CentroidTracker  # or IOUTracker, CentroidKF_Tracker, SORT
57 | input_data = ...
58 | detector = ...
59 | tracker = CentroidTracker(...)  # or IOUTracker(...), CentroidKF_Tracker(...), SORT(...)
60 | while True:
61 |     done, image = ...  # read the next frame from `input_data`; `done` is True when no frames remain
62 |     if done:
63 |         break
64 |     detection_bboxes, detection_confidences, detection_class_ids = detector.detect(image)
65 |     # NOTE:
66 |     # * `detection_bboxes` is a numpy.ndarray of shape (n, 4) with each row containing (bb_left, bb_top, bb_width, bb_height);
67 |     # * `detection_confidences` is a numpy.ndarray of shape (n,);
68 |     # * `detection_class_ids` is a numpy.ndarray of shape (n,).
69 |     output_tracks = tracker.update(detection_bboxes, detection_confidences, detection_class_ids)
70 |     # `output_tracks` is a list; each element is a tuple of
71 |     # (<frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <confidence>, <x>, <y>, <z>)
72 |     for track in output_tracks:
73 |         frame, id, bb_left, bb_top, bb_width, bb_height, confidence, x, y, z = track
74 |         assert len(track) == 10
75 |         print(track)
76 | ```
77 | 
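As a concrete sketch of the template above, the snippet below pairs the `YOLOv3` detector with a video file read via OpenCV. The constructor arguments and drawing helpers follow the example scripts under `examples/example_scripts` (e.g. `mot_YOLOv3.py`); treat the exact parameter names, the weight-file names, and the video path as illustrative assumptions, and check those scripts and `get_yolo.sh` for the authoritative usage.

```
import cv2
from motrackers import CentroidTracker
from motrackers.detectors import YOLOv3
from motrackers.utils import draw_tracks

# Files assumed to be fetched by examples/pretrained_models/yolo_weights/get_yolo.sh
WEIGHTS_DIR = "./examples/pretrained_models/yolo_weights"

detector = YOLOv3(
    weights_path=f"{WEIGHTS_DIR}/yolov3.weights",
    configfile_path=f"{WEIGHTS_DIR}/yolov3.cfg",
    labels_path=f"{WEIGHTS_DIR}/coco_names.json",
    confidence_threshold=0.5,
    nms_threshold=0.2,
    draw_bboxes=True,
    use_gpu=False,
)
tracker = CentroidTracker(max_lost=0, tracker_output_format='mot_challenge')

cap = cv2.VideoCapture("./examples/video_data/cars.mp4")  # hypothetical video path
while True:
    ok, image = cap.read()
    if not ok:  # stream exhausted
        break
    bboxes, confidences, class_ids = detector.detect(image)
    tracks = tracker.update(bboxes, confidences, class_ids)

    # Optional visualization: draw detections, then overlay track IDs.
    image = detector.draw_bboxes(image, bboxes, confidences, class_ids)
    image = draw_tracks(image, tracks)
    cv2.imshow("tracking", image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```

With `tracker_output_format='mot_challenge'`, each element of `tracks` is the ten-field tuple described above, so the output can be written out line by line and scored with the evaluation code under `examples/motmetrics_eval`.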
78 | Please refer to the [examples](https://github.com/adipandas/multi-object-tracker/tree/master/examples) folder of this repository for more details. You can clone the repository and run the examples.
79 | 
80 | ## Pretrained object detection models
81 | 
82 | You will have to download the pretrained weights for the neural-network models.
83 | The shell scripts for downloading these are provided [here](https://github.com/adipandas/multi-object-tracker/tree/master/examples/pretrained_models) in the respective folders.
84 | Please refer to [DOWNLOAD_WEIGHTS.md](https://github.com/adipandas/multi-object-tracker/blob/master/DOWNLOAD_WEIGHTS.md) for more details.
85 | 
86 | ### Notes
87 | * The implementations here differ in places from those described in the `SORT` and `IoU Tracker` papers.
88 | * In case you find any bugs, I will be happy to accept your pull request, or you can create an issue to point them out.
89 | 
90 | ## References, Credits and Contributions
91 | Please see [REFERENCES.md](https://github.com/adipandas/multi-object-tracker/blob/master/docs/readme/REFERENCES.md) and [CONTRIBUTING.md](https://github.com/adipandas/multi-object-tracker/blob/master/docs/readme/CONTRIBUTING.md).
92 | 
93 | ## Citation
94 | 
95 | If you use this repository in your work, please consider citing it with:
96 | ```
97 | @misc{multiobjtracker_amd2018,
98 |   author = {Deshpande, Aditya M.},
99 |   title = {Multi-object trackers in Python},
100 |   year = {2020},
101 |   publisher = {GitHub},
102 |   journal = {GitHub repository},
103 |   howpublished = {\url{https://github.com/adipandas/multi-object-tracker}},
104 | }
105 | ```
106 | 
107 | 
--------------------------------------------------------------------------------
/docs/.buildinfo:
--------------------------------------------------------------------------------
 1 | # Sphinx build info version 1
 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
 3 | config: cc69624f44c2d9a8c8c17ada5937586c
 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7
--------------------------------------------------------------------------------
/docs/.doctrees/environment.pickle:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/.doctrees/environment.pickle
--------------------------------------------------------------------------------
/docs/.doctrees/includeme/apidocuments.doctree:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/.doctrees/includeme/apidocuments.doctree
--------------------------------------------------------------------------------
/docs/.doctrees/includeme/readmefile.doctree:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/.doctrees/includeme/readmefile.doctree
--------------------------------------------------------------------------------
/docs/.doctrees/index.doctree:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/.doctrees/index.doctree
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/.nojekyll
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
 1 | # Minimal makefile for Sphinx documentation
 2 | #
 3 | 
 4 | # You can set these variables from the command line, and also
 5 | # from the environment for the first two.
 6 | SPHINXOPTS    ?=
 7 | SPHINXBUILD   ?= sphinx-build
 8 | SOURCEDIR     = source
 9 | BUILDDIR      = build
10 | 
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 | 
15 | .PHONY: help Makefile
16 | 
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 | 
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
 1 | # DOCs for Multi-object Trackers in Python
 2 | 
 3 | Install the following to build this documentation:
 4 | 1. Sphinx - [[link](https://www.sphinx-doc.org/)]
 5 | 2. sphinx-googleanalytics - [[link](https://github.com/sphinx-contrib/googleanalytics)]
 6 | 
 7 | ## How to build
 8 | ```
 9 | cd multi-object-tracker/docs
10 | sphinx-build -b html source .
11 | ```
--------------------------------------------------------------------------------
/docs/_images/cars.gif:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_images/cars.gif
--------------------------------------------------------------------------------
/docs/_images/cows.gif:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_images/cows.gif
--------------------------------------------------------------------------------
/docs/_modules/index.html:
--------------------------------------------------------------------------------
 [Sphinx-generated HTML page ("Overview: module code — Multi-object trackers in Python 1.0.0 documentation"); the markup was lost in extraction. Only the page title, the module-index breadcrumb, and the footer ("© Copyright 2021, Aditya M. Deshpande." / "Built with Sphinx using a theme provided by Read the Docs.") survive.]
113 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /docs/_sources/includeme/apidocuments.rst.txt: -------------------------------------------------------------------------------- 1 | .. reference_docs: 2 | 3 | Tracker 4 | ======= 5 | 6 | .. autoclass:: motrackers.tracker.Tracker 7 | :members: 8 | 9 | SORT 10 | ==== 11 | 12 | .. autofunction:: motrackers.sort_tracker.assign_tracks2detection_iou 13 | 14 | .. autoclass:: motrackers.sort_tracker.SORT 15 | :members: 16 | 17 | IOU Tracker 18 | =========== 19 | 20 | .. autoclass:: motrackers.iou_tracker.IOUTracker 21 | :members: 22 | 23 | Kalman Filter based Centroid Tracker 24 | ==================================== 25 | 26 | .. autofunction:: motrackers.centroid_kf_tracker.assign_tracks2detection_centroid_distances 27 | 28 | .. autoclass:: motrackers.centroid_kf_tracker.CentroidKF_Tracker 29 | :members: 30 | 31 | Tracks 32 | ====== 33 | 34 | .. autoclass:: motrackers.track.Track 35 | :members: 36 | 37 | .. autoclass:: motrackers.track.KFTrackSORT 38 | :members: 39 | 40 | .. autoclass:: motrackers.track.KFTrack4DSORT 41 | :members: 42 | 43 | .. autoclass:: motrackers.track.KFTrackCentroid 44 | :members: 45 | 46 | Kalman Filters 47 | ============== 48 | 49 | .. autoclass:: motrackers.kalman_tracker.KalmanFilter 50 | :members: 51 | 52 | .. autoclass:: motrackers.kalman_tracker.KFTrackerConstantAcceleration 53 | :members: 54 | 55 | .. autoclass:: motrackers.kalman_tracker.KFTracker1D 56 | :members: 57 | 58 | .. autoclass:: motrackers.kalman_tracker.KFTracker2D 59 | :members: 60 | 61 | .. autoclass:: motrackers.kalman_tracker.KFTracker4D 62 | :members: 63 | 64 | .. autoclass:: motrackers.kalman_tracker.KFTrackerSORT 65 | :members: 66 | 67 | Object Detection 68 | ================ 69 | 70 | .. autoclass:: motrackers.detectors.detector.Detector 71 | :members: 72 | 73 | .. autoclass:: motrackers.detectors.caffe.Caffe_SSDMobileNet 74 | :members: 75 | 76 | .. autoclass:: motrackers.detectors.tf.TF_SSDMobileNetV2 77 | :members: 78 | 79 | .. autoclass:: motrackers.detectors.yolo.YOLOv3 80 | :members: 81 | 82 | Utilities 83 | ========= 84 | 85 | .. autofunction:: motrackers.utils.misc.get_centroid 86 | 87 | .. autofunction:: motrackers.utils.misc.iou 88 | 89 | .. autofunction:: motrackers.utils.misc.iou_xywh 90 | 91 | .. autofunction:: motrackers.utils.misc.xyxy2xywh 92 | 93 | .. autofunction:: motrackers.utils.misc.xywh2xyxy 94 | 95 | .. autofunction:: motrackers.utils.misc.midwh2xywh 96 | 97 | .. autofunction:: motrackers.utils.misc.intersection_complement_indices 98 | 99 | .. autofunction:: motrackers.utils.misc.nms 100 | 101 | .. autofunction:: motrackers.utils.misc.draw_tracks 102 | 103 | .. autofunction:: motrackers.utils.misc.load_labelsjson 104 | 105 | .. autofunction:: motrackers.utils.misc.dict2jsonfile 106 | 107 | .. autofunction:: motrackers.utils.filechooser_utils.create_filechooser 108 | 109 | .. autofunction:: motrackers.utils.filechooser_utils.select_caffemodel_prototxt 110 | 111 | .. autofunction:: motrackers.utils.filechooser_utils.select_caffemodel_weights 112 | 113 | .. autofunction:: motrackers.utils.filechooser_utils.select_caffemodel 114 | 115 | .. autofunction:: motrackers.utils.filechooser_utils.select_videofile 116 | 117 | .. autofunction:: motrackers.utils.filechooser_utils.select_yolo_weights 118 | 119 | .. autofunction:: motrackers.utils.filechooser_utils.select_coco_labels 120 | 121 | .. autofunction:: motrackers.utils.filechooser_utils.select_yolo_config 122 | 123 | .. 
autofunction:: motrackers.utils.filechooser_utils.select_yolo_model 124 | 125 | .. autofunction:: motrackers.utils.filechooser_utils.select_pbtxt 126 | 127 | .. autofunction:: motrackers.utils.filechooser_utils.select_tfmobilenet_weights 128 | 129 | .. autofunction:: motrackers.utils.filechooser_utils.select_tfmobilenet 130 | 131 | .. mdinclude:: ./../../../DOWNLOAD_WEIGHTS.md 132 | 133 | .. mdinclude:: ./../../readme/REFERENCES.md 134 | 135 | .. mdinclude:: ./../../readme/CODE_OF_CONDUCT.md 136 | -------------------------------------------------------------------------------- /docs/_sources/includeme/readmefile.rst.txt: -------------------------------------------------------------------------------- 1 | .. mdinclude:: ./../../../README.md 2 | 3 | Example: `TF-MobileNetSSD + CentroidTracker` 4 | ============================================ 5 | 6 | .. image:: ./../../../examples/assets/cows.gif 7 | :alt: Cows with tf-SSD 8 | :target: https://flic.kr/p/26WeEWy 9 | :class: with-shadow 10 | :width: 600px 11 | 12 | Example: `YOLOv3 + CentroidTracker` 13 | =================================== 14 | 15 | .. image:: ./../../../examples/assets/cars.gif 16 | :alt: Cars with YOLO 17 | :target: https://flic.kr/p/L6qyxj 18 | :class: with-shadow 19 | :width: 600px -------------------------------------------------------------------------------- /docs/_sources/index.rst.txt: -------------------------------------------------------------------------------- 1 | .. Multi-object trackers in Python documentation master file, created by 2 | sphinx-quickstart on Sat Feb 27 09:59:32 2021. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Multi-object trackers in Python's documentation! 7 | =========================================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | includeme/readmefile.rst 13 | includeme/apidocuments.rst 14 | 15 | 16 | Indices and tables 17 | ================== 18 | 19 | * :ref:`genindex` 20 | 21 | -------------------------------------------------------------------------------- /docs/_static/_sphinx_javascript_frameworks_compat.js: -------------------------------------------------------------------------------- 1 | /* 2 | * _sphinx_javascript_frameworks_compat.js 3 | * ~~~~~~~~~~ 4 | * 5 | * Compatability shim for jQuery and underscores.js. 6 | * 7 | * WILL BE REMOVED IN Sphinx 6.0 8 | * xref RemovedInSphinx60Warning 9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | 18 | /** 19 | * small helper function to urldecode strings 20 | * 21 | * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL 22 | */ 23 | jQuery.urldecode = function(x) { 24 | if (!x) { 25 | return x 26 | } 27 | return decodeURIComponent(x.replace(/\+/g, ' ')); 28 | }; 29 | 30 | /** 31 | * small helper function to urlencode strings 32 | */ 33 | jQuery.urlencode = encodeURIComponent; 34 | 35 | /** 36 | * This function returns the parsed url parameters of the 37 | * current request. Multiple values per key are supported, 38 | * it will always return arrays of strings for the value parts. 
39 | */ 40 | jQuery.getQueryParameters = function(s) { 41 | if (typeof s === 'undefined') 42 | s = document.location.search; 43 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 44 | var result = {}; 45 | for (var i = 0; i < parts.length; i++) { 46 | var tmp = parts[i].split('=', 2); 47 | var key = jQuery.urldecode(tmp[0]); 48 | var value = jQuery.urldecode(tmp[1]); 49 | if (key in result) 50 | result[key].push(value); 51 | else 52 | result[key] = [value]; 53 | } 54 | return result; 55 | }; 56 | 57 | /** 58 | * highlight a given string on a jquery object by wrapping it in 59 | * span elements with the given class name. 60 | */ 61 | jQuery.fn.highlightText = function(text, className) { 62 | function highlight(node, addItems) { 63 | if (node.nodeType === 3) { 64 | var val = node.nodeValue; 65 | var pos = val.toLowerCase().indexOf(text); 66 | if (pos >= 0 && 67 | !jQuery(node.parentNode).hasClass(className) && 68 | !jQuery(node.parentNode).hasClass("nohighlight")) { 69 | var span; 70 | var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); 71 | if (isInSVG) { 72 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 73 | } else { 74 | span = document.createElement("span"); 75 | span.className = className; 76 | } 77 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 78 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 79 | document.createTextNode(val.substr(pos + text.length)), 80 | node.nextSibling)); 81 | node.nodeValue = val.substr(0, pos); 82 | if (isInSVG) { 83 | var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); 84 | var bbox = node.parentElement.getBBox(); 85 | rect.x.baseVal.value = bbox.x; 86 | rect.y.baseVal.value = bbox.y; 87 | rect.width.baseVal.value = bbox.width; 88 | rect.height.baseVal.value = bbox.height; 89 | rect.setAttribute('class', className); 90 | addItems.push({ 91 | "parent": node.parentNode, 92 | "target": rect}); 93 | } 94 | } 95 | } 96 | else if (!jQuery(node).is("button, select, textarea")) { 97 | jQuery.each(node.childNodes, function() { 98 | highlight(this, addItems); 99 | }); 100 | } 101 | } 102 | var addItems = []; 103 | var result = this.each(function() { 104 | highlight(this, addItems); 105 | }); 106 | for (var i = 0; i < addItems.length; ++i) { 107 | jQuery(addItems[i].parent).before(addItems[i].target); 108 | } 109 | return result; 110 | }; 111 | 112 | /* 113 | * backward compatibility for jQuery.browser 114 | * This will be supported until firefox bug is fixed. 115 | */ 116 | if (!jQuery.browser) { 117 | jQuery.uaMatch = function(ua) { 118 | ua = ua.toLowerCase(); 119 | 120 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || 121 | /(webkit)[ \/]([\w.]+)/.exec(ua) || 122 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || 123 | /(msie) ([\w.]+)/.exec(ua) || 124 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? 
rv:([\w.]+)|)/.exec(ua) || 125 | []; 126 | 127 | return { 128 | browser: match[ 1 ] || "", 129 | version: match[ 2 ] || "0" 130 | }; 131 | }; 132 | jQuery.browser = {}; 133 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; 134 | } 135 | -------------------------------------------------------------------------------- /docs/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version 
.icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/Roboto-Slab-Bold.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/Roboto-Slab-Bold.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/Roboto-Slab-Regular.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/Roboto-Slab-Regular.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/lato-bold-italic.woff 
-------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/lato-bold.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/lato-normal-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/lato-normal.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/css/fonts/lato-normal.woff2 -------------------------------------------------------------------------------- /docs/_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * doctools.js 3 | * ~~~~~~~~~~~ 4 | * 5 | * Base JavaScript utilities for all Sphinx HTML documentation. 6 | * 7 | * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | "use strict"; 12 | 13 | const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ 14 | "TEXTAREA", 15 | "INPUT", 16 | "SELECT", 17 | "BUTTON", 18 | ]); 19 | 20 | const _ready = (callback) => { 21 | if (document.readyState !== "loading") { 22 | callback(); 23 | } else { 24 | document.addEventListener("DOMContentLoaded", callback); 25 | } 26 | }; 27 | 28 | /** 29 | * Small JavaScript module for the documentation. 
30 | */ 31 | const Documentation = { 32 | init: () => { 33 | Documentation.initDomainIndexTable(); 34 | Documentation.initOnKeyListeners(); 35 | }, 36 | 37 | /** 38 | * i18n support 39 | */ 40 | TRANSLATIONS: {}, 41 | PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), 42 | LOCALE: "unknown", 43 | 44 | // gettext and ngettext don't access this so that the functions 45 | // can safely bound to a different name (_ = Documentation.gettext) 46 | gettext: (string) => { 47 | const translated = Documentation.TRANSLATIONS[string]; 48 | switch (typeof translated) { 49 | case "undefined": 50 | return string; // no translation 51 | case "string": 52 | return translated; // translation exists 53 | default: 54 | return translated[0]; // (singular, plural) translation tuple exists 55 | } 56 | }, 57 | 58 | ngettext: (singular, plural, n) => { 59 | const translated = Documentation.TRANSLATIONS[singular]; 60 | if (typeof translated !== "undefined") 61 | return translated[Documentation.PLURAL_EXPR(n)]; 62 | return n === 1 ? singular : plural; 63 | }, 64 | 65 | addTranslations: (catalog) => { 66 | Object.assign(Documentation.TRANSLATIONS, catalog.messages); 67 | Documentation.PLURAL_EXPR = new Function( 68 | "n", 69 | `return (${catalog.plural_expr})` 70 | ); 71 | Documentation.LOCALE = catalog.locale; 72 | }, 73 | 74 | /** 75 | * helper function to focus on search bar 76 | */ 77 | focusSearchBar: () => { 78 | document.querySelectorAll("input[name=q]")[0]?.focus(); 79 | }, 80 | 81 | /** 82 | * Initialise the domain index toggle buttons 83 | */ 84 | initDomainIndexTable: () => { 85 | const toggler = (el) => { 86 | const idNumber = el.id.substr(7); 87 | const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); 88 | if (el.src.substr(-9) === "minus.png") { 89 | el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; 90 | toggledRows.forEach((el) => (el.style.display = "none")); 91 | } else { 92 | el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; 93 | toggledRows.forEach((el) => (el.style.display = "")); 94 | } 95 | }; 96 | 97 | const togglerElements = document.querySelectorAll("img.toggler"); 98 | togglerElements.forEach((el) => 99 | el.addEventListener("click", (event) => toggler(event.currentTarget)) 100 | ); 101 | togglerElements.forEach((el) => (el.style.display = "")); 102 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); 103 | }, 104 | 105 | initOnKeyListeners: () => { 106 | // only install a listener if it is really needed 107 | if ( 108 | !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && 109 | !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS 110 | ) 111 | return; 112 | 113 | document.addEventListener("keydown", (event) => { 114 | // bail for input elements 115 | if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; 116 | // bail with special keys 117 | if (event.altKey || event.ctrlKey || event.metaKey) return; 118 | 119 | if (!event.shiftKey) { 120 | switch (event.key) { 121 | case "ArrowLeft": 122 | if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; 123 | 124 | const prevLink = document.querySelector('link[rel="prev"]'); 125 | if (prevLink && prevLink.href) { 126 | window.location.href = prevLink.href; 127 | event.preventDefault(); 128 | } 129 | break; 130 | case "ArrowRight": 131 | if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; 132 | 133 | const nextLink = document.querySelector('link[rel="next"]'); 134 | if (nextLink && nextLink.href) { 135 | window.location.href = nextLink.href; 136 | event.preventDefault(); 137 | } 138 | 
break; 139 | } 140 | } 141 | 142 | // some keyboard layouts may need Shift to get / 143 | switch (event.key) { 144 | case "/": 145 | if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; 146 | Documentation.focusSearchBar(); 147 | event.preventDefault(); 148 | } 149 | }); 150 | }, 151 | }; 152 | 153 | // quick alias for translations 154 | const _ = Documentation.gettext; 155 | 156 | _ready(Documentation.init); 157 | -------------------------------------------------------------------------------- /docs/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '1.0.0', 4 | LANGUAGE: 'en', 5 | COLLAPSE_INDEX: false, 6 | BUILDER: 'html', 7 | FILE_SUFFIX: '.html', 8 | LINK_SUFFIX: '.html', 9 | HAS_SOURCE: true, 10 | SOURCELINK_SUFFIX: '.txt', 11 | NAVIGATION_WITH_KEYS: false, 12 | SHOW_SEARCH_SUMMARY: true, 13 | ENABLE_SEARCH_SHORTCUTS: true, 14 | }; -------------------------------------------------------------------------------- /docs/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/file.png -------------------------------------------------------------------------------- /docs/_static/js/badge_only.js: -------------------------------------------------------------------------------- 1 | !function(e){var t={};function r(n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=4)}({4:function(e,t,r){}}); -------------------------------------------------------------------------------- /docs/_static/js/html5shiv-printshiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3-pre | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return 
a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); 
-------------------------------------------------------------------------------- /docs/_static/js/html5shiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /docs/_static/js/theme.js: -------------------------------------------------------------------------------- 1 | !function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof 
Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0 63 | var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 64 | var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 65 | var s_v = "^(" + C + ")?" 
+ v; // vowel in stem 66 | 67 | this.stemWord = function (w) { 68 | var stem; 69 | var suffix; 70 | var firstch; 71 | var origword = w; 72 | 73 | if (w.length < 3) 74 | return w; 75 | 76 | var re; 77 | var re2; 78 | var re3; 79 | var re4; 80 | 81 | firstch = w.substr(0,1); 82 | if (firstch == "y") 83 | w = firstch.toUpperCase() + w.substr(1); 84 | 85 | // Step 1a 86 | re = /^(.+?)(ss|i)es$/; 87 | re2 = /^(.+?)([^s])s$/; 88 | 89 | if (re.test(w)) 90 | w = w.replace(re,"$1$2"); 91 | else if (re2.test(w)) 92 | w = w.replace(re2,"$1$2"); 93 | 94 | // Step 1b 95 | re = /^(.+?)eed$/; 96 | re2 = /^(.+?)(ed|ing)$/; 97 | if (re.test(w)) { 98 | var fp = re.exec(w); 99 | re = new RegExp(mgr0); 100 | if (re.test(fp[1])) { 101 | re = /.$/; 102 | w = w.replace(re,""); 103 | } 104 | } 105 | else if (re2.test(w)) { 106 | var fp = re2.exec(w); 107 | stem = fp[1]; 108 | re2 = new RegExp(s_v); 109 | if (re2.test(stem)) { 110 | w = stem; 111 | re2 = /(at|bl|iz)$/; 112 | re3 = new RegExp("([^aeiouylsz])\\1$"); 113 | re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 114 | if (re2.test(w)) 115 | w = w + "e"; 116 | else if (re3.test(w)) { 117 | re = /.$/; 118 | w = w.replace(re,""); 119 | } 120 | else if (re4.test(w)) 121 | w = w + "e"; 122 | } 123 | } 124 | 125 | // Step 1c 126 | re = /^(.+?)y$/; 127 | if (re.test(w)) { 128 | var fp = re.exec(w); 129 | stem = fp[1]; 130 | re = new RegExp(s_v); 131 | if (re.test(stem)) 132 | w = stem + "i"; 133 | } 134 | 135 | // Step 2 136 | re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; 137 | if (re.test(w)) { 138 | var fp = re.exec(w); 139 | stem = fp[1]; 140 | suffix = fp[2]; 141 | re = new RegExp(mgr0); 142 | if (re.test(stem)) 143 | w = stem + step2list[suffix]; 144 | } 145 | 146 | // Step 3 147 | re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; 148 | if (re.test(w)) { 149 | var fp = re.exec(w); 150 | stem = fp[1]; 151 | suffix = fp[2]; 152 | re = new RegExp(mgr0); 153 | if (re.test(stem)) 154 | w = stem + step3list[suffix]; 155 | } 156 | 157 | // Step 4 158 | re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; 159 | re2 = /^(.+?)(s|t)(ion)$/; 160 | if (re.test(w)) { 161 | var fp = re.exec(w); 162 | stem = fp[1]; 163 | re = new RegExp(mgr1); 164 | if (re.test(stem)) 165 | w = stem; 166 | } 167 | else if (re2.test(w)) { 168 | var fp = re2.exec(w); 169 | stem = fp[1] + fp[2]; 170 | re2 = new RegExp(mgr1); 171 | if (re2.test(stem)) 172 | w = stem; 173 | } 174 | 175 | // Step 5 176 | re = /^(.+?)e$/; 177 | if (re.test(w)) { 178 | var fp = re.exec(w); 179 | stem = fp[1]; 180 | re = new RegExp(mgr1); 181 | re2 = new RegExp(meq1); 182 | re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 183 | if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) 184 | w = stem; 185 | } 186 | re = /ll$/; 187 | re2 = new RegExp(mgr1); 188 | if (re.test(w) && re2.test(w)) { 189 | re = /.$/; 190 | w = w.replace(re,""); 191 | } 192 | 193 | // and turn initial Y back to y 194 | if (firstch == "y") 195 | w = firstch.toLowerCase() + w.substr(1); 196 | return w; 197 | } 198 | } 199 | 200 | -------------------------------------------------------------------------------- /docs/_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/minus.png 
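A note on the Porter stemmer in language_data.js above: all of its suffix rules are gated by the "measure" regexes (mgr0, meq1, mgr1, s_v) built from the consonant/vowel classes. Purely as an illustration of what those patterns encode, here is a small Python sketch of the step-1b "eed" rule shown above; it mirrors the JavaScript and is not part of this repository:

import re

# Character classes, mirroring the stemmer above.
c = "[^aeiou]"          # consonant
v = "[aeiouy]"          # vowel
C = c + "[^aeiouy]*"    # consonant sequence
V = v + "[aeiou]*"      # vowel sequence

mgr0 = "^(" + C + ")?" + V + C   # [C]VC... means measure m > 0

# Step 1b analogue: "...eed" loses its final letter only if the stem
# before "eed" has measure m > 0.
for word in ("agreed", "feed"):
    m = re.match(r"^(.+?)eed$", word)
    stemmed = word[:-1] if (m and re.search(mgr0, m.group(1))) else word
    print(word, "->", stemmed)

Running this prints "agreed -> agree" but "feed -> feed": the stem "agr" contains a vowel-consonant sequence while "f" does not, which is exactly the distinction the rules above encode.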
-------------------------------------------------------------------------------- /docs/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/_static/plus.png -------------------------------------------------------------------------------- /docs/_static/pygments.css: -------------------------------------------------------------------------------- 1 | pre { line-height: 125%; } 2 | td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } 3 | span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } 4 | td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } 5 | span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } 6 | .highlight .hll { background-color: #ffffcc } 7 | .highlight { background: #f8f8f8; } 8 | .highlight .c { color: #3D7B7B; font-style: italic } /* Comment */ 9 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 10 | .highlight .k { color: #008000; font-weight: bold } /* Keyword */ 11 | .highlight .o { color: #666666 } /* Operator */ 12 | .highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ 13 | .highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ 14 | .highlight .cp { color: #9C6500 } /* Comment.Preproc */ 15 | .highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ 16 | .highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ 17 | .highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ 18 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 19 | .highlight .ge { font-style: italic } /* Generic.Emph */ 20 | .highlight .gr { color: #E40000 } /* Generic.Error */ 21 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 22 | .highlight .gi { color: #008400 } /* Generic.Inserted */ 23 | .highlight .go { color: #717171 } /* Generic.Output */ 24 | .highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ 25 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 26 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 27 | .highlight .gt { color: #0044DD } /* Generic.Traceback */ 28 | .highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ 29 | .highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ 30 | .highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ 31 | .highlight .kp { color: #008000 } /* Keyword.Pseudo */ 32 | .highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ 33 | .highlight .kt { color: #B00040 } /* Keyword.Type */ 34 | .highlight .m { color: #666666 } /* Literal.Number */ 35 | .highlight .s { color: #BA2121 } /* Literal.String */ 36 | .highlight .na { color: #687822 } /* Name.Attribute */ 37 | .highlight .nb { color: #008000 } /* Name.Builtin */ 38 | .highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */ 39 | .highlight .no { color: #880000 } /* Name.Constant */ 40 | .highlight .nd { color: #AA22FF } /* Name.Decorator */ 41 | .highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */ 42 | .highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ 43 | .highlight .nf { color: #0000FF } /* Name.Function 
*/ 44 | .highlight .nl { color: #767600 } /* Name.Label */ 45 | .highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ 46 | .highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ 47 | .highlight .nv { color: #19177C } /* Name.Variable */ 48 | .highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ 49 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 50 | .highlight .mb { color: #666666 } /* Literal.Number.Bin */ 51 | .highlight .mf { color: #666666 } /* Literal.Number.Float */ 52 | .highlight .mh { color: #666666 } /* Literal.Number.Hex */ 53 | .highlight .mi { color: #666666 } /* Literal.Number.Integer */ 54 | .highlight .mo { color: #666666 } /* Literal.Number.Oct */ 55 | .highlight .sa { color: #BA2121 } /* Literal.String.Affix */ 56 | .highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ 57 | .highlight .sc { color: #BA2121 } /* Literal.String.Char */ 58 | .highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ 59 | .highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ 60 | .highlight .s2 { color: #BA2121 } /* Literal.String.Double */ 61 | .highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ 62 | .highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ 63 | .highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ 64 | .highlight .sx { color: #008000 } /* Literal.String.Other */ 65 | .highlight .sr { color: #A45A77 } /* Literal.String.Regex */ 66 | .highlight .s1 { color: #BA2121 } /* Literal.String.Single */ 67 | .highlight .ss { color: #19177C } /* Literal.String.Symbol */ 68 | .highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ 69 | .highlight .fm { color: #0000FF } /* Name.Function.Magic */ 70 | .highlight .vc { color: #19177C } /* Name.Variable.Class */ 71 | .highlight .vg { color: #19177C } /* Name.Variable.Global */ 72 | .highlight .vi { color: #19177C } /* Name.Variable.Instance */ 73 | .highlight .vm { color: #19177C } /* Name.Variable.Magic */ 74 | .highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /docs/_static/sphinx_highlight.js: -------------------------------------------------------------------------------- 1 | /* Highlighting utilities for Sphinx HTML documentation. */ 2 | "use strict"; 3 | 4 | const SPHINX_HIGHLIGHT_ENABLED = true 5 | 6 | /** 7 | * highlight a given string on a node by wrapping it in 8 | * span elements with the given class name. 
9 | */ 10 | const _highlight = (node, addItems, text, className) => { 11 | if (node.nodeType === Node.TEXT_NODE) { 12 | const val = node.nodeValue; 13 | const parent = node.parentNode; 14 | const pos = val.toLowerCase().indexOf(text); 15 | if ( 16 | pos >= 0 && 17 | !parent.classList.contains(className) && 18 | !parent.classList.contains("nohighlight") 19 | ) { 20 | let span; 21 | 22 | const closestNode = parent.closest("body, svg, foreignObject"); 23 | const isInSVG = closestNode && closestNode.matches("svg"); 24 | if (isInSVG) { 25 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 26 | } else { 27 | span = document.createElement("span"); 28 | span.classList.add(className); 29 | } 30 | 31 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 32 | parent.insertBefore( 33 | span, 34 | parent.insertBefore( 35 | document.createTextNode(val.substr(pos + text.length)), 36 | node.nextSibling 37 | ) 38 | ); 39 | node.nodeValue = val.substr(0, pos); 40 | 41 | if (isInSVG) { 42 | const rect = document.createElementNS( 43 | "http://www.w3.org/2000/svg", 44 | "rect" 45 | ); 46 | const bbox = parent.getBBox(); 47 | rect.x.baseVal.value = bbox.x; 48 | rect.y.baseVal.value = bbox.y; 49 | rect.width.baseVal.value = bbox.width; 50 | rect.height.baseVal.value = bbox.height; 51 | rect.setAttribute("class", className); 52 | addItems.push({ parent: parent, target: rect }); 53 | } 54 | } 55 | } else if (node.matches && !node.matches("button, select, textarea")) { 56 | node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); 57 | } 58 | }; 59 | const _highlightText = (thisNode, text, className) => { 60 | let addItems = []; 61 | _highlight(thisNode, addItems, text, className); 62 | addItems.forEach((obj) => 63 | obj.parent.insertAdjacentElement("beforebegin", obj.target) 64 | ); 65 | }; 66 | 67 | /** 68 | * Small JavaScript module for the documentation. 69 | */ 70 | const SphinxHighlight = { 71 | 72 | /** 73 | * highlight the search words provided in localstorage in the text 74 | */ 75 | highlightSearchWords: () => { 76 | if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight 77 | 78 | // get and clear terms from localstorage 79 | const url = new URL(window.location); 80 | const highlight = 81 | localStorage.getItem("sphinx_highlight_terms") 82 | || url.searchParams.get("highlight") 83 | || ""; 84 | localStorage.removeItem("sphinx_highlight_terms") 85 | url.searchParams.delete("highlight"); 86 | window.history.replaceState({}, "", url); 87 | 88 | // get individual terms from highlight string 89 | const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); 90 | if (terms.length === 0) return; // nothing to do 91 | 92 | // There should never be more than one element matching "div.body" 93 | const divBody = document.querySelectorAll("div.body"); 94 | const body = divBody.length ? 
divBody[0] : document.querySelector("body"); 95 | window.setTimeout(() => { 96 | terms.forEach((term) => _highlightText(body, term, "highlighted")); 97 | }, 10); 98 | 99 | const searchBox = document.getElementById("searchbox"); 100 | if (searchBox === null) return; 101 | searchBox.appendChild( 102 | document 103 | .createRange() 104 | .createContextualFragment( 105 | '<p class="highlight-link">' + 106 | '<a href="javascript:SphinxHighlight.hideSearchWords()">' + 107 | _("Hide Search Matches") + 108 | "</a></p>" 109 | ) 110 | ); 111 | }, 112 | 113 | /** 114 | * helper function to hide the search marks again 115 | */ 116 | hideSearchWords: () => { 117 | document 118 | .querySelectorAll("#searchbox .highlight-link") 119 | .forEach((el) => el.remove()); 120 | document 121 | .querySelectorAll("span.highlighted") 122 | .forEach((el) => el.classList.remove("highlighted")); 123 | localStorage.removeItem("sphinx_highlight_terms") 124 | }, 125 | 126 | initEscapeListener: () => { 127 | // only install a listener if it is really needed 128 | if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; 129 | 130 | document.addEventListener("keydown", (event) => { 131 | // bail for input elements 132 | if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; 133 | // bail with special keys 134 | if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; 135 | if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { 136 | SphinxHighlight.hideSearchWords(); 137 | event.preventDefault(); 138 | } 139 | }); 140 | }, 141 | }; 142 | 143 | _ready(SphinxHighlight.highlightSearchWords); 144 | _ready(SphinxHighlight.initEscapeListener); 145 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/docs/objects.inv -------------------------------------------------------------------------------- /docs/readme/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 
54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at adityadeshpande2010@gmail.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /docs/readme/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | * @adipandas 2 | 3 | * @cnavarrete : Bug fix [issue #22](https://github.com/adipandas/multi-object-tracker/issues/22) 4 | 5 | * @cansik : Pull request [#31](https://github.com/adipandas/multi-object-tracker/pull/31) [#32](https://github.com/adipandas/multi-object-tracker/pull/32) 6 | -------------------------------------------------------------------------------- /docs/readme/REFERENCES.md: -------------------------------------------------------------------------------- 1 | # References and Credits 2 | 3 | [[Webpage](https://adipandas.github.io/multi-object-tracker/)] 4 | [[GitHub](https://github.com/adipandas/multi-object-tracker)] 5 | 6 | This work is based on the following literature: 7 | 8 | 1. Bochinski, E., Eiselein, V., & Sikora, T. (2017, August). High-speed tracking-by-detection without using image information. In 2017 14th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS) (pp. 1-6). IEEE. [[pdf](http://elvera.nue.tu-berlin.de/files/1517Bochinski2017.pdf)] 9 | 2. Bewley, A., Ge, Z., Ott, L., Ramos, F., & Upcroft, B. (2016, September). Simple online and realtime tracking. In 2016 IEEE International Conference on Image Processing (ICIP) (pp. 3464-3468). IEEE. [[arxiv](https://arxiv.org/abs/1602.00763)] 10 | 3. YOLOv3. [[pdf](https://pjreddie.com/media/files/papers/YOLOv3.pdf)][[website](https://pjreddie.com/darknet/yolo/)] 11 | 4. Kalman Filter. [[wiki](https://en.wikipedia.org/wiki/Kalman_filter)] 12 | 5. TensorFlow Object Detection API [[GitHub](https://github.com/tensorflow/models/tree/master/research/object_detection)] 13 | 6. Caffe [[website](https://caffe.berkeleyvision.org/)][[GitHub](https://github.com/BVLC/caffe)] 14 | 7. OpenCV. [[GitHub](https://github.com/opencv/opencv)] [[Website](https://opencv.org/)]
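The SORT and Kalman-filter entries above (references 2 and 4) are the theoretical core of this package's SORT and CentroidKF_Tracker classes: each track carries a small state-space filter that predicts where the object will be in the next frame and is then corrected by the matched detection. Purely as an illustration of that predict/update cycle, here is a toy numpy sketch (made-up noise parameters; this is not the motrackers.kalman_tracker API):

import numpy as np

# Constant-velocity Kalman filter over a 1-D position measurement.
dt = 1.0
F = np.array([[1.0, dt], [0.0, 1.0]])   # state transition for [position, velocity]
H = np.array([[1.0, 0.0]])              # we only measure position
Q = 0.01 * np.eye(2)                    # process noise (assumed value)
R = np.array([[1.0]])                   # measurement noise (assumed value)

x = np.array([[0.0], [0.0]])            # initial state
P = np.eye(2)                           # initial covariance

for z in (1.0, 2.1, 2.9, 4.2):          # noisy positions, one per frame
    # Predict step: propagate state and covariance through the motion model.
    x = F @ x
    P = F @ P @ F.T + Q
    # Update step: correct the prediction with the measurement.
    y = np.array([[z]]) - H @ x         # innovation
    S = H @ P @ H.T + R                 # innovation covariance
    K = P @ H.T @ np.linalg.inv(S)      # Kalman gain
    x = x + K @ y
    P = (np.eye(2) - K @ H) @ P
    print(f"measured {z:.1f} -> filtered position {x[0, 0]:.2f}, velocity {x[1, 0]:.2f}")

SORT applies the same machinery per track, with a bounding-box state (center, scale, aspect ratio and their velocities) in place of the scalar position used here.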
-------------------------------------------------------------------------------- /docs/search.html: -------------------------------------------------------------------------------- [Sphinx-generated search page, "Search — Multi-object trackers in Python 1.0.0 documentation". Its HTML markup was stripped during extraction; the only text that survives is the breadcrumb navigation ("» Search"), the footer "© Copyright 2021, Aditya M. Deshpande." and "Built with Sphinx using a theme provided by Read the Docs."]
-------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | import os 14 | import sys 15 | sys.path.insert(0, os.path.abspath('./../../motrackers')) 16 | sys.path.insert(0, os.path.abspath('./../../')) 17 | 18 | 19 | # -- Project information ----------------------------------------------------- 20 | 21 | project = 'Multi-object trackers in Python' 22 | copyright = '2021, Aditya M. Deshpande' 23 | author = 'Aditya M. Deshpande' 24 | 25 | # The full version, including alpha/beta/rc tags 26 | release = '1.0.0' 27 | 28 | 29 | # -- General configuration --------------------------------------------------- 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 34 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'm2r2'] 35 | source_suffix = ['.rst', '.md'] 36 | 37 | # Add any paths that contain templates here, relative to this directory. 38 | templates_path = ['_templates'] 39 | 40 | # List of patterns, relative to source directory, that match files and 41 | # directories to ignore when looking for source files. 42 | # This pattern also affects html_static_path and html_extra_path. 43 | exclude_patterns = [] 44 | 45 | 46 | # -- Options for HTML output ------------------------------------------------- 47 | 48 | # The theme to use for HTML and HTML Help pages. See the documentation for 49 | # a list of builtin themes. 50 | # 51 | html_theme = 'sphinx_rtd_theme' 52 | 53 | # Add any paths that contain custom static files (such as style sheets) here, 54 | # relative to this directory. They are copied after the builtin static files, 55 | # so a file named "default.css" will overwrite the builtin "default.css". 56 | html_static_path = ['_static'] 57 | -------------------------------------------------------------------------------- /docs/source/includeme/apidocuments.rst: -------------------------------------------------------------------------------- 1 | .. _reference_docs: 2 | 3 | Tracker 4 | ======= 5 | 6 | .. autoclass:: motrackers.tracker.Tracker 7 | :members: 8 | 9 | SORT 10 | ==== 11 | 12 | .. autofunction:: motrackers.sort_tracker.assign_tracks2detection_iou 13 | 14 | .. autoclass:: motrackers.sort_tracker.SORT 15 | :members: 16 | 17 | IOU Tracker 18 | =========== 19 | 20 | .. autoclass:: motrackers.iou_tracker.IOUTracker 21 | :members: 22 | 23 | Kalman Filter based Centroid Tracker 24 | ==================================== 25 | 26 | .. autofunction:: motrackers.centroid_kf_tracker.assign_tracks2detection_centroid_distances 27 | 28 | .. autoclass:: motrackers.centroid_kf_tracker.CentroidKF_Tracker 29 | :members: 30 | 31 | Tracks 32 | ====== 33 | 34 | ..
autoclass:: motrackers.track.Track 35 | :members: 36 | 37 | .. autoclass:: motrackers.track.KFTrackSORT 38 | :members: 39 | 40 | .. autoclass:: motrackers.track.KFTrack4DSORT 41 | :members: 42 | 43 | .. autoclass:: motrackers.track.KFTrackCentroid 44 | :members: 45 | 46 | Kalman Filters 47 | ============== 48 | 49 | .. autoclass:: motrackers.kalman_tracker.KalmanFilter 50 | :members: 51 | 52 | .. autoclass:: motrackers.kalman_tracker.KFTrackerConstantAcceleration 53 | :members: 54 | 55 | .. autoclass:: motrackers.kalman_tracker.KFTracker1D 56 | :members: 57 | 58 | .. autoclass:: motrackers.kalman_tracker.KFTracker2D 59 | :members: 60 | 61 | .. autoclass:: motrackers.kalman_tracker.KFTracker4D 62 | :members: 63 | 64 | .. autoclass:: motrackers.kalman_tracker.KFTrackerSORT 65 | :members: 66 | 67 | Object Detection 68 | ================ 69 | 70 | .. autoclass:: motrackers.detectors.detector.Detector 71 | :members: 72 | 73 | .. autoclass:: motrackers.detectors.caffe.Caffe_SSDMobileNet 74 | :members: 75 | 76 | .. autoclass:: motrackers.detectors.tf.TF_SSDMobileNetV2 77 | :members: 78 | 79 | .. autoclass:: motrackers.detectors.yolo.YOLOv3 80 | :members: 81 | 82 | Utilities 83 | ========= 84 | 85 | .. autofunction:: motrackers.utils.misc.get_centroid 86 | 87 | .. autofunction:: motrackers.utils.misc.iou 88 | 89 | .. autofunction:: motrackers.utils.misc.iou_xywh 90 | 91 | .. autofunction:: motrackers.utils.misc.xyxy2xywh 92 | 93 | .. autofunction:: motrackers.utils.misc.xywh2xyxy 94 | 95 | .. autofunction:: motrackers.utils.misc.midwh2xywh 96 | 97 | .. autofunction:: motrackers.utils.misc.intersection_complement_indices 98 | 99 | .. autofunction:: motrackers.utils.misc.nms 100 | 101 | .. autofunction:: motrackers.utils.misc.draw_tracks 102 | 103 | .. autofunction:: motrackers.utils.misc.load_labelsjson 104 | 105 | .. autofunction:: motrackers.utils.misc.dict2jsonfile 106 | 107 | .. autofunction:: motrackers.utils.filechooser_utils.create_filechooser 108 | 109 | .. autofunction:: motrackers.utils.filechooser_utils.select_caffemodel_prototxt 110 | 111 | .. autofunction:: motrackers.utils.filechooser_utils.select_caffemodel_weights 112 | 113 | .. autofunction:: motrackers.utils.filechooser_utils.select_caffemodel 114 | 115 | .. autofunction:: motrackers.utils.filechooser_utils.select_videofile 116 | 117 | .. autofunction:: motrackers.utils.filechooser_utils.select_yolo_weights 118 | 119 | .. autofunction:: motrackers.utils.filechooser_utils.select_coco_labels 120 | 121 | .. autofunction:: motrackers.utils.filechooser_utils.select_yolo_config 122 | 123 | .. autofunction:: motrackers.utils.filechooser_utils.select_yolo_model 124 | 125 | .. autofunction:: motrackers.utils.filechooser_utils.select_pbtxt 126 | 127 | .. autofunction:: motrackers.utils.filechooser_utils.select_tfmobilenet_weights 128 | 129 | .. autofunction:: motrackers.utils.filechooser_utils.select_tfmobilenet 130 | 131 | .. mdinclude:: ./../../../DOWNLOAD_WEIGHTS.md 132 | 133 | .. mdinclude:: ./../../readme/REFERENCES.md 134 | 135 | .. mdinclude:: ./../../readme/CODE_OF_CONDUCT.md 136 | -------------------------------------------------------------------------------- /docs/source/includeme/readmefile.rst: -------------------------------------------------------------------------------- 1 | .. mdinclude:: ./../../../README.md 2 | 3 | Example: `TF-MobileNetSSD + CentroidTracker` 4 | ============================================ 5 | 6 | .. 
image:: ./../../../examples/assets/cows.gif 7 | :alt: Cows with tf-SSD 8 | :target: https://flic.kr/p/26WeEWy 9 | :class: with-shadow 10 | :width: 600px 11 | 12 | Example: `YOLOv3 + CentroidTracker` 13 | =================================== 14 | 15 | .. image:: ./../../../examples/assets/cars.gif 16 | :alt: Cars with YOLO 17 | :target: https://flic.kr/p/L6qyxj 18 | :class: with-shadow 19 | :width: 600px -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. Multi-object trackers in Python documentation master file, created by 2 | sphinx-quickstart on Sat Feb 27 09:59:32 2021. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Multi-object trackers in Python's documentation! 7 | =========================================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | includeme/readmefile.rst 13 | includeme/apidocuments.rst 14 | 15 | 16 | Indices and tables 17 | ================== 18 | 19 | * :ref:`genindex` 20 | 21 | -------------------------------------------------------------------------------- /examples/assets/cars.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/examples/assets/cars.gif -------------------------------------------------------------------------------- /examples/assets/cows.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/examples/assets/cows.gif -------------------------------------------------------------------------------- /examples/example_notebooks/detector_Caffe_SSDMobileNet.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Loading Caffe-model weights for SSD" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import cv2 as cv\n", 18 | "from motrackers.detectors.caffe import Caffe_SSDMobileNet" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "VIDEO_FILE = \"./../video_data/cars.mp4\"\n", 28 | "WEIGHTS_PATH = \"./../pretrained_models/caffemodel_weights/MobileNetSSD_deploy.caffemodel\"\n", 29 | "CONFIG_FILE_PATH = \"./../pretrained_models/caffemodel_weights/MobileNetSSD_deploy.prototxt\"\n", 30 | "LABELS_PATH=\"./../pretrained_models/caffemodel_weights/ssd_mobilenet_caffe_names.json\"\n", 31 | "CONFIDENCE_THRESHOLD = 0.5\n", 32 | "NMS_THRESHOLD = 0.2\n", 33 | "DRAW_BOUNDING_BOXES = True" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 3, 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "model = Caffe_SSDMobileNet(\n", 43 | " weights_path=WEIGHTS_PATH, \n", 44 | " configfile_path=CONFIG_FILE_PATH,\n", 45 | " labels_path=LABELS_PATH,\n", 46 | " confidence_threshold=CONFIDENCE_THRESHOLD,\n", 47 | " nms_threshold=NMS_THRESHOLD,\n", 48 | " draw_bboxes=DRAW_BOUNDING_BOXES\n", 49 | ")" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 4, 55 | "metadata": {}, 56 
| "outputs": [], 57 | "source": [ 58 | "cap = cv.VideoCapture(VIDEO_FILE)" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": 5, 64 | "metadata": { 65 | "scrolled": false 66 | }, 67 | "outputs": [], 68 | "source": [ 69 | "while True:\n", 70 | " ok, image = cap.read()\n", 71 | " \n", 72 | " if not ok:\n", 73 | " print(\"Cannot read the video feed.\")\n", 74 | " break\n", 75 | " \n", 76 | " bboxes, confidences, class_ids = model.detect(image)\n", 77 | " \n", 78 | " updated_image = model.draw_bboxes(image.copy(), bboxes, confidences, class_ids)\n", 79 | " \n", 80 | " cv.imshow(\"image\", updated_image)\n", 81 | " if cv.waitKey(1) & 0xFF == ord('q'):\n", 82 | " break\n", 83 | "\n", 84 | "cap.release()\n", 85 | "cv.destroyWindow(\"image\")" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [] 94 | } 95 | ], 96 | "metadata": { 97 | "kernelspec": { 98 | "display_name": "work_env", 99 | "language": "python", 100 | "name": "work_env" 101 | }, 102 | "language_info": { 103 | "codemirror_mode": { 104 | "name": "ipython", 105 | "version": 3 106 | }, 107 | "file_extension": ".py", 108 | "mimetype": "text/x-python", 109 | "name": "python", 110 | "nbconvert_exporter": "python", 111 | "pygments_lexer": "ipython3", 112 | "version": "3.6.9" 113 | } 114 | }, 115 | "nbformat": 4, 116 | "nbformat_minor": 4 117 | } 118 | -------------------------------------------------------------------------------- /examples/example_notebooks/detector_TF_SSDMobileNetV2.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Object Detection - Tensorflow model of SSD-MobileNet-V2" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import cv2 as cv\n", 18 | "from motrackers.detectors import TF_SSDMobileNetV2" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "VIDEO_FILE = \"./../video_data/cars.mp4\"\n", 28 | "\n", 29 | "WEIGHTS_PATH = (\n", 30 | " './../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb'\n", 31 | ")\n", 32 | "\n", 33 | "CONFIG_FILE_PATH = './../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_2018_03_29.pbtxt'\n", 34 | "\n", 35 | "USE_GPU = False\n", 36 | "\n", 37 | "CONFIDENCE_THRESHOLD = 0.5\n", 38 | "\n", 39 | "NMS_THRESHOLD = 0.2\n", 40 | "\n", 41 | "DRAW_BOUNDING_BOXES = True\n" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 3, 47 | "metadata": {}, 48 | "outputs": [], 49 | "source": [ 50 | "model = TF_SSDMobileNetV2(\n", 51 | " weights_path=WEIGHTS_PATH,\n", 52 | " configfile_path=CONFIG_FILE_PATH,\n", 53 | " confidence_threshold=CONFIDENCE_THRESHOLD,\n", 54 | " nms_threshold=NMS_THRESHOLD,\n", 55 | " draw_bboxes=DRAW_BOUNDING_BOXES,\n", 56 | " use_gpu=USE_GPU\n", 57 | ")" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 4, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "cap = cv.VideoCapture(VIDEO_FILE)" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": 5, 72 | "metadata": { 73 | "scrolled": false 74 | }, 75 | "outputs": [], 76 | "source": [ 77 | "while True:\n", 78 | " ok, image = cap.read()\n", 79 | " \n", 80 | " if not ok:\n", 81 | 
" print(\"Cannot read the video feed.\")\n", 82 | " break\n", 83 | " \n", 84 | " bboxes, confidences, class_ids = model.detect(image)\n", 85 | " \n", 86 | " updated_image = model.draw_bboxes(image.copy(), bboxes, confidences, class_ids)\n", 87 | " \n", 88 | " cv.imshow(\"image\", updated_image)\n", 89 | " \n", 90 | " if cv.waitKey(1) & 0xFF == ord('q'):\n", 91 | " break\n", 92 | "\n", 93 | "cap.release()\n", 94 | "cv.destroyWindow(\"image\")" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [] 103 | } 104 | ], 105 | "metadata": { 106 | "kernelspec": { 107 | "display_name": "work_env", 108 | "language": "python", 109 | "name": "work_env" 110 | }, 111 | "language_info": { 112 | "codemirror_mode": { 113 | "name": "ipython", 114 | "version": 3 115 | }, 116 | "file_extension": ".py", 117 | "mimetype": "text/x-python", 118 | "name": "python", 119 | "nbconvert_exporter": "python", 120 | "pygments_lexer": "ipython3", 121 | "version": "3.6.9" 122 | } 123 | }, 124 | "nbformat": 4, 125 | "nbformat_minor": 4 126 | } 127 | -------------------------------------------------------------------------------- /examples/example_notebooks/detector_YOLOv3.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Object Detection - YOLOv3" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import cv2 as cv\n", 18 | "from motrackers.detectors import YOLOv3" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "VIDEO_FILE = \"./../video_data/cars.mp4\"\n", 28 | "WEIGHTS_PATH = './../pretrained_models/yolo_weights/yolov3.weights'\n", 29 | "CONFIG_FILE_PATH = './../pretrained_models/yolo_weights/yolov3.cfg'\n", 30 | "LABELS_PATH = \"./../pretrained_models/yolo_weights/coco_names.json\"\n", 31 | "\n", 32 | "USE_GPU = False\n", 33 | "CONFIDENCE_THRESHOLD = 0.5\n", 34 | "NMS_THRESHOLD = 0.2\n", 35 | "DRAW_BOUNDING_BOXES = True" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 3, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "model = YOLOv3(\n", 45 | " weights_path=WEIGHTS_PATH,\n", 46 | " configfile_path=CONFIG_FILE_PATH,\n", 47 | " labels_path=LABELS_PATH,\n", 48 | " confidence_threshold=CONFIDENCE_THRESHOLD,\n", 49 | " nms_threshold=NMS_THRESHOLD,\n", 50 | " draw_bboxes=DRAW_BOUNDING_BOXES,\n", 51 | " use_gpu=USE_GPU\n", 52 | ")" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 4, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "cap = cv.VideoCapture(VIDEO_FILE)" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": 5, 67 | "metadata": { 68 | "scrolled": false 69 | }, 70 | "outputs": [], 71 | "source": [ 72 | "while True:\n", 73 | " ok, image = cap.read()\n", 74 | " \n", 75 | " if not ok:\n", 76 | " print(\"Cannot read the video feed.\")\n", 77 | " break\n", 78 | " \n", 79 | " bboxes, confidences, class_ids = model.detect(image)\n", 80 | " updated_image = model.draw_bboxes(image.copy(), bboxes, confidences, class_ids)\n", 81 | " \n", 82 | " cv.imshow(\"image\", updated_image)\n", 83 | " if cv.waitKey(1) & 0xFF == ord('q'):\n", 84 | " break\n", 85 | "\n", 86 | "cap.release()\n", 87 | "cv.destroyWindow(\"image\")" 
88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": null, 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [] 96 | } 97 | ], 98 | "metadata": { 99 | "kernelspec": { 100 | "display_name": "work_env", 101 | "language": "python", 102 | "name": "work_env" 103 | }, 104 | "language_info": { 105 | "codemirror_mode": { 106 | "name": "ipython", 107 | "version": 3 108 | }, 109 | "file_extension": ".py", 110 | "mimetype": "text/x-python", 111 | "name": "python", 112 | "nbconvert_exporter": "python", 113 | "pygments_lexer": "ipython3", 114 | "version": "3.6.9" 115 | } 116 | }, 117 | "nbformat": 4, 118 | "nbformat_minor": 4 119 | } 120 | -------------------------------------------------------------------------------- /examples/example_notebooks/mot_Caffe_SSDMobileNet.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Loading Caffe-model weights for SSD" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import cv2 as cv\n", 18 | "from motrackers.detectors.caffe import Caffe_SSDMobileNet\n", 19 | "from motrackers import CentroidTracker, CentroidKF_Tracker, SORT, IOUTracker\n", 20 | "from motrackers.utils import draw_tracks\n", 21 | "import ipywidgets as widgets" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": 2, 27 | "metadata": {}, 28 | "outputs": [ 29 | { 30 | "data": { 31 | "application/vnd.jupyter.widget-view+json": { 32 | "model_id": "abd6e0e1d0aa46138a7fc205c466f217", 33 | "version_major": 2, 34 | "version_minor": 0 35 | }, 36 | "text/plain": [ 37 | "Select(description='MOTracker:', options=('CentroidTracker', 'CentroidKF_Tracker', 'SORT', 'IOUTracker'), valu…" 38 | ] 39 | }, 40 | "metadata": {}, 41 | "output_type": "display_data" 42 | } 43 | ], 44 | "source": [ 45 | "chosen_tracker = widgets.Select(\n", 46 | " options=[\"CentroidTracker\", \"CentroidKF_Tracker\", \"SORT\", \"IOUTracker\"],\n", 47 | " value='CentroidTracker',\n", 48 | " rows=5,\n", 49 | " description='MOTracker:',\n", 50 | " disabled=False\n", 51 | ")\n", 52 | "chosen_tracker" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 3, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "if chosen_tracker.value == 'CentroidTracker':\n", 62 | " tracker = CentroidTracker(max_lost=0, tracker_output_format='mot_challenge')\n", 63 | "elif chosen_tracker.value == 'CentroidKF_Tracker':\n", 64 | " tracker = CentroidKF_Tracker(max_lost=0, tracker_output_format='mot_challenge')\n", 65 | "elif chosen_tracker.value == 'SORT':\n", 66 | " tracker = SORT(max_lost=3, tracker_output_format='mot_challenge', iou_threshold=0.3)\n", 67 | "elif chosen_tracker.value == 'IOUTracker':\n", 68 | " tracker = IOUTracker(max_lost=2, iou_threshold=0.5, min_detection_confidence=0.4, max_detection_confidence=0.7,\n", 69 | " tracker_output_format='mot_challenge')\n", 70 | "else:\n", 71 | " print(\"Please choose one tracker from the above list.\")" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 4, 77 | "metadata": {}, 78 | "outputs": [], 79 | "source": [ 80 | "VIDEO_FILE = \"./../video_data/cars.mp4\"\n", 81 | "WEIGHTS_PATH = \"./../pretrained_models/caffemodel_weights/MobileNetSSD_deploy.caffemodel\"\n", 82 | "CONFIG_FILE_PATH = \"./../pretrained_models/caffemodel_weights/MobileNetSSD_deploy.prototxt\"\n", 83 | 
"LABELS_PATH=\"./../pretrained_models/caffemodel_weights/ssd_mobilenet_caffe_names.json\"\n", 84 | "\n", 85 | "CONFIDENCE_THRESHOLD = 0.5\n", 86 | "NMS_THRESHOLD = 0.2\n", 87 | "DRAW_BOUNDING_BOXES = True\n", 88 | "USE_GPU=False" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 5, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "model = Caffe_SSDMobileNet(\n", 98 | " weights_path=WEIGHTS_PATH,\n", 99 | " configfile_path=CONFIG_FILE_PATH,\n", 100 | " labels_path=LABELS_PATH,\n", 101 | " confidence_threshold=CONFIDENCE_THRESHOLD,\n", 102 | " nms_threshold=NMS_THRESHOLD, \n", 103 | " draw_bboxes=DRAW_BOUNDING_BOXES,\n", 104 | " use_gpu=USE_GPU\n", 105 | ")" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 6, 111 | "metadata": { 112 | "scrolled": false 113 | }, 114 | "outputs": [], 115 | "source": [ 116 | "def main(video_path, model, tracker):\n", 117 | "\n", 118 | " cap = cv.VideoCapture(video_path)\n", 119 | " while True:\n", 120 | " ok, image = cap.read()\n", 121 | "\n", 122 | " if not ok:\n", 123 | " print(\"Cannot read the video feed.\")\n", 124 | " break\n", 125 | "\n", 126 | " image = cv.resize(image, (700, 500))\n", 127 | "\n", 128 | " bboxes, confidences, class_ids = model.detect(image)\n", 129 | " \n", 130 | " tracks = tracker.update(bboxes, confidences, class_ids)\n", 131 | " \n", 132 | " updated_image = model.draw_bboxes(image.copy(), bboxes, confidences, class_ids)\n", 133 | "\n", 134 | " updated_image = draw_tracks(updated_image, tracks)\n", 135 | "\n", 136 | " cv.imshow(\"image\", updated_image)\n", 137 | " if cv.waitKey(1) & 0xFF == ord('q'):\n", 138 | " break\n", 139 | "\n", 140 | " cap.release()\n", 141 | " cv.destroyAllWindows()" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": 7, 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "main(VIDEO_FILE, model, tracker)" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": null, 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [] 159 | } 160 | ], 161 | "metadata": { 162 | "kernelspec": { 163 | "display_name": "work_env", 164 | "language": "python", 165 | "name": "work_env" 166 | }, 167 | "language_info": { 168 | "codemirror_mode": { 169 | "name": "ipython", 170 | "version": 3 171 | }, 172 | "file_extension": ".py", 173 | "mimetype": "text/x-python", 174 | "name": "python", 175 | "nbconvert_exporter": "python", 176 | "pygments_lexer": "ipython3", 177 | "version": "3.6.9" 178 | } 179 | }, 180 | "nbformat": 4, 181 | "nbformat_minor": 4 182 | } 183 | -------------------------------------------------------------------------------- /examples/example_notebooks/mot_TF_SSDMobileNetV2.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Multiple object tracking with Tensorflow-SSD-MobileNetv2 based object detection." 
8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import cv2 as cv\n", 18 | "from motrackers.detectors import TF_SSDMobileNetV2\n", 19 | "from motrackers import CentroidTracker, CentroidKF_Tracker, SORT, IOUTracker\n", 20 | "from motrackers.utils import draw_tracks\n", 21 | "import ipywidgets as widgets" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "VIDEO_FILE = \"./../video_data/cars.mp4\"\n", 31 | "WEIGHTS_PATH = (\n", 32 | " './../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb')\n", 33 | "CONFIG_FILE_PATH = './../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_2018_03_29.pbtxt'\n", 34 | "LABELS_PATH = \"./../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_names.json\"\n", 35 | "\n", 36 | "CONFIDENCE_THRESHOLD = 0.5\n", 37 | "NMS_THRESHOLD = 0.2\n", 38 | "DRAW_BOUNDING_BOXES = True\n", 39 | "USE_GPU = False" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "metadata": {}, 46 | "outputs": [], 47 | "source": [ 48 | "chosen_tracker = widgets.Select(\n", 49 | " options=[\"CentroidTracker\", \"CentroidKF_Tracker\", \"SORT\", \"IOUTracker\"],\n", 50 | " value='CentroidTracker',\n", 51 | " rows=5,\n", 52 | " description='MOTracker:',\n", 53 | " disabled=False\n", 54 | ")\n", 55 | "chosen_tracker" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": null, 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "if chosen_tracker.value == 'CentroidTracker':\n", 65 | " tracker = CentroidTracker(max_lost=0, tracker_output_format='mot_challenge')\n", 66 | "elif chosen_tracker.value == 'CentroidKF_Tracker':\n", 67 | " tracker = CentroidKF_Tracker(max_lost=0, tracker_output_format='mot_challenge')\n", 68 | "elif chosen_tracker.value == 'SORT':\n", 69 | " tracker = SORT(max_lost=3, tracker_output_format='mot_challenge', iou_threshold=0.3)\n", 70 | "elif chosen_tracker.value == 'IOUTracker':\n", 71 | " tracker = IOUTracker(max_lost=2, iou_threshold=0.5, min_detection_confidence=0.4, max_detection_confidence=0.7,\n", 72 | " tracker_output_format='mot_challenge')\n", 73 | "else:\n", 74 | " print(\"Please choose one tracker from the above list.\")" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "model = TF_SSDMobileNetV2(\n", 84 | " weights_path=WEIGHTS_PATH,\n", 85 | " configfile_path=CONFIG_FILE_PATH,\n", 86 | " labels_path=LABELS_PATH,\n", 87 | " confidence_threshold=CONFIDENCE_THRESHOLD,\n", 88 | " nms_threshold=NMS_THRESHOLD,\n", 89 | " draw_bboxes=DRAW_BOUNDING_BOXES,\n", 90 | " use_gpu=USE_GPU\n", 91 | ")" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "metadata": { 98 | "scrolled": false 99 | }, 100 | "outputs": [], 101 | "source": [ 102 | "def main(video_path, model, tracker):\n", 103 | "\n", 104 | " cap = cv.VideoCapture(video_path)\n", 105 | " while True:\n", 106 | " ok, image = cap.read()\n", 107 | "\n", 108 | " if not ok:\n", 109 | " print(\"Cannot read the video feed.\")\n", 110 | " break\n", 111 | "\n", 112 | " image = cv.resize(image, (700, 500))\n", 113 | "\n", 114 | " bboxes, confidences, class_ids = model.detect(image)\n", 115 | " \n", 116 | " tracks = tracker.update(bboxes, confidences, class_ids)\n", 117 | " \n", 118 | 
" updated_image = model.draw_bboxes(image.copy(), bboxes, confidences, class_ids)\n", 119 | "\n", 120 | " updated_image = draw_tracks(updated_image, tracks)\n", 121 | "\n", 122 | " cv.imshow(\"image\", updated_image)\n", 123 | " if cv.waitKey(1) & 0xFF == ord('q'):\n", 124 | " break\n", 125 | "\n", 126 | " cap.release()\n", 127 | " cv.destroyAllWindows()" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "main(VIDEO_FILE, model, tracker)" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": null, 142 | "metadata": {}, 143 | "outputs": [], 144 | "source": [] 145 | } 146 | ], 147 | "metadata": { 148 | "kernelspec": { 149 | "display_name": "work_env", 150 | "language": "python", 151 | "name": "work_env" 152 | }, 153 | "language_info": { 154 | "codemirror_mode": { 155 | "name": "ipython", 156 | "version": 3 157 | }, 158 | "file_extension": ".py", 159 | "mimetype": "text/x-python", 160 | "name": "python", 161 | "nbconvert_exporter": "python", 162 | "pygments_lexer": "ipython3", 163 | "version": "3.6.9" 164 | } 165 | }, 166 | "nbformat": 4, 167 | "nbformat_minor": 4 168 | } 169 | -------------------------------------------------------------------------------- /examples/example_notebooks/mot_YOLOv3.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Multiple object tracking with YOLOv3-based object detection" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import cv2 as cv\n", 18 | "from motrackers.detectors import YOLOv3\n", 19 | "from motrackers import CentroidTracker, CentroidKF_Tracker, SORT, IOUTracker\n", 20 | "from motrackers.utils import draw_tracks\n", 21 | "import ipywidgets as widgets" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": 2, 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "VIDEO_FILE = \"./../video_data/cars.mp4\"\n", 31 | "WEIGHTS_PATH = './../pretrained_models/yolo_weights/yolov3.weights'\n", 32 | "CONFIG_FILE_PATH = './../pretrained_models/yolo_weights/yolov3.cfg'\n", 33 | "LABELS_PATH = \"./../pretrained_models/yolo_weights/coco_names.json\"\n", 34 | "\n", 35 | "CONFIDENCE_THRESHOLD = 0.5\n", 36 | "NMS_THRESHOLD = 0.2\n", 37 | "DRAW_BOUNDING_BOXES = True\n", 38 | "USE_GPU = False" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": 3, 44 | "metadata": {}, 45 | "outputs": [ 46 | { 47 | "data": { 48 | "application/vnd.jupyter.widget-view+json": { 49 | "model_id": "ae18feabad2649079498e476cb1cc240", 50 | "version_major": 2, 51 | "version_minor": 0 52 | }, 53 | "text/plain": [ 54 | "Select(description='MOTracker:', options=('CentroidTracker', 'CentroidKF_Tracker', 'SORT', 'IOUTracker'), valu…" 55 | ] 56 | }, 57 | "metadata": {}, 58 | "output_type": "display_data" 59 | } 60 | ], 61 | "source": [ 62 | "chosen_tracker = widgets.Select(\n", 63 | " options=[\"CentroidTracker\", \"CentroidKF_Tracker\", \"SORT\", \"IOUTracker\"],\n", 64 | " value='CentroidTracker',\n", 65 | " rows=5,\n", 66 | " description='MOTracker:',\n", 67 | " disabled=False\n", 68 | ")\n", 69 | "chosen_tracker" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": 4, 75 | "metadata": {}, 76 | "outputs": [], 77 | "source": [ 78 | "if chosen_tracker.value == 
'CentroidTracker':\n", 79 | " tracker = CentroidTracker(max_lost=0, tracker_output_format='mot_challenge')\n", 80 | "elif chosen_tracker.value == 'CentroidKF_Tracker':\n", 81 | " tracker = CentroidKF_Tracker(max_lost=0, tracker_output_format='mot_challenge')\n", 82 | "elif chosen_tracker.value == 'SORT':\n", 83 | " tracker = SORT(max_lost=3, tracker_output_format='mot_challenge', iou_threshold=0.3)\n", 84 | "elif chosen_tracker.value == 'IOUTracker':\n", 85 | " tracker = IOUTracker(max_lost=2, iou_threshold=0.5, min_detection_confidence=0.4, max_detection_confidence=0.7,\n", 86 | " tracker_output_format='mot_challenge')\n", 87 | "else:\n", 88 | " print(\"Please choose one tracker from the above list.\")" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 5, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "model = YOLOv3(\n", 98 | " weights_path=WEIGHTS_PATH,\n", 99 | " configfile_path=CONFIG_FILE_PATH,\n", 100 | " labels_path=LABELS_PATH,\n", 101 | " confidence_threshold=CONFIDENCE_THRESHOLD,\n", 102 | " nms_threshold=NMS_THRESHOLD,\n", 103 | " draw_bboxes=DRAW_BOUNDING_BOXES,\n", 104 | " use_gpu=USE_GPU\n", 105 | ")" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 6, 111 | "metadata": { 112 | "scrolled": false 113 | }, 114 | "outputs": [], 115 | "source": [ 116 | "def main(video_path, model, tracker):\n", 117 | "\n", 118 | " cap = cv.VideoCapture(video_path)\n", 119 | " while True:\n", 120 | " ok, image = cap.read()\n", 121 | "\n", 122 | " if not ok:\n", 123 | " print(\"Cannot read the video feed.\")\n", 124 | " break\n", 125 | "\n", 126 | " image = cv.resize(image, (700, 500))\n", 127 | "\n", 128 | " bboxes, confidences, class_ids = model.detect(image)\n", 129 | " \n", 130 | " tracks = tracker.update(bboxes, confidences, class_ids)\n", 131 | " \n", 132 | " updated_image = model.draw_bboxes(image.copy(), bboxes, confidences, class_ids)\n", 133 | "\n", 134 | " updated_image = draw_tracks(updated_image, tracks)\n", 135 | "\n", 136 | " cv.imshow(\"image\", updated_image)\n", 137 | " if cv.waitKey(1) & 0xFF == ord('q'):\n", 138 | " break\n", 139 | "\n", 140 | " cap.release()\n", 141 | " cv.destroyAllWindows()" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": 7, 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "main(VIDEO_FILE, model, tracker)" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": null, 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [] 159 | } 160 | ], 161 | "metadata": { 162 | "kernelspec": { 163 | "display_name": "work_env", 164 | "language": "python", 165 | "name": "work_env" 166 | }, 167 | "language_info": { 168 | "codemirror_mode": { 169 | "name": "ipython", 170 | "version": 3 171 | }, 172 | "file_extension": ".py", 173 | "mimetype": "text/x-python", 174 | "name": "python", 175 | "nbconvert_exporter": "python", 176 | "pygments_lexer": "ipython3", 177 | "version": "3.6.9" 178 | } 179 | }, 180 | "nbformat": 4, 181 | "nbformat_minor": 4 182 | } 183 | -------------------------------------------------------------------------------- /examples/example_scripts/detector_Caffe_SSDMobileNet.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | from motrackers.detectors import Caffe_SSDMobileNet 3 | 4 | 5 | def main(video_path, model): 6 | cap = cv.VideoCapture(video_path) 7 | 8 | while True: 9 | ok, image = cap.read() 10 | 11 | if not ok: 12 | print("Cannot read the video feed.") 13 | 
break 14 | 15 | bboxes, confidences, class_ids = model.detect(image) 16 | updated_image = model.draw_bboxes(image, bboxes, confidences, class_ids) 17 | 18 | cv.imshow("image", updated_image) 19 | if cv.waitKey(1) & 0xFF == ord('q'): 20 | break 21 | 22 | cap.release() 23 | cv.destroyAllWindows() 24 | 25 | 26 | if __name__ == '__main__': 27 | import argparse 28 | 29 | parser = argparse.ArgumentParser( 30 | description='Object detections in input video using Caffemodel of MobileNetSSD.') 31 | 32 | parser.add_argument( 33 | '--video', '-v', type=str, default="./../video_data/cars.mp4", help='Input video path.') 34 | 35 | parser.add_argument( 36 | '--weights', '-w', type=str, 37 | default="./../pretrained_models/caffemodel_weights/MobileNetSSD_deploy.caffemodel", 38 | help='path to weights file of Caffe-MobileNetSSD, i.e., `.caffemodel` file.' 39 | ) 40 | 41 | parser.add_argument( 42 | '--config', '-c', type=str, 43 | default="./../pretrained_models/caffemodel_weights/MobileNetSSD_deploy.prototxt", 44 | help='path to config file of Caffe-MobileNetSSD, i.e., `.prototxt` file.' 45 | ) 46 | 47 | parser.add_argument( 48 | '--labels', '-l', type=str, 49 | default="./../pretrained_models/caffemodel_weights/ssd_mobilenet_caffe_names.json", 50 | help='path to labels file of coco dataset (`.json` file.)' 51 | ) 52 | 53 | parser.add_argument( 54 | '--gpu', type=bool, 55 | default=False, help='Flag to use gpu to run the deep learning model. Default is `False`' 56 | ) 57 | 58 | args = parser.parse_args() 59 | 60 | model = Caffe_SSDMobileNet( 61 | weights_path=args.weights, 62 | configfile_path=args.config, 63 | labels_path=args.labels, 64 | confidence_threshold=0.5, 65 | nms_threshold=0.2, 66 | draw_bboxes=True, 67 | use_gpu=args.gpu 68 | ) 69 | 70 | main(args.video, model) 71 | -------------------------------------------------------------------------------- /examples/example_scripts/detector_TF_SSDMobileNetV2.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | from motrackers.detectors import TF_SSDMobileNetV2 3 | 4 | 5 | def main(video_path, model): 6 | cap = cv.VideoCapture(video_path) 7 | while True: 8 | ok, image = cap.read() 9 | 10 | if not ok: 11 | print("Cannot read the video feed.") 12 | break 13 | 14 | bboxes, confidences, class_ids = model.detect(image) 15 | updated_image = model.draw_bboxes(image, bboxes, confidences, class_ids) 16 | 17 | cv.imshow("image", updated_image) 18 | if cv.waitKey(1) & 0xFF == ord('q'): 19 | break 20 | 21 | cap.release() 22 | cv.destroyAllWindows() 23 | 24 | 25 | if __name__ == '__main__': 26 | import argparse 27 | 28 | parser = argparse.ArgumentParser( 29 | description='Object detections in input video using TensorFlow model of MobileNetSSD.') 30 | 31 | parser.add_argument( 32 | '--video', '-v', type=str, default="./../video_data/cars.mp4", help='Input video path.') 33 | 34 | parser.add_argument( 35 | '--weights', '-w', type=str, 36 | default="./../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb", 37 | help='path to weights file of tf-MobileNetSSD (`.pb` file).' 38 | ) 39 | 40 | parser.add_argument( 41 | '--config', '-c', type=str, 42 | default="./../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_2018_03_29.pbtxt", 43 | help='path to config file of Caffe-MobileNetSSD (`.pbtxt` file).' 
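        # this .pbtxt is the OpenCV dnn graph description that pairs with the frozen TensorFlow model; tensorflow_weights/get_ssd_model.sh downloads both files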
44 | ) 45 | 46 | parser.add_argument( 47 | '--labels', '-l', type=str, 48 | default="./../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_names.json", 49 | help='path to labels file of coco dataset (`.names` file.)' 50 | ) 51 | 52 | parser.add_argument( 53 | '--gpu', type=bool, default=False, 54 | help='Flag to use gpu to run the deep learning model. Default is `False`' 55 | ) 56 | 57 | args = parser.parse_args() 58 | 59 | model = TF_SSDMobileNetV2( 60 | weights_path=args.weights, 61 | configfile_path=args.config, 62 | labels_path=args.labels, 63 | confidence_threshold=0.5, 64 | nms_threshold=0.2, 65 | draw_bboxes=True, 66 | use_gpu=args.gpu 67 | ) 68 | 69 | main(args.video, model) 70 | -------------------------------------------------------------------------------- /examples/example_scripts/detector_YOLOv3.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | from motrackers.detectors import YOLOv3 3 | 4 | 5 | def main(video_path, model): 6 | cap = cv.VideoCapture(video_path) 7 | while True: 8 | ok, image = cap.read() 9 | 10 | if not ok: 11 | print("Cannot read the video feed.") 12 | break 13 | 14 | bboxes, confidences, class_ids = model.detect(image) 15 | updated_image = model.draw_bboxes(image, bboxes, confidences, class_ids) 16 | 17 | cv.imshow("image", updated_image) 18 | if cv.waitKey(1) & 0xFF == ord('q'): 19 | break 20 | 21 | cap.release() 22 | cv.destroyAllWindows() 23 | 24 | 25 | if __name__ == '__main__': 26 | import argparse 27 | 28 | parser = argparse.ArgumentParser( 29 | description='Object detections in input video using YOLOv3 trained on COCO dataset.' 30 | ) 31 | 32 | parser.add_argument( 33 | '--video', '-v', type=str, default="./../video_data/cars.mp4", help='Input video path.') 34 | 35 | parser.add_argument( 36 | '--weights', '-w', type=str, 37 | default="./../pretrained_models/yolo_weights/yolov3.weights", 38 | help='path to weights file of YOLOv3 (`.weights` file.)' 39 | ) 40 | 41 | parser.add_argument( 42 | '--config', '-c', type=str, 43 | default="./../pretrained_models/yolo_weights/yolov3.cfg", 44 | help='path to config file of YOLOv3 (`.cfg` file.)' 45 | ) 46 | 47 | parser.add_argument( 48 | '--labels', '-l', type=str, 49 | default="./../pretrained_models/yolo_weights/coco_names.json", 50 | help='path to labels file of coco dataset (`.names` file.)' 51 | ) 52 | 53 | parser.add_argument( 54 | '--gpu', type=bool, 55 | default=False, help='Flag to use gpu to run the deep learning model. 
Default is `False`' 56 | ) 57 | 58 | args = parser.parse_args() 59 | 60 | model = YOLOv3( 61 | weights_path=args.weights, 62 | configfile_path=args.config, 63 | labels_path=args.labels, 64 | confidence_threshold=0.5, 65 | nms_threshold=0.2, 66 | draw_bboxes=True, 67 | use_gpu=args.gpu 68 | ) 69 | 70 | main(args.video, model) 71 | -------------------------------------------------------------------------------- /examples/example_scripts/mot_Caffe_SSDMobileNet.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | from motrackers.detectors import Caffe_SSDMobileNet 3 | from motrackers import CentroidTracker, CentroidKF_Tracker, SORT, IOUTracker 4 | from motrackers.utils import draw_tracks 5 | 6 | 7 | def main(video_path, model, tracker): 8 | 9 | cap = cv.VideoCapture(video_path) 10 | while True: 11 | ok, image = cap.read() 12 | 13 | if not ok: 14 | print("Cannot read the video feed.") 15 | break 16 | 17 | image = cv.resize(image, (700, 500)) 18 | 19 | bboxes, confidences, class_ids = model.detect(image) 20 | tracks = tracker.update(bboxes, confidences, class_ids) 21 | updated_image = model.draw_bboxes(image.copy(), bboxes, confidences, class_ids) 22 | 23 | updated_image = draw_tracks(updated_image, tracks) 24 | 25 | cv.imshow("image", updated_image) 26 | if cv.waitKey(1) & 0xFF == ord('q'): 27 | break 28 | 29 | cap.release() 30 | cv.destroyAllWindows() 31 | 32 | 33 | if __name__ == '__main__': 34 | import argparse 35 | 36 | parser = argparse.ArgumentParser( 37 | description='Object detections in input video using Caffemodel of MobileNetSSD.') 38 | 39 | parser.add_argument( 40 | '--video', '-v', type=str, default="./../video_data/cars.mp4", help='Input video path.') 41 | 42 | parser.add_argument( 43 | '--weights', '-w', type=str, default="./../pretrained_models/caffemodel_weights/MobileNetSSD_deploy.caffemodel", 44 | help='path to weights file of Caffe-MobileNetSSD, i.e., `.caffemodel` file.') 45 | 46 | parser.add_argument( 47 | '--config', '-c', type=str, default="./../pretrained_models/caffemodel_weights/MobileNetSSD_deploy.prototxt", 48 | help='path to config file of Caffe-MobileNetSSD, i.e., `.prototxt` file.') 49 | 50 | parser.add_argument( 51 | '--labels', '-l', type=str, 52 | default="./../pretrained_models/caffemodel_weights/ssd_mobilenet_caffe_names.json", 53 | help='path to labels file of coco dataset (`.names` file.)' 54 | ) 55 | 56 | parser.add_argument( 57 | '--gpu', type=bool, default=False, help='Flag to use gpu to run the deep learning model. Default is `False`') 58 | 59 | parser.add_argument( 60 | '--tracker', type=str, default='SORT', help="Tracker used to track objects." 
61 | " Options include ['CentroidTracker', 'CentroidKF_Tracker', 'SORT', IOUTracker]") 62 | 63 | args = parser.parse_args() 64 | 65 | if args.tracker == 'CentroidTracker': 66 | tracker = CentroidTracker(max_lost=0, tracker_output_format='mot_challenge') 67 | elif args.tracker == 'CentroidKF_Tracker': 68 | tracker = CentroidKF_Tracker(max_lost=0, tracker_output_format='mot_challenge') 69 | elif args.tracker == 'SORT': 70 | tracker = SORT(max_lost=3, tracker_output_format='mot_challenge', iou_threshold=0.3) 71 | elif args.tracker == 'IOUTracker': 72 | tracker = IOUTracker(max_lost=2, iou_threshold=0.5, min_detection_confidence=0.4, max_detection_confidence=0.7, 73 | tracker_output_format='mot_challenge') 74 | else: 75 | raise NotImplementedError 76 | 77 | model = Caffe_SSDMobileNet( 78 | weights_path=args.weights, 79 | configfile_path=args.config, 80 | labels_path=args.labels, 81 | confidence_threshold=0.5, 82 | nms_threshold=0.2, 83 | draw_bboxes=True, 84 | use_gpu=args.gpu 85 | ) 86 | 87 | main(args.video, model, tracker) 88 | -------------------------------------------------------------------------------- /examples/example_scripts/mot_TF_SSDMobileNet.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | from motrackers.detectors import TF_SSDMobileNetV2 3 | from motrackers import CentroidTracker, CentroidKF_Tracker, SORT, IOUTracker 4 | from motrackers.utils import draw_tracks 5 | 6 | 7 | def main(video_path, model, tracker): 8 | cap = cv.VideoCapture(video_path) 9 | while True: 10 | ok, image = cap.read() 11 | 12 | if not ok: 13 | print("Cannot read the video feed.") 14 | break 15 | 16 | image = cv.resize(image, (700, 500)) 17 | 18 | bboxes, confidences, class_ids = model.detect(image) 19 | tracks = tracker.update(bboxes, confidences, class_ids) 20 | updated_image = model.draw_bboxes(image.copy(), bboxes, confidences, class_ids) 21 | 22 | updated_image = draw_tracks(updated_image, tracks) 23 | 24 | cv.imshow("image", updated_image) 25 | if cv.waitKey(1) & 0xFF == ord('q'): 26 | break 27 | 28 | cap.release() 29 | cv.destroyAllWindows() 30 | 31 | 32 | if __name__ == '__main__': 33 | import argparse 34 | 35 | parser = argparse.ArgumentParser( 36 | description='Object detections in input video using TensorFlow model of MobileNetSSD.') 37 | 38 | parser.add_argument( 39 | '--video', '-v', type=str, default="./../video_data/cars.mp4", help='Input video path.') 40 | 41 | parser.add_argument( 42 | '--weights', '-w', type=str, 43 | default="./../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb", 44 | help='path to weights file of tf-MobileNetSSD (`.pb` file).' 45 | ) 46 | 47 | parser.add_argument( 48 | '--config', '-c', 49 | type=str, 50 | default="./../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_2018_03_29.pbtxt", 51 | help='path to config file of Caffe-MobileNetSSD (`.pbtxt` file).' 52 | ) 53 | 54 | parser.add_argument( 55 | '--labels', '-l', type=str, 56 | default="./../pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_names.json", 57 | help='path to labels file of coco dataset (`.names` file.)' 58 | ) 59 | 60 | parser.add_argument( 61 | '--gpu', type=bool, default=False, 62 | help='Flag to use gpu to run the deep learning model. Default is `False`' 63 | ) 64 | 65 | parser.add_argument( 66 | '--tracker', type=str, default='IOUTracker', 67 | help="Tracker used to track objects. 
Options include ['CentroidTracker', 'CentroidKF_Tracker', " 68 | "'SORT', IOUTracker]") 69 | 70 | args = parser.parse_args() 71 | 72 | if args.tracker == 'CentroidTracker': 73 | tracker = CentroidTracker(max_lost=0, tracker_output_format='mot_challenge') 74 | elif args.tracker == 'CentroidKF_Tracker': 75 | tracker = CentroidKF_Tracker(max_lost=0, tracker_output_format='mot_challenge') 76 | elif args.tracker == 'SORT': 77 | tracker = SORT(max_lost=3, tracker_output_format='mot_challenge', iou_threshold=0.3) 78 | elif args.tracker == 'IOUTracker': 79 | tracker = IOUTracker(max_lost=2, iou_threshold=0.5, min_detection_confidence=0.4, max_detection_confidence=0.7, 80 | tracker_output_format='mot_challenge') 81 | else: 82 | raise NotImplementedError 83 | 84 | model = TF_SSDMobileNetV2( 85 | weights_path=args.weights, 86 | configfile_path=args.config, 87 | labels_path=args.labels, 88 | confidence_threshold=0.4, 89 | nms_threshold=0.2, 90 | draw_bboxes=True, 91 | use_gpu=args.gpu 92 | ) 93 | 94 | main(args.video, model, tracker) 95 | -------------------------------------------------------------------------------- /examples/example_scripts/mot_YOLOv3.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | from motrackers.detectors import YOLOv3 3 | from motrackers import CentroidTracker, CentroidKF_Tracker, SORT, IOUTracker 4 | from motrackers.utils import draw_tracks 5 | 6 | 7 | def main(video_path, model, tracker): 8 | 9 | cap = cv.VideoCapture(video_path) 10 | while True: 11 | ok, image = cap.read() 12 | 13 | if not ok: 14 | print("Cannot read the video feed.") 15 | break 16 | 17 | image = cv.resize(image, (700, 500)) 18 | 19 | bboxes, confidences, class_ids = model.detect(image) 20 | tracks = tracker.update(bboxes, confidences, class_ids) 21 | updated_image = model.draw_bboxes(image.copy(), bboxes, confidences, class_ids) 22 | 23 | updated_image = draw_tracks(updated_image, tracks) 24 | 25 | cv.imshow("image", updated_image) 26 | if cv.waitKey(1) & 0xFF == ord('q'): 27 | break 28 | 29 | cap.release() 30 | cv.destroyAllWindows() 31 | 32 | 33 | if __name__ == '__main__': 34 | import argparse 35 | 36 | parser = argparse.ArgumentParser( 37 | description='Object detections in input video using YOLOv3 trained on COCO dataset.' 38 | ) 39 | 40 | parser.add_argument( 41 | '--video', '-v', type=str, default="./../video_data/cars.mp4", help='Input video path.') 42 | 43 | parser.add_argument( 44 | '--weights', '-w', type=str, 45 | default="./../pretrained_models/yolo_weights/yolov3.weights", 46 | help='path to weights file of YOLOv3 (`.weights` file.)' 47 | ) 48 | 49 | parser.add_argument( 50 | '--config', '-c', type=str, 51 | default="./../pretrained_models/yolo_weights/yolov3.cfg", 52 | help='path to config file of YOLOv3 (`.cfg` file.)' 53 | ) 54 | 55 | parser.add_argument( 56 | '--labels', '-l', type=str, 57 | default="./../pretrained_models/yolo_weights/coco_names.json", 58 | help='path to labels file of coco dataset (`.names` file.)' 59 | ) 60 | 61 | parser.add_argument( 62 | '--gpu', type=bool, 63 | default=False, help='Flag to use gpu to run the deep learning model. Default is `False`' 64 | ) 65 | 66 | parser.add_argument( 67 | '--tracker', type=str, default='CentroidKF_Tracker', 68 | help="Tracker used to track objects. 
Options include ['CentroidTracker', 'CentroidKF_Tracker', 'SORT', 'IOUTracker']") 69 | 70 | args = parser.parse_args() 71 | 72 | if args.tracker == 'CentroidTracker': 73 | tracker = CentroidTracker(max_lost=0, tracker_output_format='mot_challenge') 74 | elif args.tracker == 'CentroidKF_Tracker': 75 | tracker = CentroidKF_Tracker(max_lost=0, tracker_output_format='mot_challenge') 76 | elif args.tracker == 'SORT': 77 | tracker = SORT(max_lost=3, tracker_output_format='mot_challenge', iou_threshold=0.3) 78 | elif args.tracker == 'IOUTracker': 79 | tracker = IOUTracker(max_lost=2, iou_threshold=0.5, min_detection_confidence=0.4, max_detection_confidence=0.7, 80 | tracker_output_format='mot_challenge') 81 | else: 82 | raise NotImplementedError 83 | 84 | model = YOLOv3( 85 | weights_path=args.weights, 86 | configfile_path=args.config, 87 | labels_path=args.labels, 88 | confidence_threshold=0.5, 89 | nms_threshold=0.2, 90 | draw_bboxes=True, 91 | use_gpu=args.gpu 92 | ) 93 | 94 | main(args.video, model, tracker) 95 | -------------------------------------------------------------------------------- /examples/example_scripts/readme.md: -------------------------------------------------------------------------------- 1 | ## How to use? 2 | 3 | To see how to use these example scripts, simply type the following in the terminal: 4 | 5 | ``` 6 | python3 <script_name>.py --help 7 | ``` 8 | -------------------------------------------------------------------------------- /examples/motmetrics_eval/data/TUD-Campus/test.txt: -------------------------------------------------------------------------------- 1 | 1,3,113.84,274.5,57.307,130.05,-1,-1,-1,-1 2 | 1,6,273.05,203.83,77.366,175.56,-1,-1,-1,-1 3 | 1,10,416.68,205.54,91.04,206.59,-1,-1,-1,-1 4 | 1,13,175.02,195.54,60.972,138.36,-1,-1,-1,-1 5 | 2,3,116.37,265.2,62.858,142.64,-1,-1,-1,-1 6 | 2,6,267.86,202.71,77.704,176.33,-1,-1,-1,-1 7 | 2,10,423.95,203.42,91.88,208.5,-1,-1,-1,-1 8 | 2,13,177.14,202.51,58.209,132.09,-1,-1,-1,-1 9 | 3,3,118.93,255.89,68.408,155.24,-1,-1,-1,-1 10 | 3,6,262.73,201.65,78.033,177.08,-1,-1,-1,-1 11 | 3,10,431.14,201.32,92.719,210.4,-1,-1,-1,-1 12 | 3,13,179.21,209.5,55.445,125.82,-1,-1,-1,-1 13 | 4,3,121.53,246.57,73.959,167.83,-1,-1,-1,-1 14 | 4,6,257.67,200.61,78.354,177.81,-1,-1,-1,-1 15 | 4,10,438.16,199.26,93.559,212.31,-1,-1,-1,-1 16 | 4,13,181.25,216.56,52.681,119.55,-1,-1,-1,-1 17 | 5,3,124.22,237.24,79.51,180.43,-1,-1,-1,-1 18 | 5,6,252.68,199.54,78.667,178.52,-1,-1,-1,-1 19 | 5,10,444.94,197.29,94.398,214.21,-1,-1,-1,-1 20 | 5,13,183.24,223.72,49.917,113.27,-1,-1,-1,-1 21 | 6,3,127,227.96,85.061,193.02,-1,-1,-1,-1 22 | 6,6,247.74,198.46,78.972,179.21,-1,-1,-1,-1 23 | 6,10,451.36,195.47,95.238,216.12,-1,-1,-1,-1 24 | 6,13,185.16,230.95,47.154,107,-1,-1,-1,-1 25 | 7,3,129.86,218.75,90.612,205.62,-1,-1,-1,-1 26 | 7,6,242.86,197.44,79.267,179.88,-1,-1,-1,-1 27 | 7,10,457.4,193.8,96.077,218.02,-1,-1,-1,-1 28 | 7,13,187.05,238.21,44.39,100.73,-1,-1,-1,-1 29 | 8,3,132.77,209.65,96.163,218.21,-1,-1,-1,-1 30 | 8,6,237.99,196.55,79.555,180.53,-1,-1,-1,-1 31 | 8,10,463.14,192.21,96.916,219.93,-1,-1,-1,-1 32 | 9,3,135.64,200.65,101.71,230.81,-1,-1,-1,-1 33 | 9,6,233.04,195.92,79.834,181.16,-1,-1,-1,-1 34 | 9,10,468.66,190.65,97.756,221.83,-1,-1,-1,-1 35 | 10,3,138.45,191.79,107.27,243.4,-1,-1,-1,-1 36 | 10,6,227.9,195.56,80.105,181.78,-1,-1,-1,-1 37 | 10,10,474.09,189.09,98.595,223.74,-1,-1,-1,-1 38 | 11,3,141.22,183.1,112.82,256,-1,-1,-1,-1 39 | 11,6,222.43,195.44,80.367,182.37,-1,-1,-1,-1 40 | 11,10,479.5,187.49,99.435,225.64,-1,-1,-1,-1 41 | 
12,3,143.96,174.55,118.37,268.6,-1,-1,-1,-1 42 | 12,6,216.54,195.56,80.621,182.95,-1,-1,-1,-1 43 | 12,10,484.99,185.85,100.27,227.55,-1,-1,-1,-1 44 | 13,3,146.68,166.1,123.92,281.19,-1,-1,-1,-1 45 | 13,6,210.1,195.93,80.866,183.51,-1,-1,-1,-1 46 | 13,10,490.67,184.15,101.11,229.45,-1,-1,-1,-1 47 | 14,6,203.1,196.5,81.104,184.04,-1,-1,-1,-1 48 | 14,10,496.59,182.38,101.95,231.36,-1,-1,-1,-1 49 | 15,6,195.62,197.15,81.332,184.56,-1,-1,-1,-1 50 | 15,10,502.81,180.54,102.79,233.26,-1,-1,-1,-1 51 | 16,6,187.79,197.8,81.553,185.06,-1,-1,-1,-1 52 | 16,7,276.17,205.24,60.444,137.16,-1,-1,-1,-1 53 | 16,10,509.37,178.64,103.63,235.16,-1,-1,-1,-1 54 | 17,6,179.76,198.42,81.764,185.54,-1,-1,-1,-1 55 | 17,7,282.02,209.51,58.423,132.58,-1,-1,-1,-1 56 | 17,10,516.23,176.69,104.47,237.07,-1,-1,-1,-1 57 | 18,6,171.65,199.03,81.968,186.01,-1,-1,-1,-1 58 | 18,7,287.93,213.81,56.403,127.99,-1,-1,-1,-1 59 | 18,10,523.34,174.73,105.31,238.97,-1,-1,-1,-1 60 | 19,6,163.55,199.67,82.163,186.45,-1,-1,-1,-1 61 | 19,7,293.99,218.11,54.382,123.4,-1,-1,-1,-1 62 | 19,10,530.57,172.78,106.15,240.88,-1,-1,-1,-1 63 | 20,6,155.46,200.39,82.35,186.87,-1,-1,-1,-1 64 | 20,7,300.27,222.37,52.361,118.82,-1,-1,-1,-1 65 | 21,6,147.3,201.23,82.528,187.28,-1,-1,-1,-1 66 | 21,7,306.86,226.57,50.34,114.23,-1,-1,-1,-1 67 | 22,6,139.04,202.19,82.698,187.66,-1,-1,-1,-1 68 | 22,7,313.74,230.72,48.32,109.65,-1,-1,-1,-1 69 | 23,6,130.69,203.28,82.859,188.03,-1,-1,-1,-1 70 | 23,7,320.84,234.83,46.299,105.06,-1,-1,-1,-1 71 | 24,6,122.3,204.52,83.012,188.38,-1,-1,-1,-1 72 | 24,7,328.08,238.93,44.278,100.48,-1,-1,-1,-1 73 | 24,11,224.23,208.03,71.57,162.41,-1,-1,-1,-1 74 | 25,6,113.93,205.85,83.157,188.71,-1,-1,-1,-1 75 | 25,7,335.33,243.04,42.257,95.892,-1,-1,-1,-1 76 | 25,11,230.36,214.02,68.455,155.34,-1,-1,-1,-1 77 | 26,4,119.05,191.06,80.289,182.2,-1,-1,-1,-1 78 | 26,7,342.57,247.15,40.236,91.306,-1,-1,-1,-1 79 | 26,9,-15.182,261.28,64.106,145.47,-1,-1,-1,-1 80 | 26,11,236.15,218.42,66.058,149.9,-1,-1,-1,-1 81 | 27,4,109.54,188.88,82.744,187.77,-1,-1,-1,-1 82 | 27,7,349.78,251.25,38.216,86.721,-1,-1,-1,-1 83 | 27,9,-16.51,246.39,71.474,162.19,-1,-1,-1,-1 84 | 27,11,241.64,221.42,64.306,145.92,-1,-1,-1,-1 85 | 28,4,100.03,186.72,85.2,193.34,-1,-1,-1,-1 86 | 28,9,-17.899,231.48,78.843,178.91,-1,-1,-1,-1 87 | 28,11,246.79,223.23,63.128,143.25,-1,-1,-1,-1 88 | 29,4,90.482,184.67,87.656,198.91,-1,-1,-1,-1 89 | 29,9,-19.351,216.58,86.212,195.63,-1,-1,-1,-1 90 | 29,11,251.59,224.01,62.458,141.73,-1,-1,-1,-1 91 | 30,4,80.854,182.84,90.111,204.49,-1,-1,-1,-1 92 | 30,9,-20.845,201.69,93.58,212.35,-1,-1,-1,-1 93 | 30,11,255.99,223.9,62.231,141.22,-1,-1,-1,-1 94 | 31,4,71.177,181.25,92.567,210.06,-1,-1,-1,-1 95 | 31,9,-22.364,186.8,100.95,229.07,-1,-1,-1,-1 96 | 31,11,260.09,223.01,62.386,141.57,-1,-1,-1,-1 97 | 32,4,61.563,179.93,95.023,215.63,-1,-1,-1,-1 98 | 32,11,263.94,221.53,62.864,142.65,-1,-1,-1,-1 99 | 33,4,52.124,178.89,97.479,221.2,-1,-1,-1,-1 100 | 33,8,324.5,165.22,108.85,247.01,-1,-1,-1,-1 101 | 33,11,267.58,219.61,63.611,144.35,-1,-1,-1,-1 102 | 34,4,42.892,178.09,99.934,226.78,-1,-1,-1,-1 103 | 34,8,336.28,176.03,105.56,239.53,-1,-1,-1,-1 104 | 34,11,271.09,217.43,64.576,146.54,-1,-1,-1,-1 105 | 35,4,33.841,177.49,102.39,232.35,-1,-1,-1,-1 106 | 35,8,348.07,186.95,102.26,232.06,-1,-1,-1,-1 107 | 35,11,274.48,215.13,65.708,149.11,-1,-1,-1,-1 108 | 36,4,24.941,177.01,104.85,237.92,-1,-1,-1,-1 109 | 36,8,359.8,198.05,98.967,224.58,-1,-1,-1,-1 110 | 36,11,277.84,212.8,66.965,151.96,-1,-1,-1,-1 111 | 
37,4,16.117,176.59,107.3,243.49,-1,-1,-1,-1 112 | 37,8,371.47,209.36,95.673,217.1,-1,-1,-1,-1 113 | 37,11,281.24,210.52,68.301,154.99,-1,-1,-1,-1 114 | 38,2,52.423,232.95,80.36,182.36,-1,-1,-1,-1 115 | 38,8,383.09,220.95,92.379,209.63,-1,-1,-1,-1 116 | 38,11,284.69,208.3,69.68,158.12,-1,-1,-1,-1 117 | 39,2,56.427,217.08,87.228,197.94,-1,-1,-1,-1 118 | 39,8,394.66,232.78,89.085,202.15,-1,-1,-1,-1 119 | 39,11,288.21,206.16,71.063,161.26,-1,-1,-1,-1 120 | 40,2,61.088,203.79,92.98,210.99,-1,-1,-1,-1 121 | 40,8,406.23,244.74,85.791,194.68,-1,-1,-1,-1 122 | 40,11,291.81,204.09,72.419,164.34,-1,-1,-1,-1 123 | 41,2,66.422,192.94,97.686,221.67,-1,-1,-1,-1 124 | 41,5,394.42,197.9,97.849,222.04,-1,-1,-1,-1 125 | 41,11,295.46,202.07,73.718,167.28,-1,-1,-1,-1 126 | 42,2,72.446,184.34,101.42,230.14,-1,-1,-1,-1 127 | 42,5,400.86,193.91,102.46,232.51,-1,-1,-1,-1 128 | 42,11,299.14,200,74.933,170.04,-1,-1,-1,-1 129 | 43,2,79.136,177.84,104.25,236.56,-1,-1,-1,-1 130 | 43,5,407.31,190.07,107.08,242.98,-1,-1,-1,-1 131 | 43,11,302.83,197.86,76.041,172.55,-1,-1,-1,-1 132 | 44,2,86.433,173.27,106.24,241.09,-1,-1,-1,-1 133 | 44,5,413.79,186.43,111.69,253.45,-1,-1,-1,-1 134 | 44,11,306.54,195.65,77.02,174.78,-1,-1,-1,-1 135 | 45,2,94.236,170.49,107.48,243.89,-1,-1,-1,-1 136 | 45,5,420.34,182.99,116.3,263.91,-1,-1,-1,-1 137 | 45,11,310.3,193.42,77.854,176.67,-1,-1,-1,-1 138 | 46,2,102.52,169.29,108.02,245.13,-1,-1,-1,-1 139 | 46,5,426.95,179.62,120.92,274.38,-1,-1,-1,-1 140 | 46,11,314.13,191.28,78.529,178.2,-1,-1,-1,-1 141 | 47,2,111.21,169.48,107.95,244.96,-1,-1,-1,-1 142 | 47,5,433.61,176.29,125.53,284.85,-1,-1,-1,-1 143 | 47,11,318.04,189.41,79.034,179.34,-1,-1,-1,-1 144 | 48,2,120.2,170.97,107.33,243.55,-1,-1,-1,-1 145 | 48,5,442.57,183.44,125.53,284.85,-1,-1,-1,-1 146 | 48,11,322.05,187.93,79.36,180.08,-1,-1,-1,-1 147 | 49,1,459.32,237.96,50.475,114.54,-1,-1,-1,-1 148 | 49,2,129.39,173.6,106.23,241.06,-1,-1,-1,-1 149 | 49,11,326.2,186.99,79.503,180.41,-1,-1,-1,-1 150 | 50,1,462.86,233.83,51.899,117.77,-1,-1,-1,-1 151 | 50,2,138.73,177.21,104.73,237.65,-1,-1,-1,-1 152 | 50,11,330.53,186.6,79.461,180.31,-1,-1,-1,-1 153 | 51,1,466.5,230.01,53.199,120.72,-1,-1,-1,-1 154 | 51,2,148.19,181.54,102.89,233.48,-1,-1,-1,-1 155 | 51,11,335.03,186.77,79.236,179.8,-1,-1,-1,-1 156 | 52,1,470.24,226.53,54.374,123.39,-1,-1,-1,-1 157 | 52,2,157.75,186.54,100.79,228.71,-1,-1,-1,-1 158 | 52,11,339.68,187.47,78.833,178.89,-1,-1,-1,-1 159 | 53,1,474.11,223.4,55.425,125.78,-1,-1,-1,-1 160 | 53,2,167.48,192.06,98.496,223.51,-1,-1,-1,-1 161 | 53,11,344.45,188.69,78.259,177.59,-1,-1,-1,-1 162 | 54,1,478.14,220.61,56.352,127.88,-1,-1,-1,-1 163 | 54,2,177.42,197.94,96.081,218.03,-1,-1,-1,-1 164 | 54,11,349.28,190.37,77.525,175.92,-1,-1,-1,-1 165 | 55,1,482.39,218.19,57.154,129.7,-1,-1,-1,-1 166 | 55,2,187.55,204.01,93.617,212.44,-1,-1,-1,-1 167 | 55,11,354.13,192.44,76.646,173.93,-1,-1,-1,-1 168 | 55,12,533.9,309.3,68.825,156.18,-1,-1,-1,-1 169 | 56,1,486.87,216.16,57.832,131.24,-1,-1,-1,-1 170 | 56,2,197.87,210.13,91.174,206.9,-1,-1,-1,-1 171 | 56,11,358.95,194.8,75.638,171.64,-1,-1,-1,-1 172 | 56,12,533.51,279.28,82.049,186.19,-1,-1,-1,-1 173 | 57,1,491.6,214.54,58.386,132.49,-1,-1,-1,-1 174 | 57,2,208.33,216.16,88.824,201.56,-1,-1,-1,-1 175 | 57,11,363.73,197.38,74.522,169.1,-1,-1,-1,-1 176 | 57,12,533.16,249.29,95.273,216.2,-1,-1,-1,-1 177 | 58,1,496.55,213.28,58.815,133.47,-1,-1,-1,-1 178 | 58,2,218.83,221.94,86.637,196.6,-1,-1,-1,-1 179 | 58,11,368.43,200.11,73.32,166.38,-1,-1,-1,-1 180 | 
58,12,532.85,219.33,108.5,246.2,-1,-1,-1,-1 181 | 59,1,501.69,212.34,59.119,134.16,-1,-1,-1,-1 182 | 59,2,229.42,227.21,84.684,192.17,-1,-1,-1,-1 183 | 59,11,373.11,202.9,72.061,163.52,-1,-1,-1,-1 184 | 59,12,532.56,189.41,121.72,276.21,-1,-1,-1,-1 185 | 60,1,506.97,211.69,59.3,134.57,-1,-1,-1,-1 186 | 60,2,240.03,231.7,83.037,188.43,-1,-1,-1,-1 187 | 60,11,377.81,205.68,70.773,160.6,-1,-1,-1,-1 188 | 60,12,536.94,180.92,125.53,284.85,-1,-1,-1,-1 189 | 61,1,512.37,211.32,59.355,134.69,-1,-1,-1,-1 190 | 61,2,250.51,235.22,81.767,185.55,-1,-1,-1,-1 191 | 61,11,382.61,208.36,69.489,157.68,-1,-1,-1,-1 192 | 61,12,543.18,181.11,125.53,284.85,-1,-1,-1,-1 193 | 62,1,517.89,211.16,59.287,134.54,-1,-1,-1,-1 194 | 62,2,260.69,237.62,80.944,183.68,-1,-1,-1,-1 195 | 62,11,387.51,210.9,68.246,154.86,-1,-1,-1,-1 196 | 63,1,523.53,211.17,59.094,134.1,-1,-1,-1,-1 197 | 63,2,270.48,238.63,80.641,182.99,-1,-1,-1,-1 198 | 63,11,392.47,213.29,67.081,152.22,-1,-1,-1,-1 199 | 64,1,529.29,211.36,58.776,133.38,-1,-1,-1,-1 200 | 64,2,279.79,238.08,80.928,183.64,-1,-1,-1,-1 201 | 64,11,397.5,215.45,66.039,149.86,-1,-1,-1,-1 202 | 65,1,535.17,211.62,58.334,132.37,-1,-1,-1,-1 203 | 65,2,288.52,235.8,81.876,185.79,-1,-1,-1,-1 204 | 65,11,402.55,217.33,65.163,147.87,-1,-1,-1,-1 205 | 66,1,541.16,211.9,57.768,131.09,-1,-1,-1,-1 206 | 66,2,296.67,231.74,83.556,189.61,-1,-1,-1,-1 207 | 66,11,407.6,218.87,64.503,146.37,-1,-1,-1,-1 208 | 67,1,547.25,212.24,57.077,129.52,-1,-1,-1,-1 209 | 67,2,304.28,225.68,86.04,195.24,-1,-1,-1,-1 210 | 67,11,412.69,220,64.11,145.48,-1,-1,-1,-1 211 | 68,1,553.44,212.71,56.262,127.67,-1,-1,-1,-1 212 | 68,2,311.34,217.45,89.398,202.86,-1,-1,-1,-1 213 | 68,11,417.76,220.6,64.039,145.32,-1,-1,-1,-1 214 | 69,1,559.73,213.37,55.323,125.54,-1,-1,-1,-1 215 | 69,2,317.88,206.93,93.702,212.63,-1,-1,-1,-1 216 | 69,11,422.74,220.5,64.348,146.02,-1,-1,-1,-1 217 | 70,1,566.12,214.27,54.259,123.13,-1,-1,-1,-1 218 | 70,2,323.85,194.07,99.022,224.7,-1,-1,-1,-1 219 | 70,11,427.58,219.49,65.097,147.72,-1,-1,-1,-1 220 | 71,1,572.59,215.45,53.07,120.43,-1,-1,-1,-1 221 | 71,2,329.22,178.75,105.43,239.25,-1,-1,-1,-1 222 | 71,11,432.2,217.39,66.352,150.57,-1,-1,-1,-1 223 | -------------------------------------------------------------------------------- /examples/motmetrics_eval/data/iotest/detrac.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adipandas/multi-object-tracker/ce7130a35eed57c8d0552b8b35e62edbe8386100/examples/motmetrics_eval/data/iotest/detrac.mat -------------------------------------------------------------------------------- /examples/motmetrics_eval/data/iotest/detrac.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /examples/motmetrics_eval/data/iotest/motchallenge.txt: -------------------------------------------------------------------------------- 1 | 1,1,399,182,121,229,1,-1,-1,-1 2 | 1,2,282,201,92,184,1,-1,-1,-1 3 | 2,2,269,202,87,182,1,-1,-1,-1 4 | 2,3,71,151,100,284,1,-1,-1,-1 5 | 2,4,200,206,55,137,1,-1,-1,-1 -------------------------------------------------------------------------------- /examples/motmetrics_eval/data/iotest/vatic.txt: 
-------------------------------------------------------------------------------- 1 | 0 412 0 842 124 0 0 0 0 "worker" 2 | 0 412 10 842 124 1 0 0 1 "pc" "attr1" "attr3" 3 | 1 412 0 842 124 1 0 0 1 "pc" "attr2" 4 | 2 412 0 842 124 2 0 0 1 "worker" "attr4" "attr1" "attr2" 5 | -------------------------------------------------------------------------------- /examples/motmetrics_eval/motmeterics.py: -------------------------------------------------------------------------------- 1 | import os 2 | import motmetrics as mm 3 | 4 | 5 | def compute_motchallenge(dname): 6 | df_gt = mm.io.loadtxt(os.path.join(dname, 'gt.txt')) 7 | df_test = mm.io.loadtxt(os.path.join(dname, 'test.txt')) 8 | return mm.utils.compare_to_groundtruth(df_gt, df_test, 'iou', distth=0.5) 9 | 10 | 11 | def metrics_motchallenge_files(data_dir='data'): 12 | """ 13 | Metric evaluation for sequences TUD-Campus and TUD-Stadtmitte for MOTChallenge. 14 | """ 15 | 16 | dnames = ['TUD-Campus', 'TUD-Stadtmitte'] 17 | 18 | # accumulators for two datasets TUD-Campus and TUD-Stadtmitte. 19 | accs = [compute_motchallenge(os.path.join(data_dir, d)) for d in dnames] 20 | 21 | mh = mm.metrics.create() 22 | summary = mh.compute_many(accs, metrics=mm.metrics.motchallenge_metrics, names=dnames, generate_overall=True) 23 | 24 | print() 25 | print(mm.io.render_summary(summary, namemap=mm.io.motchallenge_metric_names, formatters=mh.formatters)) 26 | 27 | 28 | if __name__ == '__main__': 29 | metrics_motchallenge_files(data_dir='data') 30 | -------------------------------------------------------------------------------- /examples/motmetrics_eval/readme.md: -------------------------------------------------------------------------------- 1 | ### MOT Challenge file format 2 | 3 | [[GitHub](https://github.com/adipandas/multi-object-tracker)] 4 | [[Home](https://adipandas.github.io/multi-object-tracker/)] 5 | 6 | The file format should be the same as the ground truth file, which is a CSV text-file containing one object instance per line. 7 | 8 | ``` 9 | <frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z> 10 | ``` 11 | 12 | #### Reference 13 | 1.
https://motchallenge.net/instructions/ 14 | -------------------------------------------------------------------------------- /examples/pretrained_models/caffemodel_weights/get_caffemodel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # MobileNet-SSD model reference https://github.com/chuanqi305/MobileNet-SSD/ 4 | 5 | wget --no-check-certificate "https://drive.google.com/u/0/uc?id=0B3gersZ2cHIxRm5PMWRoTkdHdHc&export=download" -O 'MobileNetSSD_deploy.caffemodel' 6 | wget "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/daef68a6c2f5fbb8c88404266aa28180646d17e0/MobileNetSSD_deploy.prototxt" -O "MobileNetSSD_deploy.prototxt" 7 | -------------------------------------------------------------------------------- /examples/pretrained_models/caffemodel_weights/ssd_mobilenet_caffe_names.json: -------------------------------------------------------------------------------- 1 | {"0": "background", "1": "aeroplane", "2": "bicycle", "3": "bird", "4": "boat", "5": "bottle", "6": "bus", "7": "car", "8": "cat", "9": "chair", "10": "cow", "11": "diningtable", "12": "dog", "13": "horse", "14": "motorbike", "15": "person", "16": "pottedplant", "17": "sheep", "18": "sofa", "19": "train", "20": "tvmonitor"} -------------------------------------------------------------------------------- /examples/pretrained_models/tensorflow_weights/get_ssd_model.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Get models from https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API#use-existing-config-file-for-your-model 4 | 5 | wget -c http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_coco_2018_03_29.tar.gz -O - | tar -xz 6 | wget https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/ssd_mobilenet_v2_coco_2018_03_29.pbtxt 7 | 8 | # Tensorflow object detection API: https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API#use-existing-config-file-for-your-model 9 | -------------------------------------------------------------------------------- /examples/pretrained_models/tensorflow_weights/ssd_mobilenet_v2_coco_names.json: -------------------------------------------------------------------------------- 1 | {"0": "background", "1": "person", "2": "bicycle", "3": "car", "4": "motorcycle", "5": "airplane", "6": "bus", "7": "train", 2 | "8": "truck", "9": "boat", "10": "traffic light", "11": "fire hydrant", "13": "stop sign", "14": "parking meter", "15": "bench", 3 | "16": "bird", "17": "cat", "18": "dog", "19": "horse", "20": "sheep", "21": "cow", "22": "elephant", "23": "bear", "24": "zebra", 4 | "25": "giraffe", "27": "backpack", "28": "umbrella", "31": "handbag", "32": "tie", "33": "suitcase", "34": "frisbee", "35": "skis", 5 | "36": "snowboard", "37": "sports ball", "38": "kite", "39": "baseball bat", "40": "baseball glove", "41": "skateboard", "42": "surfboard", 6 | "43": "tennis racket", "44": "bottle", "46": "wine glass", "47": "cup", "48": "fork", "49": "knife", "50": "spoon", "51": "bowl", "52": "banana", 7 | "53": "apple", "54": "sandwich", "55": "orange", "56": "broccoli", "57": "carrot", "58": "hot dog", "59": "pizza", "60": "donut", "61": "cake", 8 | "62": "chair", "63": "couch", "64": "potted plant", "65": "bed", "67": "dining table", "70": "toilet", "72": "tv", "73": "laptop", "74": "mouse", 9 | "75": "remote", "76": "keyboard", "77": "cell phone", "78": "microwave", "79": "oven", "80": "toaster", "81": "sink", "82": 
"refrigerator", 10 | "84": "book", "85": "clock", "86": "vase", "87": "scissors", "88": "teddy bear", "89": "hair drier", "90": "toothbrush" 11 | } 12 | -------------------------------------------------------------------------------- /examples/pretrained_models/yolo_weights/coco_names.json: -------------------------------------------------------------------------------- 1 | {"0": "person", "1": "bicycle", "2": "car", "3": "motorbike", "4": "aeroplane", "5": "bus", "6": "train", "7": "truck", "8": "boat", 2 | "9": "traffic light", "10": "fire hydrant", "11": "stop sign", "12": "parking meter", "13": "bench", "14": "bird", "15": "cat", 3 | "16": "dog", "17": "horse", "18": "sheep", "19": "cow", "20": "elephant", "21": "bear", "22": "zebra", "23": "giraffe", "24": "backpack", 4 | "25": "umbrella", "26": "handbag", "27": "tie", "28": "suitcase", "29": "frisbee", "30": "skis", "31": "snowboard", "32": "sports ball", 5 | "33": "kite", "34": "baseball bat", "35": "baseball glove", "36": "skateboard", "37": "surfboard", "38": "tennis racket", "39": "bottle", 6 | "40": "wine glass", "41": "cup", "42": "fork", "43": "knife", "44": "spoon", "45": "bowl", "46": "banana", "47": "apple", "48": "sandwich", 7 | "49": "orange", "50": "broccoli", 8 | "51": "carrot", "52": "hot dog", "53": "pizza", "54": "donut", "55": "cake", "56": "chair", "57": "sofa", 9 | "58": "pottedplant", "59": "bed", "60": "diningtable", "61": "toilet", "62": "tvmonitor", "63": "laptop", 10 | "64": "mouse", "65": "remote", "66": "keyboard", "67": "cell phone", "68": "microwave", "69": "oven", "70": "toaster", 11 | "71": "sink", "72": "refrigerator", "73": "book", "74": "clock", "75": "vase", "76": "scissors", "77": "teddy bear", "78": "hair drier", 12 | "79": "toothbrush"} 13 | -------------------------------------------------------------------------------- /examples/pretrained_models/yolo_weights/get_yolo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | wget https://pjreddie.com/media/files/yolov3.weights 3 | wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg 4 | wget https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names 5 | -------------------------------------------------------------------------------- /examples/readme.md: -------------------------------------------------------------------------------- 1 | # multi-object-tracker examples 2 | 3 | [[Webpage](https://adipandas.github.io/multi-object-tracker/)] 4 | [[GitHub](https://github.com/adipandas/multi-object-tracker)] 5 | 6 | 7 | This folder contains various examples. 8 | 9 | Note: Before using these examples, you will have to download the network models. Please refer [these instructions](../DOWNLOAD_WEIGHTS.md) for downloading. 10 | 11 | 1. [Example jupyter-notebooks](https://github.com/adipandas/multi-object-tracker/tree/master/examples/example_notebooks) 12 | 2. [Example scripts](https://github.com/adipandas/multi-object-tracker/tree/master/examples/example_scripts) 13 | 3. [Evaluations based on py-motmetrics](https://github.com/adipandas/multi-object-tracker/tree/master/examples/motmetrics_eval) 14 | -------------------------------------------------------------------------------- /examples/video_data/readme.md: -------------------------------------------------------------------------------- 1 | ## Data for examples. 2 | 3 | To run the provided examples you can use the videos provided from the following links. 
4 | 5 | * cars: https://flic.kr/p/L6qyxj 6 | * cows: https://flic.kr/p/26WeEWy 7 | * people: https://flic.kr/p/7gWofV 8 | -------------------------------------------------------------------------------- /motrackers/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Multi-object Trackers in Python: 3 | - GitHub link: https://github.com/adipandas/multi-object-tracker 4 | - Author: Aditya M. Deshpande 5 | - Blog: http://adipandas.github.io/ 6 | """ 7 | 8 | 9 | from motrackers.tracker import Tracker as CentroidTracker 10 | from motrackers.centroid_kf_tracker import CentroidKF_Tracker 11 | from motrackers.sort_tracker import SORT 12 | from motrackers.iou_tracker import IOUTracker 13 | -------------------------------------------------------------------------------- /motrackers/centroid_kf_tracker.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | import numpy as np 3 | from scipy.spatial import distance 4 | from scipy.optimize import linear_sum_assignment 5 | from motrackers.tracker import Tracker 6 | from motrackers.track import KFTrackCentroid 7 | from motrackers.utils.misc import get_centroid 8 | 9 | 10 | def assign_tracks2detection_centroid_distances(bbox_tracks, bbox_detections, distance_threshold=10.): 11 | """ 12 | Assigns detected bounding boxes to tracked bounding boxes using the Euclidean distance between bounding-box centroids. 13 | 14 | Args: 15 | bbox_tracks (numpy.ndarray): Tracked bounding boxes with shape `(n, 4)` 16 | and each row as `(xmin, ymin, width, height)`. 17 | bbox_detections (numpy.ndarray): Detection bounding boxes with shape `(m, 4)` and 18 | each row as `(xmin, ymin, width, height)`. 19 | distance_threshold (float): Maximum distance between the predicted centroid of a tracked object 20 | and the centroid of a new detection for the pair to be considered for assignment. 21 | 22 | Returns: 23 | tuple: Tuple containing the following elements: 24 | - matches (numpy.ndarray): Array of shape `(n, 2)` where `n` is number of pairs formed after matching tracks to detections. This is an array of tuples with each element as matched pair of indices `(track_index, detection_index)`. 25 | - unmatched_detections (numpy.ndarray): Array of shape `(m,)` where `m` is number of unmatched detections. 26 | - unmatched_tracks (numpy.ndarray): Array of shape `(k,)` where `k` is the number of unmatched tracks.
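        Example (illustrative; one track and one detection whose centroids are ~2.2 px apart, well under the default `distance_threshold`):

            >>> bbox_tracks = np.array([[10, 10, 20, 20]])       # (xmin, ymin, width, height)
            >>> bbox_detections = np.array([[12, 11, 20, 20]])
            >>> m, unmatched_d, unmatched_t = assign_tracks2detection_centroid_distances(bbox_tracks, bbox_detections)
            >>> m
            array([[0, 0]])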
27 | 28 | """ 29 | 30 | if (bbox_tracks.size == 0) or (bbox_detections.size == 0): 31 | return np.empty((0, 2), dtype=int), np.arange(len(bbox_detections), dtype=int), np.empty((0,), dtype=int) 32 | 33 | if len(bbox_tracks.shape) == 1: 34 | bbox_tracks = bbox_tracks[None, :] 35 | 36 | if len(bbox_detections.shape) == 1: 37 | bbox_detections = bbox_detections[None, :] 38 | 39 | estimated_track_centroids = get_centroid(bbox_tracks) 40 | detection_centroids = get_centroid(bbox_detections) 41 | centroid_distances = distance.cdist(estimated_track_centroids, detection_centroids) 42 | 43 | assigned_tracks, assigned_detections = linear_sum_assignment(centroid_distances) 44 | 45 | unmatched_detections, unmatched_tracks = [], [] 46 | 47 | for d in range(bbox_detections.shape[0]): 48 | if d not in assigned_detections: 49 | unmatched_detections.append(d) 50 | 51 | for t in range(bbox_tracks.shape[0]): 52 | if t not in assigned_tracks: 53 | unmatched_tracks.append(t) 54 | 55 | # filter out matched with high distance between centroids 56 | matches = [] 57 | for t, d in zip(assigned_tracks, assigned_detections): 58 | if centroid_distances[t, d] > distance_threshold: 59 | unmatched_detections.append(d) 60 | unmatched_tracks.append(t) 61 | else: 62 | matches.append((t, d)) 63 | 64 | if len(matches): 65 | matches = np.array(matches) 66 | else: 67 | matches = np.empty((0, 2), dtype=int) 68 | 69 | return matches, np.array(unmatched_detections), np.array(unmatched_tracks) 70 | 71 | 72 | class CentroidKF_Tracker(Tracker): 73 | """ 74 | Kalman filter based tracking of multiple detected objects. 75 | 76 | Args: 77 | max_lost (int): Maximum number of consecutive frames object was not detected. 78 | tracker_output_format (str): Output format of the tracker. 79 | process_noise_scale (float or numpy.ndarray): Process noise covariance matrix of shape (3, 3) or 80 | covariance magnitude as scalar value. 81 | measurement_noise_scale (float or numpy.ndarray): Measurement noise covariance matrix of shape (1,) 82 | or covariance magnitude as scalar value. 83 | time_step (int or float): Time step for Kalman Filter. 
84 | """ 85 | 86 | def __init__( 87 | self, 88 | max_lost=1, 89 | centroid_distance_threshold=30., 90 | tracker_output_format='mot_challenge', 91 | process_noise_scale=1.0, 92 | measurement_noise_scale=1.0, 93 | time_step=1 94 | ): 95 | self.time_step = time_step 96 | self.process_noise_scale = process_noise_scale 97 | self.measurement_noise_scale = measurement_noise_scale 98 | self.centroid_distance_threshold = centroid_distance_threshold 99 | self.kalman_trackers = OrderedDict() 100 | super().__init__(max_lost, tracker_output_format) 101 | 102 | def _add_track(self, frame_id, bbox, detection_confidence, class_id, **kwargs): 103 | self.tracks[self.next_track_id] = KFTrackCentroid( 104 | self.next_track_id, frame_id, bbox, detection_confidence, class_id=class_id, 105 | data_output_format=self.tracker_output_format, process_noise_scale=self.process_noise_scale, 106 | measurement_noise_scale=self.measurement_noise_scale, **kwargs 107 | ) 108 | self.next_track_id += 1 109 | 110 | def update(self, bboxes, detection_scores, class_ids): 111 | self.frame_count += 1 112 | bbox_detections = np.array(bboxes, dtype='int') 113 | 114 | track_ids = list(self.tracks.keys()) 115 | bbox_tracks = [] 116 | for track_id in track_ids: 117 | bbox_tracks.append(self.tracks[track_id].predict()) 118 | bbox_tracks = np.array(bbox_tracks) 119 | 120 | if len(bboxes) == 0: 121 | for i in range(len(bbox_tracks)): 122 | track_id = track_ids[i] 123 | bbox = bbox_tracks[i, :] 124 | confidence = self.tracks[track_id].detection_confidence 125 | cid = self.tracks[track_id].class_id 126 | self._update_track(track_id, self.frame_count, bbox, detection_confidence=confidence, class_id=cid, lost=1) 127 | if self.tracks[track_id].lost > self.max_lost: 128 | self._remove_track(track_id) 129 | else: 130 | matches, unmatched_detections, unmatched_tracks = assign_tracks2detection_centroid_distances( 131 | bbox_tracks, bbox_detections, distance_threshold=self.centroid_distance_threshold 132 | ) 133 | 134 | for i in range(matches.shape[0]): 135 | t, d = matches[i, :] 136 | track_id = track_ids[t] 137 | bbox = bboxes[d, :] 138 | cid = class_ids[d] 139 | confidence = detection_scores[d] 140 | self._update_track(track_id, self.frame_count, bbox, confidence, cid, lost=0) 141 | 142 | for d in unmatched_detections: 143 | bbox = bboxes[d, :] 144 | cid = class_ids[d] 145 | confidence = detection_scores[d] 146 | self._add_track(self.frame_count, bbox, confidence, cid) 147 | 148 | for t in unmatched_tracks: 149 | track_id = track_ids[t] 150 | bbox = bbox_tracks[t, :] 151 | confidence = self.tracks[track_id].detection_confidence 152 | cid = self.tracks[track_id].class_id 153 | self._update_track(track_id, self.frame_count, bbox, confidence, cid, lost=1) 154 | 155 | if self.tracks[track_id].lost > self.max_lost: 156 | self._remove_track(track_id) 157 | 158 | outputs = self._get_tracks(self.tracks) 159 | return outputs 160 | -------------------------------------------------------------------------------- /motrackers/detectors/__init__.py: -------------------------------------------------------------------------------- 1 | from motrackers.detectors.tf import TF_SSDMobileNetV2 2 | from motrackers.detectors.caffe import Caffe_SSDMobileNet 3 | from motrackers.detectors.yolo import YOLOv3 4 | -------------------------------------------------------------------------------- /motrackers/detectors/caffe.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | from motrackers.detectors.detector import 
Detector 3 | from motrackers.utils.misc import load_labelsjson 4 | 5 | 6 | class Caffe_SSDMobileNet(Detector): 7 | """ 8 | Caffe SSD MobileNet model for Object Detection. 9 | 10 | Args: 11 | weights_path (str): path to network weights file. 12 | configfile_path (str): path to network configuration file. 13 | labels_path (str): path to data labels json file. 14 | confidence_threshold (float): confidence threshold to select the detected object. 15 | nms_threshold (float): Non-maximum suppression threshold. 16 | draw_bboxes (bool): If True, assign colors for drawing bounding boxes on the image. 17 | use_gpu (bool): If True, try to load the model on GPU. 18 | """ 19 | 20 | def __init__(self, weights_path, configfile_path, labels_path, 21 | confidence_threshold=0.5, nms_threshold=0.2, draw_bboxes=True, use_gpu=False): 22 | 23 | object_names = load_labelsjson(labels_path) 24 | 25 | self.pixel_mean = 127.5 26 | self.pixel_std = 1/127.5 27 | self.image_size = (300, 300) 28 | 29 | self.net = cv.dnn.readNetFromCaffe(configfile_path, weights_path) 30 | 31 | if use_gpu: 32 | self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA) 33 | self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA) 34 | 35 | super().__init__(object_names, confidence_threshold, nms_threshold, draw_bboxes) 36 | 37 | def forward(self, image): 38 | blob = cv.dnn.blobFromImage(image, scalefactor=self.pixel_std, size=self.image_size, 39 | mean=(self.pixel_mean, self.pixel_mean, self.pixel_mean), swapRB=True, crop=False) 40 | self.net.setInput(blob) 41 | detections = self.net.forward() 42 | return detections 43 | -------------------------------------------------------------------------------- /motrackers/detectors/detector.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 as cv 3 | from motrackers.utils.misc import xyxy2xywh 4 | 5 | 6 | class Detector: 7 | """ 8 | Abstract class for detector. 9 | 10 | Args: 11 | object_names (dict): Dictionary containing (key, value) as (class_id, class_name) for object detector. 12 | confidence_threshold (float): Confidence threshold for object detection. 13 | nms_threshold (float): Non-maximum suppression threshold. 14 | draw_bboxes (bool): If True, assign colors for drawing bounding boxes on the image. 15 | """ 16 | 17 | def __init__(self, object_names, confidence_threshold, nms_threshold, draw_bboxes=True): 18 | self.object_names = object_names 19 | self.confidence_threshold = confidence_threshold 20 | self.nms_threshold = nms_threshold 21 | self.height = None 22 | self.width = None 23 | 24 | np.random.seed(12345) 25 | if draw_bboxes: 26 | self.bbox_colors = {key: np.random.randint(0, 255, size=(3,)).tolist() for key in self.object_names.keys()} 27 | 28 | def forward(self, image): 29 | """ 30 | Forward pass for the detector with input image. 31 | 32 | Args: 33 | image (numpy.ndarray): Input image. 34 | 35 | Returns: 36 | numpy.ndarray: detections 37 | """ 38 | raise NotImplementedError 39 | 40 | def detect(self, image): 41 | """ 42 | Detect objects in the input image. 43 | 44 | Args: 45 | image (numpy.ndarray): Input image. 46 | 47 | Returns: 48 | tuple: Tuple containing the following elements: 49 | - bboxes (numpy.ndarray): Bounding boxes with shape (n, 4) containing detected objects with each row as `(xmin, ymin, width, height)`. 50 | - confidences (numpy.ndarray): Confidence or detection probabilities of the detected objects with shape (n,).
51 | - class_ids (numpy.ndarray): Class_ids or label_ids of detected objects with shape (n,). 52 | 53 | """ 54 | if self.width is None or self.height is None: 55 | (self.height, self.width) = image.shape[:2] 56 | 57 | detections = self.forward(image).squeeze(axis=0).squeeze(axis=0) 58 | 59 | bboxes, confidences, class_ids = [], [], [] 60 | 61 | for i in range(detections.shape[0]): 62 | detection = detections[i, :] 63 | class_id = detection[1] 64 | confidence = detection[2] 65 | 66 | if confidence > self.confidence_threshold: 67 | bbox = detection[3:7] * np.array([self.width, self.height, self.width, self.height]) 68 | bboxes.append(bbox.astype("int")) 69 | confidences.append(float(confidence)) 70 | class_ids.append(int(class_id)) 71 | 72 | if len(bboxes): 73 | bboxes = xyxy2xywh(np.array(bboxes)).tolist() 74 | class_ids = np.array(class_ids).astype('int') 75 | indices = cv.dnn.NMSBoxes(bboxes, confidences, self.confidence_threshold, self.nms_threshold).flatten() 76 | return np.array(bboxes)[indices, :], np.array(confidences)[indices], class_ids[indices] 77 | else: 78 | return np.array([]), np.array([]), np.array([]) 79 | 80 | def draw_bboxes(self, image, bboxes, confidences, class_ids): 81 | """ 82 | Draw the bounding boxes around detected objects in the image. 83 | 84 | Args: 85 | image (numpy.ndarray): Image or video frame. 86 | bboxes (numpy.ndarray): Bounding boxes pixel coordinates as (xmin, ymin, width, height). 87 | confidences (numpy.ndarray): Detection confidence or detection probability. 88 | class_ids (numpy.ndarray): Array containing class ids (aka label ids) of each detected object. 89 | 90 | Returns: 91 | numpy.ndarray: image with the bounding boxes drawn on it. 92 | """ 93 | 94 | for bb, conf, cid in zip(bboxes, confidences, class_ids): 95 | clr = [int(c) for c in self.bbox_colors[cid]] 96 | cv.rectangle(image, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]), clr, 2) 97 | label = "{}:{:.4f}".format(self.object_names[cid], conf) 98 | (label_width, label_height), baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 2) 99 | y_label = max(bb[1], label_height) 100 | cv.rectangle(image, (bb[0], y_label - label_height), (bb[0] + label_width, y_label + baseLine), 101 | (255, 255, 255), cv.FILLED) 102 | cv.putText(image, label, (bb[0], y_label), cv.FONT_HERSHEY_SIMPLEX, 0.5, clr, 2) 103 | return image 104 | -------------------------------------------------------------------------------- /motrackers/detectors/tf.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | from motrackers.detectors.detector import Detector 3 | from motrackers.utils.misc import load_labelsjson 4 | 5 | 6 | class TF_SSDMobileNetV2(Detector): 7 | """ 8 | Tensorflow SSD MobileNetv2 model for Object Detection. 9 | 10 | Args: 11 | weights_path (str): path to network weights file. 12 | configfile_path (str): path to network configuration file. 13 | labels_path (str): path to data labels json file. 14 | confidence_threshold (float): confidence threshold to select the detected object. 15 | nms_threshold (float): Non-maximum suppression threshold. 16 | draw_bboxes (bool): If True, assign colors for drawing bounding boxes on the image. 17 | use_gpu (bool): If True, try to load the model on GPU.
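        Example (illustrative paths; these are the files fetched by ``tensorflow_weights/get_ssd_model.sh``):

            >>> model = TF_SSDMobileNetV2(
            ...     weights_path="ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb",
            ...     configfile_path="ssd_mobilenet_v2_coco_2018_03_29.pbtxt",
            ...     labels_path="ssd_mobilenet_v2_coco_names.json")
            >>> bboxes, confidences, class_ids = model.detect(frame)  # frame: BGR image (numpy.ndarray)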
18 | """ 19 | 20 | def __init__(self, weights_path, configfile_path, labels_path, confidence_threshold=0.5, nms_threshold=0.4, draw_bboxes=True, use_gpu=False): 21 | self.image_size = (300, 300) 22 | self.net = cv.dnn.readNetFromTensorflow(weights_path, configfile_path) 23 | 24 | if use_gpu: 25 | self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA) 26 | self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA) 27 | 28 | object_names = load_labelsjson(labels_path) 29 | 30 | super().__init__(object_names, confidence_threshold, nms_threshold, draw_bboxes) 31 | 32 | def forward(self, image): 33 | blob = cv.dnn.blobFromImage(image, size=self.image_size, swapRB=True, crop=False) 34 | self.net.setInput(blob) 35 | detections = self.net.forward() 36 | return detections 37 | -------------------------------------------------------------------------------- /motrackers/detectors/yolo.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 as cv 3 | from motrackers.detectors.detector import Detector 4 | from motrackers.utils.misc import load_labelsjson 5 | 6 | 7 | class YOLOv3(Detector): 8 | """ 9 | YOLOv3 Object Detector Module. 10 | 11 | Args: 12 | weights_path (str): path to network weights file. 13 | configfile_path (str): path to network configuration file. 14 | labels_path (str): path to data labels json file. 15 | confidence_threshold (float): confidence threshold to select the detected object. 16 | nms_threshold (float): Non-maximum suppression threshold. 17 | draw_bboxes (bool): If True, assign colors for drawing bounding boxes on the image. 18 | use_gpu (bool): If True, try to load the model on GPU. 19 | """ 20 | 21 | def __init__(self, weights_path, configfile_path, labels_path, confidence_threshold=0.5, nms_threshold=0.2, draw_bboxes=True, use_gpu=False): 22 | self.net = cv.dnn.readNetFromDarknet(configfile_path, weights_path) 23 | object_names = load_labelsjson(labels_path) 24 | 25 | layer_names = self.net.getLayerNames() 26 | if cv.__version__ >= '4.6.0': 27 | self.layer_names = [layer_names[i - 1] for i in self.net.getUnconnectedOutLayers()] 28 | else: 29 | self.layer_names = [layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()] 30 | 31 | self.scale_factor = 1/255.0 32 | self.image_size = (416, 416) 33 | 34 | self.net = cv.dnn.readNetFromDarknet(configfile_path, weights_path) 35 | if use_gpu: 36 | self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA) 37 | self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA) 38 | 39 | super().__init__(object_names, confidence_threshold, nms_threshold, draw_bboxes) 40 | 41 | def forward(self, image): 42 | blob = cv.dnn.blobFromImage(image, self.scale_factor, self.image_size, swapRB=True, crop=False) 43 | self.net.setInput(blob) 44 | detections = self.net.forward(self.layer_names) # detect objects using object detection model 45 | return detections 46 | 47 | def detect(self, image): 48 | if self.width is None or self.height is None: 49 | (self.height, self.width) = image.shape[:2] 50 | 51 | detections = self.forward(image) 52 | 53 | bboxes, confidences, class_ids = [], [], [] 54 | 55 | for output in detections: 56 | for detect in output: 57 | scores = detect[5:] 58 | class_id = np.argmax(scores) 59 | confidence = scores[class_id] 60 | if confidence > self.confidence_threshold: 61 | xmid, ymid, w, h = detect[0:4] * np.array([self.width, self.height, self.width, self.height]) 62 | x, y = int(xmid - 0.5*w), int(ymid - 0.5*h) 63 | bboxes.append([x, y, w, h]) 64 | 
65 |                     confidences.append(float(confidence))
66 |                     class_ids.append(class_id)
67 | 
68 |         if len(bboxes) == 0:
69 |             # no detections above threshold: NMSBoxes returns an empty tuple here, which has no .flatten()
70 |             return np.array([]), np.array([]), np.array([])
71 | 
72 |         indices = cv.dnn.NMSBoxes(bboxes, confidences, self.confidence_threshold, self.nms_threshold).flatten()
73 |         class_ids = np.array(class_ids).astype('int')
74 |         output = np.array(bboxes)[indices, :].astype('int'), np.array(confidences)[indices], class_ids[indices]
75 |         return output
76 | 
--------------------------------------------------------------------------------
/motrackers/iou_tracker.py:
--------------------------------------------------------------------------------
1 | from motrackers.utils.misc import iou_xywh as iou
2 | from motrackers.tracker import Tracker
3 | 
4 | 
5 | class IOUTracker(Tracker):
6 |     """
7 |     Intersection over Union Tracker.
8 | 
9 |     References
10 |     ----------
11 |     * Implementation of this algorithm is heavily based on https://github.com/bochinski/iou-tracker
12 | 
13 |     Args:
14 |         max_lost (int): Maximum number of consecutive frames the object was not detected.
15 |         tracker_output_format (str): Output format of the tracker.
16 |         min_detection_confidence (float): Threshold for minimum detection confidence.
17 |         max_detection_confidence (float): Threshold for maximum detection confidence.
18 |         iou_threshold (float): Intersection over union minimum value.
19 |     """
20 | 
21 |     def __init__(
22 |             self,
23 |             max_lost=2,
24 |             iou_threshold=0.5,
25 |             min_detection_confidence=0.4,
26 |             max_detection_confidence=0.7,
27 |             tracker_output_format='mot_challenge'
28 |     ):
29 |         self.iou_threshold = iou_threshold
30 |         self.max_detection_confidence = max_detection_confidence
31 |         self.min_detection_confidence = min_detection_confidence
32 | 
33 |         super(IOUTracker, self).__init__(max_lost=max_lost, tracker_output_format=tracker_output_format)
34 | 
35 |     def update(self, bboxes, detection_scores, class_ids):
36 |         detections = Tracker.preprocess_input(bboxes, class_ids, detection_scores)
37 |         self.frame_count += 1
38 |         track_ids = list(self.tracks.keys())
39 | 
40 |         updated_tracks = []
41 |         for track_id in track_ids:
42 |             if len(detections) > 0:
43 |                 idx, best_match = max(enumerate(detections), key=lambda x: iou(self.tracks[track_id].bbox, x[1][0]))
44 |                 (bb, cid, scr) = best_match
45 | 
46 |                 if iou(self.tracks[track_id].bbox, bb) > self.iou_threshold:
47 |                     self._update_track(track_id, self.frame_count, bb, scr, class_id=cid,
48 |                                        iou_score=iou(self.tracks[track_id].bbox, bb))
49 |                     updated_tracks.append(track_id)
50 |                     del detections[idx]
51 | 
52 |             if len(updated_tracks) == 0 or track_id != updated_tracks[-1]:
53 |                 self.tracks[track_id].lost += 1
54 |                 if self.tracks[track_id].lost > self.max_lost:
55 |                     self._remove_track(track_id)
56 | 
57 |         for bb, cid, scr in detections:
58 |             self._add_track(self.frame_count, bb, scr, class_id=cid)
59 | 
60 |         outputs = self._get_tracks(self.tracks)
61 |         return outputs
62 | 
--------------------------------------------------------------------------------
/motrackers/sort_tracker.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.optimize import linear_sum_assignment
3 | from motrackers.utils.misc import iou_xywh as iou
4 | from motrackers.track import KFTrackSORT, KFTrack4DSORT
5 | from motrackers.centroid_kf_tracker import CentroidKF_Tracker
6 | 
7 | 
8 | def assign_tracks2detection_iou(bbox_tracks, bbox_detections, iou_threshold=0.3):
9 |     """
10 |     Assigns detected bounding boxes to tracked bounding boxes using IoU as a distance metric.
11 | 
12 |     Args:
13 |         bbox_tracks (numpy.ndarray): Bounding boxes of shape `(N, 4)` where `N` is number of objects already being tracked.
14 |         bbox_detections (numpy.ndarray): Bounding boxes of shape `(M, 4)` where `M` is number of objects that are newly detected.
15 |         iou_threshold (float): IoU threshold.
16 | 
17 |     Returns:
18 |         tuple: Tuple contains the following elements in the given order:
19 |             - matches (numpy.ndarray): Array of shape `(n, 2)` where `n` is number of pairs formed after matching tracks to detections. This is an array of tuples with each element as a matched pair of indices `(track_index, detection_index)`.
20 |             - unmatched_detections (numpy.ndarray): Array of shape `(m,)` where `m` is number of unmatched detections.
21 |             - unmatched_tracks (numpy.ndarray): Array of shape `(k,)` where `k` is the number of unmatched tracks.
22 |     """
23 | 
24 |     if (bbox_tracks.size == 0) or (bbox_detections.size == 0):  # nothing to match on one side
25 |         return np.empty((0, 2), dtype=int), np.arange(len(bbox_detections), dtype=int), np.arange(len(bbox_tracks), dtype=int)
26 | 
27 |     if len(bbox_tracks.shape) == 1:
28 |         bbox_tracks = bbox_tracks[None, :]
29 | 
30 |     if len(bbox_detections.shape) == 1:
31 |         bbox_detections = bbox_detections[None, :]
32 | 
33 |     iou_matrix = np.zeros((bbox_tracks.shape[0], bbox_detections.shape[0]), dtype=np.float32)
34 |     for t in range(bbox_tracks.shape[0]):
35 |         for d in range(bbox_detections.shape[0]):
36 |             iou_matrix[t, d] = iou(bbox_tracks[t, :], bbox_detections[d, :])
37 |     assigned_tracks, assigned_detections = linear_sum_assignment(-iou_matrix)
38 |     unmatched_detections, unmatched_tracks = [], []
39 | 
40 |     for d in range(bbox_detections.shape[0]):
41 |         if d not in assigned_detections:
42 |             unmatched_detections.append(d)
43 | 
44 |     for t in range(bbox_tracks.shape[0]):
45 |         if t not in assigned_tracks:
46 |             unmatched_tracks.append(t)
47 | 
48 |     # filter out matches with low IoU
49 |     matches = []
50 |     for t, d in zip(assigned_tracks, assigned_detections):
51 |         if iou_matrix[t, d] < iou_threshold:
52 |             unmatched_detections.append(d)
53 |             unmatched_tracks.append(t)
54 |         else:
55 |             matches.append((t, d))
56 | 
57 |     if len(matches):
58 |         matches = np.array(matches)
59 |     else:
60 |         matches = np.empty((0, 2), dtype=int)
61 | 
62 |     return matches, np.array(unmatched_detections), np.array(unmatched_tracks)
63 | 
64 | 
65 | class SORT(CentroidKF_Tracker):
66 |     """
67 |     SORT - Multi object tracker. A minimal usage sketch appears at the bottom of this module.
68 | 
69 |     Args:
70 |         max_lost (int): Maximum number of times an object is lost while tracking.
71 |         tracker_output_format (str): Output format of the tracker.
72 |         iou_threshold (float): Intersection over union minimum value.
73 |         process_noise_scale (float or numpy.ndarray): Process noise covariance matrix of shape (3, 3)
74 |             or covariance magnitude as scalar value.
75 |         measurement_noise_scale (float or numpy.ndarray): Measurement noise covariance matrix of shape (1,)
76 |             or covariance magnitude as scalar value.
77 |         time_step (int or float): Time step for Kalman Filter.
78 | """ 79 | 80 | def __init__( 81 | self, max_lost=0, 82 | tracker_output_format='mot_challenge', 83 | iou_threshold=0.3, 84 | process_noise_scale=1.0, 85 | measurement_noise_scale=1.0, 86 | time_step=1 87 | ): 88 | self.iou_threshold = iou_threshold 89 | 90 | super().__init__( 91 | max_lost=max_lost, tracker_output_format=tracker_output_format, 92 | process_noise_scale=process_noise_scale, 93 | measurement_noise_scale=measurement_noise_scale, time_step=time_step 94 | ) 95 | 96 | def _add_track(self, frame_id, bbox, detection_confidence, class_id, **kwargs): 97 | # self.tracks[self.next_track_id] = KFTrackSORT( 98 | # self.next_track_id, frame_id, bbox, detection_confidence, class_id=class_id, 99 | # data_output_format=self.tracker_output_format, process_noise_scale=self.process_noise_scale, 100 | # measurement_noise_scale=self.measurement_noise_scale, **kwargs 101 | # ) 102 | self.tracks[self.next_track_id] = KFTrack4DSORT( 103 | self.next_track_id, frame_id, bbox, detection_confidence, class_id=class_id, 104 | data_output_format=self.tracker_output_format, process_noise_scale=self.process_noise_scale, 105 | measurement_noise_scale=self.measurement_noise_scale, kf_time_step=1, **kwargs) 106 | self.next_track_id += 1 107 | 108 | def update(self, bboxes, detection_scores, class_ids): 109 | self.frame_count += 1 110 | 111 | bbox_detections = np.array(bboxes, dtype='int') 112 | 113 | # track_ids_all = list(self.tracks.keys()) 114 | # bbox_tracks = [] 115 | # track_ids = [] 116 | # for track_id in track_ids_all: 117 | # bb = self.tracks[track_id].predict() 118 | # if np.any(np.isnan(bb)): 119 | # self._remove_track(track_id) 120 | # else: 121 | # track_ids.append(track_id) 122 | # bbox_tracks.append(bb) 123 | 124 | track_ids = list(self.tracks.keys()) 125 | bbox_tracks = [] 126 | for track_id in track_ids: 127 | bb = self.tracks[track_id].predict() 128 | bbox_tracks.append(bb) 129 | 130 | bbox_tracks = np.array(bbox_tracks) 131 | 132 | if len(bboxes) == 0: 133 | for i in range(len(bbox_tracks)): 134 | track_id = track_ids[i] 135 | bbox = bbox_tracks[i, :] 136 | confidence = self.tracks[track_id].detection_confidence 137 | cid = self.tracks[track_id].class_id 138 | self._update_track(track_id, self.frame_count, bbox, detection_confidence=confidence, class_id=cid, lost=1) 139 | if self.tracks[track_id].lost > self.max_lost: 140 | self._remove_track(track_id) 141 | else: 142 | matches, unmatched_detections, unmatched_tracks = assign_tracks2detection_iou( 143 | bbox_tracks, bbox_detections, iou_threshold=self.iou_threshold) 144 | 145 | for i in range(matches.shape[0]): 146 | t, d = matches[i, :] 147 | track_id = track_ids[t] 148 | bbox = bboxes[d, :] 149 | cid = class_ids[d] 150 | confidence = detection_scores[d] 151 | self._update_track(track_id, self.frame_count, bbox, confidence, cid, lost=0) 152 | 153 | for d in unmatched_detections: 154 | bbox = bboxes[d, :] 155 | cid = class_ids[d] 156 | confidence = detection_scores[d] 157 | self._add_track(self.frame_count, bbox, confidence, cid) 158 | 159 | for t in unmatched_tracks: 160 | track_id = track_ids[t] 161 | bbox = bbox_tracks[t, :] 162 | confidence = self.tracks[track_id].detection_confidence 163 | cid = self.tracks[track_id].class_id 164 | self._update_track(track_id, self.frame_count, bbox, detection_confidence=confidence, class_id=cid, lost=1) 165 | if self.tracks[track_id].lost > self.max_lost: 166 | self._remove_track(track_id) 167 | 168 | outputs = self._get_tracks(self.tracks) 169 | return outputs 170 | 
--------------------------------------------------------------------------------
/motrackers/tracker.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 | import numpy as np
3 | from scipy.spatial import distance
4 | from motrackers.utils.misc import get_centroid
5 | from motrackers.track import Track
6 | 
7 | 
8 | class Tracker:
9 |     """
10 |     Greedy Tracker with tracking based on ``centroid`` location of the bounding box of the object.
11 |     This tracker is also referred to as ``CentroidTracker`` in this repository. A minimal usage sketch appears at the bottom of this module.
12 | 
13 |     Args:
14 |         max_lost (int): Maximum number of consecutive frames the object was not detected.
15 |         tracker_output_format (str): Output format of the tracker.
16 |     """
17 | 
18 |     def __init__(self, max_lost=5, tracker_output_format='mot_challenge'):
19 |         self.next_track_id = 0
20 |         self.tracks = OrderedDict()
21 |         self.max_lost = max_lost
22 |         self.frame_count = 0
23 |         self.tracker_output_format = tracker_output_format
24 | 
25 |     def _add_track(self, frame_id, bbox, detection_confidence, class_id, **kwargs):
26 |         """
27 |         Add a newly detected object to the queue.
28 | 
29 |         Args:
30 |             frame_id (int): Camera frame id.
31 |             bbox (numpy.ndarray): Bounding box pixel coordinates as (xmin, ymin, width, height) of the track.
32 |             detection_confidence (float): Detection confidence of the object (probability).
33 |             class_id (str or int): Class label id.
34 |             kwargs (dict): Additional key word arguments.
35 |         """
36 | 
37 |         self.tracks[self.next_track_id] = Track(
38 |             self.next_track_id, frame_id, bbox, detection_confidence, class_id=class_id,
39 |             data_output_format=self.tracker_output_format,
40 |             **kwargs
41 |         )
42 |         self.next_track_id += 1
43 | 
44 |     def _remove_track(self, track_id):
45 |         """
46 |         Remove tracker data after object is lost.
47 | 
48 |         Args:
49 |             track_id (int): track_id of the track lost while tracking.
50 |         """
51 | 
52 |         del self.tracks[track_id]
53 | 
54 |     def _update_track(self, track_id, frame_id, bbox, detection_confidence, class_id, lost=0, iou_score=0., **kwargs):
55 |         """
56 |         Update track state.
57 | 
58 |         Args:
59 |             track_id (int): ID of the track.
60 |             frame_id (int): Frame count.
61 |             bbox (numpy.ndarray or list): Bounding box coordinates as `(xmin, ymin, width, height)`.
62 |             detection_confidence (float): Detection confidence (a.k.a. detection probability).
63 |             class_id (int): ID of the class (aka label) of the object being tracked.
64 |             lost (int): Number of frames the object was lost while tracking.
65 |             iou_score (float): Intersection over union.
66 |             kwargs (dict): Additional keyword arguments.
67 |         """
68 | 
69 |         self.tracks[track_id].update(
70 |             frame_id, bbox, detection_confidence, class_id=class_id, lost=lost, iou_score=iou_score, **kwargs
71 |         )
72 | 
73 |     @staticmethod
74 |     def _get_tracks(tracks):
75 |         """
76 |         Output the information of tracks.
77 | 
78 |         Args:
79 |             tracks (OrderedDict): Tracks dictionary with (key, value) as (track_id, corresponding `Track` objects).
80 | 
81 |         Returns:
82 |             list: List of tracks being currently tracked by the tracker.
83 |         """
84 | 
85 |         outputs = []
86 |         for _, track in tracks.items():
87 |             # if not track.lost:
88 |             #     outputs.append(track.output())
89 |             outputs.append(track.output())
90 |         return outputs
91 | 
92 |     @staticmethod
93 |     def preprocess_input(bboxes, class_ids, detection_scores):
94 |         """
95 |         Preprocess the input data.
96 | 
97 |         Args:
98 |             bboxes (list or numpy.ndarray): Array of bounding boxes with each bbox as a tuple containing `(xmin, ymin, width, height)`.
99 |             class_ids (list or numpy.ndarray): Array of Class ID or label ID.
100 |             detection_scores (list or numpy.ndarray): Array of detection scores (a.k.a. detection probabilities).
101 | 
102 |         Returns:
103 |             detections (list[Tuple]): Data for detections as list of tuples containing `(bbox, class_id, detection_score)`.
104 |         """
105 | 
106 |         new_bboxes = np.array(bboxes, dtype='float')
107 |         new_class_ids = np.array(class_ids, dtype='int')
108 |         new_detection_scores = np.array(detection_scores)
109 | 
110 |         new_detections = list(zip(new_bboxes, new_class_ids, new_detection_scores))
111 |         return new_detections
112 | 
113 |     def update(self, bboxes, detection_scores, class_ids):
114 |         """
115 |         Update the tracker based on the new bounding boxes.
116 | 
117 |         Args:
118 |             bboxes (numpy.ndarray or list): List of bounding boxes detected in the current frame. Each element of the list represents
119 |                 coordinates of a bounding box as the tuple `(top-left-x, top-left-y, width, height)`.
120 |             detection_scores (numpy.ndarray or list): List of detection scores (probability) of each detected object.
121 |             class_ids (numpy.ndarray or list): List of class_ids (int) corresponding to labels of the detected objects.
122 | 
123 |         Returns:
124 |             list: List of tracks being currently tracked by the tracker. Each track is represented by the tuple with elements `(frame_id, track_id, bb_left, bb_top, bb_width, bb_height, conf, x, y, z)`.
125 |         """
126 | 
127 |         self.frame_count += 1
128 | 
129 |         if len(bboxes) == 0:
130 |             lost_ids = list(self.tracks.keys())
131 | 
132 |             for track_id in lost_ids:
133 |                 self.tracks[track_id].lost += 1
134 |                 if self.tracks[track_id].lost > self.max_lost:
135 |                     self._remove_track(track_id)
136 | 
137 |             outputs = self._get_tracks(self.tracks)
138 |             return outputs
139 | 
140 |         detections = Tracker.preprocess_input(bboxes, class_ids, detection_scores)
141 | 
142 |         track_ids = list(self.tracks.keys())
143 | 
144 |         updated_tracks, updated_detections = [], []
145 | 
146 |         if len(track_ids):
147 |             track_centroids = np.array([self.tracks[tid].centroid for tid in track_ids])
148 |             detection_centroids = get_centroid(np.asarray(bboxes))
149 | 
150 |             centroid_distances = distance.cdist(track_centroids, detection_centroids)
151 | 
152 |             track_indices = np.amin(centroid_distances, axis=1).argsort()
153 | 
154 |             for idx in track_indices:
155 |                 track_id = track_ids[idx]
156 | 
157 |                 remaining_detections = [
158 |                     (i, d) for (i, d) in enumerate(centroid_distances[idx, :]) if i not in updated_detections]
159 | 
160 |                 if len(remaining_detections):
161 |                     detection_idx, detection_distance = min(remaining_detections, key=lambda x: x[1])
162 |                     bbox, class_id, confidence = detections[detection_idx]
163 |                     self._update_track(track_id, self.frame_count, bbox, confidence, class_id=class_id)
164 |                     updated_detections.append(detection_idx)
165 |                     updated_tracks.append(track_id)
166 | 
167 |                 if len(updated_tracks) == 0 or track_id != updated_tracks[-1]:
168 |                     self.tracks[track_id].lost += 1
169 |                     if self.tracks[track_id].lost > self.max_lost:
170 |                         self._remove_track(track_id)
171 | 
172 |         for i, (bbox, class_id, confidence) in enumerate(detections):
173 |             if i not in updated_detections:
174 |                 self._add_track(self.frame_count, bbox, confidence, class_id=class_id)
175 | 
176 |         outputs = self._get_tracks(self.tracks)
177 |         return outputs
178 | 
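179 | 
180 | # Minimal usage sketch for the centroid tracker above. The detections are
181 | # made-up (xmin, ymin, width, height) boxes rather than real detector output;
182 | # in practice they come from one of the detectors in motrackers.detectors.
183 | if __name__ == '__main__':
184 |     tracker = Tracker(max_lost=2)
185 | 
186 |     detections_per_frame = [
187 |         [(10, 10, 40, 60), (300, 200, 50, 70)],   # frame 1: two objects appear
188 |         [(12, 11, 40, 60), (305, 198, 50, 70)],   # frame 2: small motion, ids should persist
189 |     ]
190 | 
191 |     for bboxes in detections_per_frame:
192 |         tracks = tracker.update(bboxes, detection_scores=[0.9, 0.8], class_ids=[0, 0])
193 |         # each track is a tuple: (frame_id, track_id, bb_left, bb_top, bb_width, bb_height, conf, x, y, z)
194 |         print(tracks)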
--------------------------------------------------------------------------------
/motrackers/tracker_img.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import time
3 | import cv2
4 | 
5 | 
6 | ap = argparse.ArgumentParser()
7 | ap.add_argument("-v", "--video", type=str, default='../examples/video_data/cars.mp4', help="path to input video file")
8 | ap.add_argument("-t", "--tracker", type=str, default="kcf", help="OpenCV object tracker type")
9 | args = vars(ap.parse_args())
10 | 
11 | # NOTE: in OpenCV >= 4.5, several of these constructors (Boosting, TLD, MedianFlow, MOSSE) and
12 | # cv2.MultiTracker_create are only available as cv2.legacy.* in the opencv-contrib-python build.
13 | OPENCV_OBJECT_TRACKERS = {
14 |     "csrt": cv2.TrackerCSRT_create,
15 |     "kcf": cv2.TrackerKCF_create,
16 |     "boosting": cv2.TrackerBoosting_create,
17 |     "mil": cv2.TrackerMIL_create,
18 |     "tld": cv2.TrackerTLD_create,
19 |     "medianflow": cv2.TrackerMedianFlow_create,
20 |     "mosse": cv2.TrackerMOSSE_create
21 | }
22 | 
23 | trackers = cv2.MultiTracker_create()
24 | 
25 | vs = cv2.VideoCapture(args["video"])
26 | 
27 | while True:
28 |     ok, frame = vs.read()
29 |     if not ok:
30 |         break
31 | 
32 |     # resize the frame (so we can process it faster)
33 |     frame = cv2.resize(frame, (600, 400))
34 | 
35 |     (success, boxes) = trackers.update(frame)
36 |     print(success)
37 | 
38 |     for box in boxes:
39 |         (x, y, w, h) = [int(v) for v in box]
40 |         cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
41 | 
42 |     cv2.imshow("Frame", frame)
43 |     key = cv2.waitKey(1) & 0xFF
44 | 
45 |     # if the 's' key is selected, we are going to "select" a bounding
46 |     # box to track
47 |     if key == ord("s"):
48 |         # select the bounding box of the object we want to track (make
49 |         # sure you press ENTER or SPACE after selecting the ROI)
50 |         box = cv2.selectROI("Frame", frame, fromCenter=False, showCrosshair=True)
51 | 
52 |         # create a new object tracker for the bounding box and add it to our multi-object tracker
53 |         tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
54 |         trackers.add(tracker, frame, box)
55 | 
56 |     elif key == ord("q"):  # if the `q` key was pressed, break from the loop
57 |         break
58 | 
59 |     time.sleep(0.1)
60 | 
61 | # release the video file (or webcam) pointer
62 | vs.release()
63 | 
64 | # close all windows
65 | cv2.destroyAllWindows()
66 | 
--------------------------------------------------------------------------------
/motrackers/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .filechooser_utils import select_caffemodel
2 | from .filechooser_utils import select_videofile
3 | from .filechooser_utils import select_yolo_model
4 | from .filechooser_utils import select_tfmobilenet
5 | from .misc import iou
6 | from .misc import get_centroid
7 | from .misc import draw_tracks
--------------------------------------------------------------------------------
/motrackers/utils/filechooser_utils.py:
--------------------------------------------------------------------------------
1 | from ipyfilechooser import FileChooser
2 | 
3 | 
4 | def create_filechooser(default_path="~/", html_title="Select File", use_dir_icons=True):
5 |     fc = FileChooser(default_path)
6 |     fc.title = html_title
7 |     fc.use_dir_icons = use_dir_icons
8 |     return fc
9 | 
10 | 
11 | def select_caffemodel_prototxt(default_path="~/", use_dir_icons=True):
12 |     html_title = 'Select .prototxt file for the caffemodel:'
13 |     fc = create_filechooser(default_path=default_path,
14 |                             html_title=html_title,
15 |                             use_dir_icons=use_dir_icons)
16 |     return fc
17 | 
18 | 
19 | def select_caffemodel_weights(default_path="~/", use_dir_icons=True):
20 |     html_title = 'Select caffemodel weights (file with extension .caffemodel):'
21 |     fc = create_filechooser(default_path=default_path,
22 |                             html_title=html_title,
23 |                             use_dir_icons=use_dir_icons)
24 |     return fc
25 | 
26 | 
27 | def select_caffemodel(default_path="~/", use_dir_icons=True):
28 |     prototxt = select_caffemodel_prototxt(default_path=default_path, use_dir_icons=use_dir_icons)
29 |     weights = select_caffemodel_weights(default_path=default_path, use_dir_icons=use_dir_icons)
30 |     return prototxt, weights
31 | 
32 | 
33 | def select_videofile(default_path="~/", use_dir_icons=True):
34 |     html_title = 'Select video file:'
35 |     fc = create_filechooser(default_path=default_path,
36 |                             html_title=html_title,
37 |                             use_dir_icons=use_dir_icons)
38 |     return fc
39 | 
40 | 
41 | def select_yolo_weights(default_path="~/", use_dir_icons=True):
42 |     html_title = 'Select YOLO weights (.weights file):'
43 |     fc = create_filechooser(default_path=default_path,
44 |                             html_title=html_title,
45 |                             use_dir_icons=use_dir_icons)
46 |     return fc
47 | 
48 | 
49 | def select_coco_labels(default_path="~/", use_dir_icons=True):
50 |     html_title = 'Select coco labels file (.names file):'
51 |     fc = create_filechooser(default_path=default_path,
52 |                             html_title=html_title,
53 |                             use_dir_icons=use_dir_icons)
54 |     return fc
55 | 
56 | 
57 | def select_yolo_config(default_path="~/", use_dir_icons=True):
58 |     html_title = 'Choose YOLO config file (.cfg file):'
59 |     fc = create_filechooser(default_path=default_path,
60 |                             html_title=html_title,
61 |                             use_dir_icons=use_dir_icons)
62 |     return fc
63 | 
64 | 
65 | def select_yolo_model(default_path="~/", use_dir_icons=True):
66 |     yolo_weights = select_yolo_weights(default_path, use_dir_icons)
67 |     yolo_config = select_yolo_config(default_path, use_dir_icons)
68 |     coco_names = select_coco_labels(default_path, use_dir_icons)
69 |     return yolo_weights, yolo_config, coco_names
70 | 
71 | 
72 | def select_pbtxt(default_path="~/", use_dir_icons=True):
73 |     html_title = 'Select .pbtxt file:'
74 |     fc = create_filechooser(default_path=default_path,
75 |                             html_title=html_title,
76 |                             use_dir_icons=use_dir_icons)
77 |     return fc
78 | 
79 | 
80 | def select_tfmobilenet_weights(default_path="~/", use_dir_icons=True):
81 |     html_title = 'Select tf-frozen graph of mobilenet (.pb file):'
82 |     fc = create_filechooser(default_path=default_path,
83 |                             html_title=html_title,
84 |                             use_dir_icons=use_dir_icons)
85 |     return fc
86 | 
87 | 
88 | def select_tfmobilenet(default_path="~/", use_dir_icons=True):
89 |     pbtxt = select_pbtxt(default_path, use_dir_icons)
90 |     tfweights = select_tfmobilenet_weights(default_path, use_dir_icons)
91 |     return pbtxt, tfweights
92 | 
--------------------------------------------------------------------------------
/motrackers/utils/misc.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2 as cv
3 | 
4 | 
5 | def get_centroid(bboxes):
6 |     """
7 |     Calculate centroids for multiple bounding boxes.
8 | 
9 |     Args:
10 |         bboxes (numpy.ndarray): Array of shape `(n, 4)` or of shape `(4,)` where
11 |             each row contains `(xmin, ymin, width, height)`.
12 | 
13 |     Returns:
14 |         numpy.ndarray: Centroid (x, y) coordinates of shape `(n, 2)` or `(2,)`.
15 | 
16 |     """
17 | 
18 |     one_bbox = False
19 |     if len(bboxes.shape) == 1:
20 |         one_bbox = True
21 |         bboxes = bboxes[None, :]
22 | 
23 |     xmin = bboxes[:, 0]
24 |     ymin = bboxes[:, 1]
25 |     w, h = bboxes[:, 2], bboxes[:, 3]
26 | 
27 |     xc = xmin + 0.5*w
28 |     yc = ymin + 0.5*h
29 | 
30 |     x = np.hstack([xc[:, None], yc[:, None]])
31 | 
32 |     if one_bbox:
33 |         x = x.flatten()
34 |     return x
35 | 
36 | 
37 | def iou(bbox1, bbox2):
38 |     """
39 |     Calculates the intersection-over-union of two bounding boxes.
40 |     Source: https://github.com/bochinski/iou-tracker/blob/master/util.py
41 | 
42 |     Args:
43 |         bbox1 (numpy.array or list[floats]): Bounding box of length 4 containing
44 |             ``(x-top-left, y-top-left, x-bottom-right, y-bottom-right)``.
45 |         bbox2 (numpy.array or list[floats]): Bounding box of length 4 containing
46 |             ``(x-top-left, y-top-left, x-bottom-right, y-bottom-right)``.
47 | 
48 |     Returns:
49 |         float: intersection-over-union of bbox1, bbox2.
50 |     """
51 | 
52 |     bbox1 = [float(x) for x in bbox1]
53 |     bbox2 = [float(x) for x in bbox2]
54 | 
55 |     (x0_1, y0_1, x1_1, y1_1), (x0_2, y0_2, x1_2, y1_2) = bbox1, bbox2
56 | 
57 |     # get the overlap rectangle
58 |     overlap_x0 = max(x0_1, x0_2)
59 |     overlap_y0 = max(y0_1, y0_2)
60 |     overlap_x1 = min(x1_1, x1_2)
61 |     overlap_y1 = min(y1_1, y1_2)
62 | 
63 |     # check if there is an overlap
64 |     if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:
65 |         return 0.0
66 | 
67 |     # if yes, calculate the ratio of the overlap to each ROI size and the unified size
68 |     size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)
69 |     size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)
70 |     size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)
71 |     size_union = size_1 + size_2 - size_intersection
72 | 
73 |     iou_ = size_intersection / size_union
74 | 
75 |     return iou_
76 | 
77 | 
78 | def iou_xywh(bbox1, bbox2):
79 |     """
80 |     Calculates the intersection-over-union of two bounding boxes.
81 |     Source: https://github.com/bochinski/iou-tracker/blob/master/util.py
82 | 
83 |     Args:
84 |         bbox1 (numpy.array or list[floats]): bounding box of length 4 containing ``(x-top-left, y-top-left, width, height)``.
85 |         bbox2 (numpy.array or list[floats]): bounding box of length 4 containing ``(x-top-left, y-top-left, width, height)``.
86 | 
87 |     Returns:
88 |         float: intersection-over-union of bbox1, bbox2.
89 |     """
90 |     bbox1 = bbox1[0], bbox1[1], bbox1[0]+bbox1[2], bbox1[1]+bbox1[3]
91 |     bbox2 = bbox2[0], bbox2[1], bbox2[0]+bbox2[2], bbox2[1]+bbox2[3]
92 | 
93 |     iou_ = iou(bbox1, bbox2)
94 | 
95 |     return iou_
96 | 
97 | 
98 | def xyxy2xywh(xyxy):
99 |     """
100 |     Convert bounding box coordinates from (xmin, ymin, xmax, ymax) format to (xmin, ymin, width, height).
101 | 
102 |     Args:
103 |         xyxy (numpy.ndarray): Bounding box coordinates as `(xmin, ymin, xmax, ymax)`.
104 | 
105 |     Returns:
106 |         numpy.ndarray: Bounding box coordinates (xmin, ymin, width, height).
107 | 
108 |     """
109 | 
110 |     if len(xyxy.shape) == 2:
111 |         w, h = xyxy[:, 2] - xyxy[:, 0] + 1, xyxy[:, 3] - xyxy[:, 1] + 1  # +1: corners treated as pixel-inclusive
112 |         xywh = np.concatenate((xyxy[:, 0:2], w[:, None], h[:, None]), axis=1)
113 |         return xywh.astype("int")
114 |     elif len(xyxy.shape) == 1:
115 |         (left, top, right, bottom) = xyxy
116 |         width = right - left + 1
117 |         height = bottom - top + 1
118 |         return np.array([left, top, width, height]).astype('int')
119 |     else:
120 |         raise ValueError("Input shape not compatible.")
121 | 
122 | 
123 | def xywh2xyxy(xywh):
124 |     """
125 |     Convert bounding box coordinates from (xmin, ymin, width, height) to (xmin, ymin, xmax, ymax) format.
126 | 
127 |     Args:
128 |         xywh (numpy.ndarray): Bounding box coordinates as `(xmin, ymin, width, height)`.
129 | 
130 |     Returns:
131 |         numpy.ndarray: Bounding box coordinates as `(xmin, ymin, xmax, ymax)`.
132 | 
133 |     """
134 | 
135 |     if len(xywh.shape) == 2:
136 |         x = xywh[:, 0] + xywh[:, 2]
137 |         y = xywh[:, 1] + xywh[:, 3]
138 |         xyxy = np.concatenate((xywh[:, 0:2], x[:, None], y[:, None]), axis=1).astype('int')
139 |         return xyxy
140 |     if len(xywh.shape) == 1:
141 |         x, y, w, h = xywh
142 |         xr = x + w
143 |         yb = y + h
144 |         return np.array([x, y, xr, yb]).astype('int')
145 | 
146 | 
147 | def midwh2xywh(midwh):
148 |     """
149 |     Convert bounding box coordinates from (xmid, ymid, width, height) to (xmin, ymin, width, height) format.
150 | 
151 |     Args:
152 |         midwh (numpy.ndarray): Bounding box coordinates (xmid, ymid, width, height).
153 | 
154 |     Returns:
155 |         numpy.ndarray: Bounding box coordinates (xmin, ymin, width, height).
156 |     """
157 | 
158 |     if len(midwh.shape) == 2:
159 |         xymin = midwh[:, 0:2] - midwh[:, 2:] * 0.5
160 |         wh = midwh[:, 2:]
161 |         xywh = np.concatenate([xymin, wh], axis=1).astype('int')
162 |         return xywh
163 |     if len(midwh.shape) == 1:
164 |         xmid, ymid, w, h = midwh
165 |         xywh = np.array([xmid-w*0.5, ymid-h*0.5, w, h]).astype('int')
166 |         return xywh
167 | 
168 | 
169 | def intersection_complement_indices(big_set_indices, small_set_indices):
170 |     """
171 |     Get the complement of intersection of two sets of indices.
172 | 
173 |     Args:
174 |         big_set_indices (numpy.ndarray): Indices of big set.
175 |         small_set_indices (numpy.ndarray): Indices of small set.
176 | 
177 |     Returns:
178 |         numpy.ndarray: Indices of set which is complementary to intersection of two input sets.
179 |     """
180 |     assert big_set_indices.shape[0] >= small_set_indices.shape[0]
181 |     n = len(big_set_indices)
182 |     mask = np.ones((n,), dtype=bool)
183 |     mask[small_set_indices] = False
184 |     intersection_complement = big_set_indices[mask]
185 |     return intersection_complement
186 | 
187 | 
188 | def nms(boxes, scores, overlapThresh, classes=None):
189 |     """
190 |     Non-maximum suppression, based on Malisiewicz et al.
191 | 
192 |     Args:
193 |         boxes (numpy.ndarray): Boxes to process (xmin, ymin, xmax, ymax)
194 |         scores (numpy.ndarray): Corresponding scores for each box
195 |         overlapThresh (float): Overlap threshold for boxes to merge
196 |         classes (numpy.ndarray, optional): Class ids for each box.
197 | 
198 |     Returns:
199 |         tuple: a tuple containing:
200 |             - boxes (list): nms boxes
201 |             - scores (list): nms scores
202 |             - classes (list, optional): nms classes if specified
203 | 
204 |     """
205 | 
206 |     if boxes.dtype.kind == "i":
207 |         boxes = boxes.astype("float")
208 | 
209 |     if scores.dtype.kind == "i":
210 |         scores = scores.astype("float")
211 | 
212 |     pick = []
213 | 
214 |     x1 = boxes[:, 0]
215 |     y1 = boxes[:, 1]
216 |     x2 = boxes[:, 2]
217 |     y2 = boxes[:, 3]
218 |     area = (x2 - x1 + 1) * (y2 - y1 + 1)
219 | 
220 |     idxs = np.argsort(scores)
221 | 
222 |     while len(idxs) > 0:
223 |         last = len(idxs) - 1
224 |         i = idxs[last]
225 |         pick.append(i)
226 | 
227 |         xx1 = np.maximum(x1[i], x1[idxs[:last]])
228 |         yy1 = np.maximum(y1[i], y1[idxs[:last]])
229 |         xx2 = np.minimum(x2[i], x2[idxs[:last]])
230 |         yy2 = np.minimum(y2[i], y2[idxs[:last]])
231 | 
232 |         w = np.maximum(0, xx2 - xx1 + 1)
233 |         h = np.maximum(0, yy2 - yy1 + 1)
234 | 
235 |         overlap = (w * h) / area[idxs[:last]]
236 | 
237 |         # delete all indexes from the index list that have overlap greater than the threshold
238 |         idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))
239 | 
240 |     if classes is not None:
241 |         return boxes[pick], scores[pick], classes[pick]
242 |     else:
243 |         return boxes[pick], scores[pick]
244 | 
245 | 
246 | def draw_tracks(image, tracks):
247 |     """
248 |     Draw track annotations on the input image.
249 | 
250 |     Args:
251 |         image (numpy.ndarray): image
252 |         tracks (list): list of tracks to be drawn on the image.
253 | 
254 |     Returns:
255 |         numpy.ndarray: image with the track-ids drawn on it.
256 |     """
257 | 
258 |     for trk in tracks:
259 | 
260 |         trk_id = trk[1]
261 |         xmin = trk[2]
262 |         ymin = trk[3]
263 |         width = trk[4]
264 |         height = trk[5]
265 | 
266 |         xcentroid, ycentroid = int(xmin + 0.5*width), int(ymin + 0.5*height)
267 | 
268 |         text = "ID {}".format(trk_id)
269 | 
270 |         cv.putText(image, text, (xcentroid - 10, ycentroid - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
271 |         cv.circle(image, (xcentroid, ycentroid), 4, (0, 255, 0), -1)
272 | 
273 |     return image
274 | 
275 | 
276 | def load_labelsjson(json_file):
277 |     import json
278 |     with open(json_file) as file:
279 |         data = json.load(file)
280 |     labels = {int(k): v for k, v in data.items()}
281 |     return labels
282 | 
283 | 
284 | def dict2jsonfile(dict_data, json_file_path):
285 |     import json
286 |     with open(json_file_path, 'w') as outfile:
287 |         json.dump(dict_data, outfile)
288 | 
289 | 
290 | if __name__ == '__main__':
291 |     bb = np.random.randint(0, 101, size=(20,)).reshape((5, 4))  # np.random.random_integers is removed in recent NumPy
292 |     c = get_centroid(bb)
293 |     print(bb, c)
294 | 
295 |     bb2 = np.array([1, 2, 3, 4])
296 |     c2 = get_centroid(bb2)
297 |     print(bb2, c2)
298 | 
299 |     data = {
300 |         0: 'background', 1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat', 5: 'bottle', 6: 'bus',
301 |         7: 'car', 8: 'cat', 9: 'chair', 10: 'cow', 11: 'diningtable', 12: 'dog', 13: 'horse', 14: 'motorbike',
302 |         15: 'person', 16: 'pottedplant', 17: 'sheep', 18: 'sofa', 19: 'train', 20: 'tvmonitor'
303 |     }
304 |     dict2jsonfile(data, '../../examples/pretrained_models/caffemodel_weights/ssd_mobilenet_caffe_names.json')
305 | 
306 | 
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "motrackers"
3 | version = "0.0.2"
4 | description = "Multi-object trackers in Python"
5 | authors = [
Deshpande", email = "adityadeshpande2010@gmail.com"} 7 | ] 8 | license = {file = "LICENSE.txt"} 9 | readme = "README.md" 10 | requires-python = ">3.6" 11 | 12 | keywords = ["tracking", "object", "multi-object", "python"] 13 | 14 | classifiers = [ 15 | "Programming Language :: Python :: 3" 16 | ] 17 | 18 | dependencies = [ 19 | "numpy", 20 | "scipy", 21 | "matplotlib", 22 | "opencv-python", 23 | "pandas", 24 | "motmetrics", 25 | "setuptools", 26 | "ipyfilechooser" 27 | ] 28 | 29 | [project.optional-dependencies] 30 | docs = [ 31 | 'sphinx', 32 | 'm2r2' 33 | ] 34 | 35 | [project.urls] 36 | homepath = "https://adipandas.github.io/multi-object-tracker" 37 | repository = "https://github.com/adipandas/multi-object-tracker" 38 | 39 | [build-system] 40 | requires = ["setuptools", "wheel"] 41 | build-backend = "setuptools.build_meta" 42 | 43 | # [https://setuptools.pypa.io/en/stable/userguide/datafiles.html](https://setuptools.pypa.io/en/stable/userguide/datafiles.html) 44 | 45 | [tool.setuptools] 46 | include-package-data = true 47 | 48 | [tool.setuptools.packages.find] 49 | where = ["."] 50 | --------------------------------------------------------------------------------