├── .github
│   └── workflows
│       ├── continuous-integration.yml
│       ├── joss_pdf.yml
│       └── publish-on-release.yml
├── .gitignore
├── CITATION.cff
├── Content
│   ├── Demo_plots.png
│   ├── Demo_results.png
│   ├── Demo_terminal.png
│   ├── Person_selection.png
│   ├── Video_tuto_Sports2D_Colab.png
│   ├── joint_convention.png
│   ├── paper.bib
│   ├── paper.md
│   ├── sports2d_blender.gif
│   └── sports2d_opensim.gif
├── LICENSE
├── README.md
├── Sports2D
│   ├── Demo
│   │   ├── Config_demo.toml
│   │   └── demo.mp4
│   ├── Sports2D.ipynb
│   ├── Sports2D.py
│   ├── Utilities
│   │   ├── __init__.py
│   │   ├── common.py
│   │   ├── filter.py
│   │   └── tests.py
│   ├── __init__.py
│   └── process.py
└── pyproject.toml
/.github/workflows/continuous-integration.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 |
4 | name: Build on Win-MacOS-Ubuntu with Python 3.10-3.11
5 |
6 | on:
7 |   push:
8 |     branches: [ "main" ]
9 |     paths-ignore:
10 |       - 'README.md'
11 |       - 'Content/**'
12 |   pull_request:
13 |     branches: [ "main" ]
14 |
15 | permissions:
16 |   contents: read
17 |
18 | jobs:
19 |   build:
20 |     runs-on: ${{ matrix.os }}
21 |     timeout-minutes: 60
22 |
23 |     strategy:
24 |       fail-fast: false
25 |       matrix:
26 |         os: [ubuntu-latest, windows-latest, macos-latest, macos-13]
27 |         python-version: ["3.10", "3.11"]
28 |         include:
29 |           - os: ubuntu-latest
30 |             cache-path: ~/.cache/pip
31 |           - os: windows-latest
32 |             cache-path: C:\Users\runneradmin\AppData\Local\pip\Cache
33 |           - os: macos-latest
34 |             cache-path: ~/Library/Caches/pip
35 |           - os: macos-13
36 |             cache-path: ~/Library/Caches/pip
37 |
38 |     steps:
39 |       - name: Checkout code
40 |         uses: actions/checkout@v4
41 |
42 |       - name: Cache conda environment
43 |         uses: actions/cache@v4
44 |         with:
45 |           path: ~/.conda
46 |           key: ${{ runner.os }}-conda-${{ matrix.python-version }}-${{ hashFiles('**/environment.yml') }}
47 |           restore-keys: ${{ runner.os }}-conda-${{ matrix.python-version }}-
48 |
49 |       - name: Install Miniconda
50 |         uses: conda-incubator/setup-miniconda@v3
51 |         with:
52 |           auto-update-conda: true
53 |           python-version: ${{ matrix.python-version }}
54 |           activate-environment: sports2d
55 |
56 |       - name: Install OpenSim with conda
57 |         shell: bash -l {0}
58 |         run: |
59 |           conda install -n sports2d -c conda-forge -c defaults pip
60 |           conda install -n sports2d opensim-org::opensim -y
61 |         timeout-minutes: 15
62 |
63 |       - name: Cache pip dependencies
64 |         uses: actions/cache@v4
65 |         with:
66 |           path: ${{ matrix.cache-path }}
67 |           key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }}
68 |           restore-keys: ${{ runner.os }}-pip-${{ matrix.python-version }}-
69 |
70 |       - name: Install pip dependencies
71 |         shell: bash -l {0}
72 |         run: |
73 |           conda run -n sports2d python -m ensurepip --upgrade
74 |           conda run -n sports2d python -m pip install --upgrade pip setuptools wheel
75 |           conda run -n sports2d python -m pip install flake8 pytest
76 |           conda run -n sports2d python -m pip install git+https://github.com/${{ github.repository }}.git@${{ github.sha }}
77 |           # conda run -n sports2d python -m pip install torch torchvision
78 |         timeout-minutes: 10
79 |
80 |       - name: Lint with flake8
81 |         shell: bash -l {0}
82 |         run: |
83 |           conda activate sports2d
84 |           # stop the build if there are Python syntax errors or undefined names
85 |           conda run -n sports2d flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
86 |           # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
87 |           conda run -n sports2d flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
88 |         timeout-minutes: 5
89 |
90 |       - name: Test with pytest
91 |         shell: bash -l {0}
92 |         env:
93 |           PYTHONIOENCODING: utf-8
94 |           PYTHONUNBUFFERED: "1"
95 |           KMP_DUPLICATE_LIB_OK: TRUE
96 |         run: |
97 |           conda activate sports2d
98 |           pytest -v Sports2D/Utilities/tests.py --capture=sys
99 |         timeout-minutes: 20
100 |
--------------------------------------------------------------------------------
/.github/workflows/joss_pdf.yml:
--------------------------------------------------------------------------------
1 | name: JOSS pdf constructor
2 |
3 | on:
4 |   push:
5 |     paths:
6 |       - 'Content/paper.md'
7 |       - 'Content/paper.bib'
8 |
9 | jobs:
10 |   paper:
11 |     runs-on: ubuntu-latest
12 |     name: Paper Draft
13 |     steps:
14 |       - name: Checkout
15 |         uses: actions/checkout@v2
16 |       - name: Build draft PDF
17 |         uses: openjournals/openjournals-draft-action@master
18 |         with:
19 |           journal: joss
20 |           # This should be the path to the paper within your repo.
21 |           paper-path: Content/paper.md
22 |       - name: Upload
23 |         uses: actions/upload-artifact@v4
24 |         with:
25 |           name: paper
26 |           # This is the output path where Pandoc will write the compiled
27 |           # PDF. Note, this should be the same directory as the input
28 |           # paper.md
29 |           path: Content/paper.pdf
30 |
--------------------------------------------------------------------------------
/.github/workflows/publish-on-release.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package to PyPI upon release
10 |
11 | on:
12 |   release:
13 |     types: [published]
14 |
15 | permissions:
16 |   contents: read
17 |
18 | jobs:
19 |   deploy:
20 |
21 |     runs-on: ubuntu-latest
22 |
23 |     steps:
24 |       - name: Checkout
25 |         uses: actions/checkout@v4
26 |       - name: Set up Python
27 |         uses: actions/setup-python@v5
28 |         with:
29 |           python-version: "3.10"
30 |       - name: Install dependencies
31 |         run: |
32 |           python -m pip install --upgrade pip
33 |           pip install build setuptools-scm
34 |       - name: Build package
35 |         run: python -m build
36 |       - name: Publish package
37 |         uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
38 |         with:
39 |           user: __token__
40 |           password: ${{ secrets.PYPI_API_TOKEN }}
41 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **/__pycache__/
2 | **/build/
3 | *.pyc
4 | logs.txt*
5 | **/*.log
6 | *.egg-info/
7 | dist/
8 | demo_Sports2D/**
9 | **/Demo/*.jpg
10 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: "1.2.0"
2 | authors:
3 | - family-names: Pagnon
4 |   given-names: David
5 |   orcid: "https://orcid.org/0000-0002-6891-8331"
6 | - family-names: Kim
7 |   given-names: HunMin
8 |   orcid: "https://orcid.org/0009-0007-7710-8051"
9 | contact:
10 | - family-names: Pagnon
11 |   given-names: David
12 |   orcid: "https://orcid.org/0000-0002-6891-8331"
13 | doi: 10.5281/zenodo.7903962
14 | message: If you use this software, please cite our article in the
15 |   Journal of Open Source Software.
16 | preferred-citation:
17 |   authors:
18 |   - family-names: Pagnon
19 |     given-names: David
20 |     orcid: "https://orcid.org/0000-0002-6891-8331"
21 |   - family-names: Kim
22 |     given-names: HunMin
23 |     orcid: "https://orcid.org/0009-0007-7710-8051"
24 |   date-published: 2024-09-24
25 |   doi: 10.21105/joss.06849
26 |   issn: 2475-9066
27 |   issue: 101
28 |   journal: Journal of Open Source Software
29 |   publisher:
30 |     name: Open Journals
31 |   start: 6849
32 |   title: "Sports2D: Compute 2D human pose and angles from a video or a
33 |     webcam"
34 |   type: article
35 |   url: "https://joss.theoj.org/papers/10.21105/joss.06849"
36 |   volume: 9
37 | title: "Sports2D: Compute 2D human pose and angles from a video or a
38 |   webcam"
39 |
--------------------------------------------------------------------------------
/Content/Demo_plots.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/davidpagnon/Sports2D/cdd80ecac898736a8d533d8098e4a982b8244d6a/Content/Demo_plots.png
--------------------------------------------------------------------------------
/Content/Demo_results.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/davidpagnon/Sports2D/cdd80ecac898736a8d533d8098e4a982b8244d6a/Content/Demo_results.png
--------------------------------------------------------------------------------
/Content/Demo_terminal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/davidpagnon/Sports2D/cdd80ecac898736a8d533d8098e4a982b8244d6a/Content/Demo_terminal.png
--------------------------------------------------------------------------------
/Content/Person_selection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/davidpagnon/Sports2D/cdd80ecac898736a8d533d8098e4a982b8244d6a/Content/Person_selection.png
--------------------------------------------------------------------------------
/Content/Video_tuto_Sports2D_Colab.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/davidpagnon/Sports2D/cdd80ecac898736a8d533d8098e4a982b8244d6a/Content/Video_tuto_Sports2D_Colab.png
--------------------------------------------------------------------------------
/Content/joint_convention.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/davidpagnon/Sports2D/cdd80ecac898736a8d533d8098e4a982b8244d6a/Content/joint_convention.png
--------------------------------------------------------------------------------
/Content/paper.bib:
--------------------------------------------------------------------------------
1 | @article{Bazarevsky_2020,
2 | title={Blazepose: On-device real-time body pose tracking},
3 | author={Bazarevsky, Valentin and Grishchenko, Ivan and Raveendran, Karthik and Zhu, Tyler and Zhang, Fan and Grundmann, Matthias},
4 | DOI={10.48550/arXiv.2006.10204},
5 | journal={arXiv preprint arXiv:2006.10204},
6 | year={2020}
7 | }
8 |
9 | @article{Bisong_2019,
10 | title={Google colaboratory},
11 | author={Bisong, Ekaba},
12 | DOI={10.1007/978-1-4842-4470-8},
13 | journal={Building machine learning and deep learning models on google cloud platform: a comprehensive guide for beginners},
14 | pages={59--64},
15 | year={2019},
16 | publisher={Springer}
17 | }
18 |
19 | @article{Boswell_2023,
20 | title={Smartphone videos of the sit-to-stand test predict osteoarthritis and health outcomes in a nationwide study},
21 | author={Boswell, Melissa A and Kidzi{\'n}ski, {\L}ukasz and Hicks, Jennifer L and Uhlrich, Scott D and Falisse, Antoine and Delp, Scott L},
22 | DOI={10.1038/s41746-023-00775-1},
23 | journal={npj Digital Medicine},
24 | volume={6},
25 | number={1},
26 | pages={32},
27 | year={2023},
28 | publisher={Nature Publishing Group UK London}
29 | }
30 |
31 | @article{Bradski_2000,
32 | author = {Bradski, G.},
33 | citeulike-article-id = {2236121},
34 | journal = {Dr. Dobb's Journal of Software Tools},
35 | keywords = {bibtex-import},
36 | posted-at = {2008-01-15 19:21:54},
37 | priority = {4},
38 | title = {The OpenCV Library},
39 | year = {2000}
40 | }
41 |
42 | @article{Bright_2012,
43 | title={Effect of clinical decision-support systems: a systematic review},
44 | author={Bright, Tiffani J and Wong, Anthony and Dhurjati, Ravi and Bristow, Erin and Bastian, Lori and Coeytaux, Remy R and Samsa, Gregory and Hasselblad, Vic and Williams, John W and Musty, Michael D and others},
45 | DOI={10.7326/0003-4819-157-1-201207030-00450},
46 | journal={Annals of internal medicine},
47 | volume={157},
48 | number={1},
49 | pages={29--43},
50 | year={2012},
51 | publisher={American College of Physicians}
52 | }
53 |
54 | @article{Butterworth_1930,
55 | title={On the theory of filter amplifiers},
56 | author={Butterworth, Stephen},
57 | journal={Wireless Engineer},
58 | volume={7},
59 | number={6},
60 | pages={536--541},
61 | year={1930}
62 | }
63 |
64 | @article{Cao_2019,
65 | title={OpenPose: realtime multi-person 2D pose estimation using Part Affinity Fields},
66 | author={Cao, Zhe and Hidalgo, Gines and Simon, Tomas and Wei, Shih-En and Sheikh, Yaser},
67 | journal={IEEE transactions on pattern analysis and machine intelligence},
68 | volume={43},
69 | number={1},
70 | pages={172--186},
71 | year={2019},
72 | URL = {https://arxiv.org/abs/1611.08050},
73 | DOI = {10.1109/TPAMI.2019.2929257},
74 | publisher={IEEE}
75 | }
76 |
77 | @article{Cleveland_1981,
78 | title={LOWESS: A program for smoothing scatterplots by robust locally weighted regression},
79 | author={Cleveland, William S},
80 | DOI={10.2307/2683591},
81 | journal={American Statistician},
82 | volume={35},
83 | number={1},
84 | pages={54},
85 | year={1981}
86 | }
87 |
88 | @article{Colyer_2018,
89 | title={A review of the evolution of vision-based motion analysis and the integration of advanced computer vision methods towards developing a markerless system},
90 | author={Colyer, Steffi L and Evans, Murray and Cosker, Darren P and Salo, Aki IT},
91 | journal={Sports medicine-open},
92 | DOI={10.1186/s40798-018-0139-y},
93 | volume={4},
94 | number={1},
95 | pages={1--15},
96 | year={2018},
97 | publisher={SpringerOpen}
98 | }
99 |
100 | @article{Delp_2007,
101 | title={OpenSim: open-source software to create and analyze dynamic simulations of movement},
102 | author={Delp, Scott L and Anderson, Frank C and Arnold, Allison S and Loan, Peter and Habib, Ayman and John, Chand T and Guendelman, Eran and Thelen, Darryl G},
103 | journal={IEEE transactions on biomedical engineering},
104 | volume={54},
105 | number={11},
106 | pages={1940--1950},
107 | year={2007},
108 | URL = {https://ieeexplore.ieee.org/abstract/document/4352056},
109 | DOI = {10.1109/TBME.2007.901024},
110 | publisher={IEEE}
111 | }
112 |
113 | @article{Di_2021,
114 | title={A 2D qualitative movement assessment of a deceleration task detects football players with high knee joint loading},
115 | author={Di Paolo, Stefano and Zaffagnini, Stefano and Tosarelli, Filippo and Aggio, Fabrizio and Bragonzoni, Laura and Grassi, Alberto and Della Villa, Francesco},
116 | DOI={10.1007/s00167-021-06709-2},
117 | journal={Knee Surgery, Sports Traumatology, Arthroscopy},
118 | volume={29},
119 | pages={4032--4040},
120 | year={2021},
121 | publisher={Springer}
122 | }
123 |
124 | @article{Jiang_2023,
125 | title={RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose},
126 | author={Tao Jiang and Peng Lu and Li Zhang and Ningsheng Ma and Rui Han and Chengqi Lyu and Yining Li and Kai Chen},
127 | DOI={10.48550/arXiv.2303.07399},
128 | journal={arXiv},
129 | year={2023},
130 | eprint={2303.07399},
131 | archivePrefix={arXiv},
132 | primaryClass={cs.CV},
133 | url={https://arxiv.org/abs/2303.07399},
134 | }
135 |
136 | @article{Kidzinski_2020,
137 | title={Deep neural networks enable quantitative movement analysis using single-camera videos},
138 | author={Kidzi{\'n}ski, {\L}ukasz and Yang, Bryan and Hicks, Jennifer L and Rajagopal, Apoorva and Delp, Scott L and Schwartz, Michael H},
139 | DOI={10.1038/s41467-020-17807-z},
140 | journal={Nature communications},
141 | volume={11},
142 | number={1},
143 | pages={4054},
144 | year={2020},
145 | publisher={Nature Publishing Group UK London}
146 | }
147 |
148 | @misc{Kinovea,
149 | author = {Kinovea},
150 | title = {Kinovea - A microscope for your videos},
151 | year = {},
152 | publisher = {GitHub},
153 | url = {https://www.kinovea.org/features.html},
154 | howpublished = {\url{https://www.kinovea.org/features.html}}
155 | }
156 |
157 | @article{Mathis_2018,
158 | title={DeepLabCut: markerless pose estimation of user-defined body parts with deep learning},
159 | author={Mathis, Alexander and Mamidanna, Pranav and Cury, Kevin M and Abe, Taiga and Murthy, Venkatesh N and Mathis, Mackenzie Weygandt and Bethge, Matthias},
160 | journal={Nature neuroscience},
161 | volume={21},
162 | number={9},
163 | pages={1281--1289},
164 | year={2018},
165 | URL = {https://www.nature.com/articles/s41593-018-0209-y},
166 | DOI = {10.1038/s41593-018-0209-y},
167 | publisher={Nature Publishing Group}
168 | }
169 |
170 | @article{Minssen_2020,
171 | title={Clinical trial data transparency and GDPR compliance: Implications for data sharing and open innovation},
172 | author={Minssen, Timo and Rajam, Neethu and Bogers, Marcel},
173 | DOI={10.2139/ssrn.3413035},
174 | journal={Science and Public Policy},
175 | volume={47},
176 | number={5},
177 | pages={616--626},
178 | year={2020},
179 | publisher={Oxford University Press}
180 | }
181 |
182 | @article{ODonoghue_2008,
183 | title={Principal components analysis in the selection of key performance indicators in sport},
184 | author={O’Donoghue, Peter},
185 | doi={10.1080/24748668.2008.11868456},
186 | journal={International Journal of Performance Analysis in Sport},
187 | volume={8},
188 | number={3},
189 | pages={145--155},
190 | year={2008},
191 | publisher={Taylor \& Francis}
192 | }
193 |
194 | @article{Pagnon_2022a,
195 | author = {Pagnon, David and {\sortcite{Pagnonb}}Domalain, Mathieu and Reveret, Lionel},
196 | title = {Pose2Sim: An open-source Python package for multiview markerless kinematics},
197 | journal = {Journal of Open Source Software},
198 | publisher = {The Open Journal},
199 | year = {2022},
200 | doi = {10.21105/joss.04362},
201 | url = {https://joss.theoj.org/papers/10.21105/joss.04362},
202 | volume = {7},
203 | number = {77},
204 | pages = {4362}
205 | }
206 |
207 | @inproceedings{Pagnon_2022b,
208 | title = {{A 3D markerless protocol with action cameras – Key performance indicators in boxing}},
209 | author = {Pagnon, David and {\sortcite{Pagnonc}}Domalain, Mathieu and Robert, Thomas and Lahkar, Bhrigu-Kumar and Moussa, Issa and Sauli{\`e}re, Guillaume and Goyallon, Thibault and Reveret, Lionel},
210 | booktitle={2022 Congress of the European College of Sport Science (ECSS)},
211 | url = {https://hal.archives-ouvertes.fr/hal-03790926},
212 | note = {Poster},
213 | year = {2022}
214 | }
215 |
216 | @article{Patrizi_2016,
217 | title={Comparison between low-cost marker-less and high-end marker-based motion capture systems for the computer-aided assessment of working ergonomics},
218 | author={Patrizi, Alfredo and Pennestr{\`\i}, Ettore and Valentini, Pier Paolo},
219 | DOI={10.1080/00140139.2015.1057238},
220 | journal={Ergonomics},
221 | volume={59},
222 | number={1},
223 | pages={155--162},
224 | year={2016},
225 | publisher={Taylor \& Francis}
226 | }
227 |
228 | @article{Seth_2018,
229 | DOI = {10.1371/journal.pcbi.1006223},
230 | author = {Seth, Ajay AND Hicks, Jennifer L. AND Uchida, Thomas K. AND Habib, Ayman AND Dembia, Christopher L. AND Dunne, James J. AND Ong, Carmichael F. AND DeMers, Matthew S. AND Rajagopal, Apoorva AND Millard, Matthew AND Hamner, Samuel R. AND Arnold, Edith M. AND Yong, Jennifer R. AND Lakshmikanth, Shrinidhi K. AND Sherman, Michael A. AND Ku, Joy P. AND Delp, Scott L.},
231 | journal = {PLOS Computational Biology},
232 | publisher = {Public Library of Science},
233 | title = {OpenSim: Simulating musculoskeletal dynamics and neuromuscular control to study human and animal movement},
234 | year = {2018},
235 | month = {07},
236 | volume = {14},
237 | url = {https://doi.org/10.1371/journal.pcbi.1006223},
238 | pages = {1-20},
239 | number = {7},
240 | }
241 |
242 | @article{Uhlrich_2022,
243 | title={OpenCap: 3D human movement dynamics from smartphone videos},
244 | url={https://www.biorxiv.org/content/10.1101/2022.07.07.499061v1},
245 | DOI={10.1101/2022.07.07.499061},
246 | publisher={bioRxiv},
247 | author={Uhlrich, Scott D. and Falisse, Antoine and Kidziński, Łukasz and Muccini, Julie and Ko, Michael and Chaudhari, Akshay S. and Hicks, Jennifer L. and Delp, Scott L.},
248 | year={2022},
249 | month={Jul},
250 | pages={2022.07.07.499061}
251 | }
252 |
253 | @article{Venkatesh_2012,
254 | title={Consumer acceptance and use of information technology: extending the unified theory of acceptance and use of technology},
255 | author={Venkatesh, Viswanath and Thong, James YL and Xu, Xin},
256 | DOI={10.2307/41410412},
257 | journal={MIS quarterly},
258 | pages={157--178},
259 | year={2012},
260 | publisher={JSTOR}
261 | }
262 |
263 | @article{Wade_2022,
264 | title={Applications and limitations of current markerless motion capture methods for clinical gait biomechanics},
265 | author={Wade, Logan and Needham, Laurie and McGuigan, Polly and Bilzon, James},
266 | DOI={10.7717/peerj.12995},
267 | journal={PeerJ},
268 | volume={10},
269 | pages={e12995},
270 | year={2022},
271 | publisher={PeerJ Inc.}
272 | }
273 |
274 | @book{Whittle_2014,
275 | title={Gait analysis: an introduction},
276 | author={Whittle, Michael W},
277 | year={2014},
278 | publisher={Butterworth-Heinemann}
279 | }
280 |
281 | @book{winter2009biomechanics,
282 | title={Biomechanics and motor control of human movement},
283 | author={Winter, David A},
284 | year={2009},
285 | publisher={John Wiley \& Sons}
286 | }
287 |
288 | @article{Zheng_2023,
289 | title={Deep learning-based human pose estimation: A survey},
290 | author={Zheng, Ce and Wu, Wenhan and Chen, Chen and Yang, Taojiannan and Zhu, Sijie and Shen, Ju and Kehtarnavaz, Nasser and Shah, Mubarak},
291 | DOI={10.1145/3603618},
292 | journal={ACM Computing Surveys},
293 | volume={56},
294 | number={1},
295 | pages={1--37},
296 | year={2023},
297 | publisher={ACM New York, NY}
298 | }
299 |
--------------------------------------------------------------------------------
/Content/paper.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'Sports2D: Compute 2D human pose and angles from a video or a webcam'
3 | tags:
4 |   - python
5 |   - markerless kinematics
6 |   - motion capture
7 |   - sports performance analysis
8 |   - rtmpose
9 |   - clinical gait analysis
10 | authors:
11 |   - name: David Pagnon^[corresponding author]
12 |     orcid: 0000-0002-6891-8331
13 |     affiliation: 1
14 |   - name: HunMin Kim
15 |     orcid: 0009-0007-7710-8051
16 |     affiliation: 2
17 | affiliations:
18 |   - name: Centre for the Analysis of Motion, Entertainment Research & Applications (CAMERA), University of Bath, Claverton Down, Bath, BA2 7AY, United Kingdom
19 |     index: 1
20 |   - name: Inha University, Yonghyeon Campus, 100 Inha-ro, Michuhol-gu, Incheon 22212, South Korea
21 |     index: 2
22 | date: February 14 2024
23 | bibliography: paper.bib
24 | ---
25 |
26 |
27 | # Summary
28 | `Sports2D` provides a user-friendly solution for automatic and real-time analysis of multi-person human movement from a video or a webcam. This Python package uses 2D markerless pose estimation to detect joint coordinates from videos, and then computes 2D joint and segment angles.
29 |
30 |
31 | The output includes annotated videos and image sequences overlaid with joint locations, joint angles, and segment angles for each detected person. For further analysis, this information is also stored in files that can be edited with MS Excel® or any other spreadsheet software (.trc for locations, .mot for angles, following the OpenSim standard [@Delp_2007; @Seth_2018]).
32 |
33 | `Sports2D` may be useful for clinicians as a clinical decision support system (CDSS) [@Bright_2012], as well as for gait analysis [@Whittle_2014] or ergonomic design [@Patrizi_2016]. Sports coaches can also use it to quantify key performance indicators (KPIs) [@ODonoghue_2008; @Pagnon_2022b], or to better understand, correct, or compare athletes' movement patterns. Finally, researchers can use it as a simple tool for on-the-fly 2D biomechanical analysis. One of many possible use cases would be evaluating ACL injury risk from deceleration drills [@Di_2021].
34 |
35 |
36 | # Statement of need
37 |
38 | Machine learning has recently accelerated the development and availability of markerless kinematics [@Zheng_2023; @Colyer_2018], which allows for the collection of kinematic data without the use of physical markers or manual annotation.
39 |
40 | A large part of these tools focus on 2D analysis, such as `OpenPose` [@Cao_2019], `BlazePose` [@Bazarevsky_2020], or `DeepLabCut` [@Mathis_2018]. More recently, `RTMPose` [@Jiang_2023] offered a faster, more accurate, and more flexible alternative to the previous solutions. Still, although they bear the advantage of being open-source, none of these options are easily accessible to people who do not have a programming background, and the output is not directly usable for further kinematic investigation. Yet, clinical acceptance of new technologies is known to be influenced not only by their price value and their performance, but also by their perceived ease-of-use, the social influence around the customer, and other parameters described by the Unified Theory of Acceptance and Use of Technology (UTAUT2) [@Venkatesh_2012].
41 |
42 | 
43 |
44 | 
45 |
46 | In fact, there is a clear trade-off between accuracy and ease-of-use. Some open-source tools focus on the accuracy of a 3D analysis by using multiple cameras, such as `Pose2Sim` [@Pagnon_2022a] or `OpenCap` [@Uhlrich_2022]. These, however, require either a certain level of programming skills, a particular hardware setup, or sending data to a server that does not comply with European data protection rules (GDPR). Other tools put more emphasis on user-friendliness, and point out that 2D analysis is often sufficient when the analyzed motion mostly lies in the sagittal or frontal plane. `Sit2Stand` [@Boswell_2023] and `CP GaitLab` [@Kidzinski_2020] provide such tools, although they are focused on very specific tasks. `Kinovea` [@Kinovea], on the other hand, is a widely used software for sports performance analysis, which provides multiple additional features. However, it relies on tracking manual labels. This can be time-consuming when analyzing numerous videos, and it may also lack robustness when the tracked points are lost. It is also only available on Windows, and requires the user to transfer files prior to analysis.
47 |
48 | `Sports2D` is an alternative solution that aims to fill this gap: it is free and open-source, straightforward to install and run, works on any platform, can be run locally for data protection, and automatically provides 2D joint and segment angles without the need for manual annotation. It is also robust and flexible, works in real time, supports multi-person analysis, and can process one video, several videos simultaneously, or a webcam stream. The output is provided as .trc files for locations and .mot files for angles, which makes it compatible with OpenSim [@Delp_2007; @Seth_2018] and readable by any spreadsheet software for further statistical analysis.
49 |
50 |
51 | # Workflow
52 |
53 | ## Installation and usage
54 |
55 | `Sports2D` is installed via `pip install sports2d`. If a valid CUDA installation is found, Sports2D runs on the GPU; otherwise, it runs on the CPU with OpenVINO acceleration.
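For illustration, this GPU/CPU fallback can be thought of as the following minimal sketch (hypothetical code, not Sports2D's actual startup logic):

```python
# Hypothetical sketch of a CUDA-then-CPU fallback, for illustration only
try:
    import torch
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
except ImportError:
    device = 'cpu'
# OpenVINO is a common CPU acceleration backend for ONNX-based pose models
backend = 'onnxruntime' if device == 'cuda' else 'openvino'
print(f'Pose estimation will run on {device} with the {backend} backend.')
```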
56 |
57 |
62 |
63 | A detailed installation and usage guide can be found on the repository: https://github.com/davidpagnon/Sports2D.
64 |
65 | ## Sports2D method details
66 |
67 | [Sports2D]{.ul}:
68 |
69 | 1. Reads a stream from a webcam, from one video, or from a list of videos, optionally restricted to a specified time range.
70 | 2. Sets up the RTMLib pose tracker with the specified parameters. It can be run in lightweight, balanced, or performance mode, and for faster inference, keypoints can be tracked for a certain number of frames instead of detected anew. Any RTMPose model can be used.
71 | 3. Tracks people so that their IDs are consistent across frames. A person in one frame is matched to a person in the next frame when the distance between them is small enough, and IDs remain consistent even if a person disappears for a few frames (a minimal sketch of this matching step follows the list). This carefully crafted `sports2d` tracker runs at a speed comparable to the RTMLib one but is much more robust. The user can still choose the RTMLib method with the `tracking_mode` argument.
72 | 4. Retrieves the keypoints with high enough confidence, and only keeps persons whose average keypoint confidence is high enough.
73 | 5. Computes the selected joint and segment angles, and flips them on the left/right side if the respective foot is pointing to the left/right. The user can select which angles they want to compute, display, and save.
74 | 6. Draws bounding boxes around each person and writes their IDs\
75 |    Draws the skeleton and the keypoints, with a green-to-red color scale to account for their confidence\
76 |    Draws joint and segment angles on the body, and writes the values either near the joint/segment, or on the upper left of the image with a progress bar
77 | 7. Interpolates missing pose and angle sequences if gaps are not too large, then filters them with the selected filter (`Butterworth`, `Gaussian`, `LOESS`, or `Median`) and its parameters
78 | 8. Optionally shows processed images, saves them, or saves them as a video\
79 |    Optionally plots pose and angle data before and after processing for comparison\
80 |    Optionally saves poses for each person as a TRC file, and angles as a MOT file
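For illustration, the distance-based matching in step 3 can be sketched as follows (a hypothetical helper written for exposition, not the actual `sports2d` tracker code):

```python
import numpy as np

def match_ids(prev_centers, new_centers, max_dist=100.0):
    """Greedy nearest-neighbor matching of person centers between two frames.

    prev_centers: dict {person_id: (x, y)} from the previous frame.
    new_centers: list of (x, y) detections in the current frame.
    Returns {person_id: detection_index}. IDs left unmatched are kept alive,
    so a person who disappears for a few frames can later recover their ID.
    """
    matches, taken = {}, set()
    if not new_centers:
        return matches
    new_arr = np.asarray(new_centers, dtype=float)
    for pid, center in prev_centers.items():
        dists = np.linalg.norm(new_arr - center, axis=1)
        dists[list(taken)] = np.inf  # each detection is assigned at most once
        best = int(np.argmin(dists))
        if dists[best] < max_dist:
            matches[pid] = best
            taken.add(best)
    return matches
```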
81 |
82 |
83 |
84 | [The Demo video]{.ul} that Sports2D is tested on is deliberately challenging, in order to demonstrate the robustness of the process after sorting, interpolation, and filtering. It contains:
85 |
86 | * One person walking in the sagittal plane
87 | * One person in the frontal plane. This person then performs a flip while being backlit, both of which are challenging for the pose detection algorithm
88 | * One tiny person flickering in the background who needs to be ignored
89 |
90 |
91 |
92 | [Joint and segment angle estimation]{.ul}:
93 |
94 | Specific joint and segment angles can be chosen. They are consistent regardless of the direction the participant is facing: the participant is considered to look to the left when their toes are to the left of their heels, and to the right otherwise. Resulting angles can be filtered in the same way as point coordinates, and they can also be plotted.
95 |
96 | Joint angle conventions are as follows (\autoref{fig:joint_angle_conventions}):
97 |
98 | * Ankle dorsiflexion: Between heel and big toe, and ankle and knee.\
99 | *-90° when the foot is aligned with the shank.*
100 | * Knee flexion: Between hip, knee, and ankle.\
101 | *0° when the shank is aligned with the thigh.*
102 | * Hip flexion: Between knee, hip, and shoulder.\
103 | *0° when the trunk is aligned with the thigh.*
104 | * Shoulder flexion: Between hip, shoulder, and elbow.\
105 | *180° when the arm is aligned with the trunk.*
106 | * Elbow flexion: Between wrist, elbow, and shoulder.\
107 | *0° when the forearm is aligned with the arm.*
108 |
109 | Segment angles are measured anticlockwise between the horizontal and the segment lines:
110 |
111 | * Foot: Between heel and big toe.
112 | * Shank: Between knee and ankle.
113 | * Thigh: Between hip and knee.
114 | * Pelvis: Between left and right hip.
115 | * Trunk: Between hip midpoint and shoulder midpoint.
116 | * Shoulders: Between left and right shoulder.
117 | * Head: Between neck and top of the head.
118 | * Arm: Between shoulder and elbow.
119 | * Forearm: Between elbow and wrist.
120 |
121 | 
122 |
123 |
124 | # Limitations
125 |
126 | The user of `Sports2D` should be aware of the following limitations:
127 |
128 | * Results are acceptable only if the participants move in the 2D plane, either in the frontal plane or in the sagittal one. If you need research-grade markerless joint kinematics, consider using several cameras, and constraining angles to a biomechanically accurate model. See `Pose2Sim` [@Pagnon_2022a] for example.
129 | * Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect [@Wade_2022], especially when motion blur is significant, as in some broadcast videos.
130 |
131 |
132 |
133 | # Acknowledgements
134 |
135 | I would like to acknowledge Rob Olivar, a sports coach who enlightened me about the need for such a tool.\
136 | I also acknowledge the work of the dedicated people involved in the many major open-source software programs and packages used by `Sports2D`, such as `Python`, `RTMPose`, and `OpenCV` [@Bradski_2000], among others.
137 |
138 |
139 | # References
140 |
141 |
142 |
--------------------------------------------------------------------------------
/Content/sports2d_blender.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/davidpagnon/Sports2D/cdd80ecac898736a8d533d8098e4a982b8244d6a/Content/sports2d_blender.gif
--------------------------------------------------------------------------------
/Content/sports2d_opensim.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/davidpagnon/Sports2D/cdd80ecac898736a8d533d8098e4a982b8244d6a/Content/sports2d_opensim.gif
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2022, perfanalytics
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | [](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml)
3 | [](https://badge.fury.io/py/Sports2D)
4 | \
5 | [](https://pepy.tech/project/sports2d)
6 | [](https://github.com/davidpagnon/sports2d/stargazers)
7 | [](https://github.com/davidpagnon/sports2d/issues)
8 | [](https://GitHub.com/davidpagnon/sports2d/issues?q=is%3Aissue+is%3Aclosed)
9 | \
10 | [](https://joss.theoj.org/papers/1d525bbb2695c88c6ebbf2297bd35897)
11 | [](https://zenodo.org/doi/10.5281/zenodo.7903962)
12 | [](https://opensource.org/licenses/BSD-3-Clause)
13 | \
14 | [](https://discord.com/invite/4mXUdSFjmt)
15 |
16 |
17 |
18 |
19 |
20 | # Sports2D
21 |
22 | **`Sports2D` automatically computes 2D joint positions, as well as joint and segment angles from a video or a webcam.**
23 |
24 |
25 |
26 | > **`Announcements:`**
27 | > - Select only the persons you want to analyze **New in v0.8!**
28 | > - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
29 | > - Any detector and pose estimation model can be used. **New in v0.6!**
30 | > - Results in meters rather than pixels. **New in v0.5!**
31 | > - Faster, more accurate
32 | > - Works from a webcam
33 | > - Better visualization output
34 | > - More flexible, easier to run
35 | >
36 | > Run `pip install sports2d pose2sim -U` to get the latest version.
37 |
38 | ***N.B.:*** As always, I am more than happy to welcome contributions (see [How to contribute](#how-to-contribute-and-to-do-list))!
39 |
42 |
43 |
44 |
45 |
46 |
47 | https://github.com/user-attachments/assets/6a444474-4df1-4134-af0c-e9746fa433ad
48 |
49 |
50 |
51 | `Warning:` Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.\
52 | `Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal plane). The persons need to be filmed as parallel as possible to the motion plane.\
53 | If you need 3D research-grade markerless joint kinematics, consider using several cameras with **[Pose2Sim](https://github.com/perfanalytics/pose2sim)**.
54 |
55 |
56 |
57 |
58 |
59 |
60 | ## Contents
61 | 1. [Installation and Demonstration](#installation-and-demonstration)
62 |    1. [Installation](#installation)
63 |       1. [Quick install](#quick-install)
64 |       2. [Full install](#full-install)
65 |    2. [Demonstration](#demonstration)
66 |       1. [Run the demo](#run-the-demo)
67 |       2. [Visualize in Blender](#visualize-in-blender)
68 |       3. [Visualize in OpenSim](#visualize-in-opensim)
69 |    3. [Play with the parameters](#play-with-the-parameters)
70 |       1. [Run on a custom video or on a webcam](#run-on-a-custom-video-or-on-a-webcam)
71 |       2. [Run for a specific time range](#run-for-a-specific-time-range)
72 |       3. [Select the persons you are interested in](#select-the-persons-you-are-interested-in)
73 |       4. [Get coordinates in meters](#get-coordinates-in-meters)
74 |       5. [Run inverse kinematics](#run-inverse-kinematics)
75 |       6. [Run on several videos at once](#run-on-several-videos-at-once)
76 |       7. [Use the configuration file or run within Python](#use-the-configuration-file-or-run-within-python)
77 |       8. [Get the angles the way you want](#get-the-angles-the-way-you-want)
78 |       9. [Customize your output](#customize-your-output)
79 |       10. [Use a custom pose estimation model](#use-a-custom-pose-estimation-model)
80 |       11. [All the parameters](#all-the-parameters)
81 | 2. [Go further](#go-further)
82 |    1. [Too slow for you?](#too-slow-for-you)
83 |    2. [Run inverse kinematics](#run-inverse-kinematics)
84 |    3. [How it works](#how-it-works)
85 | 3. [How to cite and how to contribute](#how-to-cite-and-how-to-contribute)
86 |
87 |
88 |
89 | ## Installation and Demonstration
90 |
91 | ### Installation
92 |
93 |
100 |
101 | #### Quick install
102 |
103 | > N.B.: Full install is required for OpenSim inverse kinematics.
104 |
105 | Open a terminal. Type `python -V` to make sure Python 3.10 or 3.11 is installed. If not, install it [from here](https://www.python.org/downloads/).
106 |
107 | Run:
108 | ``` cmd
109 | pip install sports2d
110 | ```
111 |
112 | Alternatively, build from source to test the latest changes:
113 | ``` cmd
114 | git clone https://github.com/davidpagnon/sports2d.git
115 | cd sports2d
116 | pip install .
117 | ```
118 |
119 |
120 |
121 | #### Full install
122 |
123 | > **N.B.:** Only needed if you want to run inverse kinematics (`--do_ik True`).\
124 | > **N.B.:** If you already have a Pose2Sim conda environment, you can skip this step. Just run `conda activate Pose2Sim` and `pip install sports2d`.
125 |
126 | - Install Anaconda or [Miniconda](https://docs.conda.io/en/latest/miniconda.html):\
127 | Open an Anaconda prompt and create a virtual environment:
128 | ``` cmd
129 | conda create -n Sports2D python=3.10 -y
130 | conda activate Sports2D
131 | ```
132 | - **Install OpenSim**:\
133 | Install the OpenSim Python API (if you do not want to install via conda, refer [to this page](https://opensimconfluence.atlassian.net/wiki/spaces/OpenSim/pages/53085346/Scripting+in+Python#ScriptinginPython-SettingupyourPythonscriptingenvironment(ifnotusingconda))):
134 | ```
135 | conda install -c opensim-org opensim -y
136 | ```
137 |
138 | - **Install Sports2D with Pose2Sim**:
139 | ``` cmd
140 | pip install sports2d
141 | ```
142 |
143 |
144 |
145 |
146 | ### Demonstration
147 |
148 | #### Run the demo:
149 |
150 | Just open a command line and run:
151 | ``` cmd
152 | sports2d
153 | ```
154 |
155 | You should see the joint positions and angles being displayed in real time.
156 |
157 | Check the folder where you ran the command to find the resulting `video`, `images`, `TRC pose` and `MOT angle` files (which can be opened with any spreadsheet software), and `logs`.
158 |
159 | ***Important:*** If you went for the conda install, you first need to activate the environment: run `conda activate Sports2D` in the Anaconda prompt.
160 |
161 |
162 |
163 |
164 |
165 | ***Note:***\
166 | The Demo video is deliberately challenging, to demonstrate the robustness of the process after sorting, interpolation and filtering. It contains:
167 | - One person walking in the sagittal plane
168 | - One person doing jumping jacks in the frontal plane. This person then performs a flip while being backlit, both of which are challenging for the pose detection algorithm
169 | - One tiny person flickering in the background who needs to be ignored
170 |
171 |
172 |
173 |
174 | #### Visualize in Blender
175 |
176 | 1. **Install the Pose2Sim_Blender add-on.**\
177 |    Follow instructions on the [Pose2Sim_Blender](https://github.com/davidpagnon/Pose2Sim_Blender) add-on page.
178 | 2. **Open your point coordinates.**\
179 |    **Add Markers**: open your trc file (e.g., `coords_m.trc`) from your `result_dir` folder.
180 |
181 |    This will optionally create **an animated rig** based on the motion of the captured person.
182 | 3. **Open your animated skeleton:**\
183 |    Make sure you first set `--do_ik True` ([full install](#full-install) required). See the [inverse kinematics](#run-inverse-kinematics) section for more details.
184 |    - **Add Model**: Open your scaled model (e.g., `Model_Pose2Sim_LSTM.osim`).
185 |    - **Add Motion**: Open your motion file (e.g., `angles.mot`). Make sure the skeleton is selected in the outliner.
186 |
187 |    The OpenSim skeleton is not rigged yet. **[Feel free to contribute!](https://github.com/perfanalytics/pose2sim/issues/40)**
188 |
189 |
190 |
191 |
192 |
193 |
194 | #### Visualize in OpenSim
195 |
196 | 1. Install **[OpenSim GUI](https://simtk.org/frs/index.php?group_id=91)**.
197 | 2. **Visualize point coordinates:**\
198 |    **File -> Preview experimental data:** Open your trc file (e.g., `coords_m.trc`) from your `result_dir` folder.
199 | 3. **Visualize angles:**\
200 |    To open an animated model and run further biomechanical analysis, make sure you first set `--do_ik True` ([full install](#full-install) required). See the [inverse kinematics](#run-inverse-kinematics) section for more details.
201 |    - **File -> Open Model:** Open your scaled model (e.g., `Model_Pose2Sim_LSTM.osim`).
202 |    - **File -> Load Motion:** Open your motion file (e.g., `angles.mot`).
203 |
204 |
205 |
206 |
207 |
208 |
209 |
210 | ### Play with the parameters
211 |
212 | For a full list of the available parameters, see [this section](#all-the-parameters) of the documentation, check the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file, or type `sports2d --help`. All non-specified parameters are set to their default values.
213 |
214 |
215 |
216 |
217 | #### Run on a custom video or on a webcam:
218 | ``` cmd
219 | sports2d --video_input path_to_video.mp4
220 | ```
221 |
222 | ``` cmd
223 | sports2d --video_input webcam
224 | ```
225 |
226 |
227 |
228 | #### Run for a specific time range:
229 | ```cmd
230 | sports2d --time_range 1.2 2.7
231 | ```
232 |
233 |
234 |
235 |
236 | #### Select the persons you are interested in:
237 | If you only want to analyze a subset of the detected persons, you can use the `--nb_persons_to_detect` and `--person_ordering_method` parameters. The order matters if you want to [convert coordinates in meters](#get-coordinates-in-meters) or [run inverse kinematics](#run-inverse-kinematics).
238 |
239 |
240 | ``` cmd
241 | sports2d --nb_persons_to_detect 2 --person_ordering_method highest_likelihood
242 | ```
243 |
244 | We recommend using the `on_click` method if you can afford a manual input. It lets the user set both the number of persons and their order in a single step. When prompted, select the persons you are interested in, in the desired order. In our case, let's slide to a frame where both people are visible, and select the woman first, then the man.
245 |
246 | Otherwise, if you want to run Sports2D fully automatically, you can choose another ordering method, such as 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'.
247 |
248 | ``` cmd
249 | sports2d --person_ordering_method on_click
250 | ```
251 |
252 |
253 |
254 |
255 |
256 |
257 |
258 |
259 |
260 | #### Get coordinates in meters:
261 | > **N.B.:** Depth is estimated from a neutral pose.
262 |
263 |
264 | You may need to convert pixel coordinates to meters.\
265 | Just provide the height of the reference person (and their ID in case of multiple person detection).
266 |
267 | You can also specify whether the visible side of the person is left, right, front, or back. Set it to 'auto' if you want it to be determined automatically (this only works for motion in the sagittal plane), or to 'none' if you want to keep 2D instead of 3D coordinates (if the person goes right and then left, for example).
268 |
269 | The floor angle and the origin of the xy coordinate system are computed automatically from gait. If you analyze another type of motion, you can specify them manually. Note that `y` points down.\
270 | Also note that distortions are not taken into account, and that results will be less accurate for motions in the frontal plane.
271 |
272 |
275 | ``` cmd
276 | sports2d --to_meters True --first_person_height 1.65 --visible_side auto front none
277 | ```
278 | ``` cmd
279 | sports2d --to_meters True --first_person_height 1.65 --visible_side auto front none `
280 | --person_ordering_method on_click `
281 | --floor_angle 0 --xy_origin 0 940
282 | ```
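For illustration, the pixel-to-meter conversion described above can be sketched as follows (a hypothetical helper, not the actual Sports2D code):

``` python
import numpy as np

def px_to_m(coords_px, person_height_m, person_height_px,
            floor_angle_deg=0.0, xy_origin_px=(0.0, 0.0)):
    """Convert pixel coordinates to meters (illustrative sketch only).
    The scale factor comes from the known height of the reference person;
    y keeps pointing down, and the rotation corrects for a tilted camera."""
    scale = person_height_m / person_height_px        # meters per pixel
    xy = (np.asarray(coords_px, dtype=float) - xy_origin_px) * scale
    a = np.radians(floor_angle_deg)
    rot = np.array([[np.cos(a), -np.sin(a)],
                    [np.sin(a),  np.cos(a)]])
    return xy @ rot.T
```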
283 |
284 |
285 |
286 |
287 | #### Run inverse kinematics:
288 | > N.B.: [Full install](#full-install) required.
289 |
290 | > **N.B.:** The person needs to be moving on a single plane for the whole selected time range.
291 |
292 | OpenSim inverse kinematics lets you set joint constraints and joint angle limits, constrain the bones to keep the same length throughout the motion, and optionally enforce equal sizes on the left and right sides. In general, it gives more biomechanically accurate results. It also gives you the opportunity to compute joint torques, muscle forces, ground reaction forces, and more, [with MoCo](https://opensim-org.github.io/opensim-moco-site/) for example.
293 |
294 | This is done via [Pose2Sim](https://github.com/perfanalytics/pose2sim).\
295 | Model scaling is done according to the mean of the segment lengths across a subset of frames. We remove the 10% fastest frames (potential outliers), the frames where the speed is zero (person probably out of frame), the frames where the average knee and hip flexion angles are above 45° (pose estimation is not precise when the person is crouching), and the 20% most extreme segment values after the previous operations (potential outliers). All these parameters can be edited in your Config.toml file.
296 |
297 | ```cmd
298 | sports2d --time_range 1.2 2.7 `
299 | --do_ik true --first_person_height 1.65 --visible_side auto front
300 | ```
301 |
302 | You can optionally use the LSTM marker augmentation to improve the quality of the output motion.\
303 | You can also optionally give the participants proper masses. Mass has no influence on motion, only on forces (if you decide to further pursue kinetics analysis).
304 |
305 | ```cmd
306 | sports2d --time_range 1.2 2.7 `
307 | --do_ik true --first_person_height 1.65 --visible_side left front `
308 | --use_augmentation True --participant_mass 55.0 67.0
309 | ```
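Schematically, the frame selection described above could look like the following sketch (hypothetical inputs and helper, not the actual Pose2Sim code):

``` python
import numpy as np

def mean_segment_lengths(seg_lengths, speeds, knee_hip_angles,
                         fastest_percent=0.10, large_angle_deg=45.0,
                         trimmed_percent=0.20):
    """seg_lengths: (n_frames, n_segments) array of segment lengths;
    speeds: summed keypoint speed per frame;
    knee_hip_angles: mean knee/hip flexion angle per frame (degrees)."""
    keep = speeds > 0                                           # person in frame
    keep &= speeds <= np.quantile(speeds, 1 - fastest_percent)  # drop fastest frames
    keep &= knee_hip_angles <= large_angle_deg                  # drop crouched poses
    lengths = seg_lengths[keep]
    # trim the most extreme remaining values, per segment, before averaging
    lo = np.quantile(lengths, trimmed_percent / 2, axis=0)
    hi = np.quantile(lengths, 1 - trimmed_percent / 2, axis=0)
    trimmed = np.where((lengths >= lo) & (lengths <= hi), lengths, np.nan)
    return np.nanmean(trimmed, axis=0)
```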
310 |
311 |
312 |
313 |
314 | #### Run on several videos at once:
315 | ``` cmd
316 | sports2d --video_input demo.mp4 other_video.mp4
317 | ```
318 | All videos analyzed with the same time range.
319 | ```cmd
320 | sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7
321 | ```
322 | Different time ranges for each video.
323 | ```cmd
324 | sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
325 | ```
326 |
327 |
328 |
329 |
330 | #### Use the configuration file or run within Python:
331 |
332 | - Run with a configuration file:
333 | ``` cmd
334 | sports2d --config Config_demo.toml
335 | ```
336 | - Run within Python:
337 | ``` python
338 | from Sports2D import Sports2D; Sports2D.process('Config_demo.toml')
339 | ```
340 | - Run within Python with a dictionary (for example, `config_dict = toml.load('Config_demo.toml')`):
341 | ``` python
342 | from Sports2D import Sports2D; Sports2D.process(config_dict)
343 | ```
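- For instance, you can load the configuration, tweak a few parameters, and process videos programmatically (section and key names below are illustrative; check [Config_demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) for the actual layout):
``` python
import toml
from Sports2D import Sports2D

config_dict = toml.load('Config_demo.toml')
# section/key names are illustrative; check Config_demo.toml for the actual layout
config_dict['project']['video_input'] = ['demo.mp4']
config_dict['project']['time_range'] = [1.2, 2.7]
Sports2D.process(config_dict)
```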
344 |
345 |
346 |
347 |
348 | #### Get the angles the way you want:
349 |
350 | - Choose which angles you need:
351 | ```cmd
352 | sports2d --joint_angles 'right knee' 'left knee' --segment_angles None
353 | ```
354 | - Choose where to display the angles: either as a list on the upper-left of the image, or near the joint/segment, or both:
355 | ```cmd
356 | sports2d --display_angle_values_on body # OR none, or list
357 | ```
358 | - You can also decide not to calculate and display angles at all:
359 | ```cmd
360 | sports2d --calculate_angles false
361 | ```
362 | - To run **inverse kinematics with OpenSim**, check [this section](#run-inverse-kinematics)
363 |
364 |
365 |
366 |
367 | #### Customize your output:
368 | - Choose whether you want video, images, trc pose file, angle mot file, real-time display, and plots:
369 | ```cmd
370 | sports2d --save_vid false --save_img true `
371 | --save_pose false --save_angles true `
372 | --show_realtime_results false --show_graphs false
373 | ```
374 | - Save results to a custom directory and specify the slow-motion factor:
375 | ``` cmd
376 | sports2d --result_dir path_to_result_dir --slowmo_factor 8
377 | ```
378 |
379 |
380 |
381 |
382 | #### Use a custom pose estimation model:
383 | - Retrieve hand motion:
384 | ``` cmd
385 | sports2d --pose_model whole_body
386 | ```
387 | - Use any custom (deployed) MMPose model:
388 | ``` cmd
389 | sports2d --pose_model BodyWithFeet `
390 | --mode """{'det_class':'YOLOX', `
391 | 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_m_8xb8-300e_humanart-c2c7a14a.zip', `
392 | 'det_input_size':[640, 640], `
393 | 'pose_class':'RTMPose', `
394 | 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip', `
395 | 'pose_input_size':[192,256]}"""
396 | ```
397 |
398 |
399 |
400 |
401 | #### All the parameters
402 |
403 | For a full list of the available parameters, have a look at the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file or type:
404 |
405 | ``` cmd
406 | sports2d --help
407 | ```
408 |
409 | ```
410 | 'config': ["C", "path to a toml configuration file"],
411 |
412 | 'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non-ASCII characters"],
413 | 'nb_persons_to_detect': ["n", "number of persons to detect. int or 'all'. 'all' if not specified"],
414 | 'person_ordering_method': ["", "'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. 'on_click' if not specified"],
415 | 'first_person_height': ["H", "height of the reference person in meters. 1.65 if not specified. Not used if a calibration file is provided"],
416 | 'visible_side': ["", "front, back, left, right, auto, or none. 'auto front none' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
417 | 'load_trc_px': ["", "load trc file to avoid running pose estimation again. false if not specified"],
418 | 'compare': ["", "visually compare motion with trc file. false if not specified"],
419 | 'webcam_id': ["w", "webcam ID. 0 if not specified"],
420 | 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
421 | 'video_dir': ["d", "current directory if not specified"],
422 | 'result_dir': ["r", "current directory if not specified"],
423 | 'show_realtime_results': ["R", "show results in real-time. true if not specified"],
424 | 'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
425 | 'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
426 | 'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
427 | 'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
428 | 'save_vid': ["V", "save processed video. true if not specified"],
429 | 'save_img': ["I", "save processed images. true if not specified"],
430 | 'save_pose': ["P", "save pose as trc files. true if not specified"],
431 | 'calculate_angles': ["c", "calculate joint and segment angles. true if not specified"],
432 | 'save_angles': ["A", "save angles as mot files. true if not specified"],
433 | 'slowmo_factor': ["", "slow-motion factor. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8. 1 if not specified"],
434 | 'pose_model': ["p", "body_with_feet, whole_body_wrist, whole_body, or body. body_with_feet if not specified"],
435 | 'mode': ["m", 'light, balanced, performance, or a """{dictionary within triple quotes}""". balanced if not specified. Use a dictionary to specify your own detection and/or pose estimation models (more about this in the documentation).'],
436 | 'det_frequency': ["f", "run person detection only every N frames, and in between track previously detected bounding boxes. Keypoint detection is still run on all frames.\n\
437 | Equal to or greater than 1; can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. 1 if not specified: detection runs on all frames"],
438 | 'backend': ["", "Backend for pose estimation can be 'auto', 'onnxruntime', 'openvino', or 'opencv'"],
439 | 'device': ["", "Device for pose estimation can be 'auto', 'cpu', 'cuda', 'mps' (for MacOS), or 'rocm' (for AMD GPUs)"],
440 | 'to_meters': ["M", "convert pixels to meters. true if not specified"],
441 | 'make_c3d': ["", "Convert trc to c3d file. true if not specified"],
442 | 'floor_angle': ["", "angle of the floor (degrees). 'auto' if not specified"],
443 | 'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
444 | 'calib_file': ["", "path to calibration file. '' if not specified, i.e., no calibration file"],
445 | 'save_calib': ["", "save calibration file. true if not specified"],
446 | 'do_ik': ["", "do inverse kinematics. false if not specified"],
447 | 'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
448 | 'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. true if not specified"],
449 | 'use_contacts_muscles': ["", "Use model with contact spheres and muscles. false if not specified"],
450 | 'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
451 | 'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
452 | 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
453 | 'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
454 | Default: max_age:30, n_init:3, nms_max_overlap:0.8, max_cosine_distance:0.3, nn_budget:200, max_iou_distance:0.8, embedder_gpu: True\n\
455 |                More information here: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],
456 | 'input_size': ["", "width, height. 1280, 720 if not specified. Lower resolution will be faster but less precise"],
457 | 'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
458 | 'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
459 |                'keypoint_number_threshold': ["", "detected persons are not retained if their number of detected keypoints is below this threshold. 0.3 if not specified, i.e., 30 percent"],
464 | 'fontSize': ["", "font size for angle values. 0.3 if not specified"],
465 |                'flip_left_right': ["", "true or false. Flips angles when the person faces the other side. The person looks to the right if their toe keypoint is to the right of their heel. Set it to false if the person is sprinting or if you want timeseries to be continuous even when the participant switches their stance. true if not specified"],
466 |                'correct_segment_angles_with_floor_angle': ["", "true or false. If the camera is tilted, corrects segment angles with respect to the floor angle. Set to false if the floor is tilted instead. true if not specified"],
467 | 'interpolate': ["", "interpolate missing data. true if not specified"],
468 | 'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
469 | 'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
470 | 'filter': ["", "filter results. true if not specified"],
471 | 'filter_type': ["", "butterworth, gaussian, median, or loess. butterworth if not specified"],
472 | 'order': ["", "order of the Butterworth filter. 4 if not specified"],
473 |                'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 6 if not specified"],
474 | 'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
475 | 'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
476 | 'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
477 | 'osim_setup_path': ["", "path to OpenSim setup. '../OpenSim_setup' if not specified"],
478 | 'right_left_symmetry': ["", "right left symmetry. true if not specified"],
479 | 'default_height': ["", "default height for scaling. 1.70 if not specified"],
480 | 'remove_individual_scaling_setup': ["", "remove individual scaling setup files generated during scaling. true if not specified"],
481 | 'remove_individual_ik_setup': ["", "remove individual IK setup files generated during IK. true if not specified"],
482 | 'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
483 | 'close_to_zero_speed_m': ["","Sum for all keypoints: about 0.2 m/frame. Defaults to 0.2"],
484 | 'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame. Defaults to 50"],
485 | 'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise and ignored. Defaults to 45"],
486 |                'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 0.5, i.e., 50 percent"],
487 | 'use_custom_logging': ["", "use custom logging. false if not specified"]
488 | ```
489 |
490 |
491 |
492 |
493 | ## Go further
494 |
495 | ### Too slow for you?
496 |
497 | **Quick fixes:**
498 | - Use `--save_vid false --save_img false --show_realtime_results false`: Will not save images or videos, and will not display the results in real time.
499 | - Use `--mode lightweight`: Will use a lighter version of RTMPose, which is faster but less accurate.\
500 |   Note that any detection and pose estimation models can be used (first [deploy them with MMPose](https://mmpose.readthedocs.io/en/latest/user_guides/how_to_deploy.html#onnx) if you do not have their .onnx or .zip files), with the following syntax:
501 | ```
502 | --mode """{'det_class':'YOLOX',
503 | 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_nano_8xb8-300e_humanart-40f6f0d0.zip',
504 | 'det_input_size':[416,416],
505 | 'pose_class':'RTMPose',
506 | 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.zip',
507 | 'pose_input_size':[192,256]}"""
508 | ```
509 | - Use `--det_frequency 50`: Will run person detection only every 50 frames and track bounding boxes in between, which is faster (pose keypoints are still estimated on every frame).
510 | - Use `--load_trc_px`: Will use pose estimation results from a file. Useful if you want to use different parameters for pixel-to-meter conversion or angle calculation without running detection and pose estimation all over again.
511 | - Make sure you use `--tracking_mode sports2d`: Will use the default Sports2D tracker. It is faster than DeepSort, does not require any parametrization, and performs just as well in non-crowded scenes.
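
For example, here is a single command combining these quick fixes (with a hypothetical `path_to_video.mp4`):
```
sports2d --video_input path_to_video.mp4 --mode lightweight --det_frequency 50 --save_vid false --save_img false --show_realtime_results false
```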
512 |
513 |
514 |
515 | **Use your GPU**:\
516 | Will be much faster, with no impact on accuracy. However, the installation takes about 6 GB of additional storage space.
517 |
518 | 1. Run `nvidia-smi` in a terminal. If this results in an error, your GPU is probably not compatible with CUDA. Otherwise, note the "CUDA version": it is the latest version your driver is compatible with (more information [on this post](https://stackoverflow.com/questions/60987997/why-torch-cuda-is-available-returns-false-even-after-installing-pytorch-with)).
519 |
520 |    Then go to the [ONNXruntime requirement page](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements) and note the latest compatible CUDA and cuDNN versions. Next, go to the [PyTorch website](https://pytorch.org/get-started/previous-versions/) and install the latest version that satisfies these requirements (beware that torch 2.4 ships with cuDNN 9, while torch 2.3 installs cuDNN 8). For example:
521 | ``` cmd
522 | pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu124
523 | ```
524 |
525 |
527 |
528 | 2. Install ONNX Runtime with GPU support:
529 | ```
530 | pip uninstall onnxruntime
531 | pip install onnxruntime-gpu
532 | ```
533 |
534 | 3. Check that everything went well within Python with these commands:
535 | ``` bash
536 | python -c 'import torch; print(torch.cuda.is_available())'
537 | python -c 'import onnxruntime as ort; print(ort.get_available_providers())'
538 | # Should print "True ['CUDAExecutionProvider', ...]"
539 | ```
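
If inference still runs on the CPU, you can also set the device and backend explicitly instead of leaving them on `auto` (both options exist in the configuration, with the values listed in the help):
```
sports2d --device cuda --backend onnxruntime
```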
540 |
541 |
542 |
543 |
544 |
545 |
546 |
547 |
548 |
549 |
554 |
555 |
556 |
557 |
558 |
559 |
560 |
561 |
562 | ### How it works
563 |
564 | Sports2D:
565 | - Detects 2D joint centers from a video or a webcam with RTMLib.
566 | - Converts pixel coordinates to meters.
567 | - Computes selected joint and segment angles.
568 | - Optionally performs kinematic optimization via OpenSim.
569 | - Optionally saves processed image and video files.
570 |
571 |
572 |
573 | **Okay but how does it work, really?**\
574 | Sports2D:
575 |
576 | 1. **Reads stream from a webcam, from one video, or from a list of videos**. Selects the specified time range to process.
577 |
578 | 2. **Sets up pose estimation with RTMLib.** It can be run in lightweight, balanced, or performance mode, and for faster inference, keypoints can be tracked instead of detected for a certain number of frames. Any RTMPose model can be used.
579 |
580 | 3. **Tracks people** so that their IDs are consistent across frames. A person in one frame is matched to a person in the next frame when the distance between them is small enough. IDs remain consistent even if a person disappears for a few frames. We crafted a 'sports2d' tracker which gives good results and runs in real time (a minimal sketch of the idea is given after this list), but it is also possible to use `deepsort` in particularly challenging situations.
581 |
582 | 4. **Chooses the right persons to keep.** In single-person mode, only keeps the person with the highest average score over the sequence. In multi-person mode, only retrieves the keypoints with high enough confidence, and only keeps the persons with high enough average confidence over each frame.
583 |
584 | 5. **Converts the pixel coordinates to meters.** The user can provide a calibration file, or simply the height of a specified person. The floor angle and the coordinate origin can either be detected automatically from the gait sequence, or be manually specified. The depth coordinates are set to normative values, depending on whether the person is going left, right, facing the camera, or looking away.
585 |
586 | 6. **Computes the selected joint and segment angles**, and flips them on the left/right side if the respective foot is pointing to the left/right.
587 |
588 | 7. **Draws the results on the image:**\
589 | Draws bounding boxes around each person and writes their IDs\
590 | Draws the skeleton and the keypoints, with a green to red color scale to account for their confidence\
591 | Draws joint and segment angles on the body, and writes the values either near the joint/segment, or on the upper-left of the image with a progress bar
592 |
593 | 8. **Interpolates and filters results:** Missing pose and angle sequences are interpolated unless gaps are too large. Results are filtered according to the selected filter (among `Butterworth`, `Gaussian`, `LOESS`, or `Median`) and its parameters.
594 |
595 | 9. **Optionally shows** processed images, saves them, or saves them as a video\
596 | **Optionally plots** pose and angle data before and after processing for comparison\
597 | **Optionally saves** poses for each person as a TRC file in pixels and meters, angles as a MOT file, and calibration data as a [Pose2Sim](https://github.com/perfanalytics/pose2sim) TOML file
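
To make step 3 more concrete, here is a minimal sketch of greedy nearest-neighbour association between two consecutive frames. It only illustrates the idea, not Sports2D's actual tracker; the function name and the `max_dist` threshold are invented for the example:
``` python
import numpy as np

def associate_persons(prev_people, curr_people, max_dist=100):
    '''Match each person of the previous frame to the closest
    unmatched person of the current frame, if close enough.
    prev_people, curr_people: lists of (n_keypoints, 2) arrays of pixel coordinates.
    Returns one current-frame index (or None) per previous person.'''
    available = set(range(len(curr_people)))
    matches = []
    for prev in prev_people:
        best, best_dist = None, max_dist
        for j in available:
            # Mean keypoint-to-keypoint distance, ignoring missing (NaN) keypoints
            dist = np.nanmean(np.linalg.norm(prev - curr_people[j], axis=1))
            if dist < best_dist:
                best, best_dist = j, dist
        if best is not None:
            available.discard(best)
        matches.append(best)
    return matches
```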
598 |
599 |
600 |
601 | **Joint angle conventions:**
602 | - Ankle dorsiflexion: Between heel and big toe, and ankle and knee.\
603 | *-90° when the foot is aligned with the shank.*
604 | - Knee flexion: Between hip, knee, and ankle.\
605 | *0° when the shank is aligned with the thigh.*
606 | - Hip flexion: Between knee, hip, and shoulder.\
607 | *0° when the trunk is aligned with the thigh.*
608 | - Shoulder flexion: Between hip, shoulder, and elbow.\
609 | *180° when the arm is aligned with the trunk.*
610 | - Elbow flexion: Between wrist, elbow, and shoulder.\
611 | *0° when the forearm is aligned with the arm.*
612 |
613 | **Segment angle conventions:**\
614 | Angles are measured anticlockwise between the horizontal and the segment (a minimal computation sketch is given after this list).
615 | - Foot: Between heel and big toe
616 | - Shank: Between ankle and knee
617 | - Thigh: Between hip and knee
618 | - Pelvis: Between left and right hip
619 | - Trunk: Between hip midpoint and shoulder midpoint
620 | - Shoulders: Between left and right shoulder
621 | - Head: Between neck and top of the head
622 | - Arm: Between shoulder and elbow
623 | - Forearm: Between elbow and wrist
624 |
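As an illustration of these conventions, segment and three-point joint angles can be computed as in the sketch below. It assumes image coordinates (y pointing downwards) and leaves out Sports2D's per-angle offsets and signs (such as the ankle's +90° offset), so it is not the exact implementation:
``` python
import numpy as np

def segment_angle(p_prox, p_dist):
    '''Anticlockwise angle (in degrees) between the horizontal and the
    segment from p_prox to p_dist. dy is negated because image
    y coordinates point downwards.'''
    dx = p_dist[0] - p_prox[0]
    dy = p_dist[1] - p_prox[1]
    return np.degrees(np.arctan2(-dy, dx))

def joint_angle(a, b, c):
    '''Angle (in degrees, 0-180) at point b, between segments b-a and b-c.'''
    u = np.asarray(a, float) - np.asarray(b, float)
    v = np.asarray(c, float) - np.asarray(b, float)
    cos_ang = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return np.degrees(np.arccos(np.clip(cos_ang, -1.0, 1.0)))
```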
625 |
626 |
627 |
628 |
629 |
630 | ## How to cite and how to contribute
631 |
632 | ### How to cite
633 | If you use Sports2D, please cite [Pagnon, 2024](https://joss.theoj.org/papers/10.21105/joss.06849).
634 |
635 | @article{Pagnon_Sports2D_Compute_2D_2024,
636 | author = {Pagnon, David and Kim, HunMin},
637 | doi = {10.21105/joss.06849},
638 | journal = {Journal of Open Source Software},
639 | month = sep,
640 | number = {101},
641 | pages = {6849},
642 | title = {{Sports2D: Compute 2D human pose and angles from a video or a webcam}},
643 | url = {https://joss.theoj.org/papers/10.21105/joss.06849},
644 | volume = {9},
645 | year = {2024}
646 | }
647 |
648 |
649 | ### How to contribute
650 | I would happily welcome any proposal for new features, code improvement, and more!\
651 | If you want to contribute to Sports2D or Pose2Sim, please see [this issue](https://github.com/perfanalytics/pose2sim/issues/40).\
652 | You will be offered a to-do list, but please feel absolutely free to propose your own ideas and improvements.
653 |
654 | *Here is a to-do list: feel free to complete it:*
655 | - [x] Compute **segment angles**.
656 | - [x] **Multi-person** detection, consistent over time.
657 | - [x] **Only interpolate small gaps**.
658 | - [x] **Filtering and plotting tools**.
659 | - [x] Handle sudden **changes of direction**.
660 | - [x] **Batch processing** for the analysis of multiple videos at once.
661 | - [x] Option to only save one person (with the highest average score, or with the most frames and fastest speed)
662 | - [x] Run again without pose estimation with the option `--load_trc_px` for a .trc file in pixels.
663 | - [x] **Convert positions to meters** by providing the person height, a calibration file, or 3D points [to click on the image](https://stackoverflow.com/questions/74248955/how-to-display-the-coordinates-of-the-points-clicked-on-the-image-in-google-cola)
664 | - [x] Support any detection and/or pose estimation model.
665 | - [x] Optionally let user select the persons of interest.
666 | - [x] Perform **Inverse kinematics and dynamics** with OpenSim (cf. [Pose2Sim](https://github.com/perfanalytics/pose2sim), but in 2D). Update [this model](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Utilities/2D_gait.osim) (add arms, markers, remove muscles and contact spheres). Add pipeline example.
667 |
668 | - [ ] Run with the option `--compare_to` to visually compare motion with a trc file. If run with a webcam input, the user can follow the motion of the trc file. Further calculation can then be done to compare specific variables.
669 | - [ ] **Colab version**: more user-friendly, usable on a smartphone.
670 | - [ ] **GUI applications** for Windows, Mac, and Linux, as well as for Android and iOS.
671 |
672 |
673 |
674 | - [ ] **Track other points and angles** with classic tracking methods (cf. [Kinovea](https://www.kinovea.org/features.html)), or by training a model (cf. [DeepLabCut](https://deeplabcut.github.io/DeepLabCut/README.html)).
675 | - [ ] **Pose refinement**. Click and move badly estimated 2D points. See [DeepLabCut](https://www.youtube.com/watch?v=bEuBKB7eqmk) for inspiration.
676 | - [ ] Add tools for annotating images, undistort them, take perspective into account, etc. (cf. [Kinovea](https://www.kinovea.org/features.html)).
677 |
678 |
--------------------------------------------------------------------------------
/Sports2D/Demo/Config_demo.toml:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | ## SPORTS2D PROJECT PARAMETERS ##
3 | ###############################################################################
4 |
5 | # Configure your project parameters here
6 |
7 | # Then open an Anaconda prompt and enter:
8 | # conda activate Sports2D
9 | # ipython
10 | # from Sports2D import Sports2D
11 | # Sports2D.process('Config_demo.toml')
12 |
13 |
14 | [base]
15 | video_input = 'demo.mp4' # 'webcam' or '', or ['video1_path.mp4', 'video2_path.avi', ...]
16 | # On Windows, replace '\' with '/'
17 | # Beware that images won't be saved if paths contain non ASCII characters.
18 |
19 | nb_persons_to_detect = 'all' # int or 'all' # Limiting or not the number of persons to be analyzed
20 | person_ordering_method = 'on_click' # 'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'
21 | first_person_height = 1.65 # Height of the reference person in meters (for pixels -> meters conversion: not used if a calibration file is provided)
22 | visible_side = ['auto', 'front', 'none'] # Choose visible side among ['right', 'left', 'front', 'back', 'auto', 'none']. String or list of strings.
23 | # if 'auto', will be either 'left', 'right', or 'front' depending on the direction of the motion
24 | # if 'none', coordinates will be left in 2D rather than 3D
25 |
26 | load_trc_px = '' # If you do not want to recalculate pose, load it from a trc file (in px, not in m)
27 | compare = false # Not implemented yet
28 |
29 | # Video parameters
30 | time_range = [] # [] for the whole video, or [start_time, end_time] (in seconds), or [[start_time1, end_time1], [start_time2, end_time2], ...]
31 | # Time ranges can be different for each video.
32 | video_dir = '' # If empty, video dir is current dir
33 |
34 | # Webcam parameters
35 | webcam_id = 0 # your webcam id (0 is default)
36 | input_size = [1280, 720] # [W, H]. Lower resolution will be faster but less precise.
37 |
38 | show_realtime_results = true
39 | save_vid = true
40 | save_img = true
41 | save_pose = true
42 | calculate_angles = true
43 | save_angles = true
44 | result_dir = '' # If empty, project dir is current dir
45 |
46 |
47 | ##########################
48 | # ADVANCED CONFIGURATION #
49 | ##########################
50 | [pose]
51 | # Slow motion factor
52 | slowmo_factor = 1 # 1 for normal speed. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8
53 |
54 | # Pose detection parameters
55 | pose_model = 'Body_with_feet' #With RTMLib:
56 | # - Body_with_feet (default HALPE_26 model),
57 | # - Whole_body_wrist (COCO_133_WRIST: body + feet + 2 hand_points),
58 | # - Whole_body (COCO_133: body + feet + hands),
59 | # - Body (COCO_17). Marker augmentation won't work, Kinematic analysis will work,
60 | # - Hand (HAND_21, only lightweight mode. Potentially better results with Whole_body),
61 | # - Face (FACE_106),
62 | # - Animal (ANIMAL2D_17)
63 |      # /!\ Only RTMPose is natively embedded in Sports2D. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
64 | # /!\ For Face and Animal, use mode="""{dictionary}""", and find the corresponding .onnx model there https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose
65 | mode = 'balanced' # 'lightweight', 'balanced', 'performance', or """{dictionary}""" (see below)
66 |
67 | # A dictionary (WITHIN THREE DOUBLE QUOTES) allows you to manually select the person detection (if top_down approach) and/or pose estimation models (see https://github.com/Tau-J/rtmlib).
68 | # Models can be local paths or URLs.
69 | # Make sure the input_sizes are within square brackets, and that they are in the opposite order from the one in the model path (for example, it would be [192,256] for rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip).
70 | # If your pose_model is not provided in skeletons.py, you may have to create your own one (see example at the end of the file).
71 | # Example, equivalent to mode='balanced':
72 | # mode = """{'det_class':'YOLOX',
73 | # 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_m_8xb8-300e_humanart-c2c7a14a.zip',
74 | # 'det_input_size':[640, 640],
75 | # 'pose_class':'RTMPose',
76 | # 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip',
77 | # 'pose_input_size':[192,256]}"""
78 | # Example with one-stage RTMO model (Requires pose_model = 'Body'):
79 | # mode = """{'pose_class':'RTMO',
80 | # 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip',
81 | # 'pose_input_size':[640, 640]}"""
82 | # Example with animal pose estimation:
83 | # mode = """{'pose_class':'RTMPose',
84 | # 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.zip',
85 | # 'pose_input_size':[256,256]}"""
86 |
87 | det_frequency = 4 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
88 | # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
89 | device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
90 | backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
91 | tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned
92 | # deepsort_params = """{'max_age':30, 'n_init':3, 'max_cosine_distance':0.3, 'max_iou_distance':0.8, 'embedder_gpu': True, 'embedder':'torchreid'}""" # """{dictionary between 3 double quotes}"""
93 | # More robust in crowded scenes but tricky to parametrize. More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51
94 | # Requires `pip install torch torchvision torchreid gdown tensorboard`
95 |
96 | # Processing parameters
97 | keypoint_likelihood_threshold = 0.3 # Keypoints whose likelihood is lower will not be taken into account
98 | average_likelihood_threshold = 0.5 # Person will be ignored if average likelihood of good keypoints is lower than this value
99 | keypoint_number_threshold = 0.3 # Person will be ignored if the number of good keypoints (above keypoint_likelihood_threshold) is less than this fraction
100 |
101 |
102 | [px_to_meters_conversion]
103 | # Pixel to meters conversion
104 | to_meters = true
105 | make_c3d = true
106 | save_calib = true # Coming soon!
107 |
108 | # If conversion from first_person_height
109 | floor_angle = 'auto' # 'auto' or a value in degrees, e.g. 2.3. If 'auto', estimated from the line formed by the toes when they are on the ground (where speed = 0)
110 | xy_origin = ['auto'] # ['auto'] or [px_x,px_y]. N.B.: px_y points downwards. If ['auto'], direction estimated from the start to the end of the line formed by the toes when they are on the ground
111 |
112 | # If conversion from a calibration file
113 | calib_file = '' # Calibration in the Pose2Sim format. 'calib_demo.toml', or '' if not available
114 |
115 |
116 | [angles]
117 | display_angle_values_on = ['body', 'list'] # 'body', 'list', ['body', 'list'], 'none'. Display angle values on the body, as a list in the upper left of the image, both, or do not display them.
118 | fontSize = 0.3
119 |
120 | # Select joint angles among
121 | # ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow', 'Right wrist', 'Left wrist']
122 | joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow', 'Right wrist', 'Left wrist']
123 | # Select segment angles among
124 | # ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
125 | segment_angles = ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
126 |
127 | # Processing parameters
128 | flip_left_right = true # Same angles whether the participant faces left/right. Set it to false if you want timeseries to be continuous even when the participant switches their stance.
129 | correct_segment_angles_with_floor_angle = true # If the camera is tilted, corrects segment angles with respect to the floor angle. Set to false if the floor is tilted instead
130 |
131 |
132 | [post-processing]
133 | interpolate = true
134 | interp_gap_smaller_than = 10 # do not interpolate bigger gaps
135 | fill_large_gaps_with = 'last_value' # 'last_value', 'nan', or 'zeros'
136 |
137 | filter = true
138 | show_graphs = true # Show plots of raw and processed results
139 | filter_type = 'butterworth' # butterworth, gaussian, LOESS, median
140 | [post-processing.butterworth]
141 | order = 4
142 | cut_off_frequency = 6 # Hz # Will be divided by slowmo_factor to be equivalent to non slowed-down video
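# e.g. with slowmo_factor = 8, a 6 Hz cut-off is effectively applied as 6/8 = 0.75 Hz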
143 | [post-processing.gaussian]
144 | sigma_kernel = 1 #px
145 | [post-processing.loess]
146 | nb_values_used = 5 # = fraction of data used * nb frames
147 | [post-processing.median]
148 | kernel_size = 3
149 |
150 |
151 | [kinematics]
152 | do_ik = false # Do scaling and inverse kinematics?
153 | use_augmentation = false # true or false (lowercase) # Set to true if you want to use the model with augmented markers
154 | feet_on_floor = false # true or false (lowercase) # Set to false if you want to use the model with feet not on the floor (e.g. running, jumping, etc.)
155 | use_contacts_muscles = true # true or false (lowercase) # If true, contact spheres and muscles are added to the model
156 | participant_mass = [55.0, 67.0] # kg # defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)
157 | right_left_symmetry = true # true or false (lowercase) # Set to false only if you have good reasons to think the participant is not symmetrical (e.g. prosthetic limb)
158 |
159 | # Choosing best frames to scale the model
160 | default_height = 1.7 # meters # If automatic height calculation did not work, this value is used to scale the model
161 | fastest_frames_to_remove_percent = 0.1 # Frames with high speed are considered as outliers
162 | close_to_zero_speed_px = 50 # Sum for all keypoints: about 50 px/frame
163 | close_to_zero_speed_m = 0.2 # Sum for all keypoints: 0.2 m/frame
164 | large_hip_knee_angles = 45 # Hip and knee angles below this value are considered as imprecise
165 | trimmed_extrema_percent = 0.5 # Proportion of the most extreme segment values to remove before calculating their mean
166 | remove_individual_scaling_setup = true # true or false (lowercase) # If true, the individual scaling setup files are removed to avoid cluttering
167 | remove_individual_ik_setup = true # true or false (lowercase) # If true, the individual IK setup files are removed to avoid cluttering
168 |
169 |
170 | [logging]
171 | use_custom_logging = false # if integrated in an API that already has logging
172 |
173 |
174 |
175 | # CUSTOM skeleton
176 | # If you use a model with different keypoints and/or different ordering
177 | # Useful if you trained your own model, from DeepLabCut or MMPose for example.
178 | # Make sure the ids are set in the right order and start from zero.
179 | #
180 | # If you want to perform inverse kinematics, you will also need to create an OpenSim model
181 | # and add to its markerset the location where you expect the triangulated keypoints to be detected.
182 | #
183 | # In this example, CUSTOM reproduces the HALPE_26 skeleton (default skeletons are stored in skeletons.py).
184 | # You can create as many custom skeletons as you want, just add them further down and rename them.
185 | #
186 | # Check your model hierarchy with: for pre, _, node in RenderTree(model):
187 | # print(f'{pre}{node.name} id={node.id}')
188 | [pose.CUSTOM]
189 | name = "Hip"
190 | id = 19
191 | [[pose.CUSTOM.children]]
192 | name = "RHip"
193 | id = 12
194 | [[pose.CUSTOM.children.children]]
195 | name = "RKnee"
196 | id = 14
197 | [[pose.CUSTOM.children.children.children]]
198 | name = "RAnkle"
199 | id = 16
200 | [[pose.CUSTOM.children.children.children.children]]
201 | name = "RBigToe"
202 | id = 21
203 | [[pose.CUSTOM.children.children.children.children.children]]
204 | name = "RSmallToe"
205 | id = 23
206 | [[pose.CUSTOM.children.children.children.children]]
207 | name = "RHeel"
208 | id = 25
209 | [[pose.CUSTOM.children]]
210 | name = "LHip"
211 | id = 11
212 | [[pose.CUSTOM.children.children]]
213 | name = "LKnee"
214 | id = 13
215 | [[pose.CUSTOM.children.children.children]]
216 | name = "LAnkle"
217 | id = 15
218 | [[pose.CUSTOM.children.children.children.children]]
219 | name = "LBigToe"
220 | id = 20
221 | [[pose.CUSTOM.children.children.children.children.children]]
222 | name = "LSmallToe"
223 | id = 22
224 | [[pose.CUSTOM.children.children.children.children]]
225 | name = "LHeel"
226 | id = 24
227 | [[pose.CUSTOM.children]]
228 | name = "Neck"
229 | id = 18
230 | [[pose.CUSTOM.children.children]]
231 | name = "Head"
232 | id = 17
233 | [[pose.CUSTOM.children.children.children]]
234 | name = "Nose"
235 | id = 0
236 | [[pose.CUSTOM.children.children]]
237 | name = "RShoulder"
238 | id = 6
239 | [[pose.CUSTOM.children.children.children]]
240 | name = "RElbow"
241 | id = 8
242 | [[pose.CUSTOM.children.children.children.children]]
243 | name = "RWrist"
244 | id = 10
245 | [[pose.CUSTOM.children.children]]
246 | name = "LShoulder"
247 | id = 5
248 | [[pose.CUSTOM.children.children.children]]
249 | name = "LElbow"
250 | id = 7
251 | [[pose.CUSTOM.children.children.children.children]]
252 | name = "LWrist"
253 | id = 9
254 |
--------------------------------------------------------------------------------
/Sports2D/Demo/demo.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/davidpagnon/Sports2D/cdd80ecac898736a8d533d8098e4a982b8244d6a/Sports2D/Demo/demo.mp4
--------------------------------------------------------------------------------
/Sports2D/Sports2D.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 |
5 | '''
6 | ##############################################################
7 | ## SPORTS2D ##
8 | ##############################################################
9 |
10 | Use sports2d to compute your athlete's pose, joint, and segment angles
11 |
12 | -----
13 | Help
14 | -----
15 | See https://github.com/davidpagnon/Sports2D
16 | Or run: sports2d --help
17 | Or check: https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml
18 |
19 | -----
20 | Usage
21 | -----
22 | - Run on Demo video with default parameters:
23 | sports2d
24 | - Run on custom video with default parameters:
25 | sports2d --video_input path_to_video.mp4
26 | - Run on multiple videos with default parameters:
27 | sports2d --video_input path_to_video1.mp4 path_to_video2.mp4
28 | - Run on webcam with default parameters:
29 | sports2d --video_input webcam
30 | - Run with custom parameters (all non specified are set to default):
31 |         sports2d --show_graphs false --time_range 0 2.1 --result_dir path_to_result_dir
32 |         sports2d --person_ordering_method highest_likelihood --mode lightweight --det_frequency 50
33 | - Run with a toml configuration file:
34 | sports2d --config path_to_config.toml
35 |
36 | -----
37 | Installation
38 | -----
39 | Optional:
40 | - Install Miniconda
41 |     - Open an Anaconda Prompt and type:
42 | `conda create -n Sports2D python>=3.7`
43 | `conda activate Sports2D`
44 |     `pip install .`
45 |
46 | -----
47 | /!\ Warning /!\
48 | -----
49 | - The angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.
50 | - It will only lead to acceptable results if the persons move in the 2D plane (sagittal plane).
51 | - The persons need to be filmed as perpendicularly as possible from their side.
52 | If you need research-grade markerless joint kinematics, consider using several cameras,
53 | and constraining angles to a biomechanically accurate model. See Pose2Sim for example:
54 | https://github.com/perfanalytics/pose2sim
55 |
56 | -----
57 | How it works
58 | -----
59 | Detects 2D joint centers from a video or a webcam with RTMLib.
60 | Computes selected joint and segment angles.
61 | Optionally saves processed image files and video file.
62 | Optionally saves processed poses as a TRC file, and angles as a MOT file (OpenSim compatible).
63 |
64 | Further details. Sports2D:
65 | - loads skeleton information
66 | - reads stream from a video or a webcam
67 |     - sets up the RTMLib pose tracker with the specified parameters
68 | - detects poses within the selected time range
69 | - tracks people so that their IDs are consistent across frames
70 | - retrieves the keypoints with high enough confidence, and only keeps the persons with enough high-confidence keypoints
71 |     - computes joint and segment angles (or not), and flips those on the left/right side if the respective foot is pointing to the left
72 | - draws bounding boxes around each person with their IDs
73 | - draws joint and segment angles on the body, and writes the values either near the joint/segment, or on the upper-left of the image with a progress bar
74 | - draws the skeleton and the keypoints, with a green to red color scale to account for their confidence
75 | - optionally show processed images, saves them, or saves them as a video
76 | - interpolates missing pose and angle sequences if gaps are not too large
77 | - filters them with the selected filter and parameters
78 | - optionally plots pose and angle data before and after processing for comparison
79 | - optionally saves poses for each person as a trc file, and angles as a mot file
80 |
81 | -----
82 | Angle conventions
83 | -----
84 | Joint angles:
85 | - Ankle dorsiflexion: Between heel and big toe, and ankle and knee + 90°
86 | - Knee flexion: Between hip, knee, and ankle
87 | - Hip flexion: Between knee, hip, and shoulder
88 | - Shoulder flexion: Between hip, shoulder, and elbow
89 | - Elbow flexion: Between wrist, elbow, and shoulder
90 |
91 | Segment angles:
92 | Angles are measured anticlockwise between the horizontal and the segment.
93 | - Foot: Between heel and big toe
94 | - Shank: Between ankle and knee
95 | - Thigh: Between hip and knee
96 | - Pelvis: Between right and left hip
97 | - Trunk: Between hip midpoint and neck
98 | - Shoulders: Between right and left shoulder
99 | - Arm: Between shoulder and elbow
100 | - Forearm: Between elbow and wrist
101 |
102 | -----
103 | To-do list
104 | -----
105 | - GUI applications for all platforms
106 | - Constrain points to OpenSim skeletal model for better angle estimation (cf Pose2Sim but in 2D https://github.com/perfanalytics/pose2sim)
107 | - Pose refinement: click and move badly estimated 2D points (cf DeepLabCut: https://www.youtube.com/watch?v=bEuBKB7eqmk)
108 | '''
109 |
110 |
111 | ## INIT
112 | from importlib.metadata import version
113 | import argparse
114 | import toml
115 | from datetime import datetime
116 | from pathlib import Path
117 | import logging, logging.handlers
118 | import cv2
119 | import numpy as np
120 |
121 | from Sports2D import Sports2D
122 |
123 |
124 | ## CONSTANTS
125 | DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
126 | 'nb_persons_to_detect': 'all',
127 | 'person_ordering_method': 'on_click',
128 | 'first_person_height': 1.65,
129 | 'visible_side': ['auto', 'front', 'none'],
130 | 'load_trc_px': '',
131 | 'compare': False,
132 | 'time_range': [],
133 | 'video_dir': '',
134 | 'webcam_id': 0,
135 | 'input_size': [1280, 720],
136 | 'show_realtime_results': True,
137 | 'save_vid': True,
138 | 'save_img': True,
139 | 'save_pose': True,
140 | 'calculate_angles': True,
141 | 'save_angles': True,
142 | 'result_dir': ''
143 | },
144 | 'pose': {'slowmo_factor': 1,
145 | 'pose_model': 'body_with_feet',
146 | 'mode': 'balanced',
147 | 'det_frequency': 4,
148 | 'device': 'auto',
149 | 'backend': 'auto',
150 | 'tracking_mode': 'sports2d',
151 | 'deepsort_params': """{'max_age':30, 'n_init':3, 'nms_max_overlap':0.8, 'max_cosine_distance':0.3, 'nn_budget':200, 'max_iou_distance':0.8, 'embedder_gpu': True, 'embedder':'torchreid'}""",
152 | 'keypoint_likelihood_threshold': 0.3,
153 | 'average_likelihood_threshold': 0.5,
154 | 'keypoint_number_threshold': 0.3,
155 | 'CUSTOM': { 'name': 'Hip',
156 | 'id': 19,
157 | 'children': [{'name': 'RHip',
158 | 'id': 12,
159 | 'children': [{'name': 'RKnee',
160 | 'id': 14,
161 | 'children': [{'name': 'RAnkle',
162 | 'id': 16,
163 | 'children': [{'name': 'RBigToe',
164 | 'id': 21,
165 | 'children': [{'name': 'RSmallToe', 'id': 23}]},
166 | {'name': 'RHeel', 'id': 25}]}]}]},
167 | {'name': 'LHip',
168 | 'id': 11,
169 | 'children': [{'name': 'LKnee',
170 | 'id': 13,
171 | 'children': [{'name': 'LAnkle',
172 | 'id': 15,
173 | 'children': [{'name': 'LBigToe',
174 | 'id': 20,
175 | 'children': [{'name': 'LSmallToe', 'id': 22}]},
176 | {'name': 'LHeel', 'id': 24}]}]}]},
177 | {'name': 'Neck',
178 | 'id': 18,
179 | 'children': [{'name': 'Head',
180 | 'id': 17,
181 | 'children': [{'name': 'Nose', 'id': 0}]},
182 | {'name': 'RShoulder',
183 | 'id': 6,
184 | 'children': [{'name': 'RElbow',
185 | 'id': 8,
186 | 'children': [{'name': 'RWrist', 'id': 10}]}]},
187 | {'name': 'LShoulder',
188 | 'id': 5,
189 | 'children': [{'name': 'LElbow',
190 | 'id': 7,
191 | 'children': [{'name': 'LWrist', 'id': 9}]}]}]}]}
192 | },
193 | 'px_to_meters_conversion': {
194 | 'to_meters': True,
195 | 'make_c3d': True,
196 | 'calib_file': '',
197 | 'floor_angle': 'auto',
198 | 'xy_origin': ['auto'],
199 | 'save_calib': True
200 | },
201 | 'angles': {'display_angle_values_on': ['body', 'list'],
202 | 'fontSize': 0.3,
203 | 'joint_angles': [ 'Right ankle',
204 | 'Left ankle',
205 | 'Right knee',
206 | 'Left knee',
207 | 'Right hip',
208 | 'Left hip',
209 | 'Right shoulder',
210 | 'Left shoulder',
211 | 'Right elbow',
212 | 'Left elbow',
213 | 'Right wrist',
214 | 'Left wrist'],
215 | 'segment_angles': [ 'Right foot',
216 | 'Left foot',
217 | 'Right shank',
218 | 'Left shank',
219 | 'Right thigh',
220 | 'Left thigh',
221 | 'Pelvis',
222 | 'Trunk',
223 | 'Shoulders',
224 | 'Head',
225 | 'Right arm',
226 | 'Left arm',
227 | 'Right forearm',
228 | 'Left forearm'],
229 | 'flip_left_right': True,
230 | 'correct_segment_angles_with_floor_angle': True
231 | },
232 | 'post-processing': {'interpolate': True,
233 | 'interp_gap_smaller_than': 10,
234 | 'fill_large_gaps_with': 'last_value',
235 | 'filter': True,
236 | 'show_graphs': True,
237 | 'filter_type': 'butterworth',
238 | 'butterworth': {'order': 4, 'cut_off_frequency': 6},
239 | 'gaussian': {'sigma_kernel': 1},
240 | 'loess': {'nb_values_used': 5},
241 | 'median': {'kernel_size': 3}
242 | },
243 | 'kinematics':{'do_ik': False,
244 | 'use_augmentation': False,
245 | 'feet_on_floor': False,
246 | 'use_contacts_muscles': True,
247 | 'participant_mass': [55.0, 67.0],
248 | 'right_left_symmetry': True,
249 | 'default_height': 1.70,
250 | 'remove_individual_scaling_setup': True,
251 | 'remove_individual_ik_setup': True,
252 | 'fastest_frames_to_remove_percent': 0.1,
253 | 'close_to_zero_speed_px': 50,
254 | 'close_to_zero_speed_m': 0.2,
255 | 'large_hip_knee_angles': 45,
256 | 'trimmed_extrema_percent': 0.5,
257 | 'osim_setup_path': '../OpenSim_setup'
258 | },
259 | 'logging': {'use_custom_logging': False}
260 | }
261 |
262 | CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
263 | 'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
264 | 'nb_persons_to_detect': ["n", "number of persons to detect. int or 'all'. 'all' if not specified"],
265 | 'person_ordering_method': ["", "'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. 'on_click' if not specified"],
266 | 'first_person_height': ["H", "height of the reference person in meters. 1.65 if not specified. Not used if a calibration file is provided"],
267 | 'visible_side': ["", "front, back, left, right, auto, or none. 'auto front none' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
268 |                'load_trc_px': ["", "load trc file to avoid running pose estimation again. false if not specified"],
269 | 'compare': ["", "visually compare motion with trc file. false if not specified"],
270 | 'webcam_id': ["w", "webcam ID. 0 if not specified"],
271 | 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
272 | 'video_dir': ["d", "current directory if not specified"],
273 | 'result_dir': ["r", "current directory if not specified"],
274 | 'show_realtime_results': ["R", "show results in real-time. true if not specified"],
275 | 'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
276 | 'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
277 |                'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" "Right wrist" "Left wrist" if not specified'],
278 | 'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
279 | 'save_vid': ["V", "save processed video. true if not specified"],
280 | 'save_img': ["I", "save processed images. true if not specified"],
281 | 'save_pose': ["P", "save pose as trc files. true if not specified"],
282 | 'calculate_angles': ["c", "calculate joint and segment angles. true if not specified"],
283 | 'save_angles': ["A", "save angles as mot files. true if not specified"],
284 | 'slowmo_factor': ["", "slow-motion factor. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8. 1 if not specified"],
285 | 'pose_model': ["p", "body_with_feet, whole_body_wrist, whole_body, or body. body_with_feet if not specified"],
286 |                'mode': ["m", 'lightweight, balanced, performance, or a """{dictionary within triple quotes}""". balanced if not specified. Use a dictionary to specify your own detection and/or pose estimation models (more about this in the documentation).'],
287 |                'det_frequency': ["f", "run person detection only every N frames, and in between track previously detected bounding boxes. keypoint detection is still run on all frames.\n\
288 | Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. 1 if not specified: detection runs on all frames"],
289 |                'backend': ["", "Backend for pose estimation. Can be 'auto', 'openvino', 'onnxruntime', or 'opencv'"],
290 |                'device': ["", "Device for pose estimation. Can be 'auto', 'cpu', 'cuda', 'mps' (for MacOS), or 'rocm' (for AMD GPUs)"],
291 | 'to_meters': ["M", "convert pixels to meters. true if not specified"],
292 | 'make_c3d': ["", "Convert trc to c3d file. true if not specified"],
293 | 'floor_angle': ["", "angle of the floor (degrees). 'auto' if not specified"],
294 | 'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
295 |                'calib_file': ["", "path to calibration file. '' if not specified, i.e., no calibration file"],
296 | 'save_calib': ["", "save calibration file. true if not specified"],
297 | 'do_ik': ["", "do inverse kinematics. false if not specified"],
298 | 'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
299 |                'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. false if not specified"],
300 |                'use_contacts_muscles': ["", "Use model with contact spheres and muscles. true if not specified"],
301 | 'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
303 | 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
304 | 'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
305 | Default: max_age:30, n_init:3, nms_max_overlap:0.8, max_cosine_distance:0.3, nn_budget:200, max_iou_distance:0.8, embedder_gpu: True\n\
306 |                More information here: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],
307 | 'input_size': ["", "width, height. 1280, 720 if not specified. Lower resolution will be faster but less precise"],
308 | 'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
309 | 'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
310 |                'keypoint_number_threshold': ["", "detected persons are not retained if their number of detected keypoints is below this threshold. 0.3 if not specified, i.e., 30 percent"],
315 | 'fontSize': ["", "font size for angle values. 0.3 if not specified"],
316 |                'flip_left_right': ["", "true or false. Flips angles when the person faces the other side. The person looks to the right if their toe keypoint is to the right of their heel. Set it to false if the person is sprinting or if you want timeseries to be continuous even when the participant switches their stance. true if not specified"],
317 |                'correct_segment_angles_with_floor_angle': ["", "true or false. If the camera is tilted, corrects segment angles with respect to the floor angle. Set to false if the floor is tilted instead. true if not specified"],
318 | 'interpolate': ["", "interpolate missing data. true if not specified"],
319 | 'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
320 | 'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
321 | 'filter': ["", "filter results. true if not specified"],
322 | 'filter_type': ["", "butterworth, gaussian, median, or loess. butterworth if not specified"],
323 | 'order': ["", "order of the Butterworth filter. 4 if not specified"],
324 |                'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 6 if not specified"],
325 | 'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
326 | 'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
327 | 'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
328 | 'osim_setup_path': ["", "path to OpenSim setup. '../OpenSim_setup' if not specified"],
329 | 'right_left_symmetry': ["", "right left symmetry. true if not specified"],
330 | 'default_height': ["", "default height for scaling. 1.70 if not specified"],
331 | 'remove_individual_scaling_setup': ["", "remove individual scaling setup files generated during scaling. true if not specified"],
332 | 'remove_individual_ik_setup': ["", "remove individual IK setup files generated during IK. true if not specified"],
333 | 'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
334 | 'close_to_zero_speed_m': ["","Sum for all keypoints: about 0.2 m/frame. Defaults to 0.2"],
335 | 'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame. Defaults to 50"],
336 | 'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise and ignored. Defaults to 45"],
337 |                'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 0.5, i.e., 50 percent"],
338 | 'use_custom_logging': ["", "use custom logging. false if not specified"]
339 | }
340 |
341 |
342 | ## AUTHORSHIP INFORMATION
343 | __author__ = "David Pagnon"
344 | __copyright__ = "Copyright 2023, Sports2D"
345 | __credits__ = ["David Pagnon"]
346 | __license__ = "BSD 3-Clause License"
347 | __version__ = version("sports2d")
348 | __maintainer__ = "David Pagnon"
349 | __email__ = "contact@david-pagnon.com"
350 | __status__ = "Development"
351 |
352 |
353 | ## FUNCTIONS
354 | def read_config_file(config):
355 | '''
356 |     Read configuration file.
357 | '''
358 |
359 | config_dict = toml.load(config)
360 | return config_dict
361 |
362 |
363 | def base_params(config_dict):
364 | '''
365 | Retrieve sequence name and frames to be analyzed.
366 | '''
367 |
368 | # video_dir and result_dir
369 | video_dir = config_dict.get('base').get('video_dir')
370 | if video_dir == '': video_dir = Path.cwd()
371 | else: video_dir = Path(video_dir).resolve()
372 |
373 | result_dir = config_dict.get('base').get('result_dir')
374 | if result_dir == '': result_dir = Path.cwd()
375 | else: result_dir = Path(result_dir).resolve()
376 |
377 | # video_files, frame_rates, time_ranges
378 | video_input = config_dict.get('base').get('video_input')
379 | if video_input == "webcam" or video_input == ["webcam"]:
380 | video_files = ['webcam'] # No video files for webcam
381 | frame_rates = [None] # No frame rate for webcam
382 | time_ranges = [None]
383 | else:
384 | # video_files
385 | if isinstance(video_input, str):
386 | video_files = [Path(video_input)]
387 | else:
388 | video_files = [Path(v) for v in video_input]
389 |
390 | # frame_rates
391 | frame_rates = []
392 | for video_file in video_files:
393 | video = cv2.VideoCapture(str(video_dir / video_file)) if video_dir else cv2.VideoCapture(str(video_file))
394 | if not video.isOpened():
395 | raise FileNotFoundError(f'Error: Could not open {video_dir/video_file}. Check that the file exists.')
396 | frame_rate = round(video.get(cv2.CAP_PROP_FPS))
397 | if frame_rate == 0:
398 | frame_rate = 30
399 | logging.warning(f'Error: Could not retrieve frame rate from {video_dir/video_file}. Defaulting to 30fps.')
400 | frame_rates.append(frame_rate)
401 | video.release()
402 |
403 | # time_ranges
404 | time_ranges = np.array(config_dict.get('base').get('time_range'))
405 | # No time range provided
406 | if time_ranges.shape == (0,):
407 | time_ranges = [None] * len(video_files)
408 | # Same time range for all videos
409 | elif time_ranges.shape == (2,):
410 | time_ranges = [time_ranges.tolist()] * len(video_files)
411 | # Different time ranges for each video in Config file
412 | elif time_ranges.shape == (len(video_files), 2):
413 | time_ranges = time_ranges.tolist()
414 | # Different time ranges for each video in cli arguments
415 | elif time_ranges.shape == (len(video_files)*2,):
416 | time_ranges = time_ranges.reshape(-1,2).tolist()
417 | else:
418 | raise ValueError('time_range must be [] for analysing all frames of all videos, or [start_time, end_time] for analysing all videos from start_time to end_time, or [[start_time1, end_time1], [start_time2, end_time2], ...] for analysing each video for a different time_range.')
419 |
420 | return video_dir, video_files, frame_rates, time_ranges, result_dir
421 |
422 |
423 | def get_leaf_keys(config, prefix=''):
424 | '''
425 | Flatten configuration to map leaf keys to their full path
426 | '''
427 |
428 | leaf_keys = {}
429 | for key, value in config.items():
430 | if isinstance(value, dict):
431 | leaf_keys.update(get_leaf_keys(value, prefix=prefix + key + '.'))
432 | else:
433 | leaf_keys[prefix + key] = value
434 | return leaf_keys
435 |
436 |
437 | def update_nested_dict(config, key_path, value):
438 | '''
439 | Update a nested dictionary based on a key path string like 'base.nb_persons_to_detect'.
440 | '''
441 |
442 | keys = key_path.split('.')
443 | d = config
444 | for key in keys[:-1]:
445 | d = d[key]
446 | d[keys[-1]] = value
447 |
448 |
449 | def set_nested_value(config, flat_key, value):
450 | '''
451 | Update the nested dictionary based on flattened keys
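    Unlike update_nested_dict(), missing intermediate dictionaries are created on the fly (via setdefault).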
452 | '''
453 |
454 | keys = flat_key.split('.')
455 | d = config
456 | for key in keys[:-1]:
457 | d = d.setdefault(key, {})
458 | d[keys[-1]] = value
459 |
460 |
461 | def str2bool(v):
462 | '''
463 | Convert a string to a boolean value.
464 | '''
465 |
466 | if isinstance(v, bool):
467 | return v
468 | if v.lower() in ('yes', 'true', 't', 'y', '1'):
469 | return True
470 | elif v.lower() in ('no', 'false', 'f', 'n', '0'):
471 | return False
472 | else:
473 | raise argparse.ArgumentTypeError('Boolean value expected.')
474 |
475 |
476 | def process(config='Config_demo.toml'):
477 | '''
478 | Read video or webcam input
479 | Compute 2D pose with RTMPose
480 | Compute joint and segment angles
481 | Optionally interpolate missing data, filter them, and display figures
482 |     Save image and video results, save pose as trc files, save angles as mot files
483 | '''
484 |
485 | from Sports2D.process import process_fun
486 |
487 | if type(config) == dict:
488 | config_dict = config
489 | else:
490 | config_dict = read_config_file(config)
491 | video_dir, video_files, frame_rates, time_ranges, result_dir = base_params(config_dict)
492 | use_custom_logging = config_dict.get('logging').get('use_custom_logging')
493 |
494 | result_dir.mkdir(parents=True, exist_ok=True)
495 | if not use_custom_logging:
496 | with open(result_dir / 'logs.txt', 'a+') as log_f: pass
497 | logging.basicConfig(format='%(message)s', level=logging.INFO, force=True,
498 | handlers = [logging.handlers.TimedRotatingFileHandler(result_dir / 'logs.txt', when='D', interval=7), logging.StreamHandler()])
499 |
500 | for video_file, time_range, frame_rate in zip(video_files, time_ranges, frame_rates):
501 | currentDateAndTime = datetime.now()
502 | time_range_str = f' from {time_range[0]} to {time_range[1]} seconds' if time_range else ''
503 |
504 | logging.info("\n\n---------------------------------------------------------------------")
505 | logging.info(f"Processing {video_file}{time_range_str}")
506 | logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
507 | logging.info("---------------------------------------------------------------------")
508 |
509 | process_fun(config_dict, video_file, time_range, frame_rate, result_dir)
510 |
511 | elapsed_time = (datetime.now() - currentDateAndTime).total_seconds()
512 | logging.info(f'\nProcessing {video_file} took {elapsed_time:.2f} s.')
513 |
514 | logging.shutdown()
515 |
516 |
517 | def main():
518 | '''
519 | Use sports2d to compute your athlete's pose, joint, and segment angles
520 |
521 | Help:
522 | See https://github.com/davidpagnon/Sports2D
523 | Or run: sports2d --help
524 | Or check: https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml
525 |
526 | Usage:
527 | - Run on Demo video with default parameters:
528 | sports2d
529 | - Run on custom video with default parameters:
530 | sports2d --video_input path_to_video.mp4
531 | - Run on multiple videos with default parameters:
532 | sports2d --video_input path_to_video1.mp4 path_to_video2.mp4
533 | - Run on webcam with default parameters:
534 | sports2d --video_input webcam
535 | - Run with custom parameters (all non specified are set to default):
536 |         sports2d --show_graphs false --time_range 0 2.1 --result_dir path_to_result_dir
537 | sports2d --mode lightweight --det_frequency 50
538 | - Run with a toml configuration file:
539 | sports2d --config path_to_config.toml
540 | '''
541 |
542 | # Dynamically add arguments for each leaf key in the DEFAULT_CONFIG
543 | parser = argparse.ArgumentParser(description="Use sports2d to compute your athlete's pose, joint, and segment angles. See https://github.com/davidpagnon/Sports2D")
544 | parser.add_argument('-C', '--config', type=str, required=False, help='Path to a toml configuration file')
545 |
546 | leaf_keys = get_leaf_keys(DEFAULT_CONFIG)
547 | leaf_keys = {k.split('.')[-1]:v for k,v in leaf_keys.items()}
548 | for leaf_name in list(CONFIG_HELP.keys())[1:]:
549 | short_key = CONFIG_HELP[leaf_name][0]
550 | arg_str = [f'-{short_key}', f'--{leaf_name}'] if short_key else [f'--{leaf_name}']
551 | # Arg is bool
552 | if type(leaf_keys[leaf_name]) == bool:
553 | parser.add_argument(*arg_str, type=str2bool, help=CONFIG_HELP[leaf_name][1])
554 | # Arg is list of floats or others
555 |         elif isinstance(leaf_keys[leaf_name], list):
556 |             if len(leaf_keys[leaf_name]) == 0:
557 | list_type = float # time_range=[] for example
558 | else:
559 | list_type = type(leaf_keys[leaf_name][0])
560 | parser.add_argument(*arg_str, type=list_type, nargs='*', help=CONFIG_HELP[leaf_name][1])
561 | # Arg is int, float, str
562 | else:
563 | parser.add_argument(*arg_str, type=type(leaf_keys[leaf_name]), help=CONFIG_HELP[leaf_name][1])
564 | args = parser.parse_args()
565 |
566 |     # If a config.toml file is provided, load it; otherwise use the default config
567 | if args.config:
568 | new_config = toml.load(args.config)
569 | else:
570 | new_config = DEFAULT_CONFIG.copy()
571 | if not args.video_input:
572 | new_config.get('base').update({'video_dir': Path(__file__).resolve().parent / 'Demo'})
573 |
574 | # Override dictionary with command-line arguments if provided
575 | leaf_keys = get_leaf_keys(new_config)
576 | for leaf_key, default_value in leaf_keys.items():
577 |         if 'CUSTOM' not in leaf_key:
578 | leaf_name = leaf_key.split('.')[-1]
579 | cli_value = getattr(args, leaf_name)
580 | if cli_value is not None:
581 | set_nested_value(new_config, leaf_key, cli_value)
582 |
583 | # Run process with the new configuration dictionary
584 | Sports2D.process(new_config)
585 |
586 |
587 | if __name__ == "__main__":
588 | main()
589 |
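590 | # --- Programmatic-use sketch (illustrative only) ---------------------------
591 | # Mirrors Sports2D/Utilities/tests.py: build a config dict from the demo toml,
592 | # tweak a few leaf keys, then call Sports2D.process(). The paths below target
593 | # the bundled demo; any dict with the structure of Config_demo.toml works.
594 | #
595 | #   import toml
596 | #   from pathlib import Path
597 | #   from Sports2D import Sports2D
598 | #
599 | #   config_path = Path(Sports2D.__file__).resolve().parent / 'Demo' / 'Config_demo.toml'
600 | #   config_dict = toml.load(config_path)
601 | #   config_dict['base'].update({'video_dir': str(config_path.parent), 'show_realtime_results': False})
602 | #   config_dict['post-processing'].update({'show_graphs': False})
603 | #   Sports2D.process(config_dict)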
--------------------------------------------------------------------------------
/Sports2D/Utilities/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import sys
5 | from importlib.metadata import version
6 |
7 | __version__ = version("sports2d")
8 | VERSION = __version__
--------------------------------------------------------------------------------
/Sports2D/Utilities/common.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 |
5 | '''
6 | ##################################################
7 | ## Common classes and functions ##
8 | ##################################################
9 |
10 | - Constants: joint and segment angle definitions, marker depth (Z) positions, display colors.
11 | - Functions for converting defaultdicts to dicts, padding ragged arrays with nans,
12 | reading video start times and resampling videos with ffmpeg, and writing calibration files.
13 |
14 | '''
15 |
16 |
17 | ## INIT
18 | from importlib.metadata import version
19 | import subprocess
20 | from pathlib import Path
21 | import logging
22 | from collections import defaultdict
23 | import numpy as np
24 | import imageio_ffmpeg as ffmpeg
25 |
26 |
27 | ## AUTHORSHIP INFORMATION
28 | __author__ = "David Pagnon"
29 | __copyright__ = "Copyright 2023, Sports2D"
30 | __credits__ = ["David Pagnon"]
31 | __license__ = "BSD 3-Clause License"
32 | __version__ = version("sports2d")
33 | __maintainer__ = "David Pagnon"
34 | __email__ = "contact@david-pagnon.com"
35 | __status__ = "Development"
36 |
37 |
38 | ## CONSTANTS
39 | angle_dict = { # lowercase! Each entry: [keypoint list, angle type, offset (deg), sign]
40 | # joint angles
41 | 'right ankle': [['RKnee', 'RAnkle', 'RBigToe', 'RHeel'], 'dorsiflexion', 90, 1],
42 | 'left ankle': [['LKnee', 'LAnkle', 'LBigToe', 'LHeel'], 'dorsiflexion', 90, 1],
43 | 'right knee': [['RAnkle', 'RKnee', 'RHip'], 'flexion', -180, 1],
44 | 'left knee': [['LAnkle', 'LKnee', 'LHip'], 'flexion', -180, 1],
45 | 'right hip': [['RKnee', 'RHip', 'Hip', 'Neck'], 'flexion', 0, -1],
46 | 'left hip': [['LKnee', 'LHip', 'Hip', 'Neck'], 'flexion', 0, -1],
47 | # 'lumbar': [['Neck', 'Hip', 'RHip', 'LHip'], 'flexion', -180, -1],
48 | # 'neck': [['Head', 'Neck', 'RShoulder', 'LShoulder'], 'flexion', -180, -1],
49 | 'right shoulder': [['RElbow', 'RShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
50 | 'left shoulder': [['LElbow', 'LShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
51 | 'right elbow': [['RWrist', 'RElbow', 'RShoulder'], 'flexion', 180, -1],
52 | 'left elbow': [['LWrist', 'LElbow', 'LShoulder'], 'flexion', 180, -1],
53 | 'right wrist': [['RElbow', 'RWrist', 'RIndex'], 'flexion', -180, 1],
54 | 'left wrist': [['LElbow', 'LWrist', 'LIndex'], 'flexion', -180, 1],
55 |
56 | # segment angles
57 | 'right foot': [['RBigToe', 'RHeel'], 'horizontal', 0, -1],
58 | 'left foot': [['LBigToe', 'LHeel'], 'horizontal', 0, -1],
59 | 'right shank': [['RAnkle', 'RKnee'], 'horizontal', 0, -1],
60 | 'left shank': [['LAnkle', 'LKnee'], 'horizontal', 0, -1],
61 | 'right thigh': [['RKnee', 'RHip'], 'horizontal', 0, -1],
62 | 'left thigh': [['LKnee', 'LHip'], 'horizontal', 0, -1],
63 | 'pelvis': [['LHip', 'RHip'], 'horizontal', 0, -1],
64 | 'trunk': [['Neck', 'Hip'], 'horizontal', 0, -1],
65 | 'shoulders': [['LShoulder', 'RShoulder'], 'horizontal', 0, -1],
66 | 'head': [['Head', 'Neck'], 'horizontal', 0, -1],
67 | 'right arm': [['RElbow', 'RShoulder'], 'horizontal', 0, -1],
68 | 'left arm': [['LElbow', 'LShoulder'], 'horizontal', 0, -1],
69 | 'right forearm': [['RWrist', 'RElbow'], 'horizontal', 0, -1],
70 | 'left forearm': [['LWrist', 'LElbow'], 'horizontal', 0, -1],
71 | 'right hand': [['RIndex', 'RWrist'], 'horizontal', 0, -1],
72 | 'left hand': [['LIndex', 'LWrist'], 'horizontal', 0, -1]
73 | }
74 |
75 | marker_Z_positions = {'right':
76 | {"RHip": 0.105, "RKnee": 0.0886, "RAnkle": 0.0972, "RBigToe":0.0766, "RHeel":0.0883, "RSmallToe": 0.1200,
77 | "RShoulder": 0.2016, "RElbow": 0.1613, "RWrist": 0.120, "RThumb": 0.1625, "RIndex": 0.1735, "RPinky": 0.1740, "REye": 0.0311,
78 | "LHip": -0.105, "LKnee": -0.0886, "LAnkle": -0.0972, "LBigToe": -0.0766, "LHeel": -0.0883, "LSmallToe": -0.1200,
79 | "LShoulder": -0.2016, "LElbow": -0.1613, "LWrist": -0.120, "LThumb": -0.1625, "LIndex": -0.1735, "LPinky": -0.1740, "LEye": -0.0311,
80 | "Hip": 0.0, "Neck": 0.0, "Head":0.0, "Nose": 0.0},
81 | 'left':
82 | {"RHip": -0.105, "RKnee": -0.0886, "RAnkle": -0.0972, "RBigToe": -0.0766, "RHeel": -0.0883, "RSmallToe": -0.1200,
83 | "RShoulder": -0.2016, "RElbow": -0.1613, "RWrist": -0.120, "RThumb": -0.1625, "RIndex": -0.1735, "RPinky": -0.1740, "REye": -0.0311,
84 | "LHip": 0.105, "LKnee": 0.0886, "LAnkle": 0.0972, "LBigToe":0.0766, "LHeel":0.0883, "LSmallToe": 0.1200,
85 | "LShoulder": 0.2016, "LElbow": 0.1613, "LWrist": 0.120, "LThumb": 0.1625, "LIndex": 0.1735, "LPinky": 0.1740, "LEye": 0.0311,
86 | "Hip": 0.0, "Neck": 0.0, "Head":0.0, "Nose": 0.0},
87 | 'front': # original knee:0.0179
88 | {"RHip": 0.0301, "RKnee": 0.129, "RAnkle": 0.0230, "RBigToe": 0.2179, "RHeel": -0.0119, "RSmallToe": 0.1804,
89 | "RShoulder": -0.01275, "RElbow": 0.0702, "RWrist": 0.1076, "RThumb": 0.0106, "RIndex": -0.0004, "RPinky": -0.0009, "REye": 0.0702,
90 | "LHip": 0.0301, "LKnee": 0.129, "LAnkle": 0.0230, "LBigToe": 0.2179, "LHeel": -0.0119, "LSmallToe": 0.1804,
91 | "LShoulder": -0.01275, "LElbow": 0.0702, "LWrist": 0.1076, "LThumb": 0.0106, "LIndex": -0.0004, "LPinky": -0.0009, "LEye": 0.0702,
92 | "Hip": 0.0301, "Neck": 0.0008, "Head": 0.0655, "Nose": 0.1076},
93 | 'back':
94 | {"RHip": -0.0301, "RKnee": -0.129, "RAnkle": -0.0230, "RBigToe": -0.2179, "RHeel": 0.0119, "RSmallToe": -0.1804,
95 |      "RShoulder": 0.01275, "RElbow": -0.0702, "RWrist": -0.1076, "RThumb": -0.0106, "RIndex": 0.0004, "RPinky": 0.0009, "REye": -0.0702,
96 | "LHip": -0.0301, "LKnee": -0.129, "LAnkle": -0.0230, "LBigToe": -0.2179, "LHeel": 0.0119, "LSmallToe": -0.1804,
97 |      "LShoulder": 0.01275, "LElbow": -0.0702, "LWrist": -0.1076, "LThumb": -0.0106, "LIndex": 0.0004, "LPinky": 0.0009, "LEye": -0.0702,
98 | "Hip": -0.0301, "Neck": -0.0008, "Head": -0.0655, "Nose": -0.1076},
99 | }
100 |
101 | colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 0, 0), (255, 255, 255),
102 | (125, 0, 0), (0, 125, 0), (0, 0, 125), (125, 125, 0), (125, 0, 125), (0, 125, 125),
103 | (255, 125, 125), (125, 255, 125), (125, 125, 255), (255, 255, 125), (255, 125, 255), (125, 255, 255), (125, 125, 125),
104 | (255, 0, 125), (255, 125, 0), (0, 125, 255), (0, 255, 125), (125, 0, 255), (125, 255, 0), (0, 255, 0)]
105 | thickness = 1
106 |
107 | ## FUNCTIONS
108 | def to_dict(d):
109 | '''
110 | Convert a defaultdict to a dict.
111 | '''
112 | if isinstance(d, defaultdict):
113 | return {k: to_dict(v) for k, v in d.items()}
114 | return d
115 |
116 |
117 | def make_homogeneous(list_of_arrays):
118 | '''
119 | Make a list of arrays (or a list of lists) homogeneous by padding with nans
120 |
121 | Example: foo = [[array([nan, 656.02643776]), array([nan, nan])],
122 | [array([1, 2, 3]), array([1, 2])]]
123 | becomes foo_updated = array([[[nan, 656.02643776, nan], [nan, nan, nan]],
124 | [[1., 2., 3.], [1., 2., nan]]])
125 | Or foo = [[1, 2, 3], [1, 2], [3, 4, 5]]
126 | becomes foo_updated = array([[1., 2., 3.], [1., 2., nan], [3., 4., 5.]])
127 |
128 | INPUTS:
129 | - list_of_arrays: list of arrays or list of lists
130 |
131 | OUTPUT:
132 | - np.array(list_of_arrays): numpy array of padded arrays
133 | '''
134 |
135 | def get_max_shape(list_of_arrays):
136 | '''
137 | Recursively determine the maximum shape of a list of arrays.
138 | '''
139 | if isinstance(list_of_arrays[0], list):
140 | # Maximum length at the current level plus the max shape at the next level
141 | return [max(len(arr) for arr in list_of_arrays)] + get_max_shape(
142 | [item for sublist in list_of_arrays for item in sublist])
143 | else:
144 | # Determine the maximum shape across all list_of_arrays at this level
145 | return [len(list_of_arrays)] + [max(arr.shape[i] for arr in list_of_arrays if arr.size > 0) for i in range(list_of_arrays[0].ndim)]
146 |
147 | def pad_with_nans(list_of_arrays, target_shape):
148 | '''
149 | Recursively pad list_of_arrays with nans to match the target shape.
150 | '''
151 | if isinstance(list_of_arrays, np.ndarray):
152 | # Pad the current array to the target shape
153 | pad_width = []
154 | for dim_index in range(0, len(target_shape)):
155 |             if dim_index >= len(list_of_arrays.shape):
156 | list_of_arrays = np.expand_dims(list_of_arrays, 0)
157 | for dim_index in range(0, len(target_shape)):
158 | max_dim = target_shape[dim_index]
159 | curr_dim = list_of_arrays.shape[dim_index]
160 | pad_width.append((0, max_dim - curr_dim))
161 | return np.pad(list_of_arrays.astype(float), pad_width, constant_values=np.nan)
162 | # Recursively pad each array in the list
163 | return [pad_with_nans(array, target_shape[1:]) for array in list_of_arrays]
164 |
165 | # Pad all missing dimensions of arrays with nans
166 | list_of_arrays = [np.array(arr, dtype=float) if not isinstance(arr, np.ndarray) else arr for arr in list_of_arrays]
167 | max_shape = get_max_shape(list_of_arrays)
168 | list_of_arrays = pad_with_nans(list_of_arrays, max_shape)
169 |
170 | return np.array(list_of_arrays)
171 |
172 |
173 | def get_start_time_ffmpeg(video_path):
174 | '''
175 | Get the start time of a video using FFmpeg.
176 | '''
177 |
178 | try:
179 | ffmpeg_path = ffmpeg.get_ffmpeg_exe()
180 | except Exception as e:
181 | logging.warning(f"No ffmpeg exe could be found. Starting time set to 0.0. Error: {e}")
182 | return 0.0
183 |
184 | cmd = [ffmpeg_path, "-i", video_path]
185 | result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL, text=True)
186 | for line in result.stderr.splitlines():
187 | if "start:" in line:
188 | parts = line.split("start:")
189 | if len(parts) > 1:
190 | start_time = parts[1].split(",")[0].strip()
191 | return float(start_time)
192 | return 0.0 # Default to 0 if not found
193 |
194 |
195 | def resample_video(vid_output_path, fps, desired_framerate):
196 | '''
197 | Resample video to the desired fps using ffmpeg.
198 | '''
199 |
200 | ffmpeg_path = ffmpeg.get_ffmpeg_exe()
201 | new_vid_path = vid_output_path.parent / Path(vid_output_path.stem+'_2'+vid_output_path.suffix)
202 | subprocess.run([ffmpeg_path, '-i', vid_output_path, '-filter:v', f'setpts={fps/desired_framerate}*PTS', '-r', str(desired_framerate), new_vid_path])
203 | vid_output_path.unlink()
204 | new_vid_path.rename(vid_output_path)
205 |
206 |
207 | def write_calibration(calib_params, toml_path):
208 | '''
209 | Write calibration file from calibration parameters
210 | '''
211 |
212 | S, D, N, K, R, T, P = calib_params
213 | with open(toml_path, 'w+') as cal_f:
214 | for c in range(len(S)):
215 | cam_str = f'[{N[c]}]\n'
216 | name_str = f'name = "{N[c]}"\n'
217 | size_str = f'size = {S[c]} \n'
218 | mat_str = f'matrix = {K[c]} \n'
219 | dist_str = f'distortions = {D[c]} \n'
220 | rot_str = f'rotation = {R[c]} \n'
221 | tran_str = f'translation = {T[c]} \n'
222 |             fish_str = 'fisheye = false\n\n'
223 | cal_f.write(cam_str + name_str + size_str + mat_str + dist_str + rot_str + tran_str + fish_str)
224 | meta = '[metadata]\nadjusted = false\nerror = 0.0\n'
225 | cal_f.write(meta)
226 |
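227 |
228 | if __name__ == '__main__':
229 |     # Quick smoke test for the padding helper (illustrative only; it simply
230 |     # re-runs the docstring example of make_homogeneous).
231 |     foo = [[1, 2, 3], [1, 2], [3, 4, 5]]
232 |     print(make_homogeneous(foo))
233 |     # -> [[ 1.  2.  3.]
234 |     #     [ 1.  2. nan]
235 |     #     [ 3.  4.  5.]]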
--------------------------------------------------------------------------------
/Sports2D/Utilities/filter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 |
5 | '''
6 | ##################################################
7 | ## Filter TRC files ##
8 | ##################################################
9 |
10 | Filters pandas columns or numpy arrays.
11 | Available filters: Butterworth, Gaussian, LOESS, Median.
12 |
13 | Usage:
14 | col_filtered = filter1d(col, *filter_options)
15 | filter_options = (do_filter, filter_type, butterworth_filter_order, butterworth_filter_cutoff, frame_rate, gaussian_filter_kernel, loess_filter_kernel, median_filter_kernel)
16 | bool str int int int int int int
17 |
18 | '''
19 |
20 |
21 | ## INIT
22 | from importlib.metadata import version
23 | import numpy as np
24 | from scipy import signal
25 | from scipy.ndimage import gaussian_filter1d
26 | from statsmodels.nonparametric.smoothers_lowess import lowess
27 |
28 |
29 | ## AUTHORSHIP INFORMATION
30 | __author__ = "David Pagnon"
31 | __copyright__ = "Copyright 2021, Pose2Sim"
32 | __credits__ = ["David Pagnon"]
33 | __license__ = "BSD 3-Clause License"
34 | __version__ = version("sports2d")
35 | __maintainer__ = "David Pagnon"
36 | __email__ = "contact@david-pagnon.com"
37 | __status__ = "Development"
38 |
39 |
40 |
41 | ## FUNCTIONS
42 | def butterworth_filter_1d(col, args):
43 | '''
44 | 1D Zero-phase Butterworth filter (dual pass)
45 | Deals with nans
46 |
47 | INPUT:
48 |     - col: pandas Series (the nan/zero mask relies on Series.eq)
49 | - order: int
50 | - cutoff: int
51 | - framerate: int
52 |
53 | OUTPUT
54 | - col_filtered: Filtered pandas dataframe column
55 | '''
56 |
57 | order, cutoff, framerate = args
58 |
59 | # Filter
60 | b, a = signal.butter(order/2, cutoff/(framerate/2), 'low', analog = False)
61 | padlen = 3 * max(len(a), len(b))
62 |
63 | # split into sequences of not nans
64 | col_filtered = col.copy()
65 | mask = np.isnan(col_filtered) | col_filtered.eq(0)
66 | falsemask_indices = np.where(~mask)[0]
67 | gaps = np.where(np.diff(falsemask_indices) > 1)[0] + 1
68 | idx_sequences = np.split(falsemask_indices, gaps)
69 | if idx_sequences[0].size > 0:
70 | idx_sequences_to_filter = [seq for seq in idx_sequences if len(seq) > padlen]
71 |
72 | # Filter each of the selected sequences
73 | for seq_f in idx_sequences_to_filter:
74 | col_filtered[seq_f] = signal.filtfilt(b, a, col_filtered[seq_f])
75 |
76 | return col_filtered
77 |
78 |
79 | def gaussian_filter_1d(col, kernel):
80 | '''
81 | 1D Gaussian filter
82 |
83 | INPUT:
84 | - col: numpy array
85 | - kernel: Sigma kernel value (int)
86 |
87 | OUTPUT
88 | - col_filtered: Filtered pandas dataframe column
89 | '''
90 |
91 | col_filtered = gaussian_filter1d(col, kernel)
92 |
93 | return col_filtered
94 |
95 |
96 | def loess_filter_1d(col, kernel):
97 | '''
98 | 1D LOWESS filter (Locally Weighted Scatterplot Smoothing)
99 |
100 | INPUT:
101 | - col: numpy array
102 | - kernel: Kernel value: window length used for smoothing (int)
103 | NB: frac = kernel / frames_number
104 |
105 | OUTPUT
106 | - col_filtered: Filtered pandas dataframe column
107 | '''
108 |
109 | # split into sequences of not nans
110 | col_filtered = col.copy()
111 | mask = np.isnan(col_filtered)
112 | falsemask_indices = np.where(~mask)[0]
113 | gaps = np.where(np.diff(falsemask_indices) > 1)[0] + 1
114 | idx_sequences = np.split(falsemask_indices, gaps)
115 | if idx_sequences[0].size > 0:
116 | idx_sequences_to_filter = [seq for seq in idx_sequences if len(seq) > kernel]
117 |
118 | # Filter each of the selected sequences
119 | for seq_f in idx_sequences_to_filter:
120 | col_filtered[seq_f] = lowess(col_filtered[seq_f], seq_f, is_sorted=True, frac=kernel/len(seq_f), it=0)[:,1]
121 |
122 | return col_filtered
123 |
124 |
125 | def median_filter_1d(col, kernel):
126 | '''
127 | 1D median filter
128 |
129 | INPUT:
130 | - col: numpy array
131 | - kernel: window size (int)
132 |
133 | OUTPUT
134 | - col_filtered: Filtered pandas dataframe column
135 | '''
136 |
137 | col_filtered = signal.medfilt(col, kernel_size=kernel)
138 |
139 | return col_filtered
140 |
141 |
142 | def filter1d(col, *filter_options):
143 | '''
144 | Choose filter type and filter column
145 |
146 | INPUT:
147 | - col: Pandas dataframe column
148 | - filter_options = (do_filter, filter_type, butterworth_filter_order, butterworth_filter_cutoff, frame_rate, gaussian_filter_kernel, loess_filter_kernel, median_filter_kernel)
149 |
150 | OUTPUT
151 | - col_filtered: Filtered pandas dataframe column
152 | '''
153 |
154 |     filter_type = filter_options[1]
155 |     if filter_type == 'butterworth':
156 |         args = (filter_options[2], filter_options[3], filter_options[4])
157 |     elif filter_type == 'gaussian':
158 |         args = filter_options[5]
159 |     elif filter_type == 'loess':
160 |         args = filter_options[6]
161 |     elif filter_type == 'median':
162 |         args = filter_options[7]
163 |
164 | # Choose filter
165 | filter_mapping = {
166 | 'butterworth': butterworth_filter_1d,
167 | 'gaussian': gaussian_filter_1d,
168 | 'loess': loess_filter_1d,
169 | 'median': median_filter_1d
170 | }
171 | filter_fun = filter_mapping[filter_type]
172 |
173 | # Filter column
174 | col_filtered = filter_fun(col, args)
175 |
176 | return col_filtered
177 |
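178 |
179 | if __name__ == '__main__':
180 |     # Usage sketch for filter1d (illustrative only): Butterworth-filter a noisy
181 |     # pandas Series containing a nan gap. The options tuple follows the layout
182 |     # documented above: (do_filter, filter_type, butter_order, butter_cutoff,
183 |     # frame_rate, gaussian_kernel, loess_kernel, median_kernel).
184 |     import pandas as pd
185 |     col = pd.Series(np.sin(np.linspace(0, 2 * np.pi, 60)) + 0.05 * np.random.randn(60))
186 |     col[10:12] = np.nan   # nan gaps split the series; each chunk is filtered separately
187 |     filter_options = (True, 'butterworth', 4, 6, 30, 3, 30, 7)
188 |     col_filtered = filter1d(col, *filter_options)
189 |     print(col_filtered.head())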
--------------------------------------------------------------------------------
/Sports2D/Utilities/tests.py:
--------------------------------------------------------------------------------
1 | '''
2 | ########################################
3 | ## Sports2D tests ##
4 | ########################################
5 |
6 | Check whether Sports2D still works after each code modification.
7 | Disable the real-time results and plots to avoid any GUI issues.
8 |
9 | Usage:
10 | tests_sports2d
11 | OR
12 | python tests.py
13 | '''
14 |
15 | ## INIT
16 | from importlib.metadata import version
17 | import os
18 | import toml
19 | import subprocess
20 | from pathlib import Path
21 |
22 |
23 | ## AUTHORSHIP INFORMATION
24 | __author__ = "David Pagnon"
25 | __copyright__ = "Copyright 2023, Sports2D"
26 | __credits__ = ["David Pagnon"]
27 | __license__ = "BSD 3-Clause License"
28 | __version__ = version("sports2d")
29 | __maintainer__ = "David Pagnon"
30 | __email__ = "contact@david-pagnon.com"
31 | __status__ = "Development"
32 |
33 |
34 | ## FUNCTIONS
35 | def test_workflow():
36 | '''
37 | Test the workflow of Sports2D.
38 | '''
39 |
40 | root_dir = os.path.dirname(os.path.abspath(__file__))
41 | os.chdir(root_dir)
42 |
43 | #############################
44 | ## From Python ##
45 | #############################
46 |
47 | # Default
48 | config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
49 | config_dict = toml.load(config_path)
50 | video_dir = Path(__file__).resolve().parent.parent / 'Demo'
51 | config_dict.get("base").update({"video_dir": str(video_dir)})
52 | config_dict.get("base").update({"person_ordering_method": "highest_likelihood"})
53 | config_dict.get("base").update({"show_realtime_results":False})
54 | config_dict.get("post-processing").update({"show_graphs":False})
55 |
56 | from Sports2D import Sports2D
57 | Sports2D.process(config_dict)
58 |
59 |
60 | #############################
61 | ## From command line (CLI) ##
62 | #############################
63 |
64 | # Default
65 | demo_cmd = ["sports2d", "--person_ordering_method", "highest_likelihood", "--show_realtime_results", "False", "--show_graphs", "False"]
66 | subprocess.run(demo_cmd, check=True, capture_output=True, text=True, encoding='utf-8')
67 |
68 |     # Load a trc file, with visible_side 'front', first_person_height 1.76, time_range [1.2, 2.7], floor_angle 0, xy_origin [0, 928]
69 | demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
70 | "--load_trc_px", os.path.join(root_dir, "demo_Sports2D", "demo_Sports2D_px_person01.trc"),
71 | "--visible_side", "front", "--first_person_height", "1.76", "--time_range", "1.2", "2.7",
72 | "--floor_angle", "0", "--xy_origin", "0", "928"]
73 | subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8')
74 |
75 | # With no pixels to meters conversion, one person to select, lightweight mode, detection frequency, slowmo factor, gaussian filter, RTMO body pose model
76 | demo_cmd3 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
77 | "--to_meters", "False",
78 | "--nb_persons_to_detect", "1", "--person_ordering_method", "greatest_displacement",
79 | "--mode", "lightweight", "--det_frequency", "50",
80 | "--slowmo_factor", "4",
81 | "--filter_type", "gaussian",
82 | "--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
83 | subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8')
84 |
85 |     # With a time range, inverse kinematics, marker augmentation, and per-person visible_side and participant_mass
86 | demo_cmd4 = ["sports2d", "--person_ordering_method", "greatest_displacement", "--show_realtime_results", "False", "--show_graphs", "False",
87 | "--time_range", "1.2", "2.7",
88 | "--do_ik", "True", "--use_augmentation", "True",
89 | "--nb_persons_to_detect", "all", "--first_person_height", "1.65",
90 | "--visible_side", "auto", "front", "--participant_mass", "55.0", "67.0"]
91 | subprocess.run(demo_cmd4, check=True, capture_output=True, text=True, encoding='utf-8')
92 |
93 | # From config file
94 | config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
95 | config_dict = toml.load(config_path)
96 | video_dir = Path(__file__).resolve().parent.parent / 'Demo'
97 | config_dict.get("base").update({"video_dir": str(video_dir)})
98 | config_dict.get("base").update({"person_ordering_method": "highest_likelihood"})
99 | with open(config_path, 'w') as f: toml.dump(config_dict, f)
100 |     demo_cmd5 = ["sports2d", "--config", str(config_path), "--show_realtime_results", "False", "--show_graphs", "False"]
101 |     subprocess.run(demo_cmd5, check=True, capture_output=True, text=True, encoding='utf-8')
102 |
103 |
104 | if __name__ == "__main__":
105 | test_workflow()
--------------------------------------------------------------------------------
/Sports2D/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import sys
5 | from importlib.metadata import version
6 |
7 | __version__ = version("sports2d")
8 | VERSION = __version__
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=45", "wheel", "setuptools-scm"] # PEP 508 specs
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "sports2d"
7 | dynamic = ["version"] # Generate version from git tags
8 | authors = [
9 | {name = "David Pagnon", email = "contact@david-pagnon.com"},
10 | ]
11 | maintainers = [
12 | {name = "David Pagnon", email = "contact@david-pagnon.com"},
13 | ]
14 | description = "Compute 2D human pose and angles from a video or a webcam."
15 | readme = "README.md"
16 | requires-python = ">=3.9"
17 | keywords = ["markerless", "kinematics", "OpenPose", "OpenSim", "Mocap", "biomechanics", "sports coaching", "pose estimation", "joint angles", "sports analytics", "kinovea"]
18 | license = "BSD-3-Clause"
19 | license-files = ["LICENSE"]
20 | classifiers = [
21 | "Programming Language :: Python :: 3",
22 | "Development Status :: 3 - Alpha",
23 | "Intended Audience :: Science/Research",
24 | "Intended Audience :: Healthcare Industry",
25 | "Intended Audience :: Education",
26 | # "License :: OSI Approved :: BSD-3-Clause",
27 | "Operating System :: OS Independent",
28 | "Topic :: Scientific/Engineering",
29 | "Topic :: Scientific/Engineering :: Image Processing",
30 | "Topic :: Scientific/Engineering :: Medical Science Apps.",
31 | "Topic :: Multimedia :: Graphics",
32 | "Topic :: Multimedia :: Graphics :: 3D Modeling",
33 | ]
34 | urls = {Homepage = "https://github.com/davidpagnon/Sports2D", "Bug Tracker" = "https://github.com/davidpagnon/Sports2D/issues"}
35 | dependencies = [
36 | "toml",
37 | "numpy>=1.19",
38 | "matplotlib",
39 | "PyQt5",
40 | "tqdm",
41 | "anytree",
42 | "pandas>=1.5",
43 | "scipy",
44 | "statsmodels",
45 | "ipython",
46 | "c3d",
47 | "rtmlib",
48 | "openvino",
49 | "opencv-python",
50 | "imageio_ffmpeg",
51 | "deep-sort-realtime",
52 | "Pose2Sim"
53 | ]
54 |
55 | [tool.setuptools_scm]
56 |
57 | [tool.setuptools]
58 | packages = {find = {}}
59 |
60 | [tool.setuptools.package-data]
61 | "*" = ["Demo/**/*", "Demo/**/**/*"]
62 |
63 | [project.scripts]
64 | sports2d = "Sports2D.Sports2D:main"
65 | tests_sports2d = "Sports2D.Utilities.tests:test_workflow"
66 |
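67 | # Typical install flows (illustrative; not enforced by this file):
68 | #   pip install sports2d    # from PyPI
69 | #   pip install -e .        # editable install from a clone
70 | # Either exposes the `sports2d` and `tests_sports2d` console scripts declared above.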
--------------------------------------------------------------------------------