├── .github └── workflows │ ├── dart.yaml │ ├── js.yaml │ ├── pypi-publish.yaml │ └── python.yaml ├── .gitignore ├── .gitmodules ├── .readthedocs.yaml ├── LICENSE.txt ├── Makefile ├── README.md ├── docs ├── Makefile ├── README.md ├── _static │ └── nav.css ├── conf.py ├── examples.rst ├── index.rst ├── js_docs.rst ├── json.rst ├── make.bat ├── modules.rst ├── pose_format.numpy.pose_body.rst ├── pose_format.numpy.representation.distance.rst ├── pose_format.numpy.representation.distance_test.rst ├── pose_format.numpy.representation.rst ├── pose_format.numpy.rst ├── pose_format.pose.rst ├── pose_format.pose_body.rst ├── pose_format.pose_header.rst ├── pose_format.pose_representation.rst ├── pose_format.pose_test.rst ├── pose_format.pose_visualizer.rst ├── pose_format.rst ├── pose_format.tensorflow.masked.rst ├── pose_format.tensorflow.masked.tensor.rst ├── pose_format.tensorflow.masked.tensor_graph_mode_test.rst ├── pose_format.tensorflow.masked.tensor_test.rst ├── pose_format.tensorflow.masked.tensorflow.rst ├── pose_format.tensorflow.masked.tensorflow_test.rst ├── pose_format.tensorflow.pose_body.rst ├── pose_format.tensorflow.pose_body_test.rst ├── pose_format.tensorflow.pose_representation.rst ├── pose_format.tensorflow.pose_representation_test.rst ├── pose_format.tensorflow.representation.angle.rst ├── pose_format.tensorflow.representation.angle_test.rst ├── pose_format.tensorflow.representation.distance.rst ├── pose_format.tensorflow.representation.distance_test.rst ├── pose_format.tensorflow.representation.inner_angle.rst ├── pose_format.tensorflow.representation.inner_angle_test.rst ├── pose_format.tensorflow.representation.point_line_distance.rst ├── pose_format.tensorflow.representation.point_line_distance_test.rst ├── pose_format.tensorflow.representation.rst ├── pose_format.tensorflow.rst ├── pose_format.testing.rst ├── pose_format.third_party.py.rst ├── pose_format.third_party.rst ├── pose_format.torch.masked.rst ├── pose_format.torch.masked.tensor.rst ├── 
pose_format.torch.masked.torch.rst ├── pose_format.torch.masked.torch_test.rst ├── pose_format.torch.pose_body.rst ├── pose_format.torch.pose_representation.rst ├── pose_format.torch.representation.angle.rst ├── pose_format.torch.representation.angle_test.rst ├── pose_format.torch.representation.distance.rst ├── pose_format.torch.representation.distance_test.rst ├── pose_format.torch.representation.inner_angle.rst ├── pose_format.torch.representation.inner_angle_test.rst ├── pose_format.torch.representation.point_line_distance.rst ├── pose_format.torch.representation.point_line_distance_test.rst ├── pose_format.torch.representation.points.rst ├── pose_format.torch.representation.rst ├── pose_format.torch.rst ├── pose_format.utils.fast_math.rst ├── pose_format.utils.holistic.rst ├── pose_format.utils.normalization_3d.rst ├── pose_format.utils.openpose.rst ├── pose_format.utils.openpose_135.rst ├── pose_format.utils.openpose_test.rst ├── pose_format.utils.optical_flow.rst ├── pose_format.utils.pose_converter.rst ├── pose_format.utils.reader.rst ├── pose_format.utils.reader_test.rst ├── pose_format.utils.rst ├── pose_format.utils.siren.rst ├── pose_format_js.rst ├── pose_viewer_example.rst ├── pose_viewer_files.rst ├── pose_viewer_js.rst ├── pose_viewer_src.rst ├── readme.rst ├── references.bib ├── requirements.txt ├── specs │ ├── v0.0.md │ ├── v0.1.md │ └── v0.2.md ├── specs_v01.rst ├── src.rst ├── test_data.rst ├── tests.rst ├── tests_hand_normalization_test.rst ├── tests_optical_flow.rst ├── tests_pose.rst ├── tests_pose_tf.rst ├── tests_subs.rst └── utils │ └── plantuml.jar ├── examples ├── __init__.py └── pose_to_siren_to_pose.py └── src ├── js ├── pose_format │ ├── .gitignore │ ├── README.md │ ├── package-lock.json │ ├── package.json │ ├── src │ │ ├── index.ts │ │ ├── parser.ts │ │ └── types.d.ts │ ├── tsconfig.json │ └── tslint.json └── pose_viewer │ ├── .editorconfig │ ├── .gitignore │ ├── .prettierrc.json │ ├── LICENSE │ ├── README.md │ ├── example │ ├── 
example.pose │ ├── example.zip │ └── index.html │ ├── package-lock.json │ ├── package.json │ ├── src │ ├── components.d.ts │ ├── components │ │ └── pose-viewer │ │ │ ├── pose-viewer.css │ │ │ ├── pose-viewer.tsx │ │ │ ├── readme.md │ │ │ └── renderers │ │ │ ├── canvas.pose-renderer.tsx │ │ │ ├── interactive.pose-renderer.tsx │ │ │ ├── pose-renderer.tsx │ │ │ └── svg.pose-renderer.tsx │ ├── index.html │ ├── index.ts │ └── sample-data │ │ └── example.pose │ ├── stencil.config.ts │ └── tsconfig.json └── python ├── .gitignore ├── ComfyUI-Pose-Format ├── __init__.py └── nodes.py ├── README.md ├── pose_format ├── BUILD ├── WORKSPACE ├── __init__.py ├── bin │ ├── __init__.py │ ├── directory.py │ ├── pose_estimation.py │ ├── pose_info.py │ └── pose_visualizer.py ├── numpy │ ├── BUILD │ ├── __init__.py │ ├── pose_body.py │ └── representation │ │ ├── __init__.py │ │ ├── distance.py │ │ └── distance_test.py ├── pose.py ├── pose_body.py ├── pose_header.py ├── pose_representation.py ├── pose_visualizer.py ├── tensorflow │ ├── BUILD │ ├── __init__.py │ ├── masked │ │ ├── BUILD │ │ ├── __init__.py │ │ ├── tensor.py │ │ ├── tensor_graph_mode_test.py │ │ ├── tensor_test.py │ │ ├── tensorflow.py │ │ └── tensorflow_test.py │ ├── pose_body.py │ ├── pose_body_test.py │ ├── pose_representation.py │ ├── pose_representation_test.py │ └── representation │ │ ├── BUILD │ │ ├── __init__.py │ │ ├── angle.py │ │ ├── angle_test.py │ │ ├── distance.py │ │ ├── distance_test.py │ │ ├── inner_angle.py │ │ ├── inner_angle_test.py │ │ ├── point_line_distance.py │ │ └── point_line_distance_test.py ├── testing │ ├── __init__.py │ └── pybase │ │ └── BUILD ├── third_party │ ├── OpenCVX │ │ └── BUILD │ ├── __init__.py │ └── py │ │ ├── __init__.py │ │ ├── dataclasses │ │ └── BUILD │ │ ├── numpy │ │ └── BUILD │ │ ├── scipy │ │ └── BUILD │ │ ├── tensorflow │ │ └── BUILD │ │ ├── torch │ │ └── BUILD │ │ └── tqdm │ │ └── BUILD ├── torch │ ├── BUILD │ ├── __init__.py │ ├── masked │ │ ├── BUILD │ │ ├── __init__.py 
│ │ ├── collator.py │ │ ├── tensor.py │ │ ├── torch.py │ │ └── torch_test.py │ ├── pose_body.py │ ├── pose_representation.py │ └── representation │ │ ├── BUILD │ │ ├── __init__.py │ │ ├── angle.py │ │ ├── angle_test.py │ │ ├── distance.py │ │ ├── distance_test.py │ │ ├── inner_angle.py │ │ ├── inner_angle_test.py │ │ ├── point_line_distance.py │ │ ├── point_line_distance_test.py │ │ └── points.py └── utils │ ├── BUILD │ ├── __init__.py │ ├── conftest.py │ ├── fast_math.py │ ├── generic.py │ ├── generic_test.py │ ├── holistic.py │ ├── normalization_3d.py │ ├── openpose.py │ ├── openpose_135.py │ ├── openpose_test.py │ ├── optical_flow.py │ ├── pose_converter.py │ ├── reader.py │ ├── reader_test.py │ └── siren.py ├── pyproject.toml └── tests ├── __init__.py ├── data ├── mediapipe.pose ├── mediapipe_hand_normalized.pose ├── mediapipe_long.pose ├── mediapipe_long_hand_normalized.pose ├── openpose.pose └── optical_flow.png ├── hand_normalization_test.py ├── optical_flow_test.py ├── pose_test.py ├── pose_tf_graph_mode_test.py └── visualization_test.py /.github/workflows/dart.yaml: -------------------------------------------------------------------------------- 1 | name: Dart Tests 2 | 3 | on: 4 | pull_request: 5 | branches: [master, main] 6 | push: 7 | branches: [master, main] 8 | 9 | jobs: 10 | build: 11 | name: Run Tests 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | with: 16 | submodules: true 17 | - uses: dart-lang/setup-dart@v1 18 | with: 19 | sdk: 3.6.0 20 | 21 | - name: Install Requirements 22 | working-directory: src/dart 23 | run: dart pub get 24 | 25 | - name: Check formatting 26 | working-directory: src/dart 27 | run: dart format --output=none --set-exit-if-changed . 
28 | 29 | - name: Analyze 30 | working-directory: src/dart 31 | run: dart analyze --fatal-infos 32 | 33 | - name: Run tests 34 | working-directory: src/dart 35 | run: dart test 36 | -------------------------------------------------------------------------------- /.github/workflows/js.yaml: -------------------------------------------------------------------------------- 1 | name: JavaScript Tests 2 | 3 | on: 4 | push: 5 | branches: [master, main] 6 | pull_request: 7 | branches: [master, main] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | 13 | strategy: 14 | matrix: 15 | node-version: [16.x, 18.x] 16 | 17 | steps: 18 | - uses: actions/checkout@v3 19 | - name: Use Node.js ${{ matrix.node-version }} 20 | uses: actions/setup-node@v3 21 | with: 22 | node-version: ${{ matrix.node-version }} 23 | 24 | - name: Install dependencies for pose_format 25 | working-directory: ./src/js/pose_format 26 | run: npm ci 27 | 28 | - name: Run tests for pose_format 29 | working-directory: ./src/js/pose_format 30 | run: npm run build # we can have test command when we have test files 31 | 32 | - name: Install dependencies for pose_viewer 33 | working-directory: ./src/js/pose_viewer 34 | run: npm ci # && npm install --save-dev @types/jest@27.0.3 jest@27.0.3 puppeteer@20 35 | 36 | - name: Run tests for pose_viewer 37 | working-directory: ./src/js/pose_viewer 38 | run: npm run build # we can have test command when we have test files 39 | -------------------------------------------------------------------------------- /.github/workflows/pypi-publish.yaml: -------------------------------------------------------------------------------- 1 | name: Publish Python Package 2 | 3 | on: 4 | release: 5 | types: [ created ] 6 | 7 | jobs: 8 | build-n-publish: 9 | name: Build and publish Python 🐍 distributions 📦 to PyPI 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | 14 | - uses: actions/setup-python@v4 15 | with: 16 | python-version: "3.10" 17 | 18 | - name: 
Extract release version 19 | id: get_version 20 | run: echo "version=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV 21 | 22 | - name: Update version in pyproject.toml 23 | working-directory: src/python 24 | run: | 25 | sed -i 's/^version = .*/version = "${{ env.version }}"/' pyproject.toml 26 | 27 | - name: Commit updated pyproject.toml 28 | run: | 29 | git fetch origin master 30 | git checkout master 31 | git config user.name "GitHub Actions" 32 | git config user.email "actions@github.com" 33 | git add pyproject.toml 34 | git commit -m "Bump version to ${{ env.version }}" 35 | git push origin master 36 | working-directory: src/python 37 | 38 | - name: Install dependencies 39 | working-directory: src/python 40 | run: | 41 | python -m pip install --upgrade pip 42 | pip install build 43 | 44 | - name: Build a binary wheel dist 45 | working-directory: src/python 46 | run: | 47 | rm -rf dist 48 | python -m build 49 | 50 | - name: Quick test of the built dist 51 | run: | 52 | pip install "src/python/dist/$(ls src/python/dist/ | head -1)" 53 | python -c "import pose_format" 54 | 55 | - name: Publish distribution 📦 to PyPI 56 | uses: pypa/gh-action-pypi-publish@release/v1 57 | with: 58 | packages-dir: src/python/dist/ 59 | user: __token__ 60 | password: ${{ secrets.PYPI_API_TOKEN }} 61 | -------------------------------------------------------------------------------- /.github/workflows/python.yaml: -------------------------------------------------------------------------------- 1 | name: Python Tests 2 | 3 | on: 4 | push: 5 | branches: [master, main] 6 | pull_request: 7 | branches: [master, main] 8 | 9 | jobs: 10 | test: 11 | name: Run Tests 12 | runs-on: ubuntu-latest 13 | strategy: 14 | matrix: 15 | python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] 16 | fail-fast: false 17 | 18 | steps: 19 | - uses: actions/checkout@v3 20 | - uses: actions/setup-python@v4 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | 24 | - name: Install Requirements 25 | 
working-directory: src/python 26 | run: pip install .[dev] 27 | 28 | - name: Run tests 29 | working-directory: src/python 30 | run: pytest pose_format 31 | 32 | - name: Run additional tests 33 | working-directory: src/python 34 | run: pytest tests 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .DS_Store 3 | .vscode/ 4 | .coverage 5 | .coveragerc 6 | coverage.lcov -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "examples/pose-pipelines"] 2 | path = examples/pose-pipelines 3 | url = git@github.com:J22Melody/pose-pipelines.git 4 | [submodule "src/dart"] 5 | path = src/dart 6 | url = https://github.com/bipinkrish/pose-dart.git 7 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the OS, Python version and other tools you might need 9 | build: 10 | os: ubuntu-22.04 # OS on Read the Docs servers, not your local machine 11 | tools: 12 | python: "3.8" 13 | # You can also specify other tool versions: 14 | # nodejs: "19" 15 | # rust: "1.64" 16 | # golang: "1.19" 17 | 18 | # Build documentation in the "docs/" directory with Sphinx 19 | sphinx: 20 | configuration: docs/conf.py 21 | 22 | # Optionally build your docs in additional formats such as PDF and ePub 23 | # formats: 24 | # - pdf 25 | # - epub 26 | 27 | # Optional but recommended, declare the Python requirements required 28 | # to build your documentation 29 | # See 
https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 30 | python: 31 | install: 32 | - requirements: docs/requirements.txt -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Amit Moryossef 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | lint: 2 | cd src/python && yapf -i -r . 3 | cd src/python && isort . 4 | cd src/python && pydoclint . 5 | 6 | test: 7 | cd src/python && pytest . 
-------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Sphinx Documentation Builder Guide 2 | 3 | This guide gives instructions on how to set up and build Sphinx documentation, especially when `.rst` (reStructuredText) files are already created. 4 | 5 | ## Table of Contents 6 | - [Introduction](#introduction) 7 | - [Requirements](#requirements) 8 | - [Installation](#installation) 9 | - [How to Use the Makefile](#how-to-use-the-makefile) 10 | - [Using Sphinx with Existing `.rst` Files](#using-sphinx-with-existing-rst-files) 11 | - [Editing `.rst` Files](#tips-for-editing-rst-files) 12 | 13 | ## Introduction 14 | Sphinx is a tool for creating intelligent auto-documentation for Python projects. Sphinx uses reStructuredText (`.rst`) files to create documentation. This guide will walk you through the basics of setting up Sphinx, especially when you already have `.rst` files. 
15 | 16 | ## Requirements 17 | 18 | - Ensure that [Sphinx](https://www.sphinx-doc.org/) is installed on your system. 19 | - The `sphinx-build` command should either be available in your PATH or the `SPHINXBUILD` environment variable should point to its location. 20 | 21 | ## Installation 22 | 23 | To set up Sphinx and the required extensions for this project, use the following commands: 24 | 25 | ```bash 26 | pip install sphinx sphinx_rtd_theme myst-parser autodocsumm sphinxcontrib-bibtex sphinx-needs sphinxcontrib-plantuml 27 | 28 | ``` 29 | 30 | ## How to Use the Makefile 31 | 32 | 1. **Navigate to the Directory**: Open a command prompt or terminal and navigate to the directory containing the make file and the .rst files, in our case: `docs` 33 | 34 | 2. **Build Documentation**: 35 | ```bash 36 | .\make # for Windows: .\make (makefile_name) html 37 | ``` 38 | 39 | 3. **View Documentation**: The built html will be in `_build` directory. Navigate to `_build/html` and open `index.html` in a web browser. 40 | 41 | ## Using Sphinx with Existing `.rst` Files 42 | 43 | 1. **Edit the `conf.py` file**: You can edit `conf.py` file - which should be in the same directory as the `.rst` files. This file will contain the configuration for the Sphinx documentation builder. 44 | 45 | 2. **Working with the `index.rst` file**: This file will contain the table of contents for the documentation. It will also contain the names of the `.rst` files that will be included in the documentation. 46 | 47 | 3. **Changing `.rst` files**: You can edit the `.rst` files to add content to the documentation. You can also add new `.rst` files to the directory and include them in the `index.rst` file. 48 | 49 | ## Tips for Editing `.rst` Files 50 | It is important that the `.rst` files are formatted correctly. Here are some tips for editing `.rst` files: 51 | 52 | - Using `.. toctree::` directive: This directive is used to create a table of contents for the documentation. 
It should be used in `index.rst` file to list the `.rst` files that will be included in the documentation. It can also be used in other `.rst` files to create sub-tables of contents. It is also recursively used to create sub-tables of contents within sub-tables of contents. 53 | 54 | - Headings: `=` and `-` symbols create headings. The `=` symbol creates a top-level heading and the `-` symbol creates a second-level heading. 55 | 56 | - Links: `.. _: ` directive to create a link. To use the link, use the `:ref:` directive. For example, if the link name is `link_name`, then use `:ref:`link_name`` to create a link to the URL. 57 | 58 | - Emphasis: Use the `*` symbol to create emphasis. For example, `*emphasis*` will create *emphasis*. 59 | 60 | - Images: `.. image:: ` to add an image. The `image_path` is the path to the image file. For example, `.. image:: images/image.png` will add the image `image.png` to the documentation. 61 | 62 | - Include Files: `.. include:: ` directive to include file. For example, `.. include:: file.rst` will include the file `file.rst` in the documentation. -------------------------------------------------------------------------------- /docs/_static/nav.css: -------------------------------------------------------------------------------- 1 | .wy-nav-side { 2 | width: 300px; /* This should match the width you set for .wy-nav-content-wrap */ 3 | } 4 | 5 | .wy-nav-content-wrap { 6 | margin-left: 300px; 7 | } -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | import os 9 | import sys 10 | sys.path.insert(0, os.path.abspath('../src/python')) 11 | 12 | project = 'Pose-Format Toolkit' 13 | copyright = '2023, Moryossef, Amit and Müller, Mathias and Fahrni, Rebecka' 14 | author = 'Amit Moryossef, Mathias Müller, Rebecka Fahrni' 15 | release = 'v0.2.2' 16 | 17 | # -- General configuration --------------------------------------------------- 18 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 19 | 20 | extensions = ['sphinx.ext.autodoc', 21 | 'sphinx.ext.napoleon', 'sphinx.ext.inheritance_diagram', 22 | 'sphinx.ext.viewcode', 23 | 'myst_parser', 24 | 'autodocsumm','sphinxcontrib.bibtex', 25 | 'sphinx_rtd_theme','sphinx_needs','sphinxcontrib.plantuml'] 26 | 27 | 28 | autodoc_default_options = { 29 | 'autosummary': True, 30 | 'members': True, 31 | 'show-inheritance':True} 32 | templates_path = ['_templates'] 33 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 34 | master_doc = 'index' 35 | 36 | 37 | # -- Options for HTML output ------------------------------------------------- 38 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 39 | 40 | autodoc_typehints = "description" 41 | autodoc_mock_imports = ['mediapipe'] # mocks import of mediapipe for readthedocs 42 | html_theme = 'sphinx_rtd_theme' 43 | html_static_path = ['_static'] 44 | html_theme_options = { 45 | 'logo_only': False, 46 | 'display_version': True} 47 | 48 | html_context = { 49 | 'display_github': True, 50 | 'github_user': 'sign-language-processing', 51 | 'github_repo': 'pose', 52 | 'github_version': 'master/', 53 | 'conf_py_path': '/docs/', 54 | 
'source_suffix': '.rst', 55 | } 56 | 57 | def setup(app): 58 | app.add_css_file('nav.css') 59 | 60 | bibtex_bibfiles = ['references.bib'] 61 | 62 | default_role = 'py:obj' 63 | -------------------------------------------------------------------------------- /docs/examples.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | ======== 3 | 4 | Pose Format Conversion with Sirens 5 | ----------------------------------- 6 | 7 | The ``pose_to_siren_to_pose.py`` script shows us how to change a 3D pose using something called a Siren neural network. 8 | 9 | .. note:: 10 | 11 | The function follows these steps: 12 | 13 | 1. Fills missing data in the body with zeros. 14 | 2. Normalizes the pose distribution. 15 | 3. Converts the pose using the Siren neural network. 16 | 4. Constructs the new Pose with the predicted data. 17 | 5. Unnormalizes the pose distribution. 18 | 19 | Also: Before you start, ensure you have a `.pose` file/ path. This is the standardized format that stores the 3D pose information. If you don't have one, you might either need to obtain it from a relevant dataset or convert your existing pose data into this format. 20 | 21 | Step-by-step Guide: 22 | ~~~~~~~~~~~~~~~~~~~ 23 | 24 | 1. **Preparation** 25 | 26 | Begin by importing the necessary modules: 27 | 28 | .. code-block:: python 29 | 30 | import numpy as np 31 | from numpy import ma 32 | import pose_format.utils.siren as siren 33 | from pose_format import Pose 34 | from pose_format.numpy import NumPyPoseBody 35 | from pose_format.pose_visualizer import PoseVisualizer 36 | 37 | 38 | 2. **Define the Conversion Function** 39 | 40 | The function `pose_to_siren_to_pose` is used to perform the conversion. You can find an overview of the whole function at :ref:`overview` 41 | 42 | 43 | .. 
code-block:: python 44 | 45 | def pose_to_siren_to_pose(p: Pose, fps=None) -> Pose: 46 | """Converts a given Pose object to its Siren representation and back to Pose.""" 47 | 48 | # Fills missing values with 0's 49 | p.body.zero_filled() 50 | 51 | # Normalizes 52 | mu, std = p.normalize_distribution() 53 | 54 | # Use siren net 55 | net = siren.get_pose_siren(p, total_steps=3000, steps_til_summary=100, learning_rate=1e-4, cuda=True) 56 | 57 | new_fps = fps if fps is not None else p.body.fps 58 | coords = siren.PoseDataset.get_coords(time=len(p.body.data) / p.body.fps, fps=new_fps) 59 | 60 | # Get predictions of new Pose data 61 | pred = net(coords).cpu().numpy() 62 | 63 | # Construct new Body out of predictions 64 | pose_body = NumPyPoseBody(fps=new_fps, data=ma.array(pred), confidence=np.ones(shape=tuple(pred.shape[:3]))) 65 | p = Pose(header=p.header, body=pose_body) 66 | 67 | # Revert normalization and give back the pose instance 68 | p.unnormalize_distribution(mu, std) 69 | return p 70 | 71 | 72 | The function does the following operations: 73 | 74 | - Fills missing data in the pose body with zeros. 75 | - Normalizes the pose distribution. 76 | - Uses the Siren neural network to transform the pose. 77 | - Constructs a new Pose with the predicted data. 78 | - Reverts the normalization on the pose distribution. 79 | 80 | 3. **Usage** 81 | 82 | After defining the function, you can use it in your main script: 83 | 84 | .. 
code-block:: python 85 | 86 | if __name__ == "__main__": 87 | pose_path = "/home/nlp/amit/PhD/PoseFormat/sample-data/1.pose" # your own file path to a `.pose` file 88 | 89 | buffer = open(pose_path, "rb").read() 90 | p = Pose.read(buffer) 91 | print("Poses loaded") 92 | 93 | p = pose_to_siren_to_pose(p) 94 | 95 | info = p.header.normalization_info( 96 | p1=("pose_keypoints_2d", "RShoulder"), 97 | p2=("pose_keypoints_2d", "LShoulder") 98 | ) 99 | p.normalize(info, scale_factor=300) 100 | p.focus() 101 | 102 | v = PoseVisualizer(p) 103 | v.save_video("reconstructed.mp4", v.draw(max_frames=3000)) 104 | 105 | 106 | The main script performs these tasks: 107 | 108 | - Reads the pose data from a file. 109 | - Applies the ``pose_to_siren_to_pose`` function to the read pose. 110 | - Normalizes and focuses the pose. 111 | - Visualizes the converted pose using the ``PoseVisualizer``. 112 | 113 | 4. **Execution** 114 | 115 | To run the script: 116 | 117 | .. code-block:: bash 118 | 119 | $ python pose_format_converter.py 120 | 121 | 122 | 123 | The ``pose_format`` combined with Siren neural networks is great to transform and work with 3D pose data. 124 | By understanding and using the functions and methods provided in this script, you will be able to understand better how to manipulate and visualize 3D poses to suit your own requirements. 125 | 126 | 127 | .. _overview: 128 | 129 | Overview of ``pose_to_siren_to_pose.py`` 130 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 131 | 132 | .. literalinclude:: ../examples/pose_to_siren_to_pose.py 133 | :language: python 134 | :linenos: -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. Pose documentation master file, created by 2 | sphinx-quickstart on Tue Aug 15 14:09:31 2023. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 
5 | 6 | 7 | 8 | ======================================== 9 | Welcome to Pose-Format's documentation! 10 | ======================================== 11 | 12 | Pose is an innovative toolkit created by :cite:`moryossef2021pose-format` dedicated to the handling, manipulation, and visualization of poses. With support for various file formats and the ability to operate 13 | with Python especially the :ref:`pose_format` package and JavaScript, Pose provides a comprehensive solution for pose-related tasks. 14 | 15 | 16 | 17 | 18 | .. toctree:: 19 | :maxdepth: 3 20 | :caption: Introduction 21 | 22 | readme 23 | 24 | .. toctree:: 25 | :maxdepth: 7 26 | :caption: Python Code Documentation 27 | 28 | modules 29 | pose_format 30 | 31 | .. toctree:: 32 | :maxdepth: 2 33 | :caption: Additional Material 34 | 35 | js_docs 36 | examples 37 | specs_v01 38 | 39 | 40 | 41 | References 42 | ---------- 43 | 44 | .. bibliography:: references.bib 45 | 46 | Indices and tables 47 | ================== 48 | 49 | * :ref:`genindex` 50 | * :ref:`modindex` 51 | * :ref:`search` 52 | -------------------------------------------------------------------------------- /docs/js_docs.rst: -------------------------------------------------------------------------------- 1 | JavaScript 2 | ------------------------ 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | pose_format_js 8 | pose_viewer_js -------------------------------------------------------------------------------- /docs/json.rst: -------------------------------------------------------------------------------- 1 | JSON Files 2 | ========== 3 | 4 | package.JSON 5 | ------------- 6 | 7 | .. literalinclude:: ../src/js/pose_format/package.json 8 | :language: json 9 | :linenos: 10 | :caption: Description or Title for the JSON file 11 | 12 | package-lock.JSON 13 | ----------------- 14 | .. 
literalinclude:: ../src/js/pose_format/package-lock.json 15 | :language: json 16 | :linenos: 17 | :caption: Description or Title for the JSON file 18 | 19 | tsconfig.JSON 20 | ------------- 21 | 22 | .. literalinclude:: ../src/js/pose_format/tsconfig.json 23 | :language: json 24 | :linenos: 25 | :caption: Description or Title for the JSON file 26 | 27 | tslint.JSON 28 | ------------- 29 | 30 | 31 | .. literalinclude:: ../src/js/pose_format/tslint.json 32 | :language: json 33 | :linenos: 34 | :caption: Description or Title for the JSON file -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/modules.rst: -------------------------------------------------------------------------------- 1 | Structure 2 | ============= 3 | 4 | The :ref:`pose_format` aims to help users with the pose data management and pose analysis. 
5 | The toolkit offers many functionalities, from reading and editing to visualizing and testing pose data. It provides a wide range of features for these tasks. 6 | 7 | This section gives a brief overview of the main feature structure of the package and its functionalities. 8 | 9 | Main Features 10 | ------------- 11 | 12 | 1. **Reading and Manipulating Pose Data**: 13 | 14 | * `.pose` files ensuring cross-compatibility between popular libraries such as NumPy, PyTorch, and TensorFlow. 15 | * The loaded data presents multiple manipulation options including: 16 | 17 | - Normalizing pose data. 18 | - Augmentation of data. 19 | - Interpolation of data. 20 | 21 | 22 | 2. **Visualization Capabilities**: 23 | 24 | - Methods to visualize raw and processed pose data using `pose_format.pose_visualizer.PoseVisualizer` module. 25 | - Includes overlay functions for videos. 26 | 27 | 28 | 3. **Package Organization and Components**: 29 | 30 | * Structured with submodules and subpackages serving the following purposes: 31 | 32 | - :ref:`pose_format_numpy` for NumPy interactions. 33 | 34 | - :ref:`pose_format_tensorflow` for TensorFlow functionalities. 35 | 36 | - :ref:`pose_format_torch` for PyTorch-related tools. 37 | 38 | - :ref:`pose_format_third_party` for externals. 39 | 40 | - :ref:`pose_format_utils` for utility tools. 41 | 42 | 4. **Testing Suite**: 43 | 44 | - Tests for the reliability of the package and its setups/data can be found in :ref:`tests`. 45 | 46 | Tests 47 | ------ 48 | 49 | This section illustrates the content of the testing suite and the data used. 50 | 51 | .. toctree:: 52 | :maxdepth: 4 53 | 54 | tests -------------------------------------------------------------------------------- /docs/pose_format.numpy.pose_body.rst: -------------------------------------------------------------------------------- 1 | pose\_format.numpy.pose\_body module 2 | ==================================== 3 | 4 | ..
automodule:: pose_format.numpy.pose_body 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.numpy.representation.distance.rst: -------------------------------------------------------------------------------- 1 | pose\_format.numpy.representation.distance module 2 | ================================================= 3 | 4 | .. automodule:: pose_format.numpy.representation.distance 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.numpy.representation.distance_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.numpy.representation.distance\_test module 2 | ======================================================= 3 | 4 | .. automodule:: pose_format.numpy.representation.distance_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.numpy.representation.rst: -------------------------------------------------------------------------------- 1 | pose\_format.numpy.representation package 2 | ========================================= 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | pose_format.numpy.representation.distance 8 | pose_format.numpy.representation.distance_test 9 | 10 | 11 | -------------------------------------------------------------------------------- /docs/pose_format.numpy.rst: -------------------------------------------------------------------------------- 1 | .. _pose_format_numpy: 2 | 3 | pose_format.numpy package 4 | ========================== 5 | 6 | Integration with the NumPy library. 7 | 8 | .. automodule:: pose_format.numpy 9 | :members: 10 | :undoc-members: 11 | :show-inheritance: 12 | 13 | .. 
toctree:: 14 | :maxdepth: 4 15 | 16 | pose_format.numpy.representation 17 | pose_format.numpy.pose_body -------------------------------------------------------------------------------- /docs/pose_format.pose.rst: -------------------------------------------------------------------------------- 1 | pose\_format.pose module 2 | ======================== 3 | 4 | .. automodule:: pose_format.pose 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.pose_body.rst: -------------------------------------------------------------------------------- 1 | pose\_format.pose\_body module 2 | ============================== 3 | 4 | .. automodule:: pose_format.pose_body 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.pose_header.rst: -------------------------------------------------------------------------------- 1 | pose\_format.pose\_header module 2 | ================================ 3 | 4 | .. automodule:: pose_format.pose_header 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.pose_representation.rst: -------------------------------------------------------------------------------- 1 | pose\_format.pose\_representation module 2 | ======================================== 3 | 4 | .. automodule:: pose_format.pose_representation 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.pose_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.pose\_test module 2 | ============================== 3 | 4 | .. 
automodule:: pose_format.pose_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.pose_visualizer.rst: -------------------------------------------------------------------------------- 1 | pose\_format.pose\_visualizer module 2 | ==================================== 3 | 4 | .. automodule:: pose_format.pose_visualizer 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.rst: -------------------------------------------------------------------------------- 1 | .. _pose_format: 2 | 3 | pose\_format package 4 | ==================== 5 | 6 | Here's a breakdown of the main subpackages: 7 | 8 | - **NumPy package** e.g `pose_format.numpy`: 9 | 10 | This subpackage offers an integration with the popular NumPy library and helps with numerical computations on pose data. 11 | 12 | - `pose_format.numpy.pose_body` module and subpackages: 13 | 14 | Provides tools to handle body data in context of pose with NumPy. 15 | 16 | - **TensorFlow package** e.g `pose_format.tensorflow`: 17 | 18 | Provides integration with TensorFlow library, therefore this subpackage offers operations on pose data in TensorFlow contexts. 19 | 20 | - `pose_format.tensorflow.pose_body`: 21 | 22 | Main module for handling body data within TensorFlow. 23 | 24 | - `pose_format.tensorflow.pose_body_test`: 25 | 26 | Test module to ensure reliability of pose body operations in TensorFlow. 27 | 28 | - `pose_format.tensorflow.pose_representation` and `pose_format.tensorflow.pose_representation_test`: 29 | 30 | These modules help the representation of poses in TensorFlow. 31 | 32 | - **Testing package** e.g `pose_format.testing`: 33 | 34 | This subpackage provides test cases to ensure reliability of entire `pose_format` package. 
35 | 36 | - **Third-Party package** e.g `pose_format.third_party`: 37 | 38 | Contains modules and utilities that integrate third-party tools or libraries with the pose format. 39 | 40 | - **PyTorch package** e.g `pose_format.torch`: 41 | 42 | This subpackage provides modules to manage and manipulate pose data in a PyTorch context. 43 | 44 | - `pose_format.torch.pose_body`: 45 | 46 | Module for handling pose body data within PyTorch. 47 | 48 | - `pose_format.torch.pose_representation`: 49 | 50 | Helps represent pose data in PyTorch. 51 | 52 | - **Utils package** e.g `pose_format.utils`: 53 | 54 | This subpackage consists of various modules to help operations related to pose data. 55 | 56 | 57 | 58 | .. toctree:: 59 | :maxdepth: 9 60 | :caption: Subpackages: 61 | 62 | pose_format.numpy 63 | pose_format.tensorflow 64 | pose_format.testing 65 | pose_format.third_party 66 | pose_format.torch 67 | pose_format.utils 68 | 69 | 70 | Submodules 71 | ---------- 72 | 73 | pose\_format.pose module 74 | ~~~~~~~~~~~~~~~~~~~~~~~~ 75 | 76 | .. automodule:: pose_format.pose 77 | :members: 78 | :undoc-members: 79 | :show-inheritance: 80 | 81 | pose\_format.pose\_body module 82 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | 84 | .. automodule:: pose_format.pose_body 85 | :members: 86 | :undoc-members: 87 | :show-inheritance: 88 | 89 | pose\_format.pose\_header module 90 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 91 | 92 | .. automodule:: pose_format.pose_header 93 | :members: 94 | :undoc-members: 95 | :show-inheritance: 96 | 97 | pose\_format.pose\_representation module 98 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 99 | 100 | .. automodule:: pose_format.pose_representation 101 | :members: 102 | :undoc-members: 103 | :show-inheritance: 104 | 105 | pose\_format.pose\_test module 106 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 107 | 108 | ..
automodule:: pose_format.pose_test 109 | :members: 110 | :undoc-members: 111 | :show-inheritance: 112 | 113 | pose\_format.pose\_visualizer module 114 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 115 | 116 | .. automodule:: pose_format.pose_visualizer 117 | :members: 118 | :undoc-members: 119 | :show-inheritance: 120 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.masked.rst: -------------------------------------------------------------------------------- 1 | pose_format.tensorflow.masked package 2 | ======================================= 3 | 4 | .. automodule:: pose_format.tensorflow.masked 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | .. toctree:: 10 | :maxdepth: 4 11 | 12 | pose_format.tensorflow.masked.tensor 13 | pose_format.tensorflow.masked.tensor_graph_mode_test 14 | pose_format.tensorflow.masked.tensor_test 15 | pose_format.tensorflow.masked.tensorflow 16 | pose_format.tensorflow.masked.tensorflow_test -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.masked.tensor.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.masked.tensor module 2 | ============================================ 3 | 4 | .. automodule:: pose_format.tensorflow.masked.tensor 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.masked.tensor_graph_mode_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.masked.tensor\_graph\_mode\_test module 2 | =============================================================== 3 | 4 | .. currentmodule:: pose_format.tensorflow.masked.tensor_graph_mode_test 5 | 6 | .. 
automodule:: pose_format.tensorflow.masked.tensor_graph_mode_test 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.masked.tensor_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.masked.tensor\_test module 2 | ================================================== 3 | 4 | .. currentmodule:: pose_format.tensorflow.masked.tensor_test 5 | 6 | .. automodule:: pose_format.tensorflow.masked.tensor_test 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.masked.tensorflow.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.masked.tensorflow module 2 | ================================================ 3 | 4 | .. currentmodule:: pose_format.tensorflow.masked.tensorflow 5 | 6 | .. automodule:: pose_format.tensorflow.masked.tensorflow 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.masked.tensorflow_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.masked.tensorflow\_test module 2 | ====================================================== 3 | 4 | .. currentmodule:: pose_format.tensorflow.masked.tensorflow_test 5 | 6 | .. 
automodule:: pose_format.tensorflow.masked.tensorflow_test 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.pose_body.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.pose\_body module 2 | ========================================= 3 | 4 | .. automodule:: pose_format.tensorflow.pose_body 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.pose_body_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.pose\_body\_test module 2 | =============================================== 3 | 4 | .. automodule:: pose_format.tensorflow.pose_body_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.pose_representation.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.pose\_representation module 2 | =================================================== 3 | 4 | .. automodule:: pose_format.tensorflow.pose_representation 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.pose_representation_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.pose\_representation\_test module 2 | ========================================================= 3 | 4 | .. 
automodule:: pose_format.tensorflow.pose_representation_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.representation.angle.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.representation.angle module 2 | =================================================== 3 | 4 | .. automodule:: pose_format.tensorflow.representation.angle 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.representation.angle_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.representation.angle\_test module 2 | ========================================================= 3 | 4 | .. automodule:: pose_format.tensorflow.representation.angle_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.representation.distance.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.representation.distance module 2 | ====================================================== 3 | 4 | .. automodule:: pose_format.tensorflow.representation.distance 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.representation.distance_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.representation.distance\_test module 2 | ============================================================ 3 | 4 | .. 
automodule:: pose_format.tensorflow.representation.distance_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.representation.inner_angle.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.representation.inner\_angle module 2 | ========================================================== 3 | 4 | .. automodule:: pose_format.tensorflow.representation.inner_angle 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.representation.inner_angle_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.representation.inner\_angle\_test module 2 | ================================================================ 3 | 4 | .. automodule:: pose_format.tensorflow.representation.inner_angle_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.representation.point_line_distance.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.representation.point\_line\_distance module 2 | =================================================================== 3 | 4 | .. 
automodule:: pose_format.tensorflow.representation.point_line_distance 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.representation.point_line_distance_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.tensorflow.representation.point\_line\_distance\_test module 2 | ========================================================================= 3 | 4 | .. automodule:: pose_format.tensorflow.representation.point_line_distance_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.representation.rst: -------------------------------------------------------------------------------- 1 | pose_format.tensorflow.representation package 2 | ============================================== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | pose_format.tensorflow.representation.angle 8 | pose_format.tensorflow.representation.angle_test 9 | pose_format.tensorflow.representation.distance 10 | pose_format.tensorflow.representation.distance_test 11 | pose_format.tensorflow.representation.inner_angle 12 | pose_format.tensorflow.representation.inner_angle_test 13 | pose_format.tensorflow.representation.point_line_distance 14 | pose_format.tensorflow.representation.point_line_distance_test -------------------------------------------------------------------------------- /docs/pose_format.tensorflow.rst: -------------------------------------------------------------------------------- 1 | .. _pose_format_tensorflow: 2 | 3 | pose\_format.tensorflow package 4 | =============================== 5 | 6 | Tools for TensorFlow compatibility. 7 | 8 | .. 
toctree:: 9 | :maxdepth: 8 10 | :caption: pose_format.tensorflow subpackages 11 | 12 | pose_format.tensorflow.masked 13 | pose_format.tensorflow.representation 14 | 15 | 16 | 17 | pose\_format.tensorflow.pose\_body module 18 | ----------------------------------------- 19 | 20 | .. automodule:: pose_format.tensorflow.pose_body 21 | :members: 22 | :undoc-members: 23 | :show-inheritance: 24 | 25 | pose\_format.tensorflow.pose\_body\_test module 26 | ----------------------------------------------- 27 | 28 | .. automodule:: pose_format.tensorflow.pose_body_test 29 | :members: 30 | :undoc-members: 31 | :show-inheritance: 32 | 33 | pose\_format.tensorflow.pose\_representation module 34 | --------------------------------------------------- 35 | 36 | .. automodule:: pose_format.tensorflow.pose_representation 37 | :members: 38 | :undoc-members: 39 | :show-inheritance: 40 | 41 | pose\_format.tensorflow.pose\_representation\_test module 42 | --------------------------------------------------------- 43 | 44 | .. automodule:: pose_format.tensorflow.pose_representation_test 45 | :members: 46 | :undoc-members: 47 | :show-inheritance: 48 | 49 | 50 | -------------------------------------------------------------------------------- /docs/pose_format.testing.rst: -------------------------------------------------------------------------------- 1 | .. _pose_format_testing: 2 | 3 | pose\_format.testing package 4 | ============================ 5 | 6 | .. automodule:: pose_format.testing 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | 11 | Modules for testing and validation of pose data. 
12 | -------------------------------------------------------------------------------- /docs/pose_format.third_party.py.rst: -------------------------------------------------------------------------------- 1 | pose\_format.third\_party.py package 2 | ==================================== 3 | 4 | Here are the third-party packages contained in the ``pose_format.third_party.py`` package: 5 | 6 | - `pose_format.third_party.py/dataclasses` - Contains a BUILD file. 7 | - `pose_format.third_party.py/numpy` - Contains a BUILD file. 8 | - `pose_format.third_party.py/scipy` - Contains a BUILD file. 9 | - `pose_format.third_party.py/tensorflow` - Contains a BUILD file. 10 | - `pose_format.third_party.py/torch` - Contains a BUILD file. 11 | - `pose_format.third_party.py/tqdm` - Contains a BUILD file. 12 | 13 | -------------------------------------------------------------------------------- /docs/pose_format.third_party.rst: -------------------------------------------------------------------------------- 1 | .. _pose_format_third_party: 2 | 3 | pose\_format.third\_party package 4 | ================================= 5 | 6 | Interfaces with third-party tools and libraries. 7 | 8 | .. automodule:: pose_format.third_party 9 | :members: 10 | :undoc-members: 11 | :show-inheritance: 12 | 13 | 14 | Subpackages 15 | ----------- 16 | 17 | .. toctree:: 18 | :maxdepth: 4 19 | 20 | pose_format.third_party.py 21 | 22 | -------------------------------------------------------------------------------- /docs/pose_format.torch.masked.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.masked package 2 | ================================= 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | :caption: pose_format.torch.masked subpackages: 7 | 8 | pose_format.torch.masked.tensor 9 | pose_format.torch.masked.torch 10 | pose_format.torch.masked.torch_test 11 | 12 | pose_format.torch.masked.tensor module 13 | --------------------------------------- 14 | .. 
currentmodule:: pose_format.torch.masked.tensor 15 | 16 | .. automodule:: pose_format.torch.masked.tensor 17 | :no-members: 18 | 19 | .. autoclass:: MaskedTensor 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | :inherited-members: 24 | 25 | pose_format.torch.masked.torch module 26 | -------------------------------------- 27 | .. currentmodule:: pose_format.torch.masked.torch 28 | 29 | .. automodule:: pose_format.torch.masked.torch 30 | :members: 31 | :undoc-members: 32 | :show-inheritance: 33 | 34 | pose_format.torch.masked.torch_test module 35 | ------------------------------------------- 36 | .. currentmodule:: pose_format.torch.masked.torch_test 37 | 38 | .. automodule:: pose_format.torch.masked.torch_test 39 | :members: 40 | :undoc-members: 41 | :show-inheritance: 42 | -------------------------------------------------------------------------------- /docs/pose_format.torch.masked.tensor.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.masked.tensor module 2 | ======================================= 3 | 4 | .. automodule:: pose_format.torch.masked.tensor 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.masked.torch.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.masked.torch module 2 | ====================================== 3 | 4 | .. automodule:: pose_format.torch.masked.torch 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.masked.torch_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.masked.torch\_test module 2 | ============================================ 3 | 4 | .. 
automodule:: pose_format.torch.masked.torch_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.pose_body.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.pose\_body module 2 | ==================================== 3 | 4 | .. automodule:: pose_format.torch.pose_body 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.pose_representation.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.pose\_representation module 2 | ============================================== 3 | 4 | .. automodule:: pose_format.torch.pose_representation 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.representation.angle.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.representation.angle module 2 | ============================================== 3 | 4 | .. automodule:: pose_format.torch.representation.angle 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.representation.angle_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.representation.angle\_test module 2 | ==================================================== 3 | 4 | .. 
automodule:: pose_format.torch.representation.angle_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.representation.distance.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.representation.distance module 2 | ================================================= 3 | 4 | .. automodule:: pose_format.torch.representation.distance 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.representation.distance_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.representation.distance\_test module 2 | ======================================================= 3 | 4 | .. automodule:: pose_format.torch.representation.distance_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.representation.inner_angle.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.representation.inner\_angle module 2 | ===================================================== 3 | 4 | .. automodule:: pose_format.torch.representation.inner_angle 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.representation.inner_angle_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.representation.inner\_angle\_test module 2 | =========================================================== 3 | 4 | .. 
automodule:: pose_format.torch.representation.inner_angle_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.representation.point_line_distance.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.representation.point\_line\_distance module 2 | ============================================================== 3 | 4 | .. automodule:: pose_format.torch.representation.point_line_distance 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.representation.point_line_distance_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.representation.point\_line\_distance\_test module 2 | ==================================================================== 3 | 4 | .. automodule:: pose_format.torch.representation.point_line_distance_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.representation.points.rst: -------------------------------------------------------------------------------- 1 | pose\_format.torch.representation.points module 2 | =============================================== 3 | 4 | .. automodule:: pose_format.torch.representation.points 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.torch.representation.rst: -------------------------------------------------------------------------------- 1 | pose_format.torch.representation package 2 | ========================================= 3 | 4 | .. 
automodule:: pose_format.torch.representation 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | .. toctree:: 10 | :maxdepth: 4 11 | 12 | pose_format.torch.representation.angle 13 | pose_format.torch.representation.angle_test 14 | pose_format.torch.representation.distance 15 | pose_format.torch.representation.distance_test 16 | pose_format.torch.representation.inner_angle 17 | pose_format.torch.representation.inner_angle_test 18 | pose_format.torch.representation.point_line_distance 19 | pose_format.torch.representation.point_line_distance_test 20 | pose_format.torch.representation.points -------------------------------------------------------------------------------- /docs/pose_format.torch.rst: -------------------------------------------------------------------------------- 1 | .. _pose_format_torch: 2 | 3 | pose\_format.torch package 4 | ========================== 5 | 6 | PyTorch-specific functionalities. 7 | 8 | .. automodule:: pose_format.torch 9 | :members: 10 | :undoc-members: 11 | :show-inheritance: 12 | 13 | 14 | 15 | .. toctree:: 16 | :maxdepth: 4 17 | :caption: Subpackages: 18 | 19 | pose_format.torch.masked 20 | pose_format.torch.representation 21 | 22 | 23 | Submodules 24 | ---------- 25 | 26 | pose\_format.torch.pose\_body module 27 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 28 | 29 | .. automodule:: pose_format.torch.pose_body 30 | :members: 31 | :undoc-members: 32 | :show-inheritance: 33 | 34 | pose\_format.torch.pose\_representation module 35 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 36 | 37 | .. automodule:: pose_format.torch.pose_representation 38 | :members: 39 | :undoc-members: 40 | :show-inheritance: 41 | -------------------------------------------------------------------------------- /docs/pose_format.utils.fast_math.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.fast\_math module 2 | ==================================== 3 | 4 | .. 
automodule:: pose_format.utils.fast_math 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.utils.holistic.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.holistic module 2 | ================================== 3 | 4 | .. automodule:: pose_format.utils.holistic 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.utils.normalization_3d.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.normalization\_3d module 2 | =========================================== 3 | 4 | .. automodule:: pose_format.utils.normalization_3d 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.utils.openpose.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.openpose module 2 | ================================== 3 | 4 | .. automodule:: pose_format.utils.openpose 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.utils.openpose_135.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.openpose\_135 module 2 | ======================================= 3 | 4 | .. 
automodule:: pose_format.utils.openpose_135 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.utils.openpose_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.openpose\_test module 2 | ======================================== 3 | 4 | .. automodule:: pose_format.utils.openpose_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.utils.optical_flow.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.optical\_flow module 2 | ======================================= 3 | 4 | .. automodule:: pose_format.utils.optical_flow 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.utils.pose_converter.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.pose\_converter module 2 | ========================================= 3 | 4 | .. automodule:: pose_format.utils.pose_converter 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.utils.reader.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.reader module 2 | ================================ 3 | 4 | .. 
automodule:: pose_format.utils.reader 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.utils.reader_test.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.reader\_test module 2 | ====================================== 3 | 4 | .. automodule:: pose_format.utils.reader_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format.utils.rst: -------------------------------------------------------------------------------- 1 | .. _pose_format_utils: 2 | 3 | pose\_format.utils package 4 | ========================== 5 | 6 | 7 | .. toctree:: 8 | :maxdepth: 8 9 | :caption: pose_format.utils submodules: 10 | 11 | pose_format.utils.fast_math 12 | pose_format.utils.normalization_3d 13 | pose_format.utils.openpose 14 | pose_format.utils.openpose_135 15 | pose_format.utils.openpose_test 16 | pose_format.utils.optical_flow 17 | pose_format.utils.reader 18 | pose_format.utils.reader_test 19 | pose_format.utils.siren 20 | pose_format.utils.pose_converter 21 | pose_format.utils.holistic -------------------------------------------------------------------------------- /docs/pose_format.utils.siren.rst: -------------------------------------------------------------------------------- 1 | pose\_format.utils.siren module 2 | =============================== 3 | 4 | .. automodule:: pose_format.utils.siren 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/pose_format_js.rst: -------------------------------------------------------------------------------- 1 | pose_format 2 | ============ 3 | 4 | .. 
toctree:: 5 | :maxdepth: 2 6 | 7 | json 8 | src 9 | 10 | -------------------------------------------------------------------------------- /docs/pose_viewer_example.rst: -------------------------------------------------------------------------------- 1 | example 2 | ========= 3 | 4 | example.pose 5 | ------------ 6 | 7 | .. raw:: html 8 | :file: ../src/js/pose_viewer/example/index.html 9 | -------------------------------------------------------------------------------- /docs/pose_viewer_files.rst: -------------------------------------------------------------------------------- 1 | pose viewer files 2 | ================= 3 | 4 | stencil.config.ts 5 | ----------------- 6 | 7 | .. literalinclude:: ../src/js/pose_viewer/stencil.config.ts 8 | :language: typescript 9 | :linenos: 10 | :caption: Description or Title for the TypeScript file 11 | 12 | 13 | prettierrc.json 14 | ---------------- 15 | 16 | .. literalinclude:: ../src/js/pose_viewer/.prettierrc.json 17 | :language: json 18 | :linenos: 19 | :caption: Description or Title for the TypeScript file 20 | 21 | 22 | package.json 23 | ------------- 24 | 25 | .. literalinclude:: ../src/js/pose_viewer/package.json 26 | :language: json 27 | :linenos: 28 | :caption: Description or Title for the TypeScript file 29 | 30 | 31 | package-lock.json 32 | ------------------ 33 | 34 | .. literalinclude:: ../src/js/pose_viewer/package-lock.json 35 | :language: json 36 | :linenos: 37 | :caption: Description or Title for the TypeScript file 38 | 39 | 40 | tsconfig.json 41 | ------------- 42 | 43 | .. literalinclude:: ../src/js/pose_viewer/tsconfig.json 44 | :language: json 45 | :linenos: 46 | :caption: Description or Title for the TypeScript file -------------------------------------------------------------------------------- /docs/pose_viewer_js.rst: -------------------------------------------------------------------------------- 1 | pose_viewer 2 | =========== 3 | 4 | .. 
toctree:: 5 | :maxdepth: 2 6 | 7 | pose_viewer_files 8 | pose_viewer_example 9 | pose_viewer_src 10 | 11 | -------------------------------------------------------------------------------- /docs/pose_viewer_src.rst: -------------------------------------------------------------------------------- 1 | src 2 | === 3 | 4 | index.html 5 | ---------- 6 | 7 | .. raw:: html 8 | :file: ../src/js/pose_viewer/example/index.html 9 | 10 | components.d.ts 11 | ---------------- 12 | 13 | .. literalinclude:: ../src/js/pose_viewer/src/components.d.ts 14 | :language: typescript 15 | :linenos: 16 | :caption: Description or Title for the TypeScript file 17 | 18 | 19 | index.ts 20 | -------- 21 | 22 | .. literalinclude:: ../src/js/pose_viewer/src/index.ts 23 | :language: typescript 24 | :linenos: 25 | :caption: Description or Title for the TypeScript file 26 | 27 | 28 | -------------------------------------------------------------------------------- /docs/readme.rst: -------------------------------------------------------------------------------- 1 | Pose Format Introduction 2 | ======================== 3 | 4 | .. 
include:: ../README.md 5 | :parser: myst_parser.sphinx_ 6 | 7 | -------------------------------------------------------------------------------- /docs/references.bib: -------------------------------------------------------------------------------- 1 | @misc{moryossef2021pose-format, 2 | title={pose-format: Library for viewing, augmenting, and handling .pose files}, 3 | author={Moryossef, Amit and M\"{u}ller, Mathias}, 4 | howpublished={\url{https://github.com/sign-language-processing/pose}}, 5 | year={2021} 6 | } -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==1.4.0 2 | alabaster==0.7.13 3 | astunparse==1.6.3 4 | attrs==23.1.0 5 | autodocsumm==0.2.11 6 | Babel==2.12.1 7 | cachetools==5.3.1 8 | certifi==2023.7.22 9 | cffi==1.15.1 10 | charset-normalizer==3.2.0 11 | colorama==0.4.6 12 | colorlog==6.7.0 13 | commonmark==0.9.1 14 | contourpy==1.1.0 15 | cycler==0.11.0 16 | Cython==3.0.0 17 | filelock==3.12.2 18 | flatbuffers==23.5.26 19 | fonttools==4.43.0 20 | gast==0.4.0 21 | google-auth==2.22.0 22 | google-auth-oauthlib==0.4.6 23 | google-pasta==0.2.0 24 | graphviz==0.20.1 25 | grpcio==1.57.0 26 | h5py==3.9.0 27 | idna==3.4 28 | imagesize==1.4.1 29 | importlib-metadata==6.8.0 30 | importlib-resources==6.0.1 31 | Jinja2==3.1.2 32 | jsonschema==4.19.0 33 | jsonschema-specifications==2023.7.1 34 | keras==2.10.0 35 | Keras-Preprocessing==1.1.2 36 | kiwisolver==1.4.4 37 | latexcodec==2.0.1 38 | libclang==16.0.6 39 | Markdown==3.4.4 40 | markdown-it-py==3.0.0 41 | MarkupSafe==2.1.3 42 | matplotlib==3.7.2 43 | mdit-py-plugins==0.4.0 44 | mdurl==0.1.2 45 | mediapipe==0.10.3 46 | mistune==0.8.4 47 | mpmath==1.3.0 48 | myst-parser==2.0.0 49 | networkx==3.1 50 | numpy==1.24.4 51 | oauthlib==3.2.2 52 | opencv-contrib-python==4.8.0.76 53 | opt-einsum==3.3.0 54 | packaging==23.1 55 | Pillow==10.0.0 56 | pip==23.2.1 57 | 
pkgutil_resolve_name==1.3.10 58 | pose-format==0.2.2 59 | protobuf==3.19.6 60 | pyasn1==0.5.0 61 | pyasn1-modules==0.3.0 62 | pybtex==0.24.0 63 | pybtex-docutils==1.0.2 64 | pycparser==2.21 65 | Pygments==2.16.1 66 | pyparsing==3.0.9 67 | python-dateutil==2.8.2 68 | pytz==2023.3 69 | PyYAML==6.0.1 70 | referencing==0.30.2 71 | requests==2.31.0 72 | requests-file==1.5.1 73 | requests-oauthlib==1.3.1 74 | rpds-py==0.9.2 75 | rsa==4.9 76 | scipy==1.10.1 77 | setuptools==56.0.0 78 | six==1.16.0 79 | snowballstemmer==2.2.0 80 | sounddevice==0.4.6 81 | Sphinx==6.2.1 82 | sphinx-data-viewer==0.1.2 83 | sphinx-needs==1.3.0 84 | sphinx-rtd-theme==1.2.2 85 | sphinxcontrib-applehelp==1.0.4 86 | sphinxcontrib-bibtex==2.5.0 87 | sphinxcontrib-devhelp==1.0.2 88 | sphinxcontrib-htmlhelp==2.0.1 89 | sphinxcontrib-jquery==4.1 90 | sphinxcontrib-jsmath==1.0.1 91 | sphinxcontrib-plantuml==0.25 92 | sphinxcontrib-qthelp==1.0.3 93 | sphinxcontrib-serializinghtml==1.1.5 94 | sympy==1.12 95 | tensorboard==2.10.1 96 | tensorboard-data-server==0.6.1 97 | tensorboard-plugin-wit==1.8.1 98 | tensorflow==2.10.1 99 | tensorflow-estimator==2.10.0 100 | tensorflow-io-gcs-filesystem==0.31.0 101 | termcolor==2.3.0 102 | torch==2.0.1 103 | tqdm==4.66.1 104 | typing_extensions==4.7.1 105 | urllib3==1.26.16 106 | vidgear==0.3.1 107 | Werkzeug==2.3.7 108 | wheel==0.41.1 109 | wrapt==1.15.0 110 | zipp==3.16.2 111 | docutils==0.18.1 -------------------------------------------------------------------------------- /docs/specs/v0.0.md: -------------------------------------------------------------------------------- 1 | # Header 2 | \[`float` Version] 3 | \[`unsigned short` width] 4 | \[`unsigned short` height] 5 | \[`unsigned short` depth] 6 | \[`unsigned short` Number of Components] 7 | 8 | ### For every component: 9 | \[`string` Component Name] 10 | \[`char[]` Format] 11 | \[`unsigned short` Number of Points] 12 | \[`unsigned short` Number of Limbs] 13 | \[`unsigned short` Number of Colors] 14 | 15 | #### 
For every point: 16 | \[`string` Point Name] 17 | 18 | #### For every limb: 19 | \[`unsigned short` From Point Index] 20 | \[`unsigned short` To Point Index] 21 | 22 | #### For every color: 23 | \[`unsigned short` Red] 24 | \[`unsigned short` Green] 25 | \[`unsigned short` Blue] 26 | 27 | # Body 28 | \[`unsigned short` FPS] 29 | \[`unsigned short` Number of frames] 30 | 31 | ## For every frame 32 | \[`unsigned short` Number of People] 33 | 34 | #### For every person: 35 | \[`short` Person ID] 36 | 37 | ##### For every person's component: 38 | \[`float` X] 39 | \[`float` Y] 40 | \[`float` Confidence] 41 | -------------------------------------------------------------------------------- /docs/specs/v0.1.md: -------------------------------------------------------------------------------- 1 | # Header 2 | \[`float` Version] 3 | \[`unsigned short` width] 4 | \[`unsigned short` height] 5 | \[`unsigned short` depth] 6 | \[`unsigned short` Number of Components] 7 | 8 | ### For every component: 9 | \[`string` Component Name] 10 | \[`char[]` Format] 11 | \[`unsigned short` Number of Points] 12 | \[`unsigned short` Number of Limbs] 13 | \[`unsigned short` Number of Colors] 14 | 15 | #### For every point: 16 | \[`string` Point Name] 17 | 18 | #### For every limb: 19 | \[`unsigned short` From Point Index] 20 | \[`unsigned short` To Point Index] 21 | 22 | #### For every color: 23 | \[`unsigned short` Red] 24 | \[`unsigned short` Green] 25 | \[`unsigned short` Blue] 26 | 27 | 28 | # Body 29 | \[`unsigned short` FPS] 30 | \[`unsigned short` Number of frames] # THIS IS A PROBLEM 31 | \[`unsigned short` Number of people] 32 | 33 | ## For every frame 34 | #### For every person: 35 | ##### For every person's component: 36 | \[`float` X] 37 | \[`float` Y] 38 | 39 | ## For every frame 40 | #### For every person: 41 | ##### For every person's component: 42 | \[`float` Confidence] 43 | -------------------------------------------------------------------------------- /docs/specs/v0.2.md: 
-------------------------------------------------------------------------------- 1 | # Header 2 | \[`float` Version] 3 | \[`unsigned short` width] 4 | \[`unsigned short` height] 5 | \[`unsigned short` depth] 6 | \[`unsigned short` Number of Components] 7 | 8 | ### For every component: 9 | \[`string` Component Name] 10 | \[`char[]` Format] 11 | \[`unsigned short` Number of Points] 12 | \[`unsigned short` Number of Limbs] 13 | \[`unsigned short` Number of Colors] 14 | 15 | #### For every point: 16 | \[`string` Point Name] 17 | 18 | #### For every limb: 19 | \[`unsigned short` From Point Index] 20 | \[`unsigned short` To Point Index] 21 | 22 | #### For every color: 23 | \[`unsigned short` Red] 24 | \[`unsigned short` Green] 25 | \[`unsigned short` Blue] 26 | 27 | # Body 28 | \[`float` FPS] 29 | \[`unsigned int` Number of frames] 30 | \[`unsigned short` Number of people] 31 | 32 | ## For every frame 33 | #### For every person: 34 | ##### For every person's component: 35 | \[`float` X] 36 | \[`float` Y] 37 | 38 | ## For every frame 39 | #### For every person: 40 | ##### For every person's component: 41 | \[`float` Confidence] 42 | -------------------------------------------------------------------------------- /docs/specs_v01.rst: -------------------------------------------------------------------------------- 1 | .. _specs_v01: 2 | 3 | specs/v0.1.md 4 | ============= 5 | 6 | .. include:: ../docs/specs/v0.1.md 7 | :parser: myst_parser.sphinx_ -------------------------------------------------------------------------------- /docs/src.rst: -------------------------------------------------------------------------------- 1 | pose_format/src 2 | ================ 3 | 4 | index.ts 5 | --------- 6 | 7 | 8 | .. literalinclude:: ../src/js/pose_format/src/index.ts 9 | :language: typescript 10 | :linenos: 11 | :caption: Description or Title for the TypeScript file 12 | 13 | 14 | parser.ts 15 | --------- 16 | 17 | .. 
literalinclude:: ../src/js/pose_format/src/parser.ts 18 | :language: typescript 19 | :linenos: 20 | :caption: Description or Title for the TypeScript file 21 | 22 | types.ts 23 | -------- 24 | 25 | .. literalinclude:: ../src/js/pose_format/src/types.ts 26 | :language: typescript 27 | :linenos: 28 | :caption: Description or Title for the TypeScript file -------------------------------------------------------------------------------- /docs/test_data.rst: -------------------------------------------------------------------------------- 1 | Test Data 2 | ========= 3 | 4 | All test data is stored in the repository, specifically under the path: ``test/data``. 5 | 6 | Available Test Data 7 | ------------------- 8 | 9 | Below is the list of test data available: 10 | 11 | * **Pose Files**: 12 | 13 | - ``mediapipe.pose`` 14 | - ``mediapipe_hand_normalized.pose`` 15 | - ``mediapipe_long.pose`` 16 | - ``mediapipe_long_hand_normalized.pose`` 17 | - ``openpose.pose`` 18 | 19 | * **Image Files**: 20 | 21 | - ``optical_flow.png`` 22 | -------------------------------------------------------------------------------- /docs/tests.rst: -------------------------------------------------------------------------------- 1 | .. _tests: 2 | 3 | tests package 4 | ============= 5 | 6 | The `tests` package is a part of the `pose_format` package, 7 | ensuring the robustness of the package. It also has designed test datasets and submodules for testing. 8 | 9 | - **Test Data**: This section has the datasets used for testing the package. 10 | 11 | - **tests Submodules**: These are testing modules, each is looking into specific functionalities of the `pose_format` package. 12 | 13 | - ``tests.hand_normalization_test``: Focuses on validating the accuracy and correctness of 3D hand pose normalization techniques. 14 | 15 | - ``tests.optical_flow_test``: For testing the implementation of optical flow algorithms. 
16 | 17 | - ``tests.pose_tf_graph_mode_test``: Set of tests dedicated to test the behavior and output of TensorFlow operations when run in graph mode. 18 | 19 | - ``tests.pose_test``: A broad-spectrum testing module, probing a plethora of pose-related functionalities. It delves into standard pose operations and further evaluates their interplay with platforms like Numpy and TensorFlow. 20 | 21 | .. toctree:: 22 | :maxdepth: 3 23 | 24 | test_data 25 | tests_subs 26 | -------------------------------------------------------------------------------- /docs/tests_hand_normalization_test.rst: -------------------------------------------------------------------------------- 1 | tests.hand\_normalization\_test module 2 | ====================================== 3 | 4 | .. automodule:: tests.hand_normalization_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/tests_optical_flow.rst: -------------------------------------------------------------------------------- 1 | tests.optical\_flow\_test module 2 | ================================ 3 | 4 | .. automodule:: tests.optical_flow_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/tests_pose.rst: -------------------------------------------------------------------------------- 1 | tests.pose\_test module 2 | ======================= 3 | 4 | .. automodule:: tests.pose_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/tests_pose_tf.rst: -------------------------------------------------------------------------------- 1 | tests.pose\_tf\_graph\_mode\_test module 2 | ======================================== 3 | 4 | .. 
automodule:: tests.pose_tf_graph_mode_test 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/tests_subs.rst: -------------------------------------------------------------------------------- 1 | tests Submodules 2 | ================= 3 | 4 | .. toctree:: 5 | :maxdepth: 6 6 | 7 | tests_hand_normalization_test 8 | tests_optical_flow 9 | tests_pose_tf 10 | tests_pose -------------------------------------------------------------------------------- /docs/utils/plantuml.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/docs/utils/plantuml.jar -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/examples/__init__.py -------------------------------------------------------------------------------- /examples/pose_to_siren_to_pose.py: -------------------------------------------------------------------------------- 1 | """ 2 | pose_format_converter 3 | --------------------- 4 | A utility to convert poses using Siren neural networks and visualize the results. 
5 | 6 | Modules: 7 | - numpy 8 | - pose_format 9 | - pose_format.utils.siren 10 | - pose_format.numpy 11 | - pose_format.pose_visualizer 12 | 13 | Functions: 14 | - pose_to_siren_to_pose(p: Pose, fps=None) -> Pose 15 | 16 | Example usage: 17 | $ python pose_format_converter.py 18 | """ 19 | 20 | import numpy as np 21 | from numpy import ma 22 | 23 | import pose_format.utils.siren as siren 24 | from pose_format import Pose 25 | from pose_format.numpy import NumPyPoseBody 26 | from pose_format.pose_visualizer import PoseVisualizer 27 | 28 | 29 | def pose_to_siren_to_pose(p: Pose, fps=None) -> Pose: 30 | """ 31 | Converts a given Pose object to its Siren representation and back to Pose. 32 | 33 | Parameters 34 | ---------- 35 | p : Pose 36 | Input pose to be converted. 37 | fps : int, optional 38 | Frames per second for the Siren representation. If None, uses the fps of the input Pose. 39 | 40 | Returns 41 | ------- 42 | Pose 43 | The Pose representation after converting it through the Siren neural network. 44 | 45 | """ 46 | p.body.zero_filled() 47 | mu, std = p.normalize_distribution() 48 | 49 | net = siren.get_pose_siren(p, total_steps=3000, steps_til_summary=100, learning_rate=1e-4, cuda=True) 50 | 51 | new_fps = fps if fps is not None else p.body.fps 52 | coords = siren.PoseDataset.get_coords(time=len(p.body.data) / p.body.fps, fps=new_fps) 53 | pred = net(coords).cpu().numpy() 54 | 55 | pose_body = NumPyPoseBody(fps=new_fps, data=ma.array(pred), confidence=np.ones(shape=tuple(pred.shape[:3]))) 56 | p = Pose(header=p.header, body=pose_body) 57 | p.unnormalize_distribution(mu, std) 58 | return p 59 | 60 | 61 | if __name__ == "__main__": 62 | # Example usage of the pose_to_siren_to_pose function. 
63 | pose_path = "/home/nlp/amit/PhD/PoseFormat/sample-data/1.pose" 64 | 65 | buffer = open(pose_path, "rb").read() 66 | p = Pose.read(buffer) 67 | print("Poses loaded") 68 | 69 | p = pose_to_siren_to_pose(p) 70 | 71 | info = p.header.normalization_info( 72 | p1=("pose_keypoints_2d", "RShoulder"), 73 | p2=("pose_keypoints_2d", "LShoulder") 74 | ) 75 | p.normalize(info, scale_factor=300) 76 | p.focus() 77 | 78 | v = PoseVisualizer(p) 79 | v.save_video("reconstructed.mp4", v.draw(max_frames=3000)) 80 | -------------------------------------------------------------------------------- /src/js/pose_format/.gitignore: -------------------------------------------------------------------------------- 1 | dist/ 2 | node_modules/ 3 | -------------------------------------------------------------------------------- /src/js/pose_format/README.md: -------------------------------------------------------------------------------- 1 | # `pose-format` 2 | 3 | ## Publishing 4 | 5 | ```bash 6 | npm run build 7 | npm publish 8 | ``` -------------------------------------------------------------------------------- /src/js/pose_format/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "pose-format", 3 | "version": "1.5.1", 4 | "lockfileVersion": 2, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "pose-format", 9 | "version": "1.5.1", 10 | "license": "MIT", 11 | "dependencies": { 12 | "binary-parser": "2.2.1" 13 | }, 14 | "devDependencies": { 15 | "@types/node": "22.10.0", 16 | "typescript": "5.7.2" 17 | } 18 | }, 19 | "node_modules/@types/node": { 20 | "version": "22.10.0", 21 | "resolved": "https://registry.npmjs.org/@types/node/-/node-22.10.0.tgz", 22 | "integrity": "sha512-XC70cRZVElFHfIUB40FgZOBbgJYFKKMa5nb9lxcwYstFG/Mi+/Y0bGS+rs6Dmhmkpq4pnNiLiuZAbc02YCOnmA==", 23 | "dev": true, 24 | "dependencies": { 25 | "undici-types": "~6.20.0" 26 | } 27 | }, 28 | "node_modules/binary-parser": { 29 | "version": "2.2.1", 
30 | "resolved": "https://registry.npmjs.org/binary-parser/-/binary-parser-2.2.1.tgz", 31 | "integrity": "sha512-5ATpz/uPDgq5GgEDxTB4ouXCde7q2lqAQlSdBRQVl/AJnxmQmhIfyxJx+0MGu//D5rHQifkfGbWWlaysG0o9NA==", 32 | "engines": { 33 | "node": ">=12" 34 | } 35 | }, 36 | "node_modules/typescript": { 37 | "version": "5.7.2", 38 | "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz", 39 | "integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==", 40 | "dev": true, 41 | "bin": { 42 | "tsc": "bin/tsc", 43 | "tsserver": "bin/tsserver" 44 | }, 45 | "engines": { 46 | "node": ">=14.17" 47 | } 48 | }, 49 | "node_modules/undici-types": { 50 | "version": "6.20.0", 51 | "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", 52 | "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", 53 | "dev": true 54 | } 55 | }, 56 | "dependencies": { 57 | "@types/node": { 58 | "version": "22.10.0", 59 | "resolved": "https://registry.npmjs.org/@types/node/-/node-22.10.0.tgz", 60 | "integrity": "sha512-XC70cRZVElFHfIUB40FgZOBbgJYFKKMa5nb9lxcwYstFG/Mi+/Y0bGS+rs6Dmhmkpq4pnNiLiuZAbc02YCOnmA==", 61 | "dev": true, 62 | "requires": { 63 | "undici-types": "~6.20.0" 64 | } 65 | }, 66 | "binary-parser": { 67 | "version": "2.2.1", 68 | "resolved": "https://registry.npmjs.org/binary-parser/-/binary-parser-2.2.1.tgz", 69 | "integrity": "sha512-5ATpz/uPDgq5GgEDxTB4ouXCde7q2lqAQlSdBRQVl/AJnxmQmhIfyxJx+0MGu//D5rHQifkfGbWWlaysG0o9NA==" 70 | }, 71 | "typescript": { 72 | "version": "5.7.2", 73 | "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz", 74 | "integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==", 75 | "dev": true 76 | }, 77 | "undici-types": { 78 | "version": "6.20.0", 79 | "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", 80 | "integrity": 
"sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", 81 | "dev": true 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/js/pose_format/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "pose-format", 3 | "version": "1.6.0", 4 | "description": "Library for handling .pose files", 5 | "scripts": { 6 | "build": "tsc", 7 | "start": "tsc -w" 8 | }, 9 | "files": [ 10 | "dist/" 11 | ], 12 | "types": "dist/index.d.ts", 13 | "main": "dist/index.js", 14 | "module": "dist/index.js", 15 | "author": "Amit Moryossef", 16 | "license": "MIT", 17 | "dependencies": { 18 | "binary-parser": "2.2.1" 19 | }, 20 | "devDependencies": { 21 | "@types/node": "22.10.0", 22 | "typescript": "5.7.2" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/js/pose_format/src/index.ts: -------------------------------------------------------------------------------- 1 | import {PoseBodyModel, PoseHeaderModel} from "./types"; 2 | import {Buffer} from "buffer"; 3 | import {parsePose} from "./parser"; 4 | import * as fs from "fs"; 5 | 6 | export * from './types'; 7 | 8 | export class Pose { 9 | constructor(public header: PoseHeaderModel, public body: PoseBodyModel) { 10 | } 11 | 12 | static from(buffer: Buffer) { 13 | const pose = parsePose(buffer); 14 | return new Pose(pose.header, pose.body); 15 | } 16 | 17 | static async fromLocal(path: string) { 18 | const buffer = fs.readFileSync(path); 19 | return Pose.from(buffer); 20 | } 21 | 22 | static async fromRemote(url: string, abortController?: AbortController) { 23 | const init: RequestInit = {}; 24 | if (abortController) { 25 | init.signal = abortController.signal; 26 | } 27 | const res = await fetch(url, init); 28 | if (!res.ok) { 29 | let message = res.statusText ?? 
String(res.status); 30 | try { 31 | const json = await res.json(); 32 | message = json.message; 33 | } catch (e) { 34 | } 35 | throw new Error(message); 36 | } 37 | const buffer = Buffer.from(await res.arrayBuffer()); 38 | return Pose.from(buffer); 39 | } 40 | } 41 | 42 | -------------------------------------------------------------------------------- /src/js/pose_format/src/types.d.ts: -------------------------------------------------------------------------------- 1 | export interface RGBColor { 2 | R: number; 3 | G: number; 4 | B: number; 5 | } 6 | 7 | export interface PoseLimb { 8 | from: number; 9 | to: number; 10 | } 11 | 12 | export interface PoseHeaderComponentModel { 13 | name: string; 14 | format: string; 15 | _points: number; 16 | _limbs: number; 17 | _colors: number; 18 | points: string[]; 19 | limbs: PoseLimb[], 20 | colors: RGBColor[] 21 | } 22 | 23 | export interface PoseHeaderModel { 24 | version: number, 25 | width: number, 26 | height: number, 27 | depth: number, 28 | _components: number, 29 | components: PoseHeaderComponentModel[], 30 | headerLength: number 31 | } 32 | 33 | export interface PosePointModel { 34 | X: number; 35 | Y: number; 36 | Z?: number; 37 | C?: number; 38 | } 39 | 40 | export interface PoseBodyFramePersonModel { 41 | [key: string]: PosePointModel[]; 42 | } 43 | 44 | export interface PoseBodyFrameModel { 45 | _people: number; 46 | people: PoseBodyFramePersonModel[] 47 | } 48 | 49 | export interface PoseBodyModel { 50 | fps: number, 51 | _frames: number, 52 | frames: PoseBodyFrameModel[] 53 | } 54 | 55 | export interface PoseModel { 56 | header: PoseHeaderModel, 57 | body: PoseBodyModel 58 | } -------------------------------------------------------------------------------- /src/js/pose_format/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "noImplicitReturns": true, 4 | "skipLibCheck": true, 5 | "noUnusedLocals": true, 6 | "outDir": "dist", 7 | 
"sourceMap": true, 8 | "declaration": true, 9 | "strict": true, 10 | "target": "es2017", 11 | "moduleResolution": "node" 12 | }, 13 | "compileOnSave": true, 14 | "files": [ 15 | "node_modules/binary-parser/dist/binary_parser.d.ts" 16 | ], 17 | "include": [ 18 | "src/**/*", 19 | "src/**/*.json" 20 | ], 21 | "exclude": [ 22 | "node_modules", 23 | "**/*.spec.ts" 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /src/js/pose_viewer/.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | charset = utf-8 7 | indent_style = space 8 | indent_size = 2 9 | end_of_line = lf 10 | insert_final_newline = true 11 | trim_trailing_whitespace = true 12 | 13 | [*.md] 14 | insert_final_newline = false 15 | trim_trailing_whitespace = false 16 | -------------------------------------------------------------------------------- /src/js/pose_viewer/.gitignore: -------------------------------------------------------------------------------- 1 | dist/ 2 | www/ 3 | loader/ 4 | 5 | *~ 6 | *.sw[mnpcod] 7 | *.log 8 | *.lock 9 | *.tmp 10 | *.tmp.* 11 | log.txt 12 | *.sublime-project 13 | *.sublime-workspace 14 | 15 | .stencil/ 16 | .idea/ 17 | .vscode/ 18 | .sass-cache/ 19 | .versions/ 20 | node_modules/ 21 | $RECYCLE.BIN/ 22 | 23 | .DS_Store 24 | Thumbs.db 25 | UserInterfaceState.xcuserstate 26 | .env 27 | -------------------------------------------------------------------------------- /src/js/pose_viewer/.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "arrowParens": "avoid", 3 | "bracketSpacing": true, 4 | "jsxBracketSameLine": false, 5 | "jsxSingleQuote": false, 6 | "quoteProps": "consistent", 7 | "printWidth": 180, 8 | "semi": true, 9 | "singleQuote": true, 10 | "tabWidth": 2, 11 | "trailingComma": "all", 12 | "useTabs": false 13 | } 14 | 
-------------------------------------------------------------------------------- /src/js/pose_viewer/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/js/pose_viewer/README.md: -------------------------------------------------------------------------------- 1 | # Pose Viewer 2 | 3 | ## Installation Guide 4 | 5 | 1. go to `../lib/typescript` and run `tsc` 6 | 2. 
`npm install` 7 | 8 | ## Usage 9 | 10 | See [Basic Example](https://github.com/AmitMY/pose-format/tree/master/pose_viewer/example) 11 | 12 | ## Publishing 13 | ```bash 14 | npm run build 15 | npm publish 16 | ``` -------------------------------------------------------------------------------- /src/js/pose_viewer/example/example.pose: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/js/pose_viewer/example/example.pose -------------------------------------------------------------------------------- /src/js/pose_viewer/example/example.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/js/pose_viewer/example/example.zip -------------------------------------------------------------------------------- /src/js/pose_viewer/example/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Example Pose Thingy 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /src/js/pose_viewer/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "pose-viewer", 3 | "version": "1.0.1", 4 | "description": "Stencil Component Starter", 5 | "homepage": "https://github.com/sign-language-processing/pose", 6 | "main": "dist/index.cjs.js", 7 | "module": "dist/index.js", 8 | "es2015": "dist/esm/index.mjs", 9 | "es2017": "dist/esm/index.mjs", 10 | "types": "dist/types/index.d.ts", 11 | "collection": "dist/collection/collection-manifest.json", 12 | "collection:main": "dist/collection/index.js", 13 | "unpkg": "dist/pose-viewer/pose-viewer.esm.js", 14 | "files": [ 15 | "dist/", 16 | "loader/" 17 | ], 18 | "scripts": { 19 | "build": "stencil build 
--docs --prod", 20 | "start": "stencil build --dev --watch --serve", 21 | "test": "stencil test --spec --e2e", 22 | "test.watch": "stencil test --spec --e2e --watchAll", 23 | "generate": "stencil generate", 24 | "prepare": "npm run build" 25 | }, 26 | "dependencies": { 27 | "@stencil/core": "4.26.0", 28 | "pose-format": "1.6.0", 29 | "three": "0.173.0" 30 | }, 31 | "license": "MIT", 32 | "devDependencies": { 33 | "@types/node": "22.13.4", 34 | "@types/three": "0.173.0", 35 | "rollup-plugin-node-polyfills": "0.2.1" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/js/pose_viewer/src/components/pose-viewer/pose-viewer.css: -------------------------------------------------------------------------------- 1 | :host { 2 | display: inline-block; 3 | } 4 | 5 | svg, canvas { 6 | /* required for proper resizing in mobile devices */ 7 | max-width: 100%; 8 | } 9 | 10 | svg circle { 11 | stroke: black; 12 | stroke-width: 1px; 13 | opacity: 0.8; 14 | } 15 | 16 | svg line { 17 | stroke-width: 8px; 18 | opacity: 0.8; 19 | stroke: black; 20 | } 21 | 22 | canvas { 23 | display: block; 24 | } 25 | -------------------------------------------------------------------------------- /src/js/pose_viewer/src/components/pose-viewer/readme.md: -------------------------------------------------------------------------------- 1 | # my-component 2 | 3 | 4 | 5 | 6 | 7 | 8 | ## Properties 9 | 10 | | Property | Attribute | Description | Type | Default | 11 | | -------------- | --------------- | ----------- | ------------------------------------ | ----------- | 12 | | `aspectRatio` | `aspect-ratio` | | `number` | `null` | 13 | | `autoplay` | `autoplay` | | `boolean` | `true` | 14 | | `background` | `background` | | `string` | `null` | 15 | | `currentTime` | `current-time` | | `number` | `NaN` | 16 | | `duration` | `duration` | | `number` | `NaN` | 17 | | `ended` | `ended` | | `boolean` | `false` | 18 | | `height` | `height` | | `string` | `null` | 
19 | | `loop` | `loop` | | `boolean` | `false` | 20 | | `padding` | `padding` | | `string` | `null` | 21 | | `paused` | `paused` | | `boolean` | `true` | 22 | | `playbackRate` | `playback-rate` | | `number` | `1` | 23 | | `readyState` | `ready-state` | | `number` | `0` | 24 | | `renderer` | `renderer` | | `"canvas" \| "interactive" \| "svg"` | `'canvas'` | 25 | | `src` | `src` | | `Buffer \| string` | `undefined` | 26 | | `thickness` | `thickness` | | `number` | `null` | 27 | | `width` | `width` | | `string` | `null` | 28 | 29 | 30 | ## Events 31 | 32 | | Event | Description | Type | 33 | | ----------------- | ----------- | ------------------- | 34 | | `canplaythrough$` | | `CustomEvent` | 35 | | `ended$` | | `CustomEvent` | 36 | | `firstRender$` | | `CustomEvent` | 37 | | `loadeddata$` | | `CustomEvent` | 38 | | `loadedmetadata$` | | `CustomEvent` | 39 | | `loadstart$` | | `CustomEvent` | 40 | | `pause$` | | `CustomEvent` | 41 | | `play$` | | `CustomEvent` | 42 | | `render$` | | `CustomEvent` | 43 | 44 | 45 | ## Methods 46 | 47 | ### `getPose() => Promise` 48 | 49 | 50 | 51 | #### Returns 52 | 53 | Type: `Promise` 54 | 55 | 56 | 57 | ### `nextFrame() => Promise` 58 | 59 | 60 | 61 | #### Returns 62 | 63 | Type: `Promise` 64 | 65 | 66 | 67 | ### `pause() => Promise` 68 | 69 | 70 | 71 | #### Returns 72 | 73 | Type: `Promise` 74 | 75 | 76 | 77 | ### `play() => Promise` 78 | 79 | 80 | 81 | #### Returns 82 | 83 | Type: `Promise` 84 | 85 | 86 | 87 | ### `syncMedia(media: HTMLMediaElement) => Promise` 88 | 89 | 90 | 91 | #### Parameters 92 | 93 | | Name | Type | Description | 94 | | ------- | ------------------ | ----------- | 95 | | `media` | `HTMLMediaElement` | | 96 | 97 | #### Returns 98 | 99 | Type: `Promise` 100 | 101 | 102 | 103 | 104 | ---------------------------------------------- 105 | 106 | *Built with [StencilJS](https://stenciljs.com/)* 107 | -------------------------------------------------------------------------------- 
/src/js/pose_viewer/src/components/pose-viewer/renderers/canvas.pose-renderer.tsx: -------------------------------------------------------------------------------- 1 | import {PoseBodyFrameModel, PosePointModel, RGBColor} from "pose-format"; 2 | import {PoseRenderer} from "./pose-renderer"; 3 | import {h} from "@stencil/core"; 4 | 5 | 6 | export class CanvasPoseRenderer extends PoseRenderer { 7 | ctx!: CanvasRenderingContext2D; 8 | thickness!: number; 9 | 10 | renderJoint(_: number, joint: PosePointModel, color: RGBColor) { 11 | const {R, G, B} = color; 12 | this.ctx.strokeStyle = `rgba(0, 0, 0, 0)`; 13 | this.ctx.fillStyle = `rgba(${R}, ${G}, ${B}, ${joint.C})`; 14 | 15 | const radius = Math.round(this.thickness / 3); 16 | this.ctx.beginPath(); 17 | this.ctx.arc(this.x(joint.X), this.y(joint.Y), radius, 0, 2 * Math.PI); 18 | this.ctx.fill(); 19 | this.ctx.stroke(); 20 | } 21 | 22 | renderLimb(from: PosePointModel, to: PosePointModel, color: RGBColor) { 23 | const {R, G, B} = color; 24 | 25 | this.ctx.lineWidth = this.thickness * 5/4; 26 | this.ctx.strokeStyle = `rgba(${R}, ${G}, ${B}, ${(from.C + to.C) / 2})`; 27 | 28 | this.ctx.beginPath(); 29 | this.ctx.moveTo(this.x(from.X), this.y(from.Y)); 30 | this.ctx.lineTo(this.x(to.X), this.y(to.Y)); 31 | this.ctx.stroke(); 32 | } 33 | 34 | render(frame: PoseBodyFrameModel) { 35 | const drawCanvas = () => { 36 | const canvas = this.viewer.element.shadowRoot.querySelector('canvas'); 37 | if (canvas) { 38 | // TODO: this should be unnecessary, but stencil doesn't apply attributes 39 | canvas.width = this.viewer.elWidth; 40 | canvas.height = this.viewer.elHeight; 41 | 42 | this.ctx = canvas.getContext('2d'); 43 | 44 | if (this.viewer.background) { 45 | this.ctx.fillStyle = this.viewer.background; 46 | this.ctx.fillRect(0, 0, canvas.width, canvas.height); 47 | } else { 48 | this.ctx.clearRect(0, 0, canvas.width, canvas.height); 49 | } 50 | 51 | const w = this.viewer.elWidth - 2 * this.viewer.elPadding.width; 52 | const h = 
this.viewer.elHeight - 2 * this.viewer.elPadding.height; 53 | this.thickness = this.viewer.thickness ?? Math.round(Math.sqrt(w * h) / 150); 54 | this.renderFrame(frame); 55 | } else { 56 | throw new Error("Canvas isn't available before first render") 57 | } 58 | }; 59 | 60 | try { 61 | drawCanvas(); 62 | } catch (e) { 63 | requestAnimationFrame(drawCanvas) 64 | } 65 | 66 | 67 | return ( 68 | 69 | ) 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/js/pose_viewer/src/components/pose-viewer/renderers/pose-renderer.tsx: -------------------------------------------------------------------------------- 1 | import {PoseViewer} from "../pose-viewer"; 2 | import {PoseBodyFrameModel, PoseLimb, PosePointModel, RGBColor} from "pose-format"; 3 | 4 | export abstract class PoseRenderer { 5 | 6 | constructor(protected viewer: PoseViewer) { 7 | } 8 | 9 | x(v: number) { 10 | const n = v * (this.viewer.elWidth - 2 * this.viewer.elPadding.width); 11 | return n / this.viewer.pose.header.width + this.viewer.elPadding.width; 12 | } 13 | 14 | y(v: number) { 15 | const n = v * (this.viewer.elHeight - 2 * this.viewer.elPadding.height); 16 | return n / this.viewer.pose.header.height + this.viewer.elPadding.height; 17 | } 18 | 19 | isJointValid(joint: PosePointModel) { 20 | return joint.C > 0; 21 | } 22 | 23 | abstract renderJoint(i: number, joint: PosePointModel, color: RGBColor); 24 | 25 | renderJoints(joints: PosePointModel[], colors: RGBColor[]) { 26 | return joints 27 | .filter(this.isJointValid.bind(this)) 28 | .map((joint, i) => { 29 | return this.renderJoint(i, joint, colors[i % colors.length]); 30 | }); 31 | } 32 | 33 | abstract renderLimb(from: PosePointModel, to: PosePointModel, color: RGBColor); 34 | 35 | renderLimbs(limbs: PoseLimb[], joints: PosePointModel[], colors: RGBColor[]) { 36 | /** 37 | This implementation is a bit different from the python one. 
38 | In python, we sort all limbs of all people and all components by depth and then render them. 39 | Here, we only sort the limbs of the current component by depth. 40 | */ 41 | 42 | const lines = limbs.map(({from, to}) => { 43 | const a = joints[from]; 44 | const b = joints[to]; 45 | if (!this.isJointValid(a) || !this.isJointValid(b)) { 46 | return null; 47 | } 48 | 49 | const c1 = colors[from % colors.length]; 50 | const c2 = colors[to % colors.length]; 51 | const color = { 52 | R: (c1.R + c2.R) / 2, 53 | G: (c1.G + c2.G) / 2, 54 | B: (c1.B + c2.B) / 2, 55 | }; 56 | 57 | return {from: a, to: b, color, z: (a.Z + b.Z) / 2}; 58 | }); 59 | 60 | return lines 61 | .filter(Boolean) // Remove invalid lines 62 | .sort((a, b) => b.z - a.z) // Sort lines by depth 63 | .map(({from, to, color}) => this.renderLimb(from, to, color)); 64 | } 65 | 66 | renderFrame(frame: PoseBodyFrameModel) { 67 | return frame.people.map(person => this.viewer.pose.header.components.map(component => { 68 | const joints = person[component.name]; 69 | return [ 70 | this.renderJoints(joints, component.colors), 71 | this.renderLimbs(component.limbs, joints, component.colors), 72 | ] 73 | })) 74 | } 75 | 76 | abstract render(frame: PoseBodyFrameModel); 77 | } 78 | -------------------------------------------------------------------------------- /src/js/pose_viewer/src/components/pose-viewer/renderers/svg.pose-renderer.tsx: -------------------------------------------------------------------------------- 1 | import {PoseBodyFrameModel, PosePointModel, RGBColor} from "pose-format"; 2 | import {PoseRenderer} from "./pose-renderer"; 3 | import {h} from '@stencil/core'; 4 | 5 | export class SVGPoseRenderer extends PoseRenderer { 6 | 7 | renderJoint(i: number, joint: PosePointModel, color: RGBColor) { 8 | const {R, G, B} = color; 9 | 10 | return ( 20 | ); 21 | } 22 | 23 | renderLimb(from: PosePointModel, to: PosePointModel, color: RGBColor) { 24 | const {R, G, B} = color; 25 | 26 | return ( 35 | ); 36 | } 37 
| 38 | render(frame: PoseBodyFrameModel) { 39 | const viewBox = `0 0 ${this.viewer.pose.header.width} ${this.viewer.pose.header.height}`; 40 | return ( 41 | 45 | 46 | {this.renderFrame(frame)} 47 | 48 | 49 | ) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/js/pose_viewer/src/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Stencil Component Starter 7 | 8 | 9 | 10 | 11 | 12 | 13 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /src/js/pose_viewer/src/index.ts: -------------------------------------------------------------------------------- 1 | export { Components, JSX } from './components'; 2 | export { PoseViewer } from "./components/pose-viewer/pose-viewer"; 3 | -------------------------------------------------------------------------------- /src/js/pose_viewer/src/sample-data/example.pose: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/js/pose_viewer/src/sample-data/example.pose -------------------------------------------------------------------------------- /src/js/pose_viewer/stencil.config.ts: -------------------------------------------------------------------------------- 1 | import { Config } from '@stencil/core'; 2 | import nodePolyfills from 'rollup-plugin-node-polyfills'; 3 | 4 | export const config: Config = { 5 | namespace: 'pose-viewer', 6 | buildEs5: false, 7 | plugins: [ 8 | nodePolyfills(), 9 | ], 10 | extras: { 11 | enableImportInjection: true, 12 | }, 13 | outputTargets: [ 14 | { 15 | type: 'dist', 16 | esmLoaderPath: '../loader', 17 | }, 18 | { 19 | type: 'docs-readme', 20 | }, 21 | { 22 | type: 'www', 23 | serviceWorker: null, // disable service workers 24 | }, 25 | { 26 | type: 'dist-custom-elements', 27 | externalRuntime: 
false, 28 | }, 29 | ], 30 | }; 31 | -------------------------------------------------------------------------------- /src/js/pose_viewer/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowSyntheticDefaultImports": true, 4 | "allowUnreachableCode": false, 5 | "declaration": false, 6 | "experimentalDecorators": true, 7 | "lib": [ 8 | "dom", 9 | "es2017" 10 | ], 11 | "moduleResolution": "node", 12 | "module": "esnext", 13 | "target": "es2017", 14 | "noUnusedLocals": true, 15 | "noUnusedParameters": true, 16 | "jsx": "react", 17 | "jsxFactory": "h" 18 | }, 19 | "include": [ 20 | "src" 21 | ], 22 | "exclude": [ 23 | "node_modules" 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /src/python/.gitignore: -------------------------------------------------------------------------------- 1 | .pytest_cache 2 | __pycache__ 3 | pose_format.egg-info 4 | dist 5 | build -------------------------------------------------------------------------------- /src/python/ComfyUI-Pose-Format/__init__.py: -------------------------------------------------------------------------------- 1 | from .nodes import PoseLoader 2 | 3 | NODE_CLASS_MAPPINGS = { 4 | "PoseLoader": PoseLoader, 5 | } 6 | 7 | NODE_DISPLAY_NAME_MAPPINGS = { 8 | "PoseLoader": "Pose Loader" 9 | } -------------------------------------------------------------------------------- /src/python/ComfyUI-Pose-Format/nodes.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import cv2 4 | import torch 5 | from pose_format.pose import Pose 6 | from pose_format.pose_visualizer import PoseVisualizer 7 | from pose_format.utils.generic import reduce_holistic 8 | from pose_format.utils.openpose import OpenPose_Components 9 | from pose_format.utils.pose_converter import convert_pose 10 | 11 | try: 12 | import folder_paths 13 | except ImportError as e: 14 | 
raise ImportError("Please make sure to run this node in ComfyUI Context.") 15 | 16 | 17 | class PoseLoader: 18 | @classmethod 19 | def INPUT_TYPES(s): 20 | input_dir = folder_paths.get_input_directory() 21 | files = [] 22 | for f in os.listdir(input_dir): 23 | if os.path.isfile(os.path.join(input_dir, f)): 24 | file_parts = f.split('.') 25 | if len(file_parts) > 1 and file_parts[-1].lower() == "pose": 26 | files.append(f) 27 | 28 | return { 29 | "required": { 30 | "file": (sorted(files),), 31 | "is_reduce_holistic": ("BOOLEAN", {"default": False, "input": True}), 32 | "is_convert_to_openpose": ("BOOLEAN", {"default": False}), 33 | "thickness": ("INT", {"default": 1, "min": 1, "max": 10}) 34 | } 35 | } 36 | 37 | CATEGORY = "Pose Helper Suite 🕺" 38 | 39 | RETURN_TYPES = ("STRING", "INT", "FLOAT", "INT", "INT", "IMAGE") 40 | RETURN_NAMES = ("file_path", "num_frames", "fps", "width", "height", "frames") 41 | 42 | FUNCTION = "run" 43 | 44 | def run(self, file, is_reduce_holistic, is_convert_to_openpose, thickness): 45 | pose_file = folder_paths.get_annotated_filepath(file) 46 | 47 | if not os.path.exists(pose_file): 48 | raise ValueError(f"File {pose_file} does not exist") 49 | 50 | # Load Pose file 51 | with open(pose_file, "rb") as f: 52 | pose = Pose.read(f.read()) 53 | 54 | if is_reduce_holistic: 55 | pose = reduce_holistic(pose) 56 | 57 | if is_convert_to_openpose: 58 | pose = convert_pose(pose, OpenPose_Components) 59 | pose.header.components[1].colors = [(255, 255, 255)] 60 | pose.header.components[0].colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], 61 | [85, 255, 0], [0, 255, 0], \ 62 | [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], 63 | [0, 0, 255], [85, 0, 255], \ 64 | [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] 65 | print("thickness", thickness) 66 | print(pose.body.fps, pose.header.dimensions.width, pose.header.dimensions.height) 67 | 68 | visualizer = PoseVisualizer(pose, 
thickness=thickness) 69 | frames = visualizer.draw(background_color=(0, 0, 0)) 70 | frames_rgb = (cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in frames) 71 | frames_torch = [torch.from_numpy(f).float() / 255 for f in frames_rgb] 72 | 73 | return (pose_file, 74 | len(pose.body.data), 75 | pose.body.fps, 76 | pose.header.dimensions.width, 77 | pose.header.dimensions.height, 78 | frames_torch) 79 | -------------------------------------------------------------------------------- /src/python/README.md: -------------------------------------------------------------------------------- 1 | # Pose Format 2 | 3 | ## Publishing 4 | ```bash 5 | pip install --upgrade build twine pyopenssl cryptography requests-toolbelt 6 | rm -rf dist 7 | python3 -m build 8 | python3 -m twine upload dist/* 9 | ``` -------------------------------------------------------------------------------- /src/python/pose_format/BUILD: -------------------------------------------------------------------------------- 1 | # This library includes utilities to handle pose data 2 | 3 | licenses(["notice"]) 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | py_library( 8 | name = "pose", 9 | srcs = ["pose.py"], 10 | srcs_version = "PY3", 11 | visibility = ["//visibility:public"], 12 | deps = [ 13 | ":pose_body", 14 | ":pose_header", 15 | "//third_party/py/numpy", 16 | "//numpy:pose_body", 17 | "//utils:fast_math", 18 | "//utils:reader", 19 | ], 20 | ) 21 | 22 | py_library( 23 | name = "pose_body", 24 | srcs = ["pose_body.py"], 25 | srcs_version = "PY3", 26 | visibility = ["//visibility:public"], 27 | deps = [ 28 | ":pose_header", 29 | "//third_party/py/numpy", 30 | "//utils:reader", 31 | ], 32 | ) 33 | 34 | py_library( 35 | name = "pose_header", 36 | srcs = ["pose_header.py"], 37 | srcs_version = "PY3", 38 | visibility = ["//visibility:public"], 39 | deps = [ 40 | "//utils:reader", 41 | ], 42 | ) 43 | 44 | py_library( 45 | name = "pose_visualizer", 46 | srcs = ["pose_visualizer.py"], 47 | srcs_version = "PY3", 48 | 
visibility = ["//visibility:public"], 49 | deps = [ 50 | ":pose", 51 | "//third_party/OpenCVX:core", 52 | "//third_party/OpenCVX:video", 53 | "//third_party/py/numpy", 54 | "//third_party/py/tqdm", 55 | ], 56 | ) 57 | 58 | py_library( 59 | name = "pose_representation", 60 | srcs = ["pose_representation.py"], 61 | srcs_version = "PY3", 62 | visibility = ["//visibility:public"], 63 | deps = [ 64 | ":pose_header", 65 | ], 66 | ) 67 | 68 | py_test( 69 | name = "pose_test", 70 | srcs = ["pose_test.py"], 71 | python_version = "PY3", 72 | srcs_version = "PY3", 73 | visibility = ["//visibility:public"], 74 | deps = [ 75 | ":pose", 76 | ], 77 | ) 78 | -------------------------------------------------------------------------------- /src/python/pose_format/WORKSPACE: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/WORKSPACE -------------------------------------------------------------------------------- /src/python/pose_format/__init__.py: -------------------------------------------------------------------------------- 1 | from pose_format.pose import Pose 2 | from pose_format.pose_body import PoseBody 3 | from pose_format.pose_header import PoseHeader 4 | -------------------------------------------------------------------------------- /src/python/pose_format/bin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/bin/__init__.py -------------------------------------------------------------------------------- /src/python/pose_format/bin/pose_estimation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import argparse 3 | import os 4 | 5 | import cv2 6 | from pose_format.utils.holistic 
import load_holistic 7 | 8 | 9 | def load_video_frames(cap: cv2.VideoCapture): 10 | while True: 11 | ret, frame = cap.read() 12 | if not ret: 13 | break 14 | yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) 15 | cap.release() 16 | 17 | 18 | def pose_video(input_path: str, output_path: str, format: str, additional_config: dict = {'model_complexity': 1}, progress: bool = True): 19 | # Load video frames 20 | print('Loading video ...') 21 | cap = cv2.VideoCapture(input_path) 22 | width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) 23 | height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) 24 | fps = cap.get(cv2.CAP_PROP_FPS) 25 | frames = load_video_frames(cap) 26 | 27 | # Perform pose estimation 28 | print('Estimating pose ...') 29 | if format == 'mediapipe': 30 | pose = load_holistic(frames, 31 | fps=fps, 32 | width=width, 33 | height=height, 34 | progress=progress, 35 | additional_holistic_config=additional_config) 36 | else: 37 | raise NotImplementedError('Pose format not supported') 38 | 39 | # Write 40 | print('Saving to disk ...') 41 | with open(output_path, "wb") as f: 42 | pose.write(f) 43 | 44 | 45 | def parse_additional_config(config: str): 46 | if not config: 47 | return {} 48 | config = config.split(',') 49 | 50 | def parse_value(value): 51 | try: 52 | return int(value) 53 | except ValueError: 54 | pass 55 | try: 56 | return float(value) 57 | except ValueError: 58 | pass 59 | if value.lower() == 'true': 60 | return True 61 | if value.lower() == 'false': 62 | return False 63 | return value 64 | 65 | return {k: parse_value(v) for k, v in [c.split('=') for c in config]} 66 | 67 | 68 | def main(): 69 | parser = argparse.ArgumentParser() 70 | parser.add_argument('-i', required=True, type=str, help='path to input video file') 71 | parser.add_argument('-o', required=True, type=str, help='path to output pose file') 72 | parser.add_argument('--format', 73 | choices=['mediapipe'], 74 | default='mediapipe', 75 | type=str, 76 | help='type of pose estimation to use') 77 | 
parser.add_argument('--additional-config', type=str, help='additional configuration for the pose estimator') 78 | 79 | args = parser.parse_args() 80 | 81 | if not os.path.exists(args.i): 82 | raise FileNotFoundError(f"Video file {args.i} not found") 83 | 84 | additional_config = parse_additional_config(args.additional_config) 85 | pose_video(args.i, args.o, args.format, additional_config) 86 | 87 | # pip install . && video_to_pose -i como.mp4 -o como1.pose --format mediapipe 88 | # pip install . && video_to_pose -i como.mp4 -o como2.pose --format mediapipe --additional-config="model_complexity=2,smooth_landmarks=false,refine_face_landmarks=true" 89 | # pip install . && video_to_pose -i sparen.mp4 -o sparen.pose --format mediapipe --additional-config="model_complexity=2,smooth_landmarks=false,refine_face_landmarks=true" 90 | -------------------------------------------------------------------------------- /src/python/pose_format/bin/pose_info.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import argparse 3 | import os 4 | from pose_format.pose import Pose 5 | 6 | 7 | 8 | 9 | def pose_info(input_path: str): 10 | with open(input_path, "rb") as f: 11 | pose = Pose.read(f.read()) 12 | 13 | print(pose) 14 | 15 | 16 | 17 | def main(): 18 | parser = argparse.ArgumentParser() 19 | parser.add_argument('-i', required=True, type=str, help='path to input pose file') 20 | 21 | args = parser.parse_args() 22 | 23 | if not os.path.exists(args.i): 24 | raise FileNotFoundError(f"Pose file {args.i} not found") 25 | 26 | pose_info(args.i) 27 | -------------------------------------------------------------------------------- /src/python/pose_format/bin/pose_visualizer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import os 5 | 6 | from pose_format.pose import Pose 7 | from pose_format.pose_visualizer import PoseVisualizer 8 | from 
pose_format.utils.generic import pose_normalization_info 9 | 10 | from pose_format.utils.generic import normalize_pose_size 11 | 12 | 13 | def visualize_pose(pose_path: str, video_path: str, normalize=False): 14 | with open(pose_path, "rb") as f: 15 | pose = Pose.read(f.read()) 16 | 17 | if normalize: 18 | pose = pose.normalize(pose_normalization_info(pose.header)) 19 | normalize_pose_size(pose) 20 | 21 | v = PoseVisualizer(pose) 22 | 23 | v.save_video(video_path, v.draw()) 24 | 25 | 26 | def main(): 27 | parser = argparse.ArgumentParser() 28 | parser.add_argument('-i', required=True, type=str, help='path to input pose file') 29 | parser.add_argument('-o', required=True, type=str, help='path to output video file') 30 | parser.add_argument('--normalize', action='store_true', help='Normalize pose before visualization') 31 | 32 | args = parser.parse_args() 33 | 34 | if not os.path.exists(args.i): 35 | raise FileNotFoundError(f"Pose file {args.i} not found") 36 | 37 | visualize_pose(args.i, args.o, args.normalize) 38 | -------------------------------------------------------------------------------- /src/python/pose_format/numpy/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | py_library( 4 | name = "pose_body", 5 | srcs = ["pose_body.py"], 6 | srcs_version = "PY3", 7 | visibility = ["//visibility:public"], 8 | deps = [ 9 | "//third_party/py/numpy", 10 | "//:pose_body", 11 | "//:pose_header", 12 | "//torch:pose_body", 13 | "//utils:reader", 14 | "//third_party/py/scipy", 15 | "//third_party/py/torch:pytorch", 16 | ], 17 | ) 18 | -------------------------------------------------------------------------------- /src/python/pose_format/numpy/__init__.py: -------------------------------------------------------------------------------- 1 | from .pose_body import NumPyPoseBody 2 | -------------------------------------------------------------------------------- 
/src/python/pose_format/numpy/representation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/numpy/representation/__init__.py -------------------------------------------------------------------------------- /src/python/pose_format/numpy/representation/distance.py: -------------------------------------------------------------------------------- 1 | import numpy.ma as ma 2 | 3 | 4 | class DistanceRepresentation: 5 | """ 6 | A class to compute the Euclidean distance between two sets of points. 7 | """ 8 | 9 | def distance(self, p1s: ma.MaskedArray, p2s: ma.MaskedArray) -> ma.MaskedArray: 10 | """ 11 | Compute the Euclidean distance between two sets of points. 12 | 13 | Parameters 14 | ---------- 15 | p1s : ma.MaskedArray 16 | First set of points. 17 | p2s : ma.MaskedArray 18 | Second set of points. 19 | 20 | Returns 21 | ------- 22 | ma.MaskedArray 23 | Euclidean distances between the two sets of points. The returned array has one fewer dimension than the input arrays, as the distance calculation collapses the last dimension. 24 | 25 | Note 26 | ---- 27 | this method assumes that input arrays `p1s` and `p2s` have same shape. 28 | """ 29 | diff = p1s - p2s 30 | square = ma.power(diff, 2) 31 | sum_squares = square.sum(axis=-1) 32 | sqrt = ma.sqrt(sum_squares).filled(0) 33 | return sqrt 34 | 35 | def __call__(self, p1s: ma.MaskedArray, p2s: ma.MaskedArray) -> ma.MaskedArray: 36 | """ 37 | For `distance` method to compute Euclidean distance between two points. 38 | 39 | Parameters 40 | ---------- 41 | p1s : ma.MaskedArray, shape (Points, Batch, Len, Dims) 42 | First set of points. 43 | p2s : ma.MaskedArray, shape (Points, Batch, Len, Dims) 44 | Second set of points. 45 | 46 | Returns 47 | ------- 48 | ma.MaskedArray, shape (Points, Batch, Len) 49 | Euclidean distances between the two sets of points. 
50 | """ 51 | return self.distance(p1s, p2s) 52 | -------------------------------------------------------------------------------- /src/python/pose_format/numpy/representation/distance_test.py: -------------------------------------------------------------------------------- 1 | import math 2 | from unittest import TestCase 3 | 4 | import numpy as np 5 | import numpy.ma as ma 6 | 7 | from pose_format.numpy.representation.distance import DistanceRepresentation 8 | 9 | representation = DistanceRepresentation() 10 | 11 | 12 | class TestDistanceRepresentation(TestCase): 13 | """ 14 | Unit tests for DistanceRepresentation class to computes Euclidean distance between sets of 3D points. 15 | """ 16 | 17 | def test_call_value_should_be_distance(self): 18 | """ 19 | Test if computed distance between two points is correct. 20 | 21 | Note 22 | ---- 23 | This test initializes two sets of 3D points: 24 | p1 = (1, 2, 3) and p2 = (4, 5, 6). It then checks if the 25 | computed Euclidean distance between p1 and p2 is sqrt(27). 26 | """ 27 | 28 | p1s = ma.array([[[[1, 2, 3]]]], dtype=np.float32) 29 | p2s = ma.array([[[[4, 5, 6]]]], dtype=np.float32) 30 | distances = representation(p1s, p2s) 31 | self.assertAlmostEqual(float(distances[0][0][0]), math.sqrt(27), places=6) 32 | 33 | def test_call_masked_value_should_be_zero(self): 34 | """ 35 | Test if the distance for masked values is zero. 36 | 37 | Note 38 | ---- 39 | This test checks scenario where one set of points 40 | is masked.The computed distance in such in such a case be 0. 
41 | """ 42 | 43 | mask = [[[[True, True, True]]]] 44 | p1s = ma.array([[[[1, 2, 3]]]], mask=mask, dtype=np.float32) 45 | p2s = ma.array([[[[4, 5, 6]]]], dtype=np.float32) 46 | distances = representation(p1s, p2s) 47 | self.assertEqual(float(distances[0][0][0]), 0) 48 | -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | py_library( 4 | name = "pose_representation", 5 | srcs = ["pose_representation.py"], 6 | srcs_version = "PY3", 7 | visibility = ["//visibility:public"], 8 | deps = [ 9 | "//:pose_representation", 10 | "//third_party/py/tensorflow", 11 | ], 12 | ) 13 | 14 | py_test( 15 | name = "pose_representation_test", 16 | srcs = ["pose_representation_test.py"], 17 | python_version = "PY3", 18 | srcs_version = "PY3", 19 | visibility = ["//visibility:public"], 20 | deps = [ 21 | ":pose_representation", 22 | "//testing/pybase", 23 | "//:pose_header", 24 | "//:pose_representation", 25 | "//tensorflow/representation:angle", 26 | "//tensorflow/representation:distance", 27 | "//tensorflow/representation:inner_angle", 28 | "//tensorflow/representation:point_line_distance", 29 | "//utils:openpose", 30 | "//third_party/py/tensorflow", 31 | ], 32 | ) 33 | -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/tensorflow/__init__.py -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/masked/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | py_library( 4 | name = "tensor", 5 | srcs 
= ["tensor.py"], 6 | srcs_version = "PY3", 7 | visibility = ["//visibility:public"], 8 | deps = [ 9 | "//third_party/py/tensorflow", 10 | ], 11 | ) 12 | 13 | py_library( 14 | name = "tensorflow", 15 | srcs = ["tensorflow.py"], 16 | srcs_version = "PY3", 17 | visibility = ["//visibility:public"], 18 | deps = [ 19 | ":tensor", 20 | "//third_party/py/tensorflow", 21 | ], 22 | ) 23 | 24 | py_test( 25 | name = "tensorflow_test", 26 | srcs = ["tensorflow_test.py"], 27 | python_version = "PY3", 28 | srcs_version = "PY3", 29 | visibility = ["//visibility:public"], 30 | deps = [ 31 | ":tensor", 32 | ":tensorflow", 33 | "//testing/pybase", 34 | ], 35 | ) 36 | -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/masked/__init__.py: -------------------------------------------------------------------------------- 1 | from pose_format.tensorflow.masked.tensor import MaskedTensor 2 | from pose_format.tensorflow.masked.tensorflow import MaskedTensorflow 3 | -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/masked/tensor_graph_mode_test.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | import tensorflow as tf 4 | 5 | from pose_format.tensorflow.masked.tensor import MaskedTensor 6 | from pose_format.tensorflow.masked.tensor_test import \ 7 | create_random_numpy_tensor_and_mask 8 | 9 | 10 | class TestMaskedTensor(TestCase): 11 | """ 12 | Unit tests for `MaskedTensor` class in TensorFlow. 13 | """ 14 | 15 | def test_float_graph_execution_fails(self): 16 | """ 17 | Test if attempting to convert a MaskedTensor to float raises a TypeError during graph execution. 18 | 19 | Note 20 | ---- 21 | This test ensures that if a user attempts to convert a MaskedTensor instance 22 | to a floating point number during TensorFlow graph execution, 23 | a TypeError should be raised. 
24 | """ 25 | with tf.Graph().as_default(): 26 | 27 | assert tf.executing_eagerly() is False 28 | 29 | input_shape = (1,) 30 | 31 | tensor, mask = create_random_numpy_tensor_and_mask(shape=input_shape, probability_for_masked=0.1) 32 | 33 | masked_tf = MaskedTensor(tensor=tf.constant(tensor), mask=tf.constant(mask)) 34 | 35 | with self.assertRaises(TypeError): 36 | float(masked_tf) 37 | 38 | def test_eq_graph_execution(self): 39 | """ 40 | Test the equality operation on a MaskedTensor during graph execution. 41 | 42 | Note 43 | ---- 44 | This test evaluates if the MaskedTensor can be compared with a 45 | float number during TensorFlow graph execution without any errors. 46 | """ 47 | 48 | with tf.Graph().as_default(): 49 | 50 | assert tf.executing_eagerly() is False 51 | 52 | input_shape = (7, 6) 53 | 54 | tensor, mask = create_random_numpy_tensor_and_mask(shape=input_shape, probability_for_masked=0.1) 55 | 56 | masked_tf = MaskedTensor(tensor=tf.constant(tensor), mask=tf.constant(mask)) 57 | 58 | _ = masked_tf == 6.0 59 | -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/masked/tensorflow.py: -------------------------------------------------------------------------------- 1 | from typing import List, Union 2 | 3 | import tensorflow 4 | 5 | from pose_format.tensorflow.masked.tensor import MaskedTensor 6 | 7 | 8 | class TensorflowFallback(type): 9 | """A metaclass for managing the fallback operations on MaskedTensors with Tensorflow functions.""" 10 | 11 | doesnt_change_mask = {"sqrt", "square", "cos", "sin", "tan", "acos", "asin", "atan"} 12 | 13 | def __getattr__(cls, attr): 14 | """ 15 | to return Tensorflow functions that can work on MaskedTensors. 16 | 17 | Parameters 18 | ---------- 19 | attr : str 20 | Tensorflow function name 21 | 22 | Returns 23 | ------- 24 | function 25 | function that can handle both MaskedTensor and regular/unmasked Tensorflow Tensor objects. 
26 | """ 27 | 28 | def func(*args, **kwargs): 29 | if len(args) > 0 and isinstance(args[0], MaskedTensor): 30 | args = list(args) 31 | mask = args[0].mask 32 | args[0] = args[0].tensor 33 | 34 | res = getattr(tensorflow, attr)(*args, **kwargs) 35 | if attr in TensorflowFallback.doesnt_change_mask: 36 | return MaskedTensor(res, mask) 37 | else: 38 | return res 39 | 40 | else: # If this action is done on an unmasked tensor 41 | return getattr(tensorflow, attr)(*args, **kwargs) 42 | 43 | return func 44 | 45 | 46 | class MaskedTensorflow(metaclass=TensorflowFallback): 47 | """ 48 | Class that performs Tensorflow operations on MaskedTensors. 49 | It uses the TensorflowFallback metaclass to handle functions not explicitly defined in this class. 50 | """ 51 | 52 | @staticmethod 53 | def concat(tensors: List[Union[MaskedTensor, tensorflow.Tensor]], axis: int) -> MaskedTensor: 54 | """ 55 | Concatenates a list of tensors along a specified axis. 56 | 57 | Parameters 58 | ---------- 59 | tensors : list 60 | List of MaskedTensor or tensorflow.Tensor objects. 61 | axis : int 62 | The axis along which to concatenate the tensors. 63 | 64 | Returns 65 | ------- 66 | :class:`~pose_format.tensorflow.masked.tensor.MaskedTensor` 67 | concatenated Maskedtensor 68 | """ 69 | tensors: List[MaskedTensor] = [t if isinstance(t, MaskedTensor) else MaskedTensor(tensor=t) for t in tensors] 70 | tensor = tensorflow.concat([t.tensor for t in tensors], axis=axis) 71 | mask = tensorflow.concat([t.mask for t in tensors], axis=axis) 72 | return MaskedTensor(tensor=tensor, mask=mask) 73 | 74 | @staticmethod 75 | def stack(tensors: List[MaskedTensor], axis: int) -> MaskedTensor: 76 | """ 77 | Stacks a list of tensors along a specified axis. 78 | 79 | Parameters 80 | ---------- 81 | tensors : list 82 | List of MaskedTensor objects. 83 | axis : int 84 | The axis along which to stack the tensors. 
85 | 86 | Returns 87 | ------- 88 | :class:`~pose_format.tensorflow.masked.tensor.MaskedTensor` 89 | masekd stacked tensor. 90 | """ 91 | tensor = tensorflow.stack([t.tensor for t in tensors], axis=axis) 92 | mask = tensorflow.stack([t.mask for t in tensors], axis=axis) 93 | return MaskedTensor(tensor=tensor, mask=mask) 94 | 95 | @staticmethod 96 | def zeros(size, dtype=tensorflow.float32) -> MaskedTensor: 97 | """ 98 | Returns a MaskedTensor of zeros with the specified size and dtype. 99 | 100 | Parameters 101 | ---------- 102 | size : tuple 103 | The shape of the output tensor. 104 | dtype : tensorflow datatype, optional 105 | The datatype of the output tensor, default is tensorflow.float32. 106 | 107 | Returns 108 | ------- 109 | :class:`~pose_format.tensorflow.masked.tensor.MaskedTensor` 110 | masked tensor of zeros. 111 | """ 112 | tensor = tensorflow.zeros(size, dtype=dtype) 113 | mask = tensorflow.zeros(size, dtype=tensorflow.bool) 114 | return MaskedTensor(tensor=tensor, mask=mask) 115 | -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/masked/tensorflow_test.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | import tensorflow as tf 4 | 5 | from pose_format.tensorflow.masked.tensor import MaskedTensor 6 | from pose_format.tensorflow.masked.tensorflow import MaskedTensorflow 7 | 8 | 9 | class TestMaskedTensorflow(TestCase): 10 | """ 11 | A test class for checking the operations in the MaskedTensorflow class. 
12 | """ 13 | 14 | # cat 15 | def test_cat(self): 16 | """Test the `concat` concatination method for two tensors along axis=0.""" 17 | tensor1 = MaskedTensor(tf.constant([1, 2])) 18 | tensor2 = MaskedTensor(tf.constant([3, 4])) 19 | stack = MaskedTensorflow.concat([tensor1, tensor2], axis=0) 20 | res = MaskedTensor(tf.constant([[1, 2, 3, 4]])) 21 | self.assertTrue(tf.reduce_all(stack == res), msg="Cat is not equal to expected") 22 | 23 | # stack 24 | def test_stack(self): 25 | """ 26 | Test the `stack` method for two tensors along axis=0. 27 | 28 | """ 29 | tensor1 = MaskedTensor(tf.constant([1, 2])) 30 | tensor2 = MaskedTensor(tf.constant([3, 4])) 31 | stack = MaskedTensorflow.stack([tensor1, tensor2], axis=0) 32 | res = MaskedTensor(tf.constant([[1, 2], [3, 4]])) 33 | self.assertTrue(tf.reduce_all(stack == res), msg="Stack is not equal to expected") 34 | 35 | # zeros 36 | def test_zeros_tensor_shape(self): 37 | """ 38 | Test the shape of the tensor obtained from `zeros` method. 39 | 40 | """ 41 | zeros = MaskedTensorflow.zeros((1, 2, 3)) 42 | self.assertEqual(zeros.shape, (1, 2, 3)) 43 | 44 | def test_zeros_tensor_value(self): 45 | """ 46 | Test the values of the tensor obtained from `zeros` method. 47 | 48 | """ 49 | zeros = MaskedTensorflow.zeros((1, 2, 3)) 50 | self.assertTrue(tf.reduce_all(zeros == 0), msg="Zeros are not all zeros") 51 | 52 | def test_zeros_tensor_type_float(self): 53 | """ 54 | Test dtype of tensor obtained from `zeros` method with dtype=tf.float32. 
55 | """ 56 | dtype = tf.float32 57 | zeros = MaskedTensorflow.zeros((1, 2, 3), dtype=dtype) 58 | self.assertEqual(zeros.tensor.dtype, dtype) 59 | 60 | def test_zeros_tensor_type_bool(self): 61 | """Test the dtype of the tensor obtained from `zeros` method with dtype=tf.bool.""" 62 | dtype = tf.bool 63 | zeros = MaskedTensorflow.zeros((1, 2, 3), dtype=dtype) 64 | self.assertEqual(zeros.tensor.dtype, dtype) 65 | 66 | def test_zeros_mask_value(self): 67 | """ 68 | Test the mask values of the tensor obtained from `zeros` method. 69 | """ 70 | zeros = MaskedTensorflow.zeros((1, 2, 3)) 71 | self.assertTrue(tf.reduce_all(zeros.mask == tf.zeros((1, 2, 3), dtype=tf.dtypes.bool)), 72 | msg="Zeros mask are not all zeros") 73 | 74 | # Fallback 75 | def test_not_implemented_method(self): 76 | """ 77 | Test a method that is not explicitly defined for MaskedTensor but is implemented via fallback. 78 | """ 79 | tensor = MaskedTensor(tensor=tf.constant([1, 2, 3])) 80 | tensor_square = MaskedTensorflow.square(tensor) 81 | self.assertTrue(tf.reduce_all(tensor_square == tf.constant([1, 4, 9])), msg="Square is not equal to expected") 82 | -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/pose_body_test.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | import tensorflow as tf 4 | 5 | from pose_format.tensorflow.masked.tensor import MaskedTensor 6 | from pose_format.tensorflow.pose_body import TensorflowPoseBody 7 | 8 | 9 | class TestTensorflowPoseBody(TestCase): 10 | """TestsCases for the `TensorflowPoseBody` class.""" 11 | 12 | def test_tf_pose_body_zero_filled_fills_in_zeros(self): 13 | """ 14 | Test the `zero_filled` method of `TensorflowPoseBody` class. 15 | 16 | Test constructs a `TensorflowPoseBody` instance with specified fps, data (as a MaskedTensor), 17 | and confidence. 
class TensorflowPoseRepresentation(PoseRepresentation):
    """
    Pose representation backed by TensorFlow tensors.

    * Inherits from ``PoseRepresentation``

    Extends PoseRepresentation with TensorFlow-based helpers for grouping
    embeddings, selecting points, and permuting tensor dimensions.
    """

    def group_embeds(self, embeds: List[tf.Tensor]):
        """
        Concatenate embeddings along the first axis and move that axis last.

        Parameters
        ----------
        embeds : List[tf.Tensor]
            Tensors, each of shape (embed_size, Batch, Len).

        Returns
        -------
        tf.Tensor
            Tensor of shape (Batch, Len, embed_size).
        """
        return tf.transpose(tf.concat(embeds, axis=0), perm=[1, 2, 0])

    def get_points(self, tensor: tf.Tensor, points: List):
        """
        Select the given point indices from a tensor.

        Parameters
        ----------
        tensor : tf.Tensor
            Source tensor.
        points : List[int]
            Indices to gather from the first axis.

        Returns
        -------
        tf.Tensor
            Values gathered at the given indices.
        """
        return tf.gather(tensor, points)

    def permute(self, src, shape: tuple):
        """
        Permute the dimensions of a tensor.

        Parameters
        ----------
        src : tf.Tensor
            Tensor to permute.
        shape : tuple
            Desired axis permutation.

        Returns
        -------
        tf.Tensor
            The permuted tensor.
        """
        return tf.transpose(src, perm=shape)
class TestTensorflowPoseRepresentation(TestCase):
    """
    Tests for the TensorflowPoseRepresentation class.

    Checks input sizes, output size calculation, and the shape of the
    computed representation for the module-level fixture above.
    """

    def test_input_size(self):
        """A single OpenPose hand component should expose 21 input points."""
        self.assertEqual(representation.input_size, 21)

    def test_calc_output_size(self):
        """The configured representation modules should yield 70 output features."""
        self.assertEqual(representation.calc_output_size(), 70)

    def test_call_return_shape(self):
        """Calling the representation on (1, 2, input_size, 3) points yields (1, 2, output_size)."""
        in_size = representation.input_size
        out_size = representation.calc_output_size()

        points = tf.random.normal(shape=(1, 2, in_size, 3), dtype=tf.float32)
        self.assertEqual(representation(points).shape, (1, 2, out_size))
"//third_party/py/tensorflow", 33 | ], 34 | ) 35 | 36 | py_test( 37 | name = "distance_test", 38 | srcs = ["distance_test.py"], 39 | python_version = "PY3", 40 | srcs_version = "PY3", 41 | visibility = ["//visibility:public"], 42 | deps = [ 43 | ":distance", 44 | "//testing/pybase", 45 | "//third_party/py/tensorflow", 46 | ], 47 | ) 48 | 49 | py_library( 50 | name = "inner_angle", 51 | srcs = ["inner_angle.py"], 52 | srcs_version = "PY3", 53 | visibility = ["//visibility:public"], 54 | deps = [ 55 | "//third_party/py/tensorflow", 56 | ], 57 | ) 58 | 59 | py_test( 60 | name = "inner_angle_test", 61 | srcs = ["inner_angle_test.py"], 62 | python_version = "PY3", 63 | srcs_version = "PY3", 64 | visibility = ["//visibility:public"], 65 | deps = [ 66 | ":inner_angle", 67 | "//testing/pybase", 68 | "//third_party/py/tensorflow", 69 | ], 70 | ) 71 | 72 | py_library( 73 | name = "point_line_distance", 74 | srcs = ["point_line_distance.py"], 75 | srcs_version = "PY3", 76 | visibility = ["//visibility:public"], 77 | deps = [ 78 | ":distance", 79 | "//third_party/py/tensorflow", 80 | ], 81 | ) 82 | 83 | py_test( 84 | name = "point_line_distance_test", 85 | srcs = ["point_line_distance_test.py"], 86 | python_version = "PY3", 87 | srcs_version = "PY3", 88 | visibility = ["//visibility:public"], 89 | deps = [ 90 | ":point_line_distance", 91 | "//testing/pybase", 92 | "//third_party/py/tensorflow", 93 | ], 94 | ) 95 | -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/representation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/tensorflow/representation/__init__.py -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/representation/angle.py: 
class AngleRepresentation:
    """
    Represents the angle between the X axis and the line through two points.
    """

    def __call__(self, p1s: tf.Tensor, p2s: tf.Tensor) -> tf.Tensor:
        """
        Compute the X/Y-plane angle of the line from ``p1s`` to ``p2s``.

        Parameters
        ----------
        p1s : tf.Tensor
            First set of points, shape (Points, Batch, Len, Dims).
        p2s : tf.Tensor
            Second set of points, shape (Points, Batch, Len, Dims).

        Returns
        -------
        tf.Tensor
            Angles in radians, shape (Points, Batch, Len).

        Note
        ----
        The slope is the Y offset over the X offset; ``tf.math.divide_no_nan``
        returns 0 (instead of NaN) wherever the X offset is zero, and the
        angle is the arctangent of that slope.
        """
        deltas = p2s - p1s  # (Points, Batch, Len, Dims)
        # The first two channels of the last axis are the X and Y offsets.
        xs = deltas[..., 0:1]  # (Points, Batch, Len, 1)
        ys = deltas[..., 1:2]  # (Points, Batch, Len, 1)
        slopes = tf.math.divide_no_nan(ys, xs)  # 0 where the X offset is 0
        # TODO add .zero_filled()
        return tf.math.atan(tf.squeeze(slopes, axis=3))
class DistanceRepresentation:
    """Represents the Euclidean distance between two sets of points."""

    def distance(self, p1s: tf.Tensor, p2s: tf.Tensor) -> tf.Tensor:
        """
        Compute the Euclidean distance between two sets of points.

        Parameters
        ----------
        p1s : tf.Tensor
            First set of points, shape (Points, Batch, Len, Dims).
        p2s : tf.Tensor
            Second set of points, shape (Points, Batch, Len, Dims).

        Returns
        -------
        tf.Tensor
            Euclidean distances, shape (Points, Batch, Len): the square root
            of the sum of squared coordinate differences over the last axis.
        """
        squared_diffs = tf.square(p1s - p2s)  # (Points, Batch, Len, Dims)
        # TODO add .zero_filled()

        return tf.sqrt(tf.reduce_sum(squared_diffs, axis=-1))

    def __call__(self, p1s: tf.Tensor, p2s: tf.Tensor) -> tf.Tensor:
        """
        Alias for :meth:`distance`.

        Parameters
        ----------
        p1s : tf.Tensor
            First set of points, shape (Points, Batch, Len, Dims).
        p2s : tf.Tensor
            Second set of points, shape (Points, Batch, Len, Dims).

        Returns
        -------
        tf.Tensor
            Euclidean distances, shape (Points, Batch, Len).
        """
        return self.distance(p1s, p2s)
23 | """ 24 | transposed = tf.transpose(vectors) 25 | v_mag = tf.sqrt(tf.math.reduce_sum(transposed * transposed, axis=0)) 26 | return tf.transpose(tf.math.divide_no_nan(transposed, v_mag)) 27 | 28 | 29 | class InnerAngleRepresentation: 30 | """A class to represent the inner angle formed at a point for a given triangle. """ 31 | 32 | def __call__(self, p1s: tf.Tensor, p2s: tf.Tensor, p3s: tf.Tensor) -> tf.Tensor: 33 | """ 34 | Computes the angle at point `p2s` for the triangle formed by `p1s`, `p2s`, and `p3s`. 35 | 36 | Parameters 37 | ---------- 38 | p1s : tf.Tensor 39 | First set of points with shape (Points, Batch, Len, Dims). 40 | p2s : tf.Tensor 41 | Second set of points, where the angle is formed, 42 | with shape (Points, Batch, Len, Dims). 43 | p3s : tf.Tensor 44 | Third set of points with shape (Points, Batch, Len, Dims). 45 | 46 | Returns 47 | ------- 48 | tf.Tensor 49 | A tensor representing the angle (in radians) at point `p2s` 50 | for the triangle with shape (Points, Batch, Len). 51 | 52 | Note 53 | ---- 54 | This method determines the vectors pointing towards `p1s` and `p3s` 55 | from the point `p2s`, normalizes these vectors, and then computes 56 | the dot product between them. The angle between these vectors is 57 | computed using the arccosine function on the dot product. 
58 | 59 | Refrences: 60 | * https://stackoverflow.com/questions/19729831/angle-between-3-points-in-3d-space 61 | """ 62 | 63 | # Following https://stackoverflow.com/questions/19729831/angle-between-3-points-in-3d-space 64 | v1 = p1s - p2s # (Points, Batch, Len, Dims) 65 | v2 = p3s - p2s # (Points, Batch, Len, Dims) 66 | 67 | v1_norm = get_vectors_norm(v1) 68 | v2_norm = get_vectors_norm(v2) 69 | 70 | slopes = tf.reduce_sum(v1_norm * v2_norm, axis=3) 71 | angles = tf.acos(slopes) 72 | 73 | angles = tf.where(tf.math.is_nan(angles), 0., angles) # Fix NaN, TODO think of faster way 74 | return angles 75 | -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/representation/inner_angle_test.py: -------------------------------------------------------------------------------- 1 | import math 2 | import os 3 | from unittest import TestCase 4 | 5 | os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' 6 | 7 | import tensorflow as tf 8 | 9 | from pose_format.tensorflow.representation.inner_angle import \ 10 | InnerAngleRepresentation 11 | 12 | representation = InnerAngleRepresentation() 13 | 14 | 15 | class TestInnerAngleRepresentation(TestCase): 16 | """Test case for InnerAngleRepresentation class""" 17 | 18 | def test_call_value_should_be_inner_angle(self): 19 | """Test if the calculated inner angles are correct.""" 20 | p1s = tf.constant([[[[2, 3, 4]]]], dtype=tf.float32) 21 | p2s = tf.constant([[[[1, 1, 1]]]], dtype=tf.float32) 22 | p3s = tf.constant([[[[3, 4, 2]]]], dtype=tf.float32) 23 | angles = representation(p1s, p2s, p3s) 24 | self.assertAlmostEqual(float(angles[0][0][0]), math.acos(11 / 14)) 25 | -------------------------------------------------------------------------------- /src/python/pose_format/tensorflow/representation/point_line_distance.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | from .distance import DistanceRepresentation 4 | 5 
class PointLineDistanceRepresentation:
    """
    Computes the distance between a point and the line through two other points.

    Parameters
    ----------
    distance : :class:`~pose_format.tensorflow.representation.distance.DistanceRepresentation`
        Instance used to compute the Euclidean side lengths.
    """

    def __init__(self):
        """Set up the Euclidean distance helper used for the triangle sides."""
        self.distance = DistanceRepresentation()

    def __call__(self, p1s: tf.Tensor, p2s: tf.Tensor, p3s: tf.Tensor) -> tf.Tensor:
        """
        Compute the distance of ``p1s`` from the line segment ``p2s``-``p3s``.

        Parameters
        ----------
        p1s : tf.Tensor
            Points whose distance from the segment is wanted,
            shape (Points, Batch, Len, Dims).
        p2s : tf.Tensor
            One endpoint of the segment, shape (Points, Batch, Len, Dims).
        p3s : tf.Tensor
            The other endpoint of the segment, shape (Points, Batch, Len, Dims).

        Returns
        -------
        tf.Tensor
            Distances of ``p1s`` from the segment, shape (Points, Batch, Len).

        Note
        ----
        Uses Heron's formula to get the area of the triangle formed by the
        three points, then derives the triangle's height over the base
        ``p2s``-``p3s`` from area = base * height / 2.

        * References:
            Following Heron's Formula https://en.wikipedia.org/wiki/Heron%27s_formula
        """

        # Following Heron's Formula https://en.wikipedia.org/wiki/Heron%27s_formula
        side_a = self.distance.distance(p1s, p2s)
        side_b = self.distance.distance(p2s, p3s)  # the base of the triangle
        side_c = self.distance.distance(p1s, p3s)
        semi: tf.Tensor = (side_a + side_b + side_c) / 2
        area = tf.sqrt(semi * (semi - side_a) * (semi - side_b) * (semi - side_c))

        # Height relative to the base: divide_no_nan yields 0 when the base
        # has zero length.
        # TODO add .zero_filled()
        return tf.math.divide_no_nan(area * 2, side_b)
https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/testing/__init__.py -------------------------------------------------------------------------------- /src/python/pose_format/testing/pybase/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["restricted"]) 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | py_library( 6 | name = "pybase", 7 | srcs = ["pybase_dummy.py"], 8 | srcs_version = "PY2AND3", 9 | ) 10 | 11 | genrule( 12 | name = "dummy", 13 | outs = ["pybase_dummy.py"], 14 | cmd = "touch $@", 15 | visibility = ["//visibility:private"], 16 | ) -------------------------------------------------------------------------------- /src/python/pose_format/third_party/OpenCVX/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["restricted"]) 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | py_library( 6 | name = "core", 7 | srcs = ["cv2_dummy.py"], 8 | srcs_version = "PY2AND3", 9 | ) 10 | 11 | py_library( 12 | name = "video", 13 | srcs = ["cv2_dummy.py"], 14 | srcs_version = "PY2AND3", 15 | ) 16 | 17 | genrule( 18 | name = "dummy", 19 | outs = ["cv2_dummy.py"], 20 | cmd = "touch $@", 21 | visibility = ["//visibility:private"], 22 | ) -------------------------------------------------------------------------------- /src/python/pose_format/third_party/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/third_party/__init__.py -------------------------------------------------------------------------------- /src/python/pose_format/third_party/py/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/third_party/py/__init__.py -------------------------------------------------------------------------------- /src/python/pose_format/third_party/py/dataclasses/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["restricted"]) 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | py_library( 6 | name = "dataclasses", 7 | srcs = ["dataclasses_dummy.py"], 8 | srcs_version = "PY2AND3", 9 | ) 10 | 11 | genrule( 12 | name = "dummy", 13 | outs = ["dataclasses_dummy.py"], 14 | cmd = "touch $@", 15 | visibility = ["//visibility:private"], 16 | ) -------------------------------------------------------------------------------- /src/python/pose_format/third_party/py/numpy/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["restricted"]) 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | py_library( 6 | name = "numpy", 7 | srcs = ["numpy_dummy.py"], 8 | srcs_version = "PY2AND3", 9 | ) 10 | 11 | genrule( 12 | name = "dummy", 13 | outs = ["numpy_dummy.py"], 14 | cmd = "touch $@", 15 | visibility = ["//visibility:private"], 16 | ) -------------------------------------------------------------------------------- /src/python/pose_format/third_party/py/scipy/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["restricted"]) 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | py_library( 6 | name = "scipy", 7 | srcs = ["scipy_dummy.py"], 8 | srcs_version = "PY2AND3", 9 | ) 10 | 11 | genrule( 12 | name = "dummy", 13 | outs = ["scipy_dummy.py"], 14 | cmd = "touch $@", 15 | visibility = ["//visibility:private"], 16 | ) -------------------------------------------------------------------------------- /src/python/pose_format/third_party/py/tensorflow/BUILD: 
-------------------------------------------------------------------------------- 1 | licenses(["restricted"]) 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | py_library( 6 | name = "tensorflow", 7 | srcs = ["tensorflow_dummy.py"], 8 | srcs_version = "PY2AND3", 9 | ) 10 | 11 | genrule( 12 | name = "dummy", 13 | outs = ["tensorflow_dummy.py"], 14 | cmd = "touch $@", 15 | visibility = ["//visibility:private"], 16 | ) -------------------------------------------------------------------------------- /src/python/pose_format/third_party/py/torch/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["restricted"]) 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | py_library( 6 | name = "pytorch", 7 | srcs = ["torch_dummy.py"], 8 | srcs_version = "PY2AND3", 9 | ) 10 | 11 | genrule( 12 | name = "dummy", 13 | outs = ["torch_dummy.py"], 14 | cmd = "touch $@", 15 | visibility = ["//visibility:private"], 16 | ) -------------------------------------------------------------------------------- /src/python/pose_format/third_party/py/tqdm/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["restricted"]) 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | py_library( 6 | name = "tqdm", 7 | srcs = ["tqdm_dummy.py"], 8 | srcs_version = "PY2AND3", 9 | ) 10 | 11 | genrule( 12 | name = "dummy", 13 | outs = ["tqdm_dummy.py"], 14 | cmd = "touch $@", 15 | visibility = ["//visibility:private"], 16 | ) -------------------------------------------------------------------------------- /src/python/pose_format/torch/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | py_library( 4 | name = "pose_body", 5 | srcs = ["pose_body.py"], 6 | srcs_version = "PY3", 7 | visibility = ["//visibility:public"], 8 | deps = [ 9 | "//third_party/py/numpy", 10 | "//:pose_body", 11 | 
"//:pose_header", 12 | "//torch/masked:tensor", 13 | "//utils:reader", 14 | "//third_party/py/torch:pytorch", 15 | ], 16 | ) 17 | 18 | py_library( 19 | name = "pose_representation", 20 | srcs = ["pose_representation.py"], 21 | srcs_version = "PY3", 22 | visibility = ["//visibility:public"], 23 | deps = [ 24 | "//:pose_header", 25 | "//:pose_representation", 26 | "//third_party/py/torch:pytorch", 27 | ], 28 | ) 29 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/torch/__init__.py -------------------------------------------------------------------------------- /src/python/pose_format/torch/masked/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | py_library( 4 | name = "tensor", 5 | srcs = ["tensor.py"], 6 | srcs_version = "PY3", 7 | visibility = ["//visibility:public"], 8 | deps = [ 9 | "//third_party/py/torch:pytorch", 10 | ], 11 | ) 12 | 13 | py_library( 14 | name = "torch", 15 | srcs = ["torch.py"], 16 | srcs_version = "PY3", 17 | visibility = ["//visibility:public"], 18 | deps = [ 19 | ":tensor", 20 | "//third_party/py/torch:pytorch", 21 | ], 22 | ) 23 | 24 | py_test( 25 | name = "torch_test", 26 | srcs = ["torch_test.py"], 27 | python_version = "PY3", 28 | srcs_version = "PY3", 29 | visibility = ["//visibility:public"], 30 | deps = [ 31 | ":tensor", 32 | ":torch", 33 | "//testing/pybase", 34 | ], 35 | ) 36 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/masked/__init__.py: -------------------------------------------------------------------------------- 1 | from pose_format.torch.masked.tensor import MaskedTensor 2 | from 
pose_format.torch.masked.torch import MaskedTorch 3 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/masked/collator.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Tuple, Union 2 | 3 | import numpy as np 4 | import torch 5 | from pose_format.torch.masked import MaskedTensor, MaskedTorch 6 | 7 | 8 | def pad_tensors(batch: List[Union[torch.Tensor, MaskedTensor]], pad_value=0): 9 | datum = batch[0] 10 | torch_cls = MaskedTorch if isinstance(datum, MaskedTensor) else torch 11 | 12 | max_len = max(len(t) for t in batch) 13 | if max_len == 1: 14 | return torch_cls.stack(batch, dim=0) 15 | 16 | new_batch = [] 17 | for tensor in batch: 18 | missing = list(tensor.shape) 19 | missing[0] = max_len - tensor.shape[0] 20 | 21 | if missing[0] > 0: 22 | padding_tensor = torch.full(missing, fill_value=pad_value, dtype=tensor.dtype, device=tensor.device) 23 | if isinstance(tensor, MaskedTensor): 24 | padding_tensor = MaskedTensor(tensor=padding_tensor, mask=torch.zeros_like(padding_tensor, dtype=torch.bool)) 25 | tensor = torch_cls.cat([tensor, padding_tensor], dim=0) 26 | 27 | new_batch.append(tensor) 28 | 29 | return torch_cls.stack(new_batch, dim=0) 30 | 31 | 32 | def collate_tensors(batch: List, pad_value=0) -> Union[torch.Tensor, List]: 33 | datum = batch[0] 34 | 35 | if isinstance(datum, dict): # Recurse over dictionaries 36 | return zero_pad_collator(batch) 37 | 38 | if isinstance(datum, (int, np.int32)): 39 | return torch.tensor(batch, dtype=torch.long) 40 | 41 | if isinstance(datum, (MaskedTensor, torch.Tensor)): 42 | return pad_tensors(batch, pad_value=pad_value) 43 | 44 | return batch 45 | 46 | 47 | def zero_pad_collator(batch) -> Union[Dict[str, torch.Tensor], Tuple[torch.Tensor, ...]]: 48 | datum = batch[0] 49 | 50 | # For strings 51 | if isinstance(datum, str): 52 | return batch 53 | 54 | # For tuples 55 | if isinstance(datum, tuple): 
56 | return tuple(collate_tensors([b[i] for b in batch]) for i in range(len(datum))) 57 | 58 | # For tensors 59 | if isinstance(datum, MaskedTensor): 60 | return collate_tensors(batch) 61 | 62 | # For dictionaries 63 | keys = datum.keys() 64 | return {k: collate_tensors([b[k] for b in batch]) for k in keys} 65 | 66 | 67 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/masked/torch.py: -------------------------------------------------------------------------------- 1 | from typing import List, Union 2 | 3 | import torch 4 | 5 | from pose_format.torch.masked.tensor import MaskedTensor 6 | 7 | 8 | class TorchFallback(type): 9 | """Meta class that gives a fallback mechanism to use torch functions on :class:`~pose_format.torch.masked.tensor.MaskedTensor` objects. :noindex:""" 10 | doesnt_change_mask = {"sqrt", "square", "unsqueeze", "cos", "sin", "tan", "acos", "asin", "atan"} 11 | 12 | def __getattr__(cls, attr): 13 | """ 14 | Redirects calls to PyTorch functions to handle :class:`~pose_format.torch.masked.tensor.MaskedTensor` instances. 15 | 16 | If the first argument is a :class:`~pose_format.torch.masked.tensor.MaskedTensor`, its mask is taken into account. 
17 | """ 18 | 19 | def func(*args, **kwargs): 20 | if len(args) > 0 and isinstance(args[0], MaskedTensor): 21 | args = list(args) 22 | mask = args[0].mask 23 | args[0] = args[0].tensor 24 | 25 | res = getattr(torch, attr)(*args, **kwargs) 26 | if attr in TorchFallback.doesnt_change_mask: 27 | return MaskedTensor(res, mask) 28 | else: 29 | return res 30 | 31 | else: # If this action is done on an unmasked tensor 32 | return getattr(torch, attr)(*args, **kwargs) 33 | 34 | return func 35 | 36 | 37 | class MaskedTorch(metaclass=TorchFallback): 38 | """class mimicing torch functions and giving support for :class:`~pose_format.torch.masked.tensor.MaskedTensor`.""" 39 | 40 | @staticmethod 41 | def cat(tensors: List[Union[MaskedTensor, torch.Tensor]], dim: int) -> MaskedTensor: 42 | """ 43 | Concatenate :class:`~pose_format.torch.masked.tensor.MaskedTensor` objects along a specified dimension. 44 | 45 | Parameters 46 | ---------- 47 | tensors : list 48 | List of tensors or :class:`~pose_format.torch.masked.tensor.MaskedTensor` objects to be concatenated. 49 | dim : int 50 | Dimension along to concatenate. 51 | 52 | Returns 53 | ------- 54 | :class:`~pose_format.torch.masked.tensor.MaskedTensor` 55 | Concatenated tensor. 56 | """ 57 | tensors: List[MaskedTensor] = [t if isinstance(t, MaskedTensor) else MaskedTensor(tensor=t) for t in tensors] 58 | tensor = torch.cat([t.tensor for t in tensors], dim=dim) 59 | mask = torch.cat([t.mask for t in tensors], dim=dim) 60 | return MaskedTensor(tensor=tensor, mask=mask) 61 | 62 | @staticmethod 63 | def stack(tensors: List[MaskedTensor], dim: int) -> MaskedTensor: 64 | """ 65 | Stack :class:`~pose_format.torch.masked.tensor.MaskedTensor` objects along a new dimension. 66 | 67 | Parameters 68 | ---------- 69 | tensors : list 70 | List of :class:`~pose_format.torch.masked.tensor.MaskedTensor` objects to be stacked. 71 | dim : int 72 | New dimension along which to stack. 
73 | 74 | Returns 75 | ------- 76 | :class:`~pose_format.torch.masked.tensor.MaskedTensor` 77 | Stacked maked tensor. 78 | """ 79 | tensor = torch.stack([t.tensor for t in tensors], dim=dim) 80 | mask = torch.stack([t.mask for t in tensors], dim=dim) 81 | return MaskedTensor(tensor=tensor, mask=mask) 82 | 83 | @staticmethod 84 | def zeros(*size, dtype=None) -> MaskedTensor: 85 | """ 86 | Creates a :class:`~pose_format.torch.masked.tensor.MaskedTensor` of zeros with a given shape and data type. 87 | 88 | Parameters 89 | ---------- 90 | *size : ints 91 | Dimensions of desired tensor. 92 | dtype : torch.dtype, optional 93 | Data type of the tensor. If None, defaults to `torch.float`. 94 | 95 | Returns 96 | ------- 97 | :class:`~pose_format.torch.masked.tensor.MaskedTensor` 98 | masked tensor filled with zeros. 99 | """ 100 | tensor = torch.zeros(*size, dtype=dtype) 101 | mask = torch.zeros(*size, dtype=torch.bool) 102 | return MaskedTensor(tensor=tensor, mask=mask) 103 | 104 | @staticmethod 105 | def squeeze(masked_tensor: MaskedTensor) -> MaskedTensor: 106 | """ 107 | Remove dimensions of size 1 from :class:`~pose_format.torch.masked.tensor.MaskedTensor`. 108 | 109 | Parameters 110 | ---------- 111 | masked_tensor : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 112 | tensor from which dimensions are to be removed. 113 | 114 | Returns 115 | ------- 116 | :class:`~pose_format.torch.masked.tensor.MaskedTensor` 117 | Squeezed masked tensor. 
118 | """ 119 | tensor = torch.squeeze(masked_tensor.tensor) 120 | mask = torch.squeeze(masked_tensor.mask) 121 | return MaskedTensor(tensor=tensor, mask=mask) 122 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/masked/torch_test.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | from unittest import TestCase 3 | 4 | import torch 5 | 6 | from pose_format.torch.masked.tensor import MaskedTensor 7 | from pose_format.torch.masked.torch import MaskedTorch 8 | 9 | 10 | class TestMaskedTorch(TestCase): 11 | """Test cases for the :class:`~pose_format.torch.masked.tensor.MaskedTensor` class """ 12 | 13 | # cat 14 | def test_cat(self): 15 | """Test `cat` method for concatenating :class:`~pose_format.torch.masked.tensor.MaskedTensor` objects along a specified dimension.""" 16 | tensor1 = MaskedTensor(torch.tensor([1, 2])) 17 | tensor2 = MaskedTensor(torch.tensor([3, 4])) 18 | stack = MaskedTorch.cat([tensor1, tensor2], dim=0) 19 | res = MaskedTensor(torch.tensor([[1, 2, 3, 4]])) 20 | self.assertTrue(torch.all(stack == res), msg="Cat is not equal to expected") 21 | 22 | # stack 23 | def test_stack(self): 24 | """Tests `stack` method for stacking :class:`~pose_format.torch.masked.tensor.MaskedTensor` objects along a new dimension.""" 25 | tensor1 = MaskedTensor(torch.tensor([1, 2])) 26 | tensor2 = MaskedTensor(torch.tensor([3, 4])) 27 | stack = MaskedTorch.stack([tensor1, tensor2], dim=0) 28 | res = MaskedTensor(torch.tensor([[1, 2], [3, 4]])) 29 | self.assertTrue(torch.all(stack == res), msg="Stack is not equal to expected") 30 | 31 | # zeros 32 | def test_zeros_tensor_shape(self): 33 | """Test if `zeros` method correctly produces a :class:`~pose_format.torch.masked.tensor.MaskedTensor` with the desired shape.""" 34 | zeros = MaskedTorch.zeros(1, 2, 3) 35 | self.assertEqual(zeros.shape, (1, 2, 3)) 36 | 37 | def test_zeros_tensor_value(self): 38 | """Test if 
the `zeros` method produces a :class:`~pose_format.torch.masked.tensor.MaskedTensor` with all zero values.""" 39 | zeros = MaskedTorch.zeros(1, 2, 3) 40 | self.assertTrue(torch.all(zeros == 0), msg="Zeros are not all zeros") 41 | 42 | def test_zeros_tensor_type_float(self): 43 | """Test if the `zeros` method produces a :class:`~pose_format.torch.masked.tensor.MaskedTensor` with the correct float data type.""" 44 | dtype = torch.float 45 | zeros = MaskedTorch.zeros(1, 2, 3, dtype=dtype) 46 | self.assertEqual(zeros.tensor.dtype, dtype) 47 | 48 | def test_zeros_tensor_type_bool(self): 49 | """Test if the `zeros` method produces a :class:`~pose_format.torch.masked.tensor.MaskedTensor` with the correct boolean data type.""" 50 | dtype = torch.bool 51 | zeros = MaskedTorch.zeros(1, 2, 3, dtype=dtype) 52 | self.assertEqual(zeros.tensor.dtype, dtype) 53 | 54 | def test_zeros_mask_value(self): 55 | """Test if the mask in the produced `zeros` :class:`~pose_format.torch.masked.tensor.MaskedTensor` is initialized with zero values.""" 56 | zeros = MaskedTorch.zeros(1, 2, 3) 57 | self.assertTrue(torch.all(zeros.mask == 0), msg="Zeros mask are not all zeros") 58 | 59 | # Fallback 60 | def test_not_implemented_method(self): 61 | """Tests behavior when invoking an unimplemented method on a :class:`~pose_format.torch.masked.tensor.MaskedTensor`.""" 62 | tensor = MaskedTensor(tensor=torch.tensor([1, 2, 3])) 63 | torch_sum = MaskedTorch.sum(tensor) 64 | self.assertEqual(torch_sum, torch.tensor(6)) 65 | 66 | def test_pickle(self): 67 | tensor = MaskedTensor(torch.tensor([1, 2, 3])) 68 | pickled_tensor = pickle.dumps(tensor) 69 | loaded_tensor = pickle.loads(pickled_tensor) 70 | self.assertTrue(torch.all(tensor.tensor == loaded_tensor.tensor), 71 | msg="Pickled tensor is not equal to original tensor") 72 | self.assertTrue(torch.all(tensor.mask == loaded_tensor.mask), 73 | msg="Pickled tensor mask is not equal to original tensor") 74 | 
-------------------------------------------------------------------------------- /src/python/pose_format/torch/pose_body.py: -------------------------------------------------------------------------------- 1 | from typing import List, Union 2 | 3 | import numpy as np 4 | import torch 5 | 6 | from ..pose_body import POINTS_DIMS, PoseBody 7 | from .masked.tensor import MaskedTensor 8 | 9 | 10 | class TorchPoseBody(PoseBody): 11 | """ 12 | TorchPoseBody class of pose information with PyTorch tensors. 13 | 14 | This class extends the PoseBody class and provides methods for manipulating pose data using PyTorch tensors. 15 | """ 16 | 17 | """str: Reader format for unpacking Torch tensors.""" 18 | tensor_reader = 'unpack_torch' 19 | 20 | def __init__(self, fps: float, data: Union[MaskedTensor, torch.Tensor], confidence: torch.Tensor): 21 | if isinstance(data, torch.Tensor): # If array is not masked 22 | mask = confidence > 0 23 | stacked_mask = torch.stack([mask] * data.shape[-1], dim=3) 24 | data = MaskedTensor(data, stacked_mask) 25 | 26 | super().__init__(fps, data, confidence) 27 | 28 | def cuda(self): 29 | """Move data and confidence of tensors to GPU""" 30 | self.data = self.data.cuda() 31 | self.confidence = self.confidence.cuda() 32 | 33 | def copy(self) -> 'TorchPoseBody': 34 | data_copy = MaskedTensor(tensor=self.data.tensor.detach().clone().to(self.data.tensor.device), 35 | mask=self.data.mask.detach().clone().to(self.data.mask.device), 36 | ) 37 | confidence_copy = self.confidence.detach().clone().to(self.confidence.device) 38 | 39 | return self.__class__(fps=self.fps, 40 | data=data_copy, 41 | confidence=confidence_copy) 42 | 43 | 44 | def zero_filled(self) -> 'TorchPoseBody': 45 | """ 46 | Fill invalid values with zeros. 47 | 48 | Returns 49 | ------- 50 | TorchPoseBody 51 | TorchPoseBody instance with masked data filled with zeros. 
52 | 53 | """ 54 | copy = self.copy() 55 | copy.data = copy.data.zero_filled() 56 | return copy 57 | 58 | def matmul(self, matrix: np.ndarray) -> 'TorchPoseBody': 59 | """ 60 | Matrix multiplication on pose data. 61 | 62 | Parameters 63 | ---------- 64 | matrix : np.ndarray 65 | matrix to perform multiplication with 66 | 67 | Returns 68 | ------- 69 | TorchPoseBody 70 | A new TorchPoseBody instance with results of matrix multiplication. 71 | 72 | """ 73 | data = self.data.matmul(torch.from_numpy(matrix)) 74 | return self.__class__(fps=self.fps, data=data, confidence=self.confidence) 75 | 76 | def points_perspective(self): 77 | """ 78 | Get pose data with dimensions permuted according to POINTS_DIMS. 79 | 80 | Returns 81 | ------- 82 | :class:`~pose_format.torch.masked.tensor.MaskedTensor` 83 | A :class:`~pose_format.torch.masked.tensor.MaskedTensor` instance with dimensions permuted for points perspective. 84 | 85 | """ 86 | return self.data.permute(POINTS_DIMS) 87 | 88 | def get_points(self, indexes: List[int]): 89 | """ 90 | Get specific points from pose data. 91 | 92 | Parameters 93 | ---------- 94 | indexes : List[int] 95 | List of indexes specifying the points that you need. 96 | 97 | Returns 98 | ------- 99 | TorchPoseBody 100 | New TorchPoseBody instance containing specified points and associated confidence values. 101 | 102 | """ 103 | data = self.points_perspective() 104 | new_data = data[indexes].permute(POINTS_DIMS) 105 | 106 | confidence_reshape = (2, 1, 0) 107 | confidence = self.confidence.permute(confidence_reshape) 108 | new_confidence = confidence[indexes].permute(confidence_reshape) 109 | 110 | return self.__class__(self.fps, new_data, new_confidence) 111 | 112 | def flatten(self): 113 | """ 114 | Flatten pose data along the associated confidence values. 115 | 116 | Returns 117 | ------- 118 | torch.Tensor 119 | Flattened tensor containing indexes, confidence values, and data. 
120 | 121 | """ 122 | shape = self.data.shape 123 | data = self.data.tensor.reshape(-1, shape[-1]) # Not masked data 124 | confidence = self.confidence.flatten() 125 | indexes = torch.tensor(list(np.ndindex(shape[:-1])), dtype=torch.float32, device=data.device) 126 | flat = torch.cat([indexes, torch.unsqueeze(confidence, dim=1), data], dim=1) 127 | # Filter data from flat 128 | flat = flat[confidence != 0.] 129 | # Scale the first axis by fps 130 | scalar = torch.ones(len(shape) + shape[-1], device=data.device) 131 | scalar[0] = 1 / self.fps 132 | return flat * scalar 133 | 134 | 135 | 136 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/pose_representation.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import torch 4 | 5 | from ..pose_header import PoseHeader 6 | from ..pose_representation import PoseRepresentation 7 | 8 | 9 | class TorchPoseRepresentation(PoseRepresentation): 10 | """ 11 | TorchPoseRepresentation class representing pose information using PyTorch tensors. 12 | 13 | This class extends the PoseRepresentation class and provides methods for manipulating and representing pose data 14 | using PyTorch tensors. 15 | 16 | Parameters 17 | ---------- 18 | header : PoseHeader 19 | Header describing the pose data structure. 20 | rep_modules1 : List 21 | List of additional representation modules (level 1) to apply to pose data. 22 | rep_modules2 : List 23 | List of additional representation modules (level 2) to apply to pose data. 24 | rep_modules3 : List 25 | List of additional representation modules (level 3) to apply to pose data. 
26 | """ 27 | 28 | def __init__(self, header: PoseHeader, rep_modules1: List = [], rep_modules2: List = [], rep_modules3: List = []): 29 | super(TorchPoseRepresentation, self).__init__(header, rep_modules1, rep_modules2, rep_modules3) 30 | 31 | # Change limb points to torch 32 | self.limb_pt1s = torch.tensor(self.limb_pt1s, dtype=torch.long) 33 | self.limb_pt2s = torch.tensor(self.limb_pt2s, dtype=torch.long) 34 | 35 | # Change triangle points to torch 36 | self.triangle_pt1s = torch.tensor(self.triangle_pt1s, dtype=torch.long) 37 | self.triangle_pt2s = torch.tensor(self.triangle_pt2s, dtype=torch.long) 38 | self.triangle_pt3s = torch.tensor(self.triangle_pt3s, dtype=torch.long) 39 | 40 | def group_embeds(self, embeds: List[torch.Tensor]): 41 | """ 42 | Group and reshape embedded tensors for batch processing. 43 | 44 | Parameters 45 | ---------- 46 | embeds : List[torch.Tensor] 47 | List of embedded tensors of size (embed_size, Batch, Len). 48 | 49 | Returns 50 | ------- 51 | torch.Tensor 52 | A tensor of size (Batch, Len, embed_size) with grouped and reshaped embedded tensors. 53 | 54 | """ 55 | group = torch.cat(embeds, dim=0) # (embed_size, Batch, Len) 56 | return group.permute(dims=[1, 2, 0]) 57 | 58 | def permute(self, src, shape: tuple): 59 | """ 60 | Permute dimensions of tensor according to a specified shape (tuple). 61 | 62 | Parameters 63 | ---------- 64 | src : torch.Tensor 65 | tensor to permute 66 | shape : tuple 67 | desired shape of the tensor after permutation. 68 | 69 | Returns 70 | ------- 71 | torch.Tensor 72 | tensor with permuted dimensions according to specified shape. 
73 | 74 | """ 75 | return src.permute(shape) 76 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/representation/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | py_library( 4 | name = "angle", 5 | srcs = ["angle.py"], 6 | srcs_version = "PY3", 7 | visibility = ["//visibility:public"], 8 | deps = [ 9 | "//third_party/py/torch:pytorch", 10 | ], 11 | ) 12 | 13 | py_test( 14 | name = "angle_test", 15 | srcs = ["angle_test.py"], 16 | python_version = "PY3", 17 | srcs_version = "PY3", 18 | visibility = ["//visibility:public"], 19 | deps = [ 20 | ":angle", 21 | "//testing/pybase", 22 | "//third_party/py/torch:pytorch", 23 | ], 24 | ) 25 | 26 | py_library( 27 | name = "distance", 28 | srcs = ["distance.py"], 29 | srcs_version = "PY3", 30 | visibility = ["//visibility:public"], 31 | deps = [ 32 | "//third_party/py/torch:pytorch", 33 | ], 34 | ) 35 | 36 | py_test( 37 | name = "distance_test", 38 | srcs = ["distance_test.py"], 39 | python_version = "PY3", 40 | srcs_version = "PY3", 41 | visibility = ["//visibility:public"], 42 | deps = [ 43 | ":distance", 44 | "//testing/pybase", 45 | "//third_party/py/torch:pytorch", 46 | ], 47 | ) 48 | 49 | py_library( 50 | name = "inner_angle", 51 | srcs = ["inner_angle.py"], 52 | srcs_version = "PY3", 53 | visibility = ["//visibility:public"], 54 | deps = [ 55 | "//third_party/py/torch:pytorch", 56 | ], 57 | ) 58 | 59 | py_test( 60 | name = "inner_angle_test", 61 | srcs = ["inner_angle_test.py"], 62 | python_version = "PY3", 63 | srcs_version = "PY3", 64 | visibility = ["//visibility:public"], 65 | deps = [ 66 | ":inner_angle", 67 | "//testing/pybase", 68 | "//third_party/py/torch:pytorch", 69 | ], 70 | ) 71 | 72 | py_library( 73 | name = "point_line_distance", 74 | srcs = ["point_line_distance.py"], 75 | srcs_version = "PY3", 76 | visibility = ["//visibility:public"], 77 | deps = [ 78 | ":distance", 79 | 
"//third_party/py/torch:pytorch", 80 | ], 81 | ) 82 | 83 | py_test( 84 | name = "point_line_distance_test", 85 | srcs = ["point_line_distance_test.py"], 86 | python_version = "PY3", 87 | srcs_version = "PY3", 88 | visibility = ["//visibility:public"], 89 | deps = [ 90 | ":point_line_distance", 91 | "//testing/pybase", 92 | "//third_party/py/torch:pytorch", 93 | ], 94 | ) 95 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/representation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/torch/representation/__init__.py -------------------------------------------------------------------------------- /src/python/pose_format/torch/representation/angle.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from ..masked.tensor import MaskedTensor 5 | 6 | 7 | class AngleRepresentation(nn.Module): 8 | """ 9 | Class to compute the angle between the X/Y axis and the line segments formed by two sets of points. 10 | """ 11 | 12 | def forward(self, p1s: MaskedTensor, p2s: MaskedTensor) -> torch.Tensor: 13 | """ 14 | Computes angle in radians between X/Y axis and line segments made by two sets of points. 15 | 16 | Parameters 17 | ---------- 18 | p1s : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 19 | A tensor representing the first set of points with shape (Points, Batch, Len, Dims). 20 | 21 | p2s : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 22 | A tensor representing the second set of points with the same shape as `p1s`. 23 | 24 | Returns 25 | ------- 26 | torch.Tensor 27 | A tensor of angles (in radians) with shape (Points, Batch, Len). 28 | 29 | Note 30 | ---- 31 | The slope is determined for each pair of points. 
The arctangent function is then applied to calculate the angle in radians. 32 | """ 33 | dims = p1s.shape[-1] 34 | 35 | d = p2s - p1s # (Points, Batch, Len, Dims) 36 | xs, ys = d.split([1] * dims, dim=3)[:2] # (Points, Batch, Len, 1) 37 | slopes = ys.div(xs).fix_nan().zero_filled().squeeze(axis=3) 38 | 39 | return torch.atan(slopes) 40 | 41 | 42 | if __name__ == "__main__": 43 | representation = AngleRepresentation() 44 | p1s = MaskedTensor(torch.tensor([[[[1, 2, 3]]]], dtype=torch.float)) 45 | print(p1s.shape) 46 | 47 | p2s = MaskedTensor(torch.tensor([[[[4, 5, 6]]]], dtype=torch.float)) 48 | angles = representation(p1s, p2s) 49 | print(angles) 50 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/representation/angle_test.py: -------------------------------------------------------------------------------- 1 | import math 2 | from unittest import TestCase 3 | 4 | import torch 5 | 6 | from pose_format.torch.masked.tensor import MaskedTensor 7 | from pose_format.torch.representation.angle import AngleRepresentation 8 | 9 | representation = AngleRepresentation() 10 | 11 | 12 | class TestAngleRepresentation(TestCase): 13 | """ 14 | Test cases to make sure the correct functioning of ``AngleRepresentation``. 15 | """ 16 | 17 | def test_call_value_should_be_angle(self): 18 | """ 19 | Tests the computed angle value matches expected value. 20 | """ 21 | p1s = MaskedTensor(torch.tensor([[[[1, 2, 3]]]], dtype=torch.float)) 22 | p2s = MaskedTensor(torch.tensor([[[[4, 5, 6]]]], dtype=torch.float)) 23 | angles = representation(p1s, p2s) 24 | self.assertAlmostEqual(float(angles[0][0][0]), 1 / 4 * math.pi) 25 | 26 | def test_call_masked_value_should_be_zero(self): 27 | """ 28 | Test if the masked values in computed angle tensor are zeros. 
29 | """ 30 | mask = torch.tensor([[[[0, 1, 1]]]], dtype=torch.bool) 31 | p1s = MaskedTensor(torch.tensor([[[[1, 2, 3]]]], dtype=torch.float), mask) 32 | p2s = MaskedTensor(torch.tensor([[[[4, 5, 6]]]], dtype=torch.float)) 33 | angles = representation(p1s, p2s) 34 | self.assertEqual(float(angles[0][0][0]), 0) 35 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/representation/distance.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from pose_format.torch.masked.tensor import MaskedTensor 5 | from pose_format.torch.masked.torch import MaskedTorch 6 | 7 | 8 | class DistanceRepresentation(nn.Module): 9 | """ 10 | Represents the Euclidean distance between two points in space. 11 | """ 12 | 13 | def distance(self, p1s: MaskedTensor, p2s: MaskedTensor) -> MaskedTensor: 14 | """ 15 | Calculate the Euclidean distance between two sets of points. 16 | 17 | Parameters 18 | ---------- 19 | p1s : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 20 | Tensor representing the first set of points. 21 | 22 | p2s : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 23 | Tensor representing the second set of points. 24 | 25 | Returns 26 | ------- 27 | :class:`~pose_format.torch.masked.tensor.MaskedTensor` 28 | Tensor representing the calculated distances. 29 | """ 30 | diff = p1s - p2s # (..., Len, Dims) 31 | square = diff.pow_(2) 32 | sum_squares = square.sum(dim=-1) 33 | return MaskedTorch.sqrt(sum_squares) 34 | 35 | def forward(self, p1s: MaskedTensor, p2s: MaskedTensor) -> torch.Tensor: 36 | """ 37 | Computes Euclidean distance between two sets of points. 38 | 39 | Parameters 40 | ---------- 41 | p1s : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 42 | Tensor representing the first set of points. Shape: (Points, Batch, Len, Dims). 
43 | 44 | p2s : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 45 | Tensor representing the second set of points. Shape: (Points, Batch, Len, Dims). 46 | 47 | Returns 48 | ------- 49 | torch.Tensor 50 | Tensor representing the Euclidean distances. Shape: (Points, Batch, Len). 51 | """ 52 | return self.distance(p1s, p2s).zero_filled() 53 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/representation/distance_test.py: -------------------------------------------------------------------------------- 1 | import math 2 | from unittest import TestCase 3 | 4 | import torch 5 | 6 | from pose_format.torch.masked.tensor import MaskedTensor 7 | from pose_format.torch.representation.distance import DistanceRepresentation 8 | 9 | representation = DistanceRepresentation() 10 | 11 | 12 | class TestDistanceRepresentation(TestCase): 13 | """ 14 | Test cases of distance representation between points. 15 | """ 16 | 17 | def test_call_value_should_be_distance(self): 18 | """ 19 | Tests if the returned distance is as expected for given non-masked/unmasked points. 20 | """ 21 | p1s = MaskedTensor(torch.tensor([[[[1, 2, 3]]]], dtype=torch.float)) 22 | p2s = MaskedTensor(torch.tensor([[[[4, 5, 6]]]], dtype=torch.float)) 23 | distances = representation(p1s, p2s) 24 | self.assertAlmostEqual(float(distances[0][0][0]), math.sqrt(27), places=6) 25 | 26 | def test_call_masked_value_should_be_zero(self): 27 | """ 28 | Test if masked values return a distance of zero. 
29 | """ 30 | mask = torch.tensor([[[[0, 1, 1]]]], dtype=torch.bool) 31 | p1s = MaskedTensor(torch.tensor([[[[1, 2, 3]]]], dtype=torch.float), mask) 32 | p2s = MaskedTensor(torch.tensor([[[[4, 5, 6]]]], dtype=torch.float)) 33 | distances = representation(p1s, p2s) 34 | self.assertEqual(float(distances[0][0][0]), 0) 35 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/representation/inner_angle.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from ..masked.tensor import MaskedTensor 5 | from ..masked.torch import MaskedTorch 6 | 7 | 8 | def get_vectors_norm(vectors: MaskedTensor): 9 | """ 10 | Computes the normalized vectors from the given masked vectors. 11 | 12 | Parameters 13 | ---------- 14 | 15 | vectors : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 16 | The input masked vectors with any shape. 17 | 18 | Returns 19 | ------- 20 | :class:`~pose_format.torch.masked.tensor.MaskedTensor` 21 | The normalized masked vectors of the same shape as the input. 22 | Notes 23 | ----- 24 | The function squares the input vectors, then sums along the last dimension. 25 | Taking the square root of the sum provides the magnitude. The original vectors 26 | are then divided by this magnitude to normalize. 27 | """ 28 | square = MaskedTorch.square(vectors) 29 | summed = square.sum(dim=-1) 30 | v_mag = MaskedTorch.sqrt(summed) 31 | mag_stack = MaskedTorch.stack([v_mag] * vectors.shape[-1], dim=-1) 32 | return vectors.div(mag_stack) 33 | 34 | 35 | class InnerAngleRepresentation(nn.Module): 36 | """ 37 | A neural network module to compute the inner angle at a point for a triangle. 38 | """ 39 | 40 | def forward(self, p1s: MaskedTensor, p2s: MaskedTensor, p3s: MaskedTensor) -> torch.Tensor: 41 | """ 42 | Computes the angle in point `p2s` for the triangle defined by the points . 
43 | 44 | Parameters 45 | ---------- 46 | p1s : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 47 | A tensor representing the first set of points, with shape (Points, Batch, Len, Dims). 48 | p2s : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 49 | A tensor representing the second set of points (at which the angle is calculated), with shape (Points, Batch, Len, Dims). 50 | p3s : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 51 | A tensor representing the third set of points, with shape (Points, Batch, Len, Dims). 52 | 53 | Returns 54 | ------- 55 | torch.Tensor 56 | A tensor representing the computed angles at point `p2s`, with shape (Points, Batch, Len). 57 | 58 | Note 59 | ---- 60 | The method is based on the approach suggested in: 61 | https://stackoverflow.com/questions/19729831/angle-between-3-points-in-3d-space 62 | 63 | The function first computes the vectors v1 and v2 by subtracting points p1s and p3s 64 | from p2s, respectively. The vectors are then normalized. The angle is calculated by 65 | finding the arccosine of the dot product of the normalized vectors. NaN values in 66 | the resulting tensor are set to zero. 
67 | """ 68 | 69 | # Following https://stackoverflow.com/questions/19729831/angle-between-3-points-in-3d-space 70 | v1 = p1s - p2s # (Points, Batch, Len, Dims) 71 | v2 = p3s - p2s # (Points, Batch, Len, Dims) 72 | 73 | v1_norm = get_vectors_norm(v1) 74 | v2_norm = get_vectors_norm(v2) 75 | 76 | slopes = (v1_norm * v2_norm).sum(dim=-1) 77 | angles = MaskedTorch.acos(slopes) 78 | 79 | angles = angles.zero_filled() 80 | angles[angles != angles] = 0 # Fix NaN, TODO think of faster way 81 | 82 | return angles 83 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/representation/inner_angle_test.py: -------------------------------------------------------------------------------- 1 | import math 2 | from unittest import TestCase 3 | 4 | import torch 5 | 6 | from pose_format.torch.masked.tensor import MaskedTensor 7 | from pose_format.torch.representation.inner_angle import \ 8 | InnerAngleRepresentation 9 | 10 | representation = InnerAngleRepresentation() 11 | 12 | 13 | class TestInnerAngleRepresentation(TestCase): 14 | """ 15 | Unit test for the `InnerAngleRepresentation` class. 16 | 17 | This test class verifies that the angle calculations for the InnerAngleRepresentation 18 | class are correct and handle edge cases appropriately. 19 | 20 | Methods 21 | ------- 22 | test_call_value_should_be_inner_angle(): 23 | Tests if the calculated angle matches the expected angle value. 24 | 25 | test_call_masked_value_should_be_zero(): 26 | Tests if a masked value in the input results in a zero output angle. 27 | """ 28 | 29 | def test_call_value_should_be_inner_angle(self): 30 | """ 31 | Tests if the computed angle from the `InnerAngleRepresentation` matches the expected value. 32 | 33 | This test sets up three points and expects the computed angle at the middle point to be 34 | approximately equal to the angle calculated via arccosine of a predefined value. 
35 | """ 36 | p1s = MaskedTensor(torch.tensor([[[[2, 3, 4]]]], dtype=torch.float)) 37 | p2s = MaskedTensor(torch.tensor([[[[1, 1, 1]]]], dtype=torch.float)) 38 | p3s = MaskedTensor(torch.tensor([[[[3, 4, 2]]]], dtype=torch.float)) 39 | angles = representation(p1s, p2s, p3s) 40 | self.assertAlmostEqual(float(angles[0][0][0]), math.acos(11 / 14)) 41 | 42 | def test_call_masked_value_should_be_zero(self): 43 | """ 44 | Tests if masking an input value results in an output angle of zero. 45 | 46 | This test masks one of the input points and expects the computed angle at the middle 47 | point to be zero. 48 | """ 49 | mask = torch.tensor([[[[0, 1, 1]]]], dtype=torch.bool) 50 | p1s = MaskedTensor(torch.tensor([[[[2, 3, 4]]]], dtype=torch.float), mask) 51 | p2s = MaskedTensor(torch.tensor([[[[1, 1, 1]]]], dtype=torch.float)) 52 | p3s = MaskedTensor(torch.tensor([[[[3, 4, 2]]]], dtype=torch.float)) 53 | angles = representation(p1s, p2s, p3s) 54 | self.assertEqual(float(angles[0][0][0]), 0) 55 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/representation/point_line_distance.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from ..masked.tensor import MaskedTensor 5 | from ..masked.torch import MaskedTorch 6 | from .distance import DistanceRepresentation 7 | 8 | 9 | class PointLineDistanceRepresentation(nn.Module): 10 | """ 11 | Class computing distance between a point and a line segment. 12 | 13 | Parameters 14 | ---------- 15 | distance : :class:`~pose_format.torch.representation.distance.DistanceRepresentation` 16 | Instance of the `DistanceRepresentation` class to compute the Euclidean distance. 
class PointLineDistanceRepresentation(nn.Module):
    """
    Computes the distance between a point and a line segment.

    Attributes
    ----------
    distance : :class:`~pose_format.torch.representation.distance.DistanceRepresentation`
        Used to compute the pairwise Euclidean distances (the triangle sides).
    """

    def __init__(self):
        super(PointLineDistanceRepresentation, self).__init__()
        self.distance = DistanceRepresentation()

    def forward(self, p1s: MaskedTensor, p2s: MaskedTensor, p3s: MaskedTensor) -> torch.Tensor:
        """
        Distance from the point ``p1s`` to the line through ``p2s`` and ``p3s``.

        Uses Heron's formula (https://en.wikipedia.org/wiki/Heron%27s_formula)
        to obtain the area of the triangle formed by the three points, then
        derives the height over the base ``p2s``-``p3s``.

        Parameters
        ----------
        p1s : :class:`~pose_format.torch.masked.tensor.MaskedTensor`
            Point whose distance to the line is measured.
            Shape: (Points, Batch, Len, Dims).
        p2s : :class:`~pose_format.torch.masked.tensor.MaskedTensor`
            One end-point of the line. Shape: (Points, Batch, Len, Dims).
        p3s : :class:`~pose_format.torch.masked.tensor.MaskedTensor`
            Other end-point of the line. Shape: (Points, Batch, Len, Dims).

        Returns
        -------
        torch.Tensor
            Distances, zero-filled where masked. Shape: (Points, Batch, Len).
        """
        # Following Heron's Formula https://en.wikipedia.org/wiki/Heron%27s_formula
        a = self.distance.distance(p1s, p2s)
        b = self.distance.distance(p2s, p3s)  # base of the triangle
        c = self.distance.distance(p1s, p3s)
        s: MaskedTensor = (a + b + c) / 2  # semi-perimeter
        squared = s * (s - a) * (s - b) * (s - c)
        area = MaskedTorch.sqrt(squared)

        # Height of the triangle over base b; a degenerate base (b == 0)
        # produces NaN via division by zero.
        square_area: MaskedTensor = area * 2
        distance = square_area / b
        # BUG FIX: elsewhere in this package (angle.py) fix_nan() is used in
        # chained expression form, so it returns the repaired tensor; the old
        # code here discarded that return value. Reassigning is required if
        # fix_nan() is not in-place, and harmless if it is.
        distance = distance.fix_nan()

        return distance.zero_filled()
27 | """ 28 | mask = torch.tensor([[[[0, 1, 1]]]], dtype=torch.bool) 29 | p1s = MaskedTensor(torch.tensor([[[[2, 3, 4]]]], dtype=torch.float), mask) 30 | p2s = MaskedTensor(torch.tensor([[[[1, 1, 1]]]], dtype=torch.float)) 31 | p3s = MaskedTensor(torch.tensor([[[[3, 4, 2]]]], dtype=torch.float)) 32 | distances = representation(p1s, p2s, p3s) 33 | self.assertEqual(float(distances[0][0][0]), 0) 34 | -------------------------------------------------------------------------------- /src/python/pose_format/torch/representation/points.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from ..masked.tensor import MaskedTensor 5 | 6 | 7 | class PointsRepresentation(nn.Module): 8 | """ 9 | Class to represent points in a tensor format for processing. 10 | """ 11 | 12 | def forward(self, p1s: MaskedTensor) -> torch.Tensor: 13 | """ 14 | Transforms input tensor representing points into a desired tensor format. 15 | 16 | The transformation process with zero-filling the masked values in input tensor 17 | and reshaping tensor by transposing its dimensions to match the desired output format. 18 | 19 | Parameters 20 | ---------- 21 | p1s : :class:`~pose_format.torch.masked.tensor.MaskedTensor` 22 | Tensor representing a set of points. 23 | Shape: (Points, Batch, Len, Dims). 24 | 25 | Returns 26 | ------- 27 | torch.Tensor 28 | Transformed tensor representing the points. 29 | Shape: (Points*Dims, Batch, Len). 30 | 31 | Note 32 | ---- 33 | This method first fills masked values in input tensor with zeros. 
34 | Then, it reshapes tensor by transposing dimensions to match its desired output format 35 | """ 36 | 37 | p1s = p1s.zero_filled() 38 | p1s = p1s.transpose(1, 3) # (Points, Dims, Len, Batch) 39 | p1s = p1s.transpose(2, 3) # (Points, Dims, Batch, Len) 40 | shape = p1s.shape 41 | 42 | return p1s.view((-1, shape[2], shape[3])) 43 | -------------------------------------------------------------------------------- /src/python/pose_format/utils/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | py_library( 4 | name = "fast_math", 5 | srcs = ["fast_math.py"], 6 | srcs_version = "PY3", 7 | visibility = ["//visibility:public"], 8 | deps = [ 9 | "//third_party/py/numpy", 10 | ], 11 | ) 12 | 13 | py_library( 14 | name = "openpose", 15 | srcs = ["openpose.py"], 16 | srcs_version = "PY3", 17 | visibility = ["//visibility:public"], 18 | deps = [ 19 | "//third_party/py/numpy", 20 | "//:pose", 21 | "//:pose_body", 22 | "//:pose_header", 23 | ], 24 | ) 25 | 26 | py_library( 27 | name = "reader", 28 | srcs = ["reader.py"], 29 | srcs_version = "PY3", 30 | visibility = ["//visibility:public"], 31 | deps = [ 32 | "//third_party/py/dataclasses", 33 | "//third_party/py/numpy", 34 | "//third_party/py/tensorflow", 35 | "//third_party/py/torch:pytorch", 36 | ], 37 | ) 38 | 39 | py_test( 40 | name = "reader_test", 41 | srcs = ["reader_test.py"], 42 | python_version = "PY3", 43 | srcs_version = "PY3", 44 | visibility = ["//visibility:public"], 45 | deps = [ 46 | ":reader", 47 | "//testing/pybase", 48 | ], 49 | ) 50 | -------------------------------------------------------------------------------- /src/python/pose_format/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/pose_format/utils/__init__.py 
@pytest.fixture
def fake_poses(request) -> List[Pose]:
    """Build a list of fake ``Pose`` objects for the format in ``request.param``.

    Known formats get their standard components; any other value falls back to
    default components whose names are rewritten so nothing matches a known
    component name.
    """
    requested_format = request.param
    # Number of poses to generate; `count` is not a standard request attribute,
    # so default to 3 when absent.
    count = getattr(request, "count", 3)

    if requested_format in get_args(KnownPoseFormat):
        components = get_standard_components_for_known_format(requested_format)
        poses = [fake_pose(i * 10 + 10, components=components) for i in range(count)]
        return copy.deepcopy(poses)

    # Unknown format (e.g. openpose defaults): rename every component so the
    # result cannot be mistaken for a known format.
    poses = [fake_pose(i * 10 + 10) for i in range(count)]
    for i, pose in enumerate(poses):
        for component in pose.header.components:
            component.name = f"unknown_component_{i}_formerly_{component.name}"
    return [pose.copy() for pose in poses]
class OpticalFlowCalculator:
    """
    Class for computing optical flow between consecutive frames using a
    distance function.

    Parameters
    ----------
    fps : float
        Frames per second; the per-frame distance is scaled by this value so
        the flow is expressed per second.
    distance : callable
        Function computing the distance between two aligned frame tensors
        (called as ``distance(post_frames, pre_frames)``).
    """

    def __init__(self, fps, distance):
        self.fps = fps
        self.distance = distance

    def __call__(self, src):
        """
        Calculate the optical flow between consecutive frames, normalized by fps.

        Parameters
        ----------
        src : torch.Tensor
            Frame sequence; the first axis indexes frames.

        Returns
        -------
        torch.Tensor
            Per-step flow values, one fewer entry than frames along axis 0.
        """
        previous_frames = src[:-1]
        next_frames = src[1:]

        # Distance between each frame and its predecessor, scaled by fps.
        return self.distance(next_frames, previous_frames) * self.fps
name = "Mathias Müller", email = "anon1@ymous.com" }, 9 | { name = "Rebecka Fahrni", email = "anon2@ymous.com" } 10 | ] 11 | dependencies = [ 12 | "numpy", 13 | "scipy", 14 | "tqdm" 15 | ] 16 | requires-python = ">= 3.8" 17 | 18 | [project.optional-dependencies] 19 | dev = [ 20 | "pylint", 21 | "pytest", 22 | "opencv-python==4.5.5.64", 23 | "vidgear", 24 | "mediapipe", 25 | "torch", 26 | "tensorflow", 27 | "matplotlib" 28 | ] 29 | 30 | [tool.setuptools] 31 | packages = [ 32 | "pose_format", 33 | "pose_format.bin", 34 | "pose_format.utils", 35 | "pose_format.testing", 36 | "pose_format.numpy", 37 | "pose_format.numpy.representation", 38 | "pose_format.torch", 39 | "pose_format.torch.masked", 40 | "pose_format.torch.representation", 41 | "pose_format.tensorflow", 42 | "pose_format.tensorflow.masked", 43 | "pose_format.tensorflow.representation" 44 | ] 45 | 46 | 47 | [tool.pytest.ini_options] 48 | addopts = "-v" 49 | filterwarnings = [ 50 | "ignore::DeprecationWarning:flatbuffers" 51 | ] 52 | testpaths = [ 53 | "pose_format", 54 | "tests" 55 | ] 56 | 57 | [tool.yapf] 58 | based_on_style = "google" 59 | column_limit = 120 60 | 61 | [tool.pylint.format] 62 | max-line-length = 120 63 | disable = [ 64 | "C0114", # Missing module docstring 65 | "C0115", # Missing class docstring 66 | "C0116", # Missing function or method docstring 67 | "W0511", # TODO 68 | "W1203", # use lazy % formatting in logging functions 69 | ] 70 | 71 | [project.scripts] 72 | pose_info = "pose_format.bin.pose_info:main" 73 | video_to_pose = "pose_format.bin.pose_estimation:main" 74 | videos_to_poses = "pose_format.bin.directory:main" 75 | visualize_pose = "pose_format.bin.pose_visualizer:main" 76 | -------------------------------------------------------------------------------- /src/python/tests/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/tests/__init__.py -------------------------------------------------------------------------------- /src/python/tests/data/mediapipe.pose: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/tests/data/mediapipe.pose -------------------------------------------------------------------------------- /src/python/tests/data/mediapipe_hand_normalized.pose: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/tests/data/mediapipe_hand_normalized.pose -------------------------------------------------------------------------------- /src/python/tests/data/mediapipe_long.pose: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/tests/data/mediapipe_long.pose -------------------------------------------------------------------------------- /src/python/tests/data/mediapipe_long_hand_normalized.pose: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/tests/data/mediapipe_long_hand_normalized.pose -------------------------------------------------------------------------------- /src/python/tests/data/openpose.pose: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sign-language-processing/pose/d2194ccd2f90da97d46b15d9ec1bdf09ad16f213/src/python/tests/data/openpose.pose 
class TestOpticalFlow(TestCase):
    """Test cases for optical flow computation."""

    def test_optical_flow(self):
        """
        Computes optical flow from pose landmark data, renders it as a
        heatmap, and compares the result against the reference image
        ``tests/data/optical_flow.png``.

        Raises
        ------
        AssertionError
            If the rendered heatmap does not match the reference image.
        """
        calculator = OpticalFlowCalculator(fps=30, distance=DistanceRepresentation())

        with open('tests/data/mediapipe.pose', 'rb') as f:
            pose = Pose.read(f.read())
            pose = pose.get_components(["POSE_LANDMARKS", "RIGHT_HAND_LANDMARKS", "LEFT_HAND_LANDMARKS"])

        flow = calculator(pose.body.data).squeeze(axis=1)
        print(flow.shape)

        # Render the flow as a matplotlib heatmap into a temporary PNG.
        import matplotlib.pyplot as plt
        plt.imshow(flow.T)
        plt.tight_layout()

        fp = tempfile.NamedTemporaryFile()
        plt.savefig(fp.name, format='png')

        # compare_images returns None when the images match within tolerance.
        self.assertTrue(compare_images('tests/data/optical_flow.png', fp.name, 0.001) is None)
30 | """ 31 | with tf.Graph().as_default(): 32 | 33 | assert tf.executing_eagerly() is False 34 | 35 | pose = _get_random_pose_object_with_tf_posebody(num_keypoints=5) 36 | pose.frame_dropout_uniform() 37 | 38 | def test_pose_tf_posebody_frame_dropout_normal_graph_mode_does_not_fail(self): 39 | """ 40 | tests if frame dropout using normal distribution in Pose object with TensorFlow PoseBody in graph mode does not fail. 41 | """ 42 | with tf.Graph().as_default(): 43 | 44 | assert tf.executing_eagerly() is False 45 | 46 | pose = _get_random_pose_object_with_tf_posebody(num_keypoints=5) 47 | pose.frame_dropout_normal() 48 | -------------------------------------------------------------------------------- /src/python/tests/visualization_test.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | import os 3 | from unittest import TestCase 4 | 5 | from pose_format.pose import Pose 6 | from pose_format.pose_visualizer import PoseVisualizer 7 | 8 | 9 | class TestPoseVisualizer(TestCase): 10 | """ 11 | Test cases for PoseVisualizer functionality. 12 | """ 13 | 14 | def test_save_gif(self): 15 | """ 16 | Test saving pose visualization as GIF. 17 | """ 18 | with open("tests/data/mediapipe.pose", "rb") as f: 19 | pose = Pose.read(f.read()) 20 | 21 | v = PoseVisualizer(pose) 22 | 23 | with tempfile.NamedTemporaryFile(suffix='.gif', delete=False) as temp_gif: 24 | v.save_gif(temp_gif.name, v.draw()) 25 | self.assertTrue(os.path.exists(temp_gif.name)) 26 | self.assertGreater(os.path.getsize(temp_gif.name), 0) 27 | 28 | def test_save_png(self): 29 | """ 30 | Test saving pose visualization as PNG. 
31 | """ 32 | with open("tests/data/mediapipe_long.pose", "rb") as f: 33 | pose = Pose.read(f.read()) 34 | 35 | v = PoseVisualizer(pose) 36 | 37 | with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_png: 38 | v.save_png(temp_png.name, v.draw(transparency=True)) 39 | self.assertTrue(os.path.exists(temp_png.name)) 40 | self.assertGreater(os.path.getsize(temp_png.name), 0) 41 | 42 | def test_save_mp4(self): 43 | """ 44 | Test saving pose visualization as MP4 video. 45 | """ 46 | with open("tests/data/mediapipe_hand_normalized.pose", "rb") as f: 47 | pose = Pose.read(f.read()) 48 | 49 | v = PoseVisualizer(pose) 50 | 51 | with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_mp4: 52 | v.save_video(temp_mp4.name, v.draw()) 53 | self.assertTrue(os.path.exists(temp_mp4.name)) 54 | self.assertGreater(os.path.getsize(temp_mp4.name), 0) 55 | 56 | def test_save_to_memory(self): 57 | """ 58 | Test saving pose visualization as bytes. 59 | """ 60 | with open("tests/data/mediapipe_long_hand_normalized.pose", "rb") as f: 61 | pose = Pose.read(f.read()) 62 | 63 | v = PoseVisualizer(pose) 64 | file_bytes = v.save_png(None, v.draw()) 65 | self.assertGreater(len(file_bytes), 0) 66 | --------------------------------------------------------------------------------