├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yml └── workflows │ └── ghPages.yml ├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── __init__.py ├── docs ├── .gitignore ├── Makefile ├── doctrees │ ├── environment.pickle │ └── index.doctree ├── make.bat ├── requirements.txt └── source │ ├── conf.py │ ├── detection.rst │ ├── freemocap.rst │ ├── index.rst │ ├── installation │ ├── index.rst │ ├── installation.rst │ └── manual_dependency_installation.rst │ └── transfer │ ├── create_config.rst │ ├── index.rst │ └── transfer.rst ├── requirements.txt ├── setup.sh └── src ├── README.md ├── __init__.py ├── cgt_core ├── README.md ├── __init__.py ├── cgt_bpy │ ├── __init__.py │ ├── cgt_bpy_utils.py │ ├── cgt_collection.py │ ├── cgt_constraints.py │ ├── cgt_drivers.py │ ├── cgt_fc_actions.py │ ├── cgt_object_prop.py │ └── cgt_object_trie.py ├── cgt_calculators_nodes │ ├── README.md │ ├── __init__.py │ ├── calc_utils.py │ ├── cgt_math.py │ ├── mp_calc_face_rot.py │ ├── mp_calc_hand_rot.py │ └── mp_calc_pose_rot.py ├── cgt_core_chains.py ├── cgt_data │ ├── Rigify_Humanoid_DefaultFace_v0.6.1_bu.json │ ├── collections.json │ ├── face.json │ ├── hand.json │ └── pose.json ├── cgt_defaults.json ├── cgt_interface │ ├── __init__.py │ ├── cgt_core_panel.py │ └── cgt_core_registration.py ├── cgt_naming.py ├── cgt_output_nodes │ ├── __init__.py │ ├── blender_output.md │ ├── mp_face_out.py │ ├── mp_hand_out.py │ ├── mp_out_utils.py │ └── mp_pose_out.py ├── cgt_patterns │ ├── __init__.py │ ├── cgt_nodes.py │ └── observer_pattern.py └── cgt_utils │ ├── __init__.py │ ├── cgt_json.py │ ├── cgt_logging.py │ ├── cgt_timers.py │ ├── cgt_user_prefs.py │ └── prefs.json ├── cgt_freemocap ├── README.md ├── __init__.py ├── fm_interface.py ├── fm_operators.py ├── fm_paths.py ├── fm_registration.py ├── fm_session_loader.py ├── fm_subprocess_cmd_receiver.py └── fm_utils.py ├── cgt_imports.py ├── cgt_mediapipe ├── README.md ├── __init__.py ├── cgt_dependencies.py ├── cgt_mp_core │ ├── __init__.py │ ├── cv_stream.py │ ├── mp_detector_node.py │ ├── mp_face_detector.py │ ├── mp_hand_detector.py │ ├── mp_holistic_detector.py │ ├── mp_pose_detector.py │ └── test.py ├── cgt_mp_detection_operator.py ├── cgt_mp_interface.py ├── cgt_mp_preferences.py ├── cgt_mp_properties.py └── cgt_mp_registration.py ├── cgt_registration.py ├── cgt_socket_ipc ├── __init__.py ├── cgt_ipc_persistent_fns.py └── cgt_ipc_registration.py ├── cgt_tests ├── __init__.py ├── test_cgt_json.py └── test_cgt_math.py └── cgt_transfer ├── README.md ├── __init__.py ├── cgt_tf_3dview_panel.py ├── cgt_tf_io_config.py ├── cgt_tf_object_properties.py ├── cgt_tf_operators.py ├── cgt_tf_properties_panel.py ├── cgt_transfer_registration.py ├── core_transfer ├── __init__.py ├── tf_check_object_properties.py ├── tf_get_object_properties.py ├── tf_load_object_properties.py ├── tf_reflect_object_properties.py ├── tf_save_object_properties.py ├── tf_set_object_properties.py └── tf_transfer_management.py ├── data └── Rigify_Humanoid_DefaultFace_v0.6.1.json ├── legacy └── Rigify_Humanoid_DefaultFace_v0.6.1.json └── setup_helper ├── __init__.py ├── tf_face_mapping_helper.py ├── tf_hand_mapping_helper.py └── tf_pose_mapping_helper.py /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of 
what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | Always copy the log message from the system console if an error occurs. Do not create screenshots of error messages. If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. Windows] 28 | - Blender Version 29 | - BlendArMocap Version 30 | 31 | **Additional context** 32 | Add any other context about the problem here. 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "github-actions" 9 | directory: "/" 10 | schedule: 11 | interval: "monthly" 12 | -------------------------------------------------------------------------------- /.github/workflows/ghPages.yml: -------------------------------------------------------------------------------- 1 | # https://github.com/marketplace/actions/deploy-to-github-pages 2 | name: Build and Deploy 3 | 4 | on: 5 | push: 6 | branches: 7 | - main 8 | release: 9 | types: 10 | - published 11 | 12 | permissions: 13 | contents: write 14 | 15 | jobs: 16 | build-and-deploy: 17 | runs-on: ubuntu-latest 18 | concurrency: ci-${{ github.ref }} 19 | steps: 20 | - uses: actions/setup-python@v5 21 | with: 22 | python-version: '3.9' 23 | architecture: x64 24 | - name: Checkout 25 | uses: actions/checkout@v4 26 | - name: Install and Build 27 | run: | 28 | cd docs 29 | mkdir build 30 | pip install -r requirements.txt 31 | make html 32 | touch build/html/.nojekyll 33 | 34 | - name: Deploy 35 | uses: JamesIves/github-pages-deploy-action@v4 36 | with: 37 | folder: docs/build/html 38 | clean: true 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | #Only push Scripts, not blends 2 | *.blend 3 | *.blend1 4 | *.DS_Store 5 | 6 | #Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | pip-wheel-metadata/ 28 | share/python-wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | MANIFEST 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .nox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | *.py,cover 55 | .hypothesis/ 56 | .pytest_cache/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | *.log 64 | local_settings.py 65 | db.sqlite3 66 | db.sqlite3-journal 67 | 68 | # Flask stuff: 69 | instance/ 70 | .webassets-cache 71 | 72 | # Scrapy stuff: 73 | .scrapy 74 | 75 | # Sphinx documentation 76 | docs/_build/ 77 | 78 | # PyBuilder 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 96 | #Pipfile.lock 97 | 98 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 99 | __pypackages__/ 100 | 101 | # Celery stuff 102 | celerybeat-schedule 103 | celerybeat.pid 104 | 105 | # SageMath parsed files 106 | *.sage.py 107 | 108 | # Environments 109 | .env 110 | .venv 111 | env/ 112 | venv/ 113 | ENV/ 114 | env.bak/ 115 | venv.bak/ 116 | 117 | # Spyder project settings 118 | .spyderproject 119 | .spyproject 120 | 121 | # Rope project settings 122 | .ropeproject 123 | 124 | # mkdocs documentation 125 | /site 126 | 127 | # mypy 128 | .mypy_cache/ 129 | .dmypy.json 130 | dmypy.json 131 | 132 | # Pyre type checker 133 | .pyre/ 134 | /src/visualize/ 135 | 136 | /idea/ 137 | .idea/ 138 | /src/cgt_tests/data/ 139 | /src/test.py 140 | /BlendArMocap_release_160.zip 141 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "src/cgt_socket_ipc/BlendPyNet"] 2 | path = src/cgt_socket_ipc/BlendPyNet 3 | url = git@github.com:cgtinker/BlendPyNet.git 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BlendArMocap
2 | 3 | BlendArMocap is a tool to perform markerless tracking within Blender using Google’s [Mediapipe](https://google.github.io/mediapipe/). The main goal of the add-on is to efficiently transfer the generated detection results to rigs.<br>
4 | 5 | For more information, please refer to the [documentation](https://cgtinker.github.io/BlendArMocap/). 6 | 7 | # Discontinued 8 | I cannot actively maintain BlendArMocap anymore. I may accept PRs and would consider transferring ownership if someone plans to actively maintain it. 9 | 10 | ### Features 11 | - Detection of [Mediapipe](https://google.github.io/mediapipe/) features in a stream or video 12 | - Calculation of rotations for mediapipe data 13 | - Import of [Freemocap](https://freemocap.org) mediapipe session data 14 | - Transfer of tracking data to rigs and generation of new transfer configurations 15 | - currently officially supports the transfer to generated [Rigify rigs](https://docs.blender.org/manual/en/latest/addons/rigging/rigify/index.html) 16 | 17 | 18 | ### Mediapipe Detection 19 | 20 | Run Mediapipe within Blender to detect pose, hand, face or holistic features. 21 | BlendArMocap calculates rotation data based on the detection results at runtime to drive rigs.<br>
22 | 23 | **Caution:** Requires external dependencies which can be installed via the add-on preferences with elevated privileges. 24 | 25 | 26 | ### Freemocap import 27 | 28 | [Freemocap](https://freemocap.org) session data can be saved in a `session folder` which can then be imported using BlendArMocap. 29 | To import session data to Blender, set the path to the session directory and press the import button. 30 | 31 | 32 | ### Transfer 33 | 34 | Currently there's a preset configuration to transfer detection results to generated humanoid [Rigify rigs](https://docs.blender.org/manual/en/latest/addons/rigging/rigify/index.html). 35 | To transfer, just select the generated humanoid rig as transfer target and press the `Transfer` button. 36 | The transfer is based on mapping objects which you can modify. Modifications can be saved as custom configurations.<br>
37 | 38 | You'll find the mapping objects in the collections generated while tracking, such as `cgt_HANDS`. 39 | Mapping instructions are stored as object properties and can be modified in the `object data properties` panel (where the constraints live). 40 | Here's the concept of mapping objects: 41 | 42 | ```` 43 | mapping_object: object with instructions and constraints 44 | driver_object: generated driver based on instructions 45 | target_object: copies values from driver_object via constraints 46 | ```` 47 | 48 | If you happen to create a configuration to support another rig, feel free to send it to me for sharing via hello@cgtinker.com.<br>
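As a rough sketch (assuming the `cgt_Drivers` collection and the `cgt_id` object property created during tracking), you can list the mapping objects and their stored instructions from Blender's Python console:

````
import bpy

drivers = bpy.data.collections.get("cgt_Drivers")
if drivers is not None:
    for ob in drivers.all_objects:
        if ob.get("cgt_id") is None:
            continue  # only mapping objects carry the cgt_id property
        # print the object name and its custom (mapping) properties
        print(ob.name, {k: ob[k] for k in ob.keys() if not k.startswith("_")})
````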
49 | 50 | 51 | # License 52 | This program is free software: you can redistribute it and/or modify 53 | it under the terms of the GNU General Public License as published by 54 | the Free Software Foundation, either version 3 of the License, or 55 | (at your option) any later version. 56 | 57 | This program is distributed in the hope that it will be useful, 58 | but WITHOUT ANY WARRANTY; without even the implied warranty of 59 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 60 | GNU General Public License for more details. 61 | 62 | You should have received a copy of the GNU General Public License 63 | along with this program. If not, see <https://www.gnu.org/licenses/>. 64 | 65 | Copyright (C) Denys Hsu - cgtinker, cgtinker.com, hello@cgtinker.com 66 | 67 | 68 |<br>

69 | For tutorials regarding my tools, check out my [YouTube-Channel](https://www.youtube.com/user/MrSerAdos). 70 | If you want to support me, you can become a patron on [Patreon](https://www.patreon.com/cgtinker). 71 | 72 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) Denys Hsu, cgtinker, cgtinker.com, hello@cgtinker.com 3 | 4 | This program is free software: you can redistribute it and/or modify 5 | it under the terms of the GNU General Public License as published by 6 | the Free Software Foundation, either version 3 of the License, or 7 | (at your option) any later version. 8 | 9 | This program is distributed in the hope that it will be useful, 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | GNU General Public License for more details. 13 | 14 | You should have received a copy of the GNU General Public License 15 | along with this program. If not, see <https://www.gnu.org/licenses/>. 16 | ''' 17 | 18 | 19 | bl_info = { 20 | "name": "BlendArMocap", 21 | "description": "Mediapipe and Freemocap animation transfer implementation for Blender 3.0+.", 22 | "author": "cgtinker", 23 | "version": (1, 6, 1), 24 | "blender": (3, 0, 0), 25 | "location": "3D View > Tool", 26 | "doc_url": "https://cgtinker.github.io/BlendArMocap/", 27 | "tracker_url": "https://github.com/cgtinker/BlendArMocap/issues", 28 | "support": "COMMUNITY", 29 | "category": "Animation" 30 | } 31 | 32 | 33 | def reload_modules(): 34 | from .src import cgt_imports 35 | cgt_imports.manage_imports() 36 | 37 | 38 | def register(): 39 | from .src import cgt_registration 40 | cgt_registration.register() 41 | 42 | 43 | def unregister(): 44 | from .src import cgt_registration 45 | cgt_registration.unregister() 46 | 47 | 48 | if __name__ == '__main__': 49 | from src.cgt_core.cgt_utils import cgt_logging 50 | # cgt_logging.init('') 51 | register() 52 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | pids 2 | logs 3 | node_modules 4 | npm-debug.log 5 | coverage/ 6 | run 7 | dist 8 | .DS_Store 9 | .nyc_output 10 | .basement 11 | config.local.js 12 | basement_dist 13 | 14 | .env 15 | /build/* 16 | 17 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/docs/doctrees/environment.pickle -------------------------------------------------------------------------------- /docs/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/docs/doctrees/index.doctree -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx==5.3.0 2 | sphinx_rtd_theme 3 | sphinx-autoapi 4 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | 9 | project = 'BlendArMocap' 10 | copyright = '2023, cgtinker' 11 | author = 'cgtinker' 12 | release = '0.0.1' 13 | 14 | # -- General configuration --------------------------------------------------- 15 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 16 | 17 | extensions = [] 18 | # extensions = ['autoapi.extension'] 19 | # autoapi_dirs=['../../src/'] 20 | 21 | templates_path = ['_templates'] 22 | exclude_patterns = [] 23 | 24 | 25 | 26 | # -- Options for HTML output ------------------------------------------------- 27 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 28 | html_theme="sphinx_rtd_theme" 29 | #html_theme = 'alabaster' 30 | #html_static_path = ['_static'] 31 | -------------------------------------------------------------------------------- /docs/source/detection.rst: -------------------------------------------------------------------------------- 1 | Mediapipe Detection 2 | =================== 3 | 4 | `Google's Mediapipe 5 | <https://google.github.io/mediapipe/>`_ 6 | is used to detect features within a stream or a video file. 7 | The detection options are located in the ``3D Viewport > BlendAr > Mediapipe``. 8 | 9 | .. warning:: 10 | Running Mediapipe within Blender requires external dependencies. 11 | More about the :ref:`installation-label` of dependencies. 12 | 13 | Apple Users 14 | Blender has to be started using the terminal if you 15 | plan to use the webcam on macOS, as Blender's plist 16 | doesn't contain a camera permissions request. 17 | 18 | Settings 19 | -------- 20 | 21 | Type 22 | Movie 23 | Import movie files, preferably of type .mov. 24 | Reducing the dimensions before detecting may improve performance. 25 | File Path 26 | Set the filepath to the movie which should be imported. 27 | 28 | Webcam 29 | Use the webcam built into or connected to your computer. 30 | Webcam Device Slot 31 | Defaults to 0. If you have multiple cameras connected you might have to change the slot. 32 | 33 | Key Step 34 | The *Key Step* determines the frequency at which keyframes are set. 35 | This has slightly different effects depending on the used input type. 36 | 37 | Movie 38 | As every frame can get calculated, you can use a *Key Step* of 1. 39 | Using a *Key Step* of one means there is no smoothing at all. 40 | The higher the *Key Step*, the more smoothing gets applied. 41 | 42 | 43 | Webcam 44 | While the webcam is running, frames are grabbed at runtime and the detection is performed on them. 45 | In this case, the *Key Step* determines the rate at which detected frames get keyframed. 46 | For starting out, I recommend trying a *Key Step* of 2-4 for Hand, Face and Pose detection. 47 | As Holistic detection is more performance-heavy, I recommend a *Key Step* of 6-8. 48 | The *Key Step* which suits your system best highly depends on your hardware. 49 | 50 | Target 51 | Hands 52 | Perform Hand detection. 53 | Face 54 | Perform Face detection. 55 | Pose 56 | Perform Pose detection. 57 | Holistic 58 | Perform Hand, Face and Pose detection. 59 | 60 | Start Detection 61 | Button to start the detection. When using movie files as input, it may be called `Detect Movie`.
62 | When using stream detection, it may be called `Start Detection`. 63 | 64 | Stop Detection 65 | Once the detection has been started, you can stop the detection 66 | by either pressing the button again or pressing :kbd:`Q`. 67 | 68 | 69 | 70 | Advanced 71 | -------- 72 | 73 | Model Complexity 74 | You can choose between a model complexity of [0, 1]. 75 | The lower the complexity, the faster the detection. 76 | The higher the complexity, the better the detection results. 77 | 78 | Min Detection Confidence 79 | Minimum confidence value ([0.0, 1.0]) from the detection model 80 | for the detection to be considered successful. Defaults to 0.5. 81 | -------------------------------------------------------------------------------- /docs/source/freemocap.rst: -------------------------------------------------------------------------------- 1 | Freemocap 2 | ========= 3 | 4 | The Free Motion Capture Project `Freemocap 5 | <https://freemocap.org>`_ 6 | aims to provide research-grade 7 | markerless motion capture software to everyone for free. 8 | 9 | It features multicam detection using Mediapipe and is currently in alpha. 10 | You can import generated **session data** using the freemocap data import options. 11 | 12 | 13 | Path 14 | Select the path to the *freemocap session folder* directory. 15 | 16 | Load Session folder 17 | Loads the tracking results. 18 | 19 | Load synchronized videos 20 | Load session videos as planes. 21 | 22 | Quickload 23 | Disable viewport updates while importing. 24 | While this improves the import speed drastically, it will freeze Blender. 25 | 26 | Raw 27 | Import the raw data, which may not be used for transfer. 28 | Mainly for scientific purposes. -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | BlendArMocap's Documentation 2 | ============================ 3 | 4 | BlendArMocap is a tool to perform markerless tracking within Blender using Google's Mediapipe. 5 | The main goal of the add-on is to efficiently transfer the generated detection results to rigs. 6 | 7 | While rigify rigs are supported by default, BlendArMocap allows creating custom configurations to support various rigs. 8 | 9 | .. warning:: 10 | The documentation is currently in active development. 11 | 12 | Contents 13 | -------- 14 | 15 | .. toctree:: 16 | installation/index 17 | detection 18 | transfer/index 19 | freemocap 20 | 21 | 22 | -------------------------------------------------------------------------------- /docs/source/installation/index.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | 5 | .. toctree:: 6 | installation 7 | manual_dependency_installation -------------------------------------------------------------------------------- /docs/source/installation/installation.rst: -------------------------------------------------------------------------------- 1 | .. _installation-label: 2 | 3 | Installation 4 | ============ 5 | 6 | Download the most recent official release at `Github 7 | <https://github.com/cgtinker/BlendArMocap/releases>`_. 8 | Don't unzip the downloaded zip. 9 | 10 | To install the add-on, navigate to ``Edit > Preferences > Add-Ons``. 11 | Then search for *BlendArMocap* using the search bar. 12 | Finally, activate the add-on by pressing the checkbox. 13 | 14 | .. warning:: 15 | Running *Google's Mediapipe* within Blender requires external dependencies.
16 | Preferably install them via the add-on's preferences with elevated privileges. 17 | 18 | 19 | Starting Blender with elevated permissions 20 | ------------------------------------------ 21 | 22 | Installing dependencies may require elevated privileges. 23 | 24 | Windows 25 | Right-click the Blender application and choose: "Run as administrator" 26 | 27 | Mac 28 | #. Start Blender as admin by using the terminal: 29 | #. Navigate to Blender: ``cd /Applications/Blender/Contents/MacOS`` 30 | #. Run Blender as admin: ``sudo ./Blender`` 31 | 32 | The Terminal request may be blocked even with 33 | elevated privileges. If that happens, navigate to 34 | ``System Settings > Privacy and Security > Full Disk Access`` 35 | then activate your Terminal. 36 | After doing so, the sudo prefix shouldn't be required anymore. 37 | 38 | Linux 39 | #. Start Blender as admin using the terminal: 40 | #. Navigate to Blender: ``cd /usr/bin`` 41 | #. Run Blender as admin: ``sudo ./blender`` 42 | 43 | 44 | Install External Dependencies 45 | ----------------------------- 46 | 47 | To run mediapipe, you need to install the required dependencies 48 | opencv and mediapipe via the **add-on's preferences**. 49 | An internet connection is required to install the packages. 50 | It's recommended to disable VPNs during the 51 | installation process. Blender may need to be started with elevated 52 | privileges during the installation process. 53 | 54 | Default 55 | Install dependencies within Blender's site-packages. 56 | 57 | Local (Linux) 58 | Install dependencies to the local user and link local user site 59 | packages to Blender. 60 | 61 | 62 | -------------------------------------------------------------------------------- /docs/source/installation/manual_dependency_installation.rst: -------------------------------------------------------------------------------- 1 | Manual Installation 2 | =================== 3 | 4 | In some cases, mainly due to system-related issues, 5 | it may be required to install dependencies manually. 6 | 7 | .. note:: 8 | Before attempting to install dependencies manually, ensure 9 | that you had the required permissions when trying to install 10 | the dependencies to run mediapipe. 11 | 12 | Gather information 13 | ------------------ 14 | 15 | Fire up Blender, navigate to the ``Scripting`` workspace and 16 | gather some information in the ``interactive python console``. 17 | 18 | 1. Find Blender's Python executable 19 | 20 | .. code-block:: python 21 | 22 | import sys 23 | sys.executable 24 | 25 | 2. Find Blender's site-packages 26 | 27 | .. code-block:: python 28 | 29 | import site 30 | site.getusersitepackages() 31 | 32 | Install via system console 33 | -------------------------- 34 | 35 | Next up, start the terminal, or PowerShell on Windows, to install 36 | the dependencies manually. 37 | 38 | Windows 39 | .. code-block:: bash 40 | 41 | `blenders python executable` -m ensurepip 42 | `blenders python executable` -m pip install mediapipe 43 | `blenders python executable` -m pip install opencv-contrib-python==4.7.0.68 44 | `blenders python executable` -m pip install protobuf==3.20.3 45 | 46 | Apple-Silicon 47 | .. code-block:: bash 48 | 49 | `blenders python executable` -m ensurepip 50 | `blenders python executable` -m pip install mediapipe-silicon 51 | `blenders python executable` -m pip install opencv-contrib-python==4.7.0.68 52 | 53 | Apple-Legacy 54 | ..
code-block:: bash 55 | 56 | `blenders python executable` -m ensurepip 57 | `blenders python executable` -m pip install mediapipe==0.8.10 58 | `blenders python executable` -m pip install opencv-contrib-python==4.5.5.64 59 | `blenders python executable` -m pip install protobuf==3.20.3 60 | 61 | Linux 62 | Make sure to activate the ``local`` flag in the add-on preferences, 63 | as Blender on Linux usually doesn't have its own site-packages. 64 | 65 | .. code-block:: bash 66 | 67 | `blenders python executable` -m ensurepip 68 | `blenders python executable` -m pip install mediapipe==0.8.10 69 | `blenders python executable` -m pip install opencv-contrib-python==4.2.0.34 70 | `blenders python executable` -m pip install protobuf==3.20.3 71 | 72 | In some cases the PyPI OpenCV package causes issues on Linux. 73 | Consider installing OpenCV via your local package manager. 74 | -------------------------------------------------------------------------------- /docs/source/transfer/create_config.rst: -------------------------------------------------------------------------------- 1 | Create and customize Configurations 2 | =================================== 3 | 4 | Setup New Configs 5 | ----------------- 6 | 7 | The setup process is quite unique, so let's break it down into steps. 8 | Objects which have been generated during detection contain an ID to display 9 | additional properties in the *object properties panel*. 10 | Those object properties can be modified to either change the current 11 | mapping configuration or even create completely new configurations! 12 | 13 | The setup options are located in the ``Object constraint properties > BlendArMocap``. 14 | 15 | .. note:: 16 | - **mapping object** - an object with instructions and constraints 17 | - **driver object** - a generated driver object based on instructions 18 | - **target object** - copies values from driver_object using constraints 19 | 20 | 21 | 22 | Concept 23 | ------- 24 | 25 | Target 26 | Target bone or object which should be driven by constraints. 27 | - Target Type *[Armature, Mesh, Any]*: Apply constraints to target (copies constraints from this object) 28 | - Sub Target *[Object, Bone]*: Target may be a Bone. 29 | 30 | Drivers 31 | There are three core concepts to set up drivers. 32 | In a nutshell: *[Remap Drivers, IK Chain Drivers, Remap by Distance Drivers]*. 33 | 34 | 35 | Remap Drivers 36 | Object values (especially rotations) may get remapped using a remap driver. 37 | To do so, navigate to *Value Mapping* and select the properties you want to remap - for example *rotation x, y, z*. 38 | *From min, from max, to min, to max* are used to define slopes; those are similar to the *map range converter node* in the shader editor. 39 | Therefore, input the min and max values *in radians* from the selected object and the *to min* and *to max* values *in radians* you want to have as a result (pi = 180°). 40 | If necessary, you can also use the *factor* and *offset*; however, slopes can deliver better control once you get the hang of them. 41 | You can do so for each axis separately if necessary; press the *+* icon to do so. 42 | To get some information about the currently selected object, navigate to *Tools* and press the *Log Object Info* button, you'll find min and max values from the object in the info panel. 43 | 44 | ..
note:: 45 | Remapping object values driver properties 46 | 47 | without remapping object 48 | slope = (to_max - to_min) / (from_max - from_min) 49 | offset = to_min - slope * from_min 50 | f(x) = (slope * property_value + offset) * factor + offset 51 | 52 | with remapping object 53 | slope = (to_max * remapping_value - to_min * remapping_value) / (from_max - from_min) 54 | offset = to_min - slope * from_min * remapping_value 55 | f(x) = (slope * property_value + offset) * factor + offset 56 | 57 | IK Chain Drivers 58 | The idea of IK chain drivers is basically to construct an IK chain in reverse order. 59 | The end of the IK chain neither has a parent nor gets remapped - it's mainly a reference. 60 | The next chain link has the chain end as parent, and gets remapped by the length of the bone which separates it from the parent. 61 | Repeat this until you are at the IK start. As an example: 62 | 63 | .. note:: 64 | objects: shoulder_obj -> elbow_obj -> hand_obj 65 | bones: shoulder_bone -> elbow_bone -> hand_bone 66 | 67 | shoulder_obj(target=shoulder_bone, parent=None, remap_bone=None) 68 | elbow_obj(target=elbow_bone, parent=shoulder_obj, remap_bone=shoulder_bone.length) 69 | hand_obj(target=hand_bone, parent=elbow_obj, remap_bone=elbow_bone.length) 70 | 71 | Recursively applies: 72 | dist = distance between parent and obj 73 | f(x) = (dist / remap_bone_length) * (obj_location - previous_chain_obj_location) 74 | 75 | Check out the rigify rig implementation of the arms as an example setup. 76 | 77 | 78 | Remap Drivers by Distance 79 | When using remap drivers by distance, we aren't using any of the object values. 80 | In this case, the distance between two objects gets used as the driver value, which then gets remapped similarly to the remap driver. 81 | The single difference is that the offset gets multiplied by the remapping value, which basically allows offsetting in % (which usually isn't wanted for rotations). 82 | However, this time it makes a lot of sense to remap the value by a bone of the target armature, as we are working with position and not rotation data - this makes remapping the values a lot easier. 83 | Again, to get some information about the currently selected object, navigate to *Tools* and press the *Log Object Info* button, you'll find min and max values from the object in the info panel. 84 | 85 | .. note:: 86 | slope = (to_max * remapping_value - to_min * remapping_value) / (from_max - from_min) 87 | offset = to_min * remapping_value - slope * from_min 88 | f(x) = (slope * property_value + offset) * factor + offset * remapping_value 89 | 90 | Constraints 91 | Finally, add constraints to the object - the constraints will get applied to the target when transferring. 92 | As mentioned in the beginning, the target object copies values from the driver using constraints. 93 | You may use any constraint to do so and modify the constraint properties to your liking to adjust transfer results. 94 | 95 | 96 | Tools 97 | ----- 98 | 99 | Transfer Selection 100 | Run the transfer only for selected objects. When transferring chains, the entire chain has to be selected, not just a link. 101 | 102 | Smooth Animation Data 103 | Run Blender's internal smooth F-Curves operator for selected objects. 104 | 105 | Log Object Info 106 | Log min, max values and information about the selected object to the *info panel*. 107 | 108 | 109 | 110 | I/O Transfer Configuration 111 | -------------------------- 112 | 113 | Transfer configurations can be imported and exported.
114 | If you create a new configuration and want to share it with the community, let me know via hello@cgtinker.com. 115 | 116 | -------------------------------------------------------------------------------- /docs/source/transfer/index.rst: -------------------------------------------------------------------------------- 1 | Transfer 2 | ======== 3 | 4 | 5 | .. toctree:: 6 | transfer 7 | create_config -------------------------------------------------------------------------------- /docs/source/transfer/transfer.rst: -------------------------------------------------------------------------------- 1 | Basics 2 | ====== 3 | 4 | Mapping instructions are stored as object properties and can be modified in the `object constraints` panel. 5 | *Only valid for objects containing a specific cgt_id property.* 6 | 7 | 8 | Transfer Motion 9 | --------------- 10 | 11 | The transfer options are located in the ``3D Viewport > BlendAr > Mediapipe``. 12 | 13 | Armature 14 | Select the target armature (e.g. a generated rigify rig for Rigify_Humanoid_DefaultFace_v0.6.1). 15 | 16 | Driver Collection 17 | Select a collection containing driver objects - *cgt_Drivers* to transfer all. 18 | In some cases you might want to just transfer pose, hands or face results - in this case select a specific collection to do so. 19 | 20 | Transfer Type 21 | Select a transfer configuration. 22 | 23 | Load 24 | Loads the currently selected type to the transfer objects. 25 | 26 | Save Config 27 | Save a new config based on the objects in the *cgt_Drivers* collection. 28 | 29 | Transfer Animation 30 | Load the currently selected config for objects in the selected collection and transfer the data. 31 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py 2 | attrs>=19.1.0 3 | matplotlib 4 | opencv-contrib-python>=4.5.5.64 5 | protobuf==3.20.2 6 | mediapipe>=0.8.10 7 | -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | name="BlendArMocap" 4 | 5 | branch=$(git symbolic-ref --short HEAD) 6 | version_str=$(grep -r '"version":' __init__.py | tr -d '[a-z,_."(): ]') 7 | 8 | prefix=("_"$branch"_"$version_str) 9 | dirpath=$(pwd) 10 | 11 | zipcmd() { 12 | zip -r $name/$name$prefix.zip $name \ 13 | -x "__MACOSX/*" -x "*.DS_Store" \ 14 | -x "*venv*" -x "*.idea*" -x "*.git*" \ 15 | -x "*__pycache__*" -x "*docs*" -x "*setup*" \ 16 | -x "*swp" -x "*test*" 17 | } 18 | 19 | cd .. 20 | zipcmd 21 | cd $name 22 | -------------------------------------------------------------------------------- /src/README.md: -------------------------------------------------------------------------------- 1 | # Modular structure 2 | 3 | 4 | BlendArMocap is split into separate modules: 5 | - mediapipe 6 | - freemocap 7 | - transfer 8 | - socket (to be implemented)<br>
9 | 10 | The modules have their own registration functions which get called by `cgt_registration`. 11 | To add new tabs in the BlendAR UI-Panel, use the `parent_id: 'UI_PT_CGT_Panel'`. 12 | In case you need to add new entries to the add-on's preferences, add the 13 | draw function to `cgt_core.cgt_interface.cgt_core_panel.addon_prefs`.<br>
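For instance, a module might attach a new tab like this (a minimal sketch - the panel class, labels and space settings are hypothetical, only the `parent_id` comes from the repo):

````
import bpy

class UI_PT_CGT_Example(bpy.types.Panel):
    bl_label = "Example"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_parent_id = "UI_PT_CGT_Panel"  # attaches the tab to the BlendAR panel

    def draw(self, context):
        self.layout.label(text="Hello from a module tab")

def register():
    bpy.utils.register_class(UI_PT_CGT_Example)

def unregister():
    bpy.utils.unregister_class(UI_PT_CGT_Example)
````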
14 | 15 | While all modules may access the core, try to keep modules as standalone as possible. 16 | You may access other modules by using their public operators. Make sure not to weave code around other modules. -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/__init__.py -------------------------------------------------------------------------------- /src/cgt_core/README.md: -------------------------------------------------------------------------------- 1 | # Core - Node Chains and Utils... 2 | 3 | All registered modules may require the core to run. 4 | When modifying the core, all modules may require testing.<br>
5 | While many core functions are just endpoints, the core introduces a node chain concept. 6 | 7 | ### Node and Node Chains 8 | On update, a node takes and returns `data: Any, frame: int`. 9 | The data may change shape during the process.<br>
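A minimal sketch of that contract (illustrative names - see `cgt_patterns.cgt_nodes` for the actual classes):

`````
from typing import Any, List, Tuple

class Node:
    def update(self, data: Any, frame: int) -> Tuple[Any, int]:
        return data, frame

class NodeChain(Node):
    def __init__(self, *nodes: Node):
        self.nodes: List[Node] = list(nodes)

    def update(self, data: Any, frame: int) -> Tuple[Any, int]:
        # pipe the data through every node; its shape may change along the way
        for node in self.nodes:
            data, frame = node.update(data, frame)
        return data, frame

# e.g. NodeChain(input_node, calculator_node, output_node).update(raw_data, 1)
`````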
11 | Benefits of the chain concept: 12 | - composition of nodes instead of inheritance 13 | - get information about every node in the chain or the chain itself by (slightly) modifying `cgt_patterns.cgt_nodes`. 14 | - can just use a node and ignore the chain 15 | 16 | ````` 17 | Sample Setup: 18 | InputNode -> CalculatorNode -> OutputNode 19 | 20 | Sample Node Chain Group: 21 | mp_holistic_detector 22 | -> Face Node Chain 23 | -> Pose Node Chain 24 | -> Hand Node Chain 25 | ````` 26 | 27 | Check `cgt_patterns.cgt_nodes` for more information 28 | or `cgt_mp_detection_operator` for implementation. 29 | 30 | Here's a little overview: 31 | - **cgt_bpy** contains tools to modify, access, get and set data within Blender 32 | - **cgt_interface** includes base panels which other modules are getting attached to 33 | - **cgt_patterns** contains node patterns 34 | - **cgt_calculator_nodes** to calculate rotations for mediapipe output data 35 | - **cgt_output_nodes** to output processed mediapipe data 36 | - **cgt_utils** features some useful tools (timers, json) -------------------------------------------------------------------------------- /src/cgt_core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_core/__init__.py -------------------------------------------------------------------------------- /src/cgt_core/cgt_bpy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_core/cgt_bpy/__init__.py -------------------------------------------------------------------------------- /src/cgt_core/cgt_bpy/cgt_bpy_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import logging 3 | from typing import Optional, List 4 | import mathutils 5 | import bpy 6 | 7 | 8 | def add_empties(data: dict, size: float, prefix: str = "", suffix: str = "") -> List[bpy.types.Object]: 9 | return [add_empty(size=size, name=suffix + value + prefix) for key, value in data.items()] 10 | 11 | 12 | def add_empty(size, name, display='ARROWS') -> bpy.types.Object: 13 | ob = get_object_by_name(name) 14 | if ob is not None: 15 | return ob 16 | 17 | obj = bpy.data.objects.new(name, None) 18 | bpy.context.scene.collection.objects.link(obj) 19 | obj.empty_display_size = size 20 | obj.empty_display_type = display 21 | return obj 22 | 23 | 24 | def get_object_by_name(name) -> Optional[bpy.types.Object]: 25 | if name in bpy.data.objects: 26 | return bpy.data.objects[name] 27 | return None 28 | 29 | 30 | def purge_orphan_data(): 31 | # remove all orphan data blocks 32 | for block in bpy.data.meshes: 33 | if block.users == 0: 34 | bpy.data.meshes.remove(block) 35 | 36 | # remove all orphan armatures 37 | for armature in bpy.data.armatures: 38 | if armature.users == 0: 39 | bpy.data.armatures.remove(armature) 40 | 41 | 42 | def get_pbone_worldspace( 43 | pose_bone: bpy.types.PoseBone, 44 | rig: bpy.types.Object) -> mathutils.Matrix: 45 | """ Gets the world space matrix of a pose bone.
""" 46 | 47 | world_space = rig.convert_space( 48 | pose_bone=pose_bone, 49 | matrix=pose_bone.matrix, 50 | from_space='POSE', 51 | to_space='WORLD' 52 | ) 53 | 54 | return world_space 55 | 56 | 57 | def set_pbone_worldspace( 58 | pose_bone: bpy.types.PoseBone, 59 | rig: bpy.types.Object, position: mathutils.Vector) -> None: 60 | """ Sets a pose bone to target world space position. """ 61 | 62 | world_space = get_pbone_worldspace(pose_bone, rig) 63 | world_space.translation = position 64 | pose_bone.matrix = rig.convert_space(pose_bone=pose_bone, 65 | matrix=world_space, 66 | from_space='WORLD', 67 | to_space='POSE') 68 | 69 | 70 | def set_mode(mode: str = None) -> bool: 71 | """ MODES: 'EDIT', 'OBJECT', 'POSE' """ 72 | if mode is None: 73 | raise KeyError 74 | 75 | if bpy.context.mode == mode: 76 | return True 77 | try: 78 | bpy.ops.object.mode_set(mode=mode, toggle=False) 79 | return True 80 | 81 | except RuntimeError: 82 | logging.error("RuntimeError: Operator bpy.ops.object.mode_set.poll() Context missing active object") 83 | return False 84 | 85 | 86 | def user_pref(): 87 | return bpy.context.scene.m_cgtinker_mediapipe 88 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_bpy/cgt_collection.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Optional, List 3 | import bpy 4 | import logging 5 | 6 | 7 | def set_viewport_visibility(collection_name: str, active: bool) -> None: 8 | """ Sets the visibility of a collection. """ 9 | if collection_exists(collection_name): 10 | collection = bpy.data.collections.get(collection_name) 11 | collection.hide_viewport = active 12 | 13 | 14 | def collection_exists(collection_name: str) -> bool: 15 | """ Returns if the collection exists. """ 16 | if bpy.data.collections.get(collection_name) is not None: 17 | return True 18 | return False 19 | 20 | 21 | def get_collection(collection_name: str) -> bpy.types.Collection: 22 | return bpy.data.get(collection_name) 23 | 24 | 25 | def create_collection(collection_name: str, parent_collection: Optional[str], link: bool = True) -> bool: 26 | """ Creates a collection which may is child of a parent. """ 27 | if collection_exists(collection_name): 28 | return False 29 | 30 | collection = bpy.data.collections.new(collection_name) 31 | if link and parent_collection is None: 32 | bpy.context.scene.collection.children.link(collection) 33 | else: 34 | create_collection(parent_collection, None, True) 35 | parent = bpy.data.collections.get(parent_collection) 36 | parent.children.link(collection) 37 | return True 38 | 39 | 40 | def remove_collection(collection_name: str, remove_objects: bool = True) -> None: 41 | """ Removes a collection and the objects it contains. """ 42 | if not collection_exists(collection_name): 43 | return 44 | 45 | collection = bpy.data.collections.get(collection_name) 46 | obs = [] 47 | if remove_objects: 48 | obs = [o for o in collection.cgt_bpy_utils if o.users == 1] 49 | while len(obs) > 0: 50 | bpy.data.objects.remove(obs.pop()) 51 | 52 | bpy.data.collections.remove(collection) 53 | 54 | 55 | def add_list_to_collection( 56 | collection_name: str, objects: List[bpy.types.Object], 57 | parent_collection: Optional[str] = None, link: bool = True) -> None: 58 | """ Adds a list of objects to a collection. 59 | Creates a new collection if it doesn't exist. 
""" 60 | if not collection_exists(collection_name): 61 | create_collection(collection_name, parent_collection, link) 62 | 63 | for obj in objects: 64 | _obj_to_collection(collection_name, obj) 65 | 66 | 67 | def add_object_to_collection( 68 | collection_name: str, obj: bpy.types.Object, 69 | parent_collection=None, link=True) -> bool: 70 | """ Adds an Object to a collection, creates a new collection if it doesn't exist. """ 71 | if not collection_exists(collection_name): 72 | create_collection(collection_name, parent_collection, link) 73 | 74 | return _obj_to_collection(collection_name, obj) 75 | 76 | 77 | def _obj_to_collection(collection_name: str, obj: bpy.types.Object, from_collection=None) -> bool: 78 | """ Internal: Links object to target collection. """ 79 | for col in obj.users_collection: 80 | if col.name == collection_name: 81 | continue 82 | col.objects.unlink(obj) 83 | collection = bpy.data.collections.get(collection_name) 84 | collection.objects.link(obj) 85 | return True 86 | 87 | 88 | def get_child_collections(col_name: str): 89 | """ Returns array of child collection names or parent name if no children found. """ 90 | # attempt to get child collections 91 | if not collection_exists(col_name): 92 | return [col_name] 93 | 94 | collection = bpy.data.collections.get(col_name) 95 | if collection.children > 0: 96 | return [col.name for col in collection.children] 97 | 98 | return [col_name] 99 | 100 | 101 | def get_objects_from_collection(col_name): 102 | """ Returns objects from collection. """ 103 | if collection_exists(col_name): 104 | col = bpy.data.collections[col_name] 105 | return [ob for ob in col.all_objects] 106 | else: 107 | return None 108 | 109 | 110 | def move_list_to_collection(to_collection: str, objects: List[bpy.types.Object], from_collection: str = None) -> None: 111 | """ Move list of elements from, to a collection. """ 112 | assert from_collection is not None 113 | for ob in objects: 114 | _obj_to_collection(to_collection, ob, from_collection) 115 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_bpy/cgt_constraints.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import logging 3 | import bpy 4 | from typing import Optional 5 | 6 | 7 | constraints = ['CAMERA_SOLVER', 'FOLLOW_TRACK', 'OBJECT_SOLVER', 'COPY_LOCATION', 'COPY_LOCATION_OFFSET', 8 | 'COPY_LOCATION_WORLD', 'COPY_LOCATION_WORLD_OFFSET', 'COPY_ROTATION', 'COPY_ROTATION_WORLD', 9 | 'COPY_SCALE', 'COPY_TRANSFORMS', 'LIMIT_DISTANCE', 'LIMIT_LOCATION', 'LIMIT_ROTATION', 'LIMIT_SCALE', 10 | 'MAINTAIN_VOLUME', 'TRANSFORM', 'TRANSFORM_CACHE', 'CLAMP_TO', 'DAMPED_TRACK', 'IK', 'LOCKED_TRACK', 11 | 'SPLINE_IK', 'STRETCH_TO', 'TRACK_TO', 'ACTION', 'ARMATURE', 'CHILD_OF', 'FLOOR', 'FOLLOW_PATH', 'PIVOT', 12 | 'SHRINKWRAP'] 13 | 14 | 15 | def check_constraint(obj, **kwargs) -> bool: 16 | """ Determine if a constraint is already active on an object or 17 | if it should be added. Takes every kwarg into account. """ 18 | 19 | assert 'constraint' in kwargs 20 | constraint_name = kwargs.pop('constraint') 21 | assert constraint_name in constraints 22 | 23 | def compare_kwargs(constraint, **kwargs) -> bool: 24 | # Compare keyword args of the target constraint to the existing constraint. 25 | # Returns False if any value doesn't match. 
26 | for k, v in kwargs.items(): 27 | try: 28 | attr_val = getattr(constraint, k) 29 | if attr_val != v: 30 | return False 31 | except AttributeError: 32 | logging.warning(f"Attribute Error: {obj} has no attribute {k}: {v}") 33 | return True 34 | 35 | for obj_constraint in obj.constraints: 36 | cur_constraint = obj_constraint.name.upper().replace(' ', '_') 37 | # Check only if constraint types match 38 | if cur_constraint == constraint_name: 39 | if compare_kwargs(obj_constraint, **kwargs): 40 | return True 41 | return False 42 | 43 | 44 | def set_constraint(obj, **kwargs) -> Optional[bpy.types.Constraint]: 45 | """ Adds a constraint to the target object if the object 46 | doesn't contain a constraint with the same arguments added. 47 | The constraint props are passed as a dict, sample usage: 48 | props = {"constraint": "COPY_ROTATION", "target": bpy.data.objects["Sphere"], "use_x": True} 49 | set_constraint(obj, **props) 50 | """ 51 | 52 | if check_constraint(obj, **kwargs): 53 | logging.warning(f"Set Constraint Aborted: {obj.name} already has a constraint with matching keyword arguments.") 54 | return None 55 | 56 | constraint_name = kwargs.pop('constraint') 57 | 58 | def set_kwargs(constraint, **kwargs): 59 | # set constraint values 60 | for k, v in kwargs.items(): 61 | try: 62 | setattr(constraint, k, v) 63 | except AttributeError: 64 | logging.warning(f"Attribute Error: {obj} has no attribute {k}: {v}") 65 | 66 | con = obj.constraints.new(constraint_name) 67 | set_kwargs(con, **kwargs) 68 | return con 69 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_bpy/cgt_fc_actions.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from typing import List 3 | from collections import namedtuple 4 | 5 | 6 | class FCurveHelper: 7 | def __init__(self): 8 | """ Helper class to easily set and insert data to an object's f-curves. """ 9 | self.location, self.scale, self.rotation_euler = [None, None, None], [None, None, None], [None, None, None] 10 | self.rotation_quaternion = [None]*4 11 | 12 | def get_f_curves(self, data_path) -> List[bpy.types.FCurve]: 13 | if not hasattr(self, data_path): 14 | raise KeyError 15 | return getattr(self, data_path, []) 16 | 17 | def insert(self, data_path: str, frame: int, *args: float): 18 | """ data_path: String Enum [location, scale, rotation_euler, rotation_quaternion] """ 19 | f_curves = self.get_f_curves(data_path) 20 | for fc, sample in zip(f_curves, args): 21 | fc.keyframe_points.insert( 22 | frame=frame, value=sample, options={'FAST'}, keyframe_type='JITTER') 23 | 24 | def foreach_set(self, data_path: str, frames: List[int], *args: List[float]): 25 | """ Set multiple keyframes at once.
26 | data_path: String Enum [location, scale, rotation_euler, rotation_quaternion] 27 | frames: flat list of int 28 | args: flat lists of float """ 29 | f_curves = self.get_f_curves(data_path) 30 | 31 | for samples, fc in zip(args, f_curves): 32 | if hasattr(fc.keyframe_points, 'clear'): 33 | fc.keyframe_points.clear() 34 | fc.keyframe_points.add(count=len(frames)) 35 | fc.keyframe_points.foreach_set("co", [x for co in zip(frames, samples) for x in co]) 36 | fc.update() 37 | 38 | def update(self, data_path: str): 39 | if not hasattr(self, data_path): 40 | raise KeyError 41 | 42 | for fc in getattr(self, data_path, []): 43 | fc.update() 44 | 45 | def __str__(self): 46 | s = f'locations: {self.location}\n' 47 | s += f'scale: {self.scale}\n' 48 | s += f'rotation_euler: {self.rotation_euler}\n' 49 | s += f'rotation_quaternion: {self.rotation_quaternion}\n' 50 | return s 51 | 52 | 53 | def create_actions(objects, overwrite: bool = True): 54 | actions = [] 55 | 56 | # get or create actions for objs 57 | for ob in objects: 58 | action_name = ob.name 59 | ad = ob.animation_data_create() 60 | 61 | # remove old action from objects animation data (default) 62 | action_data = bpy.data.actions 63 | if action_name in action_data: 64 | if overwrite is True: 65 | action_data.remove(action_data[action_name]) 66 | else: 67 | actions.append(action_data[action_name]) 68 | ad.action = action_data[action_name] 69 | continue 70 | 71 | # create new action 72 | new_action = bpy.data.actions.new(action_name) 73 | actions.append(new_action) 74 | ad.action = new_action 75 | 76 | fc_helpers = [] 77 | 78 | # get or create fcurves for actions 79 | for action in actions: 80 | # add existing data_paths to helper obj 81 | helper = FCurveHelper() 82 | offset, last = 0, None 83 | 84 | fc_data_paths = set() 85 | for i, fc in action.fcurves.items(): 86 | if fc.group.name != last: 87 | fc_data_paths.add(fc.group.name) 88 | last = fc.group.name 89 | offset = i 90 | m_data_path = getattr(helper, fc.group.name) 91 | m_data_path[i - offset] = fc 92 | 93 | # add new fcurve 94 | for data_path, indexes in [('location', 3), ('rotation_euler', 3), ('scale', 3), ('rotation_quaternion', 4)]: 95 | if data_path in fc_data_paths: 96 | continue 97 | 98 | for i in range(0, indexes): 99 | try: 100 | fc = action.fcurves.new( 101 | data_path=data_path, 102 | index=i, 103 | action_group=data_path) 104 | 105 | m_data_path = getattr(helper, fc.group.name) 106 | m_data_path[i] = fc 107 | except RuntimeError: 108 | pass 109 | 110 | fc_helpers.append(helper) 111 | return fc_helpers 112 | 113 | 114 | def main(): 115 | helpers = create_actions(bpy.data.objects) 116 | helpers[0].insert('location', 1, *[3, 2, 1]) 117 | helpers[0].insert('location', 13, *[1, 2, 4]) 118 | helpers[0].insert('location', 21, *[10, 3, 0]) 119 | helpers[0].insert('location', 32, *[4, 1, 1]) 120 | helpers[0].update('location') 121 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_bpy/cgt_object_prop.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from dataclasses import dataclass 4 | from typing import Optional, Any 5 | 6 | import bpy 7 | from typing import Union 8 | 9 | 10 | # Get and set custom properties in Blender. 11 | # While it's been tested for Objects and bones it may be used with any property in blender. 
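# Example usage (sketch; the object name is hypothetical):
#   ob = bpy.data.objects["Cube"]
#   set_custom_property(ob, "cgt_id", 1.0, v_min=0.0, v_max=1.0)
#   get_custom_property(ob, "cgt_id")  # -> 1.0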
12 | 13 | def get_custom_property(target_obj: Union[bpy.types.Object, bpy.types.PoseBone], prop_name: str) -> Optional[Any]: 14 | """ Returns the custom property by name or None. """ 15 | return target_obj.get(prop_name) 16 | 17 | 18 | def set_custom_property( 19 | obj: Union[bpy.types.Object, bpy.types.PoseBone], prop_name: str, value: Any, 20 | v_min: Optional[float] = None, v_max: Optional[float] = None, 21 | use_soft: bool = False, overwrite: bool = False) -> bool: 22 | 23 | if get_custom_property(obj, prop_name) is not None and not overwrite: 24 | return False 25 | 26 | obj[prop_name] = value 27 | if "_RNA_UI" not in obj.keys(): 28 | obj["_RNA_UI"] = {} 29 | 30 | if use_soft: 31 | obj["_RNA_UI"].update({prop_name: {"use_soft_limits": use_soft, "soft_min": v_min, "soft_max": v_max}}) 32 | else: 33 | obj["_RNA_UI"].update({prop_name: {"min": v_min, "max": v_max}}) 34 | return True 35 | 36 | 37 | @dataclass(repr=True) 38 | class CustomProps: 39 | """ Custom property data for Blender Objects. """ 40 | name: str 41 | value: float 42 | v_min: float 43 | v_max: float 44 | use_soft: bool 45 | 46 | def __init__(self, name, value, v_min=None, v_max=None, use_soft=False): 47 | self.name = name 48 | self.value = value 49 | self.v_min = v_min 50 | self.v_max = v_max 51 | self.use_soft = use_soft -------------------------------------------------------------------------------- /src/cgt_core/cgt_calculators_nodes/README.md: -------------------------------------------------------------------------------- 1 | # Calculators for Mediapipe data 2 | 3 | Calculators specifically for mediapipe landmark list data. 4 | Every calculator has an update function which takes and returns a `Tuple[data: List[Any], frame: int]`.
5 | 6 | The calculators heavily rely on `cgt_utils.cgt_math` and Blender's internal `mathutils`. 7 | Many functions used from `mathutils` have an equivalent in `cgt_utils.cgt_math`, albeit with lower performance.<br>
8 | 9 | The calculators' main purpose is to create `Rotation Data` for remapping motions. 10 | Therefore, the input shape and output shape are _not_ consistent.<br>
11 | 12 | Input Data
13 | Landmark: `[idx: int, [x: float, y: float, z: float]]` 14 | 15 | Pose: `List[Landmarks], Optional[frame: int]`
16 | Face: `List[List[Landmarks]], Optional[frame: int]`
17 | Hands: `List[List[Landmarks], List[Landmarks]], Optional[frame: int]` (one landmark list per hand; see the sketch below)<br>
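To make the layout concrete, here is a hedged sketch of a hands input (all coordinate values are invented; `hand_input` is just an illustrative name):

```python
# One landmark: [index, [x, y, z]]
landmark = [0, [0.51, 0.48, -0.02]]

# Hands: one landmark list per hand (21 landmarks each), plus an optional frame
left_hand = [[i, [0.5, 0.5, 0.0]] for i in range(21)]
right_hand = [[i, [0.4, 0.5, 0.0]] for i in range(21)]
hand_input = [left_hand, right_hand]  # passed as update(hand_input, frame)
```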
18 | 19 | 20 | Output Data
21 | Location: `[idx: int, List[x: float, y: float, z: float]], Optional[frame: int]`
22 | Rotation: `[idx: int, Euler(x: float, y: float, z: float)], Optional[frame: int]`
23 | Scale: `[idx: int, List[x: float, y: float, z: float]], Optional[frame: int]`
24 | 25 | Pose: `List[Location, Rotation, Scale], Optional[frame: int]`<br>
26 | Face: `List[Location, Rotation, Scale], Optional[frame: int]`<br>
27 | Hands: `List[[Location, Location], [Rotation, Rotation], [Scale, Scale]], Optional[frame: int]` (see the sketch below)<br>
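The sketch below makes the update contract concrete: a minimal pass-through calculator (not part of the add-on; names are illustrative) that takes and returns a `Tuple[data, frame]` with `[location, rotation, scale]` chunks:

```python
from typing import Any, List, Tuple


class PassThroughCalculator:
    """ Sketch only: mirrors the CalculatorNode.update contract described above. """
    def update(self, data: List[Any], frame: int = -1) -> Tuple[List[Any], int]:
        if not data:
            # empty chunks signal "nothing to keyframe", as in the real calculators
            return [[], [], []], frame
        locations, rotations, scales = data, [], []  # a real calculator fills rotations
        return [locations, rotations, scales], frame
```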
28 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_calculators_nodes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_core/cgt_calculators_nodes/__init__.py -------------------------------------------------------------------------------- /src/cgt_core/cgt_calculators_nodes/calc_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import logging 3 | from mathutils import Euler 4 | from . import cgt_math 5 | 6 | 7 | class CustomData: 8 | idx = None 9 | loc = None 10 | rot = None 11 | sca = None 12 | 13 | def __init__(self, idx): 14 | self.idx = idx 15 | 16 | 17 | class ProcessorUtils: 18 | data = None 19 | # array for comparison, as noise is present every frame values should change 20 | frame = 0 21 | prev_rotation = {} 22 | prev_sum = [0.0, 0.0] 23 | 24 | def has_duplicated_results(self, data=None, detector_type=None, idx=0): 25 | """ Sums data array values and compares them each frame to avoid duplicated values 26 | in the timeline. This fixes the duplicated-frame issue mainly occurring on Windows. """ 27 | summed = np.sum([v[1] for v in data[:21]]) 28 | if summed == self.prev_sum[idx]: 29 | return True 30 | 31 | self.prev_sum[idx] = summed 32 | return False 33 | 34 | def quart_to_euler_combat(self, quart, idx, idx_offset=0, axis='XYZ'): 35 | """ Converts a quaternion to an euler rotation while comparing with the previous rotation. """ 36 | if len(self.prev_rotation) > 0: 37 | try: 38 | combat = self.prev_rotation[idx + idx_offset] 39 | return cgt_math.to_euler(quart, combat, axis) 40 | except KeyError: 41 | logging.debug(f"Invalid id to euler combat {idx}, {self.frame}") 42 | return cgt_math.to_euler(quart) 43 | else: 44 | return cgt_math.to_euler(quart) 45 | 46 | @staticmethod 47 | def offset_euler(euler, offset: list = None): 48 | """ Offsets an euler rotation by offset * pi radians per axis. """ 49 | if offset is None: 50 | return euler 51 | 52 | rotation = Euler(( 53 | euler[0] + np.pi * offset[0], 54 | euler[1] + np.pi * offset[1], 55 | euler[2] + np.pi * offset[2], 56 | )) 57 | return rotation 58 | 59 | def try_get_euler(self, quart_rotation, offset: list = None, prev_rot_idx: int = None): 60 | """ Gets an euler rotation from a quaternion, using the previously 61 | created rotation as combat to avoid discontinuity.
""" 62 | if prev_rot_idx is None: 63 | return cgt_math.to_euler(quart_rotation) 64 | 65 | # initialize prev rotation 66 | elif prev_rot_idx not in self.prev_rotation: 67 | self.prev_rotation[prev_rot_idx] = cgt_math.to_euler(quart_rotation) 68 | return self.prev_rotation[prev_rot_idx] 69 | 70 | # get euler with combat 71 | if offset is None: 72 | euler_rot = cgt_math.to_euler( 73 | quart_rotation, 74 | self.prev_rotation[prev_rot_idx] 75 | ) 76 | self.prev_rotation[prev_rot_idx] = euler_rot 77 | return self.prev_rotation[prev_rot_idx] 78 | 79 | else: 80 | tmp_offset = [-o for o in offset] 81 | euler_rot = cgt_math.to_euler( 82 | quart_rotation, 83 | self.offset_euler(self.prev_rotation[prev_rot_idx], tmp_offset) 84 | ) 85 | 86 | self.prev_rotation[prev_rot_idx] = self.offset_euler(euler_rot, offset) 87 | return self.prev_rotation[prev_rot_idx] 88 | 89 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_calculators_nodes/mp_calc_face_rot.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import numpy as np 3 | from mathutils import Euler 4 | 5 | from .calc_utils import ProcessorUtils, CustomData 6 | from . import cgt_math 7 | from ..cgt_patterns import cgt_nodes 8 | 9 | 10 | class FaceRotationCalculator(cgt_nodes.CalculatorNode, ProcessorUtils): 11 | # processed results 12 | def __init__(self): 13 | # increase shape to add specific driver data (maybe not required for the face) 14 | n = 468 15 | self.rotation_data = [] 16 | custom_data_arr = [CustomData(idx+n) for idx in range(0, 5)] 17 | self.pivot, self.chin_driver, self.left_mouth_corner, self.right_mouth_corner, *_ = custom_data_arr 18 | 19 | def update(self, data, frame=-1): 20 | """ Process the landmark detection results. """ 21 | """ Assign the data processed data to references. 
""" 22 | # remove nesting and set landmarks to custom origin 23 | try: 24 | if len(data[0][0]) == 0: 25 | return [[], [], []], frame 26 | except IndexError: 27 | logging.error(f"Index Error occurred: {data}, {frame} - check face nodes") 28 | return [[], [], []], frame 29 | 30 | self.data = data[0] 31 | if len(self.data) < 468: 32 | return [[], [], []], frame 33 | 34 | # increase the data size to hold custom data (check __init__) 35 | for i in range(4): 36 | self.data.append([468+i, [0., 0., 0.]]) 37 | self.custom_landmark_origin() 38 | 39 | # get distances and rotations to determine movements 40 | self.set_rotation_driver_data() 41 | if self.has_duplicated_results(self.data, "face"): 42 | return [[], [], []], frame 43 | return [self.data, self.rotation_data, []], frame 44 | 45 | def get_processed_data(self): 46 | """ Returns the processed data """ 47 | return self.data, self.rotation_data, [], self.frame, self.has_duplicated_results(self.data) 48 | 49 | def mouth_corners(self): 50 | """ Calculates the angle from the mouth center to the mouth corner """ 51 | # center point of mouth corners gets projected on vector from upper to lower lip 52 | corner_center = cgt_math.center_point(self.data[61][1], self.data[291][1]) 53 | projected_center = cgt_math.project_point_on_vector(corner_center, self.data[0][1], self.data[17][1]) 54 | # center point between upper and lower lip 55 | mouth_height_center = cgt_math.center_point(self.data[0][1], self.data[17][1]) 56 | 57 | # vectors from center points to mouth corners 58 | left_vec = cgt_math.to_vector(projected_center, self.data[61][1]) 59 | left_hv = cgt_math.to_vector(mouth_height_center, self.data[61][1]) 60 | right_vec = cgt_math.to_vector(projected_center, self.data[291][1]) 61 | right_hv = cgt_math.to_vector(mouth_height_center, self.data[291][1]) 62 | 63 | # angle between the vectors expecting users don't record upside down 64 | if mouth_height_center[2] > projected_center[2]: 65 | right_corner_angle = cgt_math.angle_between(left_vec, left_hv) 66 | left_corner_angle = cgt_math.angle_between(right_vec, right_hv) 67 | else: 68 | right_corner_angle = -cgt_math.angle_between(left_vec, left_hv) 69 | left_corner_angle = -cgt_math.angle_between(right_vec, right_hv) 70 | 71 | self.left_mouth_corner.loc = [0, 0, left_corner_angle] 72 | self.right_mouth_corner.loc = [0, 0, right_corner_angle] 73 | self.data.append([self.left_mouth_corner.idx, self.left_mouth_corner.loc]) 74 | self.data.append([self.right_mouth_corner.idx, self.right_mouth_corner.loc]) 75 | 76 | def set_rotation_driver_data(self): 77 | """ Get face and chin rotation """ 78 | self.face_mesh_rotation() 79 | try: 80 | head_rotation = self.try_get_euler(self.pivot.rot, prev_rot_idx=self.pivot.idx) 81 | # head_rotation = self.quart_to_euler_combat(self.pivot.rot, self.pivot.idx, axis='XZY') 82 | except AttributeError: 83 | logging.warning("Exchange method in cgt_maths for other targets than blender.") 84 | head_rotation = [0, 0, 0] 85 | 86 | self.chin_rotation() 87 | chin_rotation = self.chin_driver.rot 88 | # store rotation data 89 | self.rotation_data = [ 90 | [self.pivot.idx, head_rotation], 91 | [self.chin_driver.idx, chin_rotation], 92 | # [self._mouth_corner_driver.idx, self._mouth_corner_driver.rot] 93 | ] 94 | 95 | def chin_rotation(self): 96 | """ Calculate the chin rotation. 
""" 97 | # draw vector from point between eyes to mouth and chin 98 | nose_dir = cgt_math.to_vector(self.data[168][1], self.data[2][1]) 99 | chin_dir = cgt_math.to_vector(self.data[168][1], self.data[200][1]) 100 | 101 | # calculate the Z rotation 102 | nose_dir_z, chin_dir_z = cgt_math.null_axis([nose_dir, chin_dir], 'X') 103 | z_angle = cgt_math.angle_between(nose_dir_z, chin_dir_z) * 1.8 104 | 105 | # in the detection results is no X-rotation available 106 | # nose_dir_x, chin_dir_x = m_V.null_axis([nose_dir, chin_dir], 'Z') 107 | # chin_rotation = m_V.rotate_towards(self.data[152][1], self.data[6][1], 'Y', 'Z') 108 | 109 | # due to the base angle it's required to offset the rotation 110 | self.chin_driver.rot = Euler(((z_angle - 3.14159 * .07) * 1.175, 0, 0)) 111 | 112 | def face_mesh_rotation(self): 113 | """ Calculate face quaternion using 114 | points to approximate the transformation matrix. """ 115 | origin = np.array([0, 0, 0]) 116 | 117 | forward_point = cgt_math.center_point(np.array(self.data[1][1]), np.array(self.data[4][1])) # nose 118 | right_point = cgt_math.center_point(np.array(self.data[447][1]), np.array(self.data[366][1])) # temple.R 119 | down_point = np.array(self.data[152][1]) # chin 120 | 121 | # direction vectors from imaginary origin 122 | normal = cgt_math.normalize(cgt_math.to_vector(origin, forward_point)) 123 | tangent = cgt_math.normalize(cgt_math.to_vector(origin, right_point)) 124 | binormal = cgt_math.normalize(cgt_math.to_vector(origin, down_point)) 125 | 126 | # generate matrix to decompose it and access quaternion rotation 127 | try: 128 | matrix = cgt_math.generate_matrix(tangent, normal, binormal) 129 | loc, quart, scale = cgt_math.decompose_matrix(matrix) 130 | except TypeError: 131 | logging.warning("Exchange method in cgt_math for other targets than Blender.") 132 | quart = None 133 | self.pivot.rot = quart 134 | 135 | # region cgt_utils 136 | def custom_landmark_origin(self): 137 | """ Sets face mesh position to approximate origin """ 138 | self.data = [[idx, np.array([-lmrk[0], lmrk[2], -lmrk[1]])] for idx, lmrk in self.data[:468]] 139 | self.approximate_pivot_location() 140 | self.data = [[idx, np.array(lmrk) - np.array(self.pivot.loc)] for idx, lmrk in self.data[:468]] 141 | 142 | def approximate_pivot_location(self): 143 | """ Sets to approximate origin based on canonical face mesh geometry """ 144 | right = cgt_math.center_point(np.array(self.data[447][1]), np.array(self.data[366][1])) # temple.R 145 | left = cgt_math.center_point(np.array(self.data[137][1]), np.array(self.data[227][1])) # temple.L 146 | self.pivot.loc = cgt_math.center_point(right, left) # approximate origin 147 | # endregion 148 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_calculators_nodes/mp_calc_pose_rot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from mathutils import Euler 3 | from typing import List 4 | from . 
import calc_utils, cgt_math 5 | from ..cgt_patterns import cgt_nodes 6 | 7 | 8 | class PoseRotationCalculator(cgt_nodes.CalculatorNode, calc_utils.ProcessorUtils): 9 | shoulder_center = None 10 | hip_center = None 11 | 12 | rotation_data = [] 13 | scale_data = [] 14 | 15 | def __init__(self): 16 | self.shoulder_center = calc_utils.CustomData(34) 17 | self.pose_offset = calc_utils.CustomData(35) 18 | self.hip_center = calc_utils.CustomData(33) 19 | 20 | def update(self, data: List, frame: int=-1): 21 | """ Apply the processed data to references. """ 22 | if not data or len(data) < 33: 23 | return [[], [], []], frame 24 | self.data = data 25 | 26 | # increase the data size to hold custom data (check __init__) 27 | for i in range(2): 28 | self.data.append([33 + i, [0., 0., 0.]]) 29 | 30 | self.rotation_data = [] 31 | self.scale_data = [] 32 | 33 | self.prepare_landmarks() 34 | self.shoulder_hip_location() 35 | self.set_hip_as_origin() 36 | try: 37 | self.calculate_rotations() 38 | except AttributeError: 39 | pass 40 | 41 | if self.has_duplicated_results(self.data, "pose"): 42 | return [[], [], []], frame 43 | return [self.data, self.rotation_data, []], frame 44 | 45 | def calculate_rotations(self): 46 | """ Creates custom rotation data for driving the cgt_rig. """ 47 | self.shoulder_rotation() 48 | self.torso_rotation() 49 | self.limb_rotations() 50 | self.foot_rotation() 51 | 52 | def limb_rotations(self): 53 | """ Calculate ik chain rotations. """ 54 | 55 | def calc_chain_rotations(data): 56 | rotations = [] 57 | for i in range(1, len(data)): 58 | quart = cgt_math.rotate_towards(data[i][1], data[i - 1][1], '-Y', 'Z') 59 | euler = self.try_get_euler(quart, prev_rot_idx=data[i - 1][0]) 60 | rotations.append([data[i - 1][0], euler]) 61 | return rotations 62 | 63 | # calculate foot rotation separately 64 | left_leg = [self.data[23], self.data[25], self.data[27]] # , self.data[31]] 65 | right_leg = [self.data[24], self.data[26], self.data[28]] # , self.data[32]] 66 | left_arm = [self.data[11], self.data[13], self.data[15], self.data[19]] 67 | right_arm = [self.data[12], self.data[14], self.data[16], self.data[20]] 68 | 69 | for objs in [left_leg, right_leg, right_arm, left_arm]: 70 | self.rotation_data += calc_chain_rotations(objs) 71 | 72 | def foot_rotation(self): 73 | """ Calculate foot rotations. """ 74 | 75 | def rot_from_matrix(loc: List[np.array], tar_idx: int): 76 | tangent = cgt_math.normal_from_plane(loc) 77 | binormal = loc[0] - loc[2] 78 | normal = loc[1] - loc[2] 79 | 80 | vectors = [tangent, normal, binormal] 81 | matrix = cgt_math.generate_matrix(*[cgt_math.normalize(vec) for vec in vectors]) 82 | _, quart, _ = cgt_math.decompose_matrix(matrix) 83 | euler = self.try_get_euler(quart, None, tar_idx) 84 | return euler 85 | 86 | # left knee, ankle & foot_index 87 | left_locations = [self.data[25][1], self.data[27][1], self.data[31][1]] 88 | left_foot_rot = rot_from_matrix(left_locations, self.data[27][0]) 89 | self.rotation_data.append([self.data[27][0], left_foot_rot]) 90 | 91 | # right knee, ankle & foot_index 92 | right_locations = [self.data[26][1], self.data[28][1], self.data[32][1]] 93 | right_foot_rot = rot_from_matrix(right_locations, self.data[28][0]) 94 | self.rotation_data.append([self.data[28][0], right_foot_rot]) 95 | 96 | def torso_rotation(self): 97 | """ Calculating the torso rotation based on a plane which 98 | forms a triangle connecting hips and the shoulder center. 
""" 99 | # approximate perpendicular points to origin 100 | hip_center = cgt_math.center_point(np.array(self.data[23][1]), np.array(self.data[24][1])) 101 | right_hip = np.array(self.data[24][1]) 102 | shoulder_center = cgt_math.center_point(np.array(self.data[11][1]), np.array(self.data[12][1])) 103 | 104 | # generate triangle 105 | vertices = np.array( 106 | [self.data[23][1], 107 | self.data[24][1], 108 | shoulder_center]) 109 | connections = np.array([[0, 1, 2]]) 110 | 111 | # get normal from triangle 112 | normal = cgt_math.normal_from_plane([self.data[23][1], self.data[24][1], shoulder_center]) 113 | # normal, norm = cgt_math.create_normal_array(vertices, connections) 114 | 115 | # direction vectors from imaginary origin 116 | tangent = cgt_math.normalize(cgt_math.to_vector(hip_center, right_hip)) 117 | normal = cgt_math.normalize(normal) # [0]) 118 | binormal = cgt_math.normalize(cgt_math.to_vector(hip_center, shoulder_center)) 119 | 120 | # generate matrix to decompose it and access quaternion rotation 121 | matrix = cgt_math.generate_matrix(tangent, binormal, normal) 122 | loc, quart, scale = cgt_math.decompose_matrix(matrix) 123 | offset = [-.5, 0, 0] 124 | euler = self.try_get_euler(quart, offset, self.hip_center.idx) 125 | self.rotation_data.append([self.hip_center.idx, euler]) 126 | 127 | def shoulder_rotation(self): 128 | """ getting shoulder and hip rotation by rotating 129 | the center points to left / right shoulder and hip. """ 130 | # rotation from shoulder center to shoulder.R 131 | shoulder_center = cgt_math.center_point(self.data[11][1], self.data[12][1]) 132 | shoulder_rotation = cgt_math.rotate_towards(shoulder_center, self.data[12][1], 'Z') 133 | 134 | # rotation from hip center to hip.R 135 | hip_center = cgt_math.center_point(self.data[23][1], self.data[24][1]) 136 | hip_rotation = cgt_math.rotate_towards(hip_center, self.data[24][1], 'Z') 137 | 138 | # chance to offset result / rotation may not be keyframed 139 | offset = [0, 0, 0] 140 | shoulder_rot = self.try_get_euler(shoulder_rotation, offset, 7) 141 | hip_rot = self.try_get_euler(hip_rotation, offset, 8) 142 | 143 | # offset between hip & shoulder rot = real shoulder rot 144 | euler = Euler((shoulder_rot[0] - hip_rot[0], 145 | shoulder_rot[1] - hip_rot[1], 146 | shoulder_rot[2] - hip_rot[2])) 147 | self.rotation_data.append([self.shoulder_center.idx, euler]) 148 | 149 | def shoulder_hip_location(self): 150 | """ Appending custom location data for driving the cgt_rig. """ 151 | self.shoulder_center.loc = cgt_math.center_point(self.data[11][1], self.data[12][1]) 152 | self.data.append([self.shoulder_center.idx, self.shoulder_center.loc]) 153 | 154 | self.hip_center.loc = cgt_math.center_point(self.data[23][1], self.data[24][1]) 155 | self.data.append([self.hip_center.idx, self.hip_center.loc]) 156 | 157 | def prepare_landmarks(self): 158 | """ Prepare landmark orientation. 
""" 159 | self.data = [[idx, np.array([-landmark[0], landmark[2], -landmark[1]])] 160 | for idx, landmark in self.data] 161 | 162 | def set_hip_as_origin(self): 163 | self.pose_offset.loc = self.hip_center.loc 164 | self.data = [[idx, np.array([landmark[0] - self.hip_center.loc[0], 165 | landmark[1] - self.hip_center.loc[1], 166 | landmark[2] - self.hip_center.loc[2]])] 167 | for idx, landmark in self.data] 168 | self.data.append([self.pose_offset.idx, self.pose_offset.loc]) 169 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_core_chains.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import List 3 | 4 | from .cgt_calculators_nodes import mp_calc_face_rot, mp_calc_pose_rot, mp_calc_hand_rot 5 | from .cgt_output_nodes import mp_hand_out, mp_face_out, mp_pose_out 6 | from .cgt_patterns import cgt_nodes 7 | 8 | 9 | class FaceNodeChain(cgt_nodes.NodeChain): 10 | def __init__(self): 11 | super().__init__() 12 | self.append(mp_calc_face_rot.FaceRotationCalculator()) 13 | self.append(mp_face_out.MPFaceOutputNode()) 14 | 15 | 16 | class PoseNodeChain(cgt_nodes.NodeChain): 17 | def __init__(self): 18 | super().__init__() 19 | self.append(mp_calc_pose_rot.PoseRotationCalculator()) 20 | self.append(mp_pose_out.MPPoseOutputNode()) 21 | 22 | 23 | class HandNodeChain(cgt_nodes.NodeChain): 24 | def __init__(self): 25 | super().__init__() 26 | self.append(mp_calc_hand_rot.HandRotationCalculator()) 27 | self.append(mp_hand_out.CgtMPHandOutNode()) 28 | 29 | 30 | class HolisticNodeChainGroup(cgt_nodes.NodeChainGroup): 31 | nodes: List[cgt_nodes.NodeChain] 32 | 33 | def __init__(self): 34 | super().__init__() 35 | self.nodes.append(HandNodeChain()) 36 | self.nodes.append(FaceNodeChain()) 37 | self.nodes.append(PoseNodeChain()) 38 | 39 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_data/collections.json: -------------------------------------------------------------------------------- 1 | { 2 | "collections":{ 3 | "0":"drivers", 4 | "1":"hands", 5 | "2":"face", 6 | "3":"pose" 7 | } 8 | } -------------------------------------------------------------------------------- /src/cgt_core/cgt_data/face.json: -------------------------------------------------------------------------------- 1 | { 2 | "face":{ 3 | "0":"head", 4 | "1":"chin", 5 | "2":"mouth", 6 | "3":"mouth_corners", 7 | "4":"left_eye", 8 | "5":"right_eye", 9 | "6":"right_eyebrow", 10 | "7":"left_eyebrow", 11 | "8":"right_eye_t", 12 | "9":"right_eye_b", 13 | "10":"left_eye_t", 14 | "11":"left_eye_b", 15 | "12":"mouth_t", 16 | "13":"mouth_b", 17 | "14":"mouth_r", 18 | "15":"mouth_l", 19 | "16":"eyebrow_in_l", 20 | "17":"eyebrow_mid_l", 21 | "18":"eyebrow_out_l", 22 | "19":"eyebrow_in_r", 23 | "20":"eyebrow_mid_r", 24 | "21":"eyebrow_out_r" 25 | }, 26 | "default":"face_vertex" 27 | } -------------------------------------------------------------------------------- /src/cgt_core/cgt_data/hand.json: -------------------------------------------------------------------------------- 1 | { 2 | "hand":{ 3 | "0":"wrist", 4 | "1":"thumb_cmc", 5 | "2":"thumb_mcp", 6 | "3":"thumb_ip", 7 | "4":"thumb_tip", 8 | "5":"index_finger_mcp", 9 | "6":"index_finger_pip", 10 | "7":"index_finger_dip", 11 | "8":"index_finger_tip", 12 | "9":"middle_finger_mcp", 13 | "10":"middle_finger_pip", 14 | "11":"middle_finger_dip", 15 | "12":"middle_finger_tip", 16 | "13":"ring_finger_mcp", 17 | 
"14":"ring_finger_pip", 18 | "15":"ring_finger_dip", 19 | "16":"ring_finger_tip", 20 | "17":"pinky_mcp", 21 | "18":"pinky_pip", 22 | "19":"pinky_dip", 23 | "20":"pinky_tip", 24 | "21":"driver_thumb_cmc", 25 | "22":"driver_thumb_mcp", 26 | "23":"driver_thumb_ip", 27 | "24":"driver_thumb_tip", 28 | "25":"driver_index_finger_mcp", 29 | "26":"driver_index_finger_pip", 30 | "27":"driver_index_finger_dip", 31 | "28":"driver_index_finger_tip", 32 | "29":"driver_middle_finger_mcp", 33 | "30":"driver_middle_finger_pip", 34 | "31":"driver_middle_finger_dip", 35 | "32":"driver_middle_finger_tip", 36 | "33":"driver_ring_finger_mcp", 37 | "34":"driver_ring_finger_pip", 38 | "35":"driver_ring_finger_dip", 39 | "36":"driver_ring_finger_tip", 40 | "37":"driver_pinky_mcp", 41 | "38":"driver_pinky_pip", 42 | "39":"driver_pinky_dip", 43 | "40":"driver_pinky_tip" 44 | } 45 | } -------------------------------------------------------------------------------- /src/cgt_core/cgt_data/pose.json: -------------------------------------------------------------------------------- 1 | { 2 | "pose":{ 3 | "0":"nose", 4 | "1":"left_eye_inner", 5 | "2":"left_eye", 6 | "3":"left_eye_outer", 7 | "4":"right_eye_inner", 8 | "5":"right_eye", 9 | "6":"right_eye_outer", 10 | "7":"left_ear", 11 | "8":"right_ear", 12 | "9":"mouth_left", 13 | "10":"mouth_right", 14 | "11":"left_shoulder", 15 | "12":"right_shoulder", 16 | "13":"left_elbow", 17 | "14":"right_elbow", 18 | "15":"left_wrist", 19 | "16":"right_wrist", 20 | "17":"left_pinky", 21 | "18":"right_pinky", 22 | "19":"left_index", 23 | "20":"right_index", 24 | "21":"left_thumb", 25 | "22":"right_thumb", 26 | "23":"left_hip", 27 | "24":"right_hip", 28 | "25":"left_knee", 29 | "26":"right_knee", 30 | "27":"left_ankle", 31 | "28":"right_ankle", 32 | "29":"left_heel", 33 | "30":"right_heel", 34 | "31":"left_foot_index", 35 | "32":"right_foot_index", 36 | "33":"hip_center", 37 | "34":"shoulder_center", 38 | "35":"shoulder_center_ik", 39 | "36":"left_shoulder_ik", 40 | "37":"right_shoulder_ik", 41 | "38":"left_forearm_ik", 42 | "39":"right_forearm_ik", 43 | "40":"left_hand_ik", 44 | "41":"right_hand_ik", 45 | "42":"left_index_ik", 46 | "43":"right_index_ik", 47 | "44":"hip_center_ik", 48 | "45":"left_hip_ik", 49 | "46":"right_hip_ik", 50 | "47":"left_shin_ik", 51 | "48":"right_shin_ik", 52 | "49":"left_foot_ik", 53 | "50":"right_foot_ik", 54 | "51":"left_foot_index_ik", 55 | "52":"right_foot_index_ik" 56 | } 57 | } -------------------------------------------------------------------------------- /src/cgt_core/cgt_defaults.json: -------------------------------------------------------------------------------- 1 | { 2 | "face":{ 3 | "0":"head", 4 | "1":"chin", 5 | "2":"mouth_corner.L", 6 | "3":"mouth_corner.R", 7 | "4":"face_none" 8 | }, 9 | "hand":{ 10 | "0":"wrist", 11 | "1":"thumb_cmc", 12 | "2":"thumb_mcp", 13 | "3":"thumb_ip", 14 | "4":"thumb_tip", 15 | "5":"index_finger_mcp", 16 | "6":"index_finger_pip", 17 | "7":"index_finger_dip", 18 | "8":"index_finger_tip", 19 | "9":"middle_finger_mcp", 20 | "10":"middle_finger_pip", 21 | "11":"middle_finger_dip", 22 | "12":"middle_finger_tip", 23 | "13":"ring_finger_mcp", 24 | "14":"ring_finger_pip", 25 | "15":"ring_finger_dip", 26 | "16":"ring_finger_tip", 27 | "17":"pinky_mcp", 28 | "18":"pinky_pip", 29 | "19":"pinky_dip", 30 | "20":"pinky_tip", 31 | "21":"hand_none" 32 | }, 33 | "pose":{ 34 | "0":"nose", 35 | "1":"left_eye_inner", 36 | "2":"left_eye", 37 | "3":"left_eye_outer", 38 | "4":"right_eye_inner", 39 | "5":"right_eye", 40 | 
"6":"right_eye_outer", 41 | "7":"left_ear", 42 | "8":"right_ear", 43 | "9":"mouth_left", 44 | "10":"mouth_right", 45 | "11":"left_shoulder", 46 | "12":"right_shoulder", 47 | "13":"left_elbow", 48 | "14":"right_elbow", 49 | "15":"left_wrist", 50 | "16":"right_wrist", 51 | "17":"left_pinky", 52 | "18":"right_pinky", 53 | "19":"left_index", 54 | "20":"right_index", 55 | "21":"left_thumb", 56 | "22":"right_thumb", 57 | "23":"left_hip", 58 | "24":"right_hip", 59 | "25":"left_knee", 60 | "26":"right_knee", 61 | "27":"left_ankle", 62 | "28":"right_ankle", 63 | "29":"left_heel", 64 | "30":"right_heel", 65 | "31":"left_foot_index", 66 | "32":"right_foot_index", 67 | "33":"hip_center", 68 | "34":"shoulder_center", 69 | "35":"pose_location", 70 | "36":"pose_none" 71 | }, 72 | "identifier": "11b1fb41-1349-4465-b3aa-78db80e8c761" 73 | } -------------------------------------------------------------------------------- /src/cgt_core/cgt_interface/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_core/cgt_interface/__init__.py -------------------------------------------------------------------------------- /src/cgt_core/cgt_interface/cgt_core_panel.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from .. import cgt_naming 3 | from pathlib import Path 4 | 5 | 6 | addon_dir_name = None 7 | if addon_dir_name is None: 8 | f = __file__ 9 | addon_dir_name = Path(f).parent.parent.parent.parent.stem 10 | 11 | 12 | class DefaultPanel: 13 | bl_space_type = "VIEW_3D" 14 | bl_region_type = "UI" 15 | bl_category = "BlendAR" 16 | # bl_options = {"DEFAULT_CLOSED"} 17 | bl_options = {"HEADER_LAYOUT_EXPAND"} 18 | 19 | 20 | class PT_UI_CGT_Panel(DefaultPanel, bpy.types.Panel): 21 | bl_label = cgt_naming.ADDON_NAME 22 | bl_idname = "UI_PT_CGT_Panel" 23 | 24 | def draw(self, context): 25 | pass 26 | 27 | 28 | addon_prefs = set() 29 | class APT_UI_CGT_Panel(bpy.types.AddonPreferences): 30 | bl_label = cgt_naming.ADDON_NAME 31 | bl_idname = addon_dir_name 32 | 33 | def draw(self, context): 34 | global addon_prefs 35 | for func in addon_prefs: 36 | func(self, context) 37 | 38 | 39 | classes = [ 40 | PT_UI_CGT_Panel, 41 | APT_UI_CGT_Panel, 42 | ] 43 | 44 | 45 | def register(): 46 | for cls in classes: 47 | bpy.utils.register_class(cls) 48 | 49 | 50 | def unregister(): 51 | for cls in reversed(classes): 52 | bpy.utils.unregister_class(cls) 53 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_interface/cgt_core_registration.py: -------------------------------------------------------------------------------- 1 | from . 
import cgt_core_panel 2 | 3 | classes = [ 4 | cgt_core_panel 5 | ] 6 | 7 | 8 | def register(): 9 | from ..cgt_utils import cgt_logging 10 | cgt_logging.init() 11 | for cls in classes: 12 | cls.register() 13 | 14 | 15 | def unregister(): 16 | for cls in classes: 17 | cls.unregister() 18 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_naming.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from dataclasses import dataclass 4 | from .cgt_utils.cgt_json import JsonData 5 | from pathlib import Path 6 | 7 | # has to be at root 8 | PACKAGE = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) 9 | logging.getLogger("BlendArMocap").debug(f"{PACKAGE}, {os.path.dirname(os.path.dirname(__file__))}") 10 | ADDON_NAME = "BlendArMocap" 11 | 12 | 13 | class CGTDefaultsJson(JsonData): 14 | pose: dict 15 | face: dict 16 | hand: dict 17 | identifier: str 18 | 19 | def __init__(self): 20 | path = Path(__file__).parent / "cgt_defaults.json" 21 | super().__init__(str(path)) 22 | 23 | 24 | cgt_defaults = CGTDefaultsJson() 25 | 26 | 27 | @dataclass(frozen=True, init=True) 28 | class COLLECTIONS: 29 | """ TODO: Store all dataclasses as json in dicts - positions matter therefore this setup aint practical 30 | todo: !!! 31 | """ 32 | drivers: str = "cgt_DRIVERS" 33 | hands: str = "cgt_HAND" 34 | face: str = "cgt_FACE" 35 | pose: str = "cgt_POSE" 36 | 37 | 38 | @dataclass(frozen=True, init=False) 39 | class POSE: 40 | nose: str = "cgt_nose" 41 | left_eye_inner: str = "cgt_eye_inner.L" 42 | left_eye: str = "cgt_eye.L" 43 | left_eye_outer: str = "cgt_eye_outer.L" 44 | right_eye_inner: str = "cgt_eye_inner.R" 45 | right_eye: str = "cgt_eye.R" 46 | right_eye_outer: str = "cgt_eye_outer.R" 47 | left_ear: str = "cgt_ear.L" 48 | right_ear: str = "cgt_ear.R" 49 | mouth_left: str = "cgt_left.L" 50 | mouth_right: str = "cgt_mouth.R" 51 | left_shoulder: str = "cgt_shoulder.L" 52 | right_shoulder: str = "cgt_shoulder.R" 53 | left_elbow: str = "cgt_elbow.L" 54 | right_elbow: str = "cgt_elbow.R" 55 | left_wrist: str = "cgt_pose_wrist.L" 56 | right_wrist: str = "cgt_pose_wrist.R" 57 | left_pinky: str = "cgt_pinky.L" 58 | right_pinky: str = "cgt_pinky.R" 59 | left_index: str = "cgt_index.L" 60 | right_index: str = "cgt_index.R" 61 | left_thumb: str = "cgt_thumb.L" 62 | right_thumb: str = "cgt_thumb.R" 63 | left_hip: str = "cgt_hip.L" 64 | right_hip: str = "cgt_hip.R" 65 | left_knee: str = "cgt_knee.L" 66 | right_knee: str = "cgt_knee.R" 67 | left_ankle: str = "cgt_ankle.L" 68 | right_ankle: str = "cgt_ankle.R" 69 | left_heel: str = "cgt_heel.L" 70 | right_heel: str = "cgt_heel.R" 71 | left_foot_index: str = "cgt_foot_index.L" 72 | right_foot_index: str = "cgt_foot_index.R" 73 | 74 | hip_center: str = "cgt_hip_center" 75 | shoulder_center: str = "cgt_shoulder_center" 76 | 77 | shoulder_center_ik: str = "cgt_shoulder_center_driver" 78 | left_shoulder_ik: str = "cgt_shoulder_driver.L" 79 | right_shoulder_ik: str = "cgt_shoulder_driver.R" 80 | left_forearm_ik: str = "cgt_forearm_driver.L" 81 | right_forearm_ik: str = "cgt_forearm_driver.R" 82 | left_hand_ik: str = "cgt_hand_driver.L" 83 | right_hand_ik: str = "cgt_hand_driver.R" 84 | left_index_ik: str = "cgt_index_driver.L" 85 | right_index_ik: str = "cgt_index_driver.R" 86 | 87 | hip_center_ik: str = "cgt_hip_center_driver" 88 | left_hip_ik: str = "cgt_hip_driver.L" 89 | right_hip_ik: str = "cgt_hip_driver.R" 90 | left_shin_ik: str = 
"cgt_shin_driver.L" 91 | right_shin_ik: str = "cgt_shin_driver.R" 92 | left_foot_ik: str = "cgt_foot_driver.L" 93 | right_foot_ik: str = "cgt_foot_driver.R" 94 | left_foot_index_ik: str = "cgt_foot_index_driver.L" 95 | right_foot_index_ik: str = "cgt_foot_index_driver.R" 96 | 97 | 98 | @dataclass(frozen=True, init=False) 99 | class HAND: 100 | wrist: str = "cgt_wrist" 101 | thumb_cmc: str = "cgt_thumb_cmc" 102 | thumb_mcp: str = "cgt_thumb_mcp" 103 | thumb_ip: str = "cgt_thump_ip" 104 | thumb_tip: str = "cgt_thumb_tip" 105 | index_finger_mcp: str = "cgt_index_mcp" 106 | index_finger_pip: str = "cgt_index_pip" 107 | index_finger_dip: str = "cgt_index_dip" 108 | index_finger_tip: str = "cgt_index_tip" 109 | middle_finger_mcp: str = "cgt_middle_mcp" 110 | middle_finger_pip: str = "cgt_middle_pip" 111 | middle_finger_dip: str = "cgt_middle_dip" 112 | middle_finger_tip: str = "cgt_middle_tip" 113 | ring_finger_mcp: str = "cgt_ring_mcp" 114 | ring_finger_pip: str = "cgt_ring_pip" 115 | ring_finger_dip: str = "cgt_ring_dip" 116 | ring_finger_tip: str = "cgt_ring_tip" 117 | pinky_mcp: str = "cgt_pinky_mcp" 118 | pinky_pip: str = "cgt_pinky_pip" 119 | pinky_dip: str = "cgt_pinky_dip" 120 | pinky_tip: str = "cgt_pinky_tip" 121 | 122 | driver_thumb_cmc: str = "cgt_thumb_cmc_driver" 123 | driver_thumb_mcp: str = "cgt_thumb_mcp_driver" 124 | driver_thumb_ip: str = "cgt_thump_ip_driver" 125 | driver_thumb_tip: str = "cgt_thumb_tip_driver" 126 | driver_index_finger_mcp: str = "cgt_index_mcp_driver" 127 | driver_index_finger_pip: str = "cgt_index_pip_driver" 128 | driver_index_finger_dip: str = "cgt_index_dip_driver" 129 | driver_index_finger_tip: str = "cgt_index_tip_driver" 130 | driver_middle_finger_mcp: str = "cgt_middle_mcp_driver" 131 | driver_middle_finger_pip: str = "cgt_middle_pip_driver" 132 | driver_middle_finger_dip: str = "cgt_middle_dip_driver" 133 | driver_middle_finger_tip: str = "cgt_middle_tip_driver" 134 | driver_ring_finger_mcp: str = "cgt_ring_mcp_driver" 135 | driver_ring_finger_pip: str = "cgt_ring_pip_driver" 136 | driver_ring_finger_dip: str = "cgt_ring_dip_driver" 137 | driver_ring_finger_tip: str = "cgt_ring_tip_driver" 138 | driver_pinky_mcp: str = "cgt_pinky_mcp_driver" 139 | driver_pinky_pip: str = "cgt_pinky_pip_driver" 140 | driver_pinky_dip: str = "cgt_pinky_dip_driver" 141 | driver_pinky_tip: str = "cgt_pinky_tip_driver" 142 | 143 | 144 | @dataclass(frozen=True, init=False) 145 | class FACE: 146 | face: str = "cgt_face_vertex_" 147 | 148 | head: str = "cgt_face_rotation" 149 | chin: str = "cgt_chin_rotation" 150 | mouth: str = "cgt_mouth_driver" 151 | mouth_corners: str = "cgt_mouth_corner_driver" 152 | left_eye: str = "cgt_eye_driver.L" 153 | right_eye: str = "cgt_eye_driver.R" 154 | right_eyebrow: str = "cgt_eyebrow_driver.R" 155 | left_eyebrow: str = "cgt_eyebrow_driver.L" 156 | 157 | right_eye_t: str = "cgt_eye_driver.T.R" 158 | right_eye_b: str = "cgt_eye_driver.B.R" 159 | left_eye_t: str = "cgt_eye_driver.T.L" 160 | left_eye_b: str = "cgt_eye_driver.B.L" 161 | mouth_t: str = "cgt_mouth_driver.T" 162 | mouth_b: str = "cgt_mouth_driver.B" 163 | mouth_r: str = "cgt_mouth_driver.R" 164 | mouth_l: str = "cgt_mouth_driver.L" 165 | 166 | eyebrow_in_l: str = "cgt_eyebrow.I.L" 167 | eyebrow_mid_l: str = "cgt_eyebrow.M.L" 168 | eyebrow_out_l: str = "cgt_eyebrow.O.L" 169 | 170 | eyebrow_in_r: str = "cgt_eyebrow.I.R" 171 | eyebrow_mid_r: str = "cgt_eyebrow.M.R" 172 | eyebrow_out_r: str = "cgt_eyebrow.O.R" 173 | 
-------------------------------------------------------------------------------- /src/cgt_core/cgt_output_nodes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_core/cgt_output_nodes/__init__.py -------------------------------------------------------------------------------- /src/cgt_core/cgt_output_nodes/blender_output.md: -------------------------------------------------------------------------------- 1 | # Blender output for mediapipe data 2 | 3 | 4 | Sets up objects on first use; afterwards, the existing instances are fetched. 5 | The process is based on object names; changing object names may lead to issues. 6 | 7 | Inserts keyframes on objects on every update, based on the `cgt_calculator_nodes` output. 8 | Overwrites previously set keyframes if available. 9 | 10 | Might get slow once many keyframes have been set, as Blender updates an object's f-curves on every insert. 11 | Consider using `cgt_bpy.cgt_fc_actions` to set keyframes on objects directly when realtime updates are not required. -------------------------------------------------------------------------------- /src/cgt_core/cgt_output_nodes/mp_face_out.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from . import mp_out_utils 3 | from ..cgt_naming import COLLECTIONS, cgt_defaults 4 | from ..cgt_bpy import cgt_bpy_utils, cgt_collection, cgt_object_prop 5 | 6 | 7 | class MPFaceOutputNode(mp_out_utils.BpyOutputNode): 8 | face = [] 9 | col_name = COLLECTIONS.face 10 | parent_col = COLLECTIONS.drivers 11 | 12 | def __init__(self): 13 | data = cgt_defaults 14 | 15 | references = {} 16 | for i in range(468): 17 | references[f'{i}'] = f"cgt_face_vertex_{i}" 18 | for k, name in data.face.items(): 19 | references[f'{468+int(k)}'] = name 20 | 21 | self.face = cgt_bpy_utils.add_empties(references, 0.005) 22 | for ob in self.face[468:]: 23 | cgt_object_prop.set_custom_property(ob, "cgt_id", data.identifier) 24 | 25 | cgt_collection.add_list_to_collection(self.col_name, self.face[468:], self.parent_col) 26 | cgt_collection.add_list_to_collection(self.col_name+"_DATA", self.face[:468], self.col_name) 27 | 28 | def update(self, data, frame): 29 | loc, rot, sca = data 30 | for data, method in zip([loc, rot, sca], [self.translate, self.euler_rotate, self.scale]): 31 | try: 32 | method(self.face, data, frame) 33 | except IndexError: 34 | pass 35 | return data, frame 36 | 37 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_output_nodes/mp_hand_out.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from .
import mp_out_utils 3 | from ..cgt_naming import COLLECTIONS, cgt_defaults 4 | from ..cgt_bpy import cgt_bpy_utils, cgt_collection, cgt_object_prop 5 | 6 | 7 | class CgtMPHandOutNode(mp_out_utils.BpyOutputNode): 8 | left_hand = [] 9 | right_hand = [] 10 | col_name = COLLECTIONS.hands 11 | parent_col = COLLECTIONS.drivers 12 | 13 | def __init__(self): 14 | data = cgt_defaults 15 | references = data.hand 16 | self.left_hand = cgt_bpy_utils.add_empties(references, 0.005, prefix=".L", suffix='cgt_') 17 | self.right_hand = cgt_bpy_utils.add_empties(references, 0.005, prefix=".R", suffix='cgt_') 18 | 19 | for ob in self.left_hand+self.right_hand: 20 | cgt_object_prop.set_custom_property(ob, "cgt_id", data.identifier) 21 | 22 | cgt_collection.create_collection(self.col_name+"S", self.parent_col) 23 | cgt_collection.create_collection(self.col_name+".L", self.col_name+"S") 24 | cgt_collection.create_collection(self.col_name+".R", self.col_name+"S") 25 | cgt_collection.add_list_to_collection(self.col_name+".L", self.left_hand, self.parent_col) 26 | cgt_collection.add_list_to_collection(self.col_name+".R", self.right_hand, self.parent_col) 27 | 28 | def split(self, data): 29 | left_hand_data, right_hand_data = data 30 | return [[self.left_hand, left_hand_data], [self.right_hand, right_hand_data]] 31 | 32 | def update(self, data, frame): 33 | loc, rot, sca = data 34 | for data, method in zip([loc, rot, sca], [self.translate, self.euler_rotate, self.scale]): 35 | for hand, chunk in self.split(data): 36 | try: 37 | method(hand, chunk, frame) 38 | except IndexError: 39 | pass 40 | return data, frame 41 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_output_nodes/mp_out_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import List 3 | import logging 4 | from abc import abstractmethod 5 | 6 | import bpy.types 7 | 8 | from ..cgt_naming import COLLECTIONS 9 | from mathutils import Vector, Quaternion, Euler 10 | from ..cgt_patterns import cgt_nodes 11 | 12 | 13 | class BpyOutputNode(cgt_nodes.OutputNode): 14 | parent_col = COLLECTIONS.drivers 15 | prev_rotation = {} 16 | 17 | @abstractmethod 18 | def update(self, data, frame): 19 | pass 20 | 21 | @staticmethod 22 | def translate(target: List[bpy.types.Object], data, frame: int): 23 | """ Translates and keyframes bpy empty objects. """ 24 | try: 25 | for landmark in data: 26 | target[landmark[0]].location = Vector((landmark[1])) 27 | target[landmark[0]].keyframe_insert(data_path="location", frame=frame) 28 | 29 | except IndexError: 30 | logging.debug(f"missing translation index at {frame}") 31 | pass 32 | 33 | @staticmethod 34 | def scale(target, data, frame): 35 | try: 36 | for landmark in data: 37 | target[landmark[0]].scale = Vector((landmark[1])) 38 | target[landmark[0]].keyframe_insert(data_path="scale", frame=frame) 39 | except IndexError: 40 | logging.debug(f"missing scale index at {data}, {frame}") 41 | pass 42 | 43 | @staticmethod 44 | def quaternion_rotate(target, data, frame): 45 | """ Translates and keyframes bpy empty objects. 
""" 46 | try: 47 | for landmark in data: 48 | target[landmark[0]].rotation_quaternion = landmark[1] 49 | target[landmark[0]].keyframe_insert(data_path="rotation_quaternion", frame=frame) 50 | except IndexError: 51 | logging.debug(f"missing quat_euler_rotate index {data}, {frame}") 52 | pass 53 | 54 | def euler_rotate(self, target, data, frame, idx_offset=0): 55 | """ Translates and keyframes bpy empty objects. """ 56 | try: 57 | for landmark in data: 58 | target[landmark[0]].rotation_euler = landmark[1] 59 | target[landmark[0]].keyframe_insert(data_path="rotation_euler", frame=frame) 60 | self.prev_rotation[landmark[0] + idx_offset] = landmark[1] 61 | except IndexError: 62 | logging.debug(f"missing euler_rotate index at {data}, {frame}") 63 | pass -------------------------------------------------------------------------------- /src/cgt_core/cgt_output_nodes/mp_pose_out.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from . import mp_out_utils 3 | from ..cgt_naming import COLLECTIONS, cgt_defaults 4 | from ..cgt_bpy import cgt_bpy_utils, cgt_collection, cgt_object_prop 5 | 6 | 7 | class MPPoseOutputNode(mp_out_utils.BpyOutputNode): 8 | pose = [] 9 | col_name = COLLECTIONS.pose 10 | parent_col = COLLECTIONS.drivers 11 | 12 | def __init__(self): 13 | data = cgt_defaults 14 | references = {} 15 | for k, v in data.pose.items(): 16 | references[k] = v 17 | 18 | self.pose = cgt_bpy_utils.add_empties(references, 0.005, suffix='cgt_') 19 | for ob in self.pose: 20 | cgt_object_prop.set_custom_property(ob, "cgt_id", data.identifier) 21 | 22 | cgt_collection.add_list_to_collection(self.col_name, self.pose, self.parent_col) 23 | 24 | def update(self, data, frame): 25 | loc, rot, sca = data 26 | for data, method in zip([loc, rot, sca], [self.translate, self.euler_rotate, self.scale]): 27 | try: 28 | method(self.pose, data, frame) 29 | except IndexError: 30 | pass 31 | return data, frame 32 | 33 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_patterns/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_core/cgt_patterns/__init__.py -------------------------------------------------------------------------------- /src/cgt_core/cgt_patterns/cgt_nodes.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from abc import ABC, abstractmethod 3 | from typing import List, Tuple, Any, Optional 4 | from ..cgt_utils.cgt_timers import timeit 5 | import logging 6 | 7 | 8 | class Node(ABC): 9 | @abstractmethod 10 | def update(self, data: Any, frame: int) -> Tuple[Optional[Any], int]: 11 | pass 12 | 13 | def __str__(self): 14 | return self.__class__.__name__ 15 | 16 | 17 | class NodeChain(Node): 18 | nodes: List[Node] 19 | 20 | def __init__(self): 21 | self.nodes = list() 22 | 23 | # @timeit 24 | def update(self, data: Any, frame: int) -> Tuple[Optional[Any], int]: 25 | """ Nodes executed inside a chain. """ 26 | for node in self.nodes: 27 | # logging.debug(f"{type(node)}, {node.__class__.__name__}.update()") #{data}, {frame})") 28 | if data is None: 29 | return None, frame 30 | 31 | data, frame = node.update(data, frame) 32 | return data, frame 33 | 34 | def append(self, node: Node): 35 | """ Appends node to the chain, order does matter. 
""" 36 | self.nodes.append(node) 37 | 38 | def __str__(self): 39 | s = "" 40 | for node in self.nodes: 41 | s += str(node) 42 | s += " -> " 43 | return s[:-4] 44 | 45 | 46 | class NodeChainGroup(Node): 47 | """ Node containing multiple node chains. 48 | Chains and input got to match 49 | Input == Output. """ 50 | nodes: List[NodeChain] 51 | 52 | def __init__(self): 53 | self.nodes = list() 54 | 55 | # @timeit 56 | def update(self, data: Any, frame: int) -> Tuple[Optional[Any], int]: 57 | """ Push data in their designed node chains. """ 58 | assert len(data) == len(self.nodes) 59 | 60 | updated_data = [] 61 | for node_chain, chunk in zip(self.nodes, data): 62 | c, f = node_chain.update(chunk, frame) 63 | updated_data.append(c) 64 | 65 | return updated_data, frame 66 | 67 | def __str__(self): 68 | s = "" 69 | for node_chain in self.nodes: 70 | s += '\n\t -> ' 71 | s += str(node_chain) 72 | return s 73 | 74 | 75 | class InputNode(Node): 76 | """ Returns data on call. """ 77 | @abstractmethod 78 | def update(self, data: None, frame: int) -> Tuple[Optional[Any], int]: 79 | pass 80 | 81 | 82 | class CalculatorNode(Node): 83 | """ Calculate new data and changes the input shape. """ 84 | @abstractmethod 85 | def update(self, data: Any, frame: int) -> Tuple[Optional[Any], int]: 86 | pass 87 | 88 | 89 | class OutputNode(Node): 90 | """ Outputs and returns the data without changing values nor shape. """ 91 | @abstractmethod 92 | def update(self, data: Any, frame: int) -> Tuple[Optional[Any], int]: 93 | pass 94 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_patterns/observer_pattern.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from abc import ABC, abstractmethod 3 | 4 | 5 | # The Subject class maintains a list of observers and has methods 6 | # for attaching and detaching observers. It also has a method 7 | # for notifying all observers of state changes. 8 | class Subject(ABC): 9 | """The Subject interface declares a set of methods for managing subscribers.""" 10 | @abstractmethod 11 | def attach(self, observer: Observer) -> None: 12 | """Attach an observer to the subject.""" 13 | pass 14 | 15 | @abstractmethod 16 | def detach(self, observer: Observer) -> None: 17 | """Detach an observer from the subject.""" 18 | pass 19 | 20 | @abstractmethod 21 | def notify(self) -> None: 22 | """Notify all observers about an event.""" 23 | pass 24 | 25 | 26 | # The Observer class has a method, update, that is called by the 27 | # subject when the subject's state changes. 28 | class Observer(ABC): 29 | """The Observer interface declares the update method, used by subjects.""" 30 | @abstractmethod 31 | def update(self, subject: Subject) -> None: 32 | """Receive update from subject.""" 33 | pass 34 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_core/cgt_utils/__init__.py -------------------------------------------------------------------------------- /src/cgt_core/cgt_utils/cgt_json.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import json 3 | 4 | 5 | class JsonData(object): 6 | """ Import json data, preferably as dict. 
7 | Can load lists, will store them as dict with "data" as key. """ 8 | data: dict = None 9 | 10 | def __init__(self, path: str = None, **data): 11 | if path: 12 | with open(path, 'rb') as jsonFile: 13 | data = json.load(jsonFile) 14 | 15 | if isinstance(data, dict): 16 | self.__dict__.update((), **data) 17 | elif isinstance(data, list): 18 | self.__dict__.update((), **{"data": data}) 19 | else: 20 | assert TypeError 21 | else: 22 | self.__dict__.update((), **data) 23 | 24 | def save(self, path: str = None): 25 | assert path is not None 26 | with open(path, "w", encoding='utf-8') as jsonFile: 27 | json.dump(self.__dict__, jsonFile, ensure_ascii=False, indent=4, separators=(',', ':'), sort_keys=False) 28 | 29 | def __str__(self): 30 | s = ["{"] 31 | 32 | def recv(d, depth=0): 33 | for k, v in d.items(): 34 | if isinstance(v, dict): 35 | tabs = "\t"*depth 36 | s.append(f"\n{tabs}{k}: ") 37 | s.append("{") 38 | recv(v, depth+1) 39 | tabs = "\t"*depth 40 | s.append(f"\n{tabs}") 41 | s.append("},") 42 | else: 43 | tabs = "\t"*depth 44 | s.append(f"\n{tabs}{k}: {v},") 45 | 46 | recv(self.__dict__, 1) 47 | s.append("\n}") 48 | return "".join(s) 49 | 50 | def __call__(self): 51 | return self.data 52 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_utils/cgt_logging.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | import bpy 5 | 6 | 7 | text = '' 8 | def oops(self, context): 9 | """ Hack to display pop-up message in Blender using a logging Stream. """ 10 | self.layout.label(text=text) 11 | 12 | 13 | class BlenderPopupHandler(logging.StreamHandler): 14 | """ Displays a Popup message in Blender on receiving an error. """ 15 | def __init__(self): 16 | logging.StreamHandler.__init__(self) 17 | self.msg = '' 18 | 19 | def emit(self, record): 20 | """ Emit msg as popup. """ 21 | global text 22 | msg = self.format(record) 23 | text = msg 24 | try: 25 | bpy.context.window_manager.popup_menu(oops, title="Error", icon='ERROR') 26 | except AttributeError: 27 | # logging without bpy 28 | pass 29 | 30 | 31 | def add_console_log(name: str = ''): 32 | """ Default: log stream to console. """ 33 | handler = logging.StreamHandler() 34 | handler.setLevel(logging.DEBUG) 35 | formatter = logging.Formatter('%(asctime)s - BlendArMocap: %(levelname)s - ' 36 | '%(message)s - %(filename)s:%(lineno)d', 37 | '%m-%d %H:%M:%S') 38 | handler.setFormatter(formatter) 39 | logging.getLogger(name).addHandler(handler) 40 | 41 | 42 | def add_custom_log(name: str = ''): 43 | """ Error: Generates popup in Blender when an Error occurs. 
""" 44 | handler = BlenderPopupHandler() 45 | handler.setLevel(logging.ERROR) 46 | formatter = logging.Formatter('%(levelname)-8s %(message)s, %(filename)s:%(lineno)d') 47 | handler.setFormatter(formatter) 48 | logging.getLogger(name).addHandler(handler) 49 | 50 | 51 | def init(name: str = ''): 52 | # add_custom_log(name) 53 | add_console_log(name) 54 | 55 | 56 | def main(): 57 | # basically a test as unittest don't display log messages 58 | init('') 59 | logging.getLogger().setLevel(logging.DEBUG) 60 | logging.info('Jackdaws love my big sphinx of quartz.') 61 | logger1 = logging.getLogger('myapp.area1') 62 | logger2 = logging.getLogger('myapp.area2') 63 | logger1.debug('Quick zephyrs blow, vexing daft Jim.') 64 | logger1.info('How quickly daft jumping zebras vex.') 65 | logger2.warning('Jail zesty vixen who grabbed pay from quack.') 66 | logger2.error('The five boxing wizards jump quickly.') 67 | logging.info("some root log") 68 | 69 | 70 | if __name__ == '__main__': 71 | main() 72 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_utils/cgt_timers.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from functools import wraps 3 | from time import time 4 | from typing import Callable 5 | from collections import deque 6 | 7 | 8 | def timeit(func: Callable): 9 | avg = deque() 10 | 11 | @wraps(func) 12 | def wrap(*args, **kwargs): 13 | nonlocal avg 14 | 15 | start = time() 16 | result = func(*args, **kwargs) 17 | end = time() 18 | runtime = end - start 19 | avg.appendleft(runtime) 20 | if len(avg) > 30: 21 | avg.pop() 22 | 23 | print(f"function: {func.__name__}\ntook: {round(runtime, 5)} sec, avg of {len(avg)}: {sum(avg)/len(avg)} sec\n") 24 | return result 25 | 26 | return wrap 27 | 28 | 29 | def fps(func: Callable): 30 | start = time() 31 | count = 0 32 | 33 | @wraps(func) 34 | def wrap(*args, **kwargs): 35 | nonlocal count 36 | nonlocal start 37 | res = func(*args, **kwargs) 38 | count += 1 39 | if time() - start >= 1: 40 | start = time() 41 | print(f"function '{func.__name__}' runs at {count} fps") 42 | count = 0 43 | 44 | return res 45 | 46 | return wrap 47 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_utils/cgt_user_prefs.py: -------------------------------------------------------------------------------- 1 | from . import cgt_json 2 | from pathlib import Path 3 | from typing import Any 4 | 5 | 6 | def get_prefs(**kwargs) -> dict: 7 | """ Kwargs as key and default value. Ensure to use unique keys (check json if necessary). """ 8 | data = cgt_json.JsonData(PREFERENCES_PATH) 9 | d = {} 10 | for key, default in kwargs.items(): 11 | d[key] = getattr(data, key, default) 12 | return d 13 | 14 | 15 | def set_prefs(**kwargs) -> None: 16 | """ Saves new preferences. Ensure to use unique keys (check json if necessary). """ 17 | data = cgt_json.JsonData(PREFERENCES_PATH) 18 | for key, value in kwargs.items(): 19 | setattr(data, key, value) 20 | data.save(PREFERENCES_PATH) 21 | 22 | 23 | def set_nested_attr(cls: Any, attr_path: str, value: Any) -> None: 24 | """ Set nested property: 25 | cls[Any]: b.e. bpy.context.scene 26 | attr_path[str]: b.e. user.properties.name 27 | -> bpy.context.scene.user.properties.name = value. 
""" 28 | props = attr_path.split('.')[::-1] 29 | while len(props) > 0: 30 | sub_attr = props.pop() 31 | cls = getattr(cls, sub_attr, None) 32 | setattr(cls, props[0], value) 33 | 34 | 35 | # Set path to preference, create file if it doesn't exist. 36 | PREFERENCES_PATH = None 37 | if PREFERENCES_PATH is None: 38 | PREFERENCES_PATH = Path(__file__).parent / "prefs.json" 39 | if not PREFERENCES_PATH.is_file(): 40 | with open(PREFERENCES_PATH, 'a') as f: 41 | f.write('{}') 42 | PREFERENCES_PATH = str(PREFERENCES_PATH) 43 | -------------------------------------------------------------------------------- /src/cgt_core/cgt_utils/prefs.json: -------------------------------------------------------------------------------- 1 | { 2 | "local_user":false, 3 | "key_frame_step":4, 4 | "webcam_input_device":0, 5 | "detection_input_type":"stream", 6 | "enum_detection_type":"POSE", 7 | "enum_stream_dim":"sd", 8 | "enum_stream_type":"0", 9 | "min_detection_confidence":0.699999988079071, 10 | "hand_model_complexity":1, 11 | "pose_model_complexity":1, 12 | "holistic_model_complexity":1, 13 | "refine_face_landmarks":false, 14 | "load_raw":false, 15 | "quickload":false 16 | } -------------------------------------------------------------------------------- /src/cgt_freemocap/README.md: -------------------------------------------------------------------------------- 1 | # Freemocap import 2 | 3 | [Freemocap](https://freemocap.org) session data can be saved in a folder which then can be import using BlendArMocap. 4 | To import session data to blender, set the path to the session directory and press the import button. 5 | There are several import options: 6 | - **(default)** to import and process data while updating blender 7 | - **load_quick** to import data in one call, faster but freezes blender 8 | - **load_raw** to leaves data unprocessed (may not be used to drive rigs) 9 | - **load_synch_videos** to import session videos 10 | 11 | To import data via a subprocess or via freemocap directly check the `fm_subprocess_cmd_receiver` or [Freemocap Github](https://github.com/freemocap/freemocap).
12 | 13 | -------------------------------------------------------------------------------- /src/cgt_freemocap/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_freemocap/__init__.py -------------------------------------------------------------------------------- /src/cgt_freemocap/fm_interface.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | 4 | class UI_PT_CGT_Properties_Freemocap(bpy.types.PropertyGroup): 5 | freemocap_session_path: bpy.props.StringProperty( 6 | name="Path", 7 | default="/Users/Scylla/Downloads/sesh_2022-09-19_16_16_50_in_class_jsm/", 8 | description="Directory path to freemocap session.", 9 | options={'HIDDEN'}, 10 | maxlen=1024, 11 | subtype='DIR_PATH' 12 | ) 13 | modal_active: bpy.props.BoolProperty(default=False) 14 | load_raw: bpy.props.BoolProperty( 15 | default=False, description="Loads raw session data - may not be transferred to rigs.") 16 | quickload: bpy.props.BoolProperty( 17 | default=False, description="Quickload session folder. (Freezes Blender)") 18 | 19 | 20 | class UI_PT_CGT_Panel_Freemocap(bpy.types.Panel): 21 | bl_label = "Freemocap" 22 | bl_parent_id = "UI_PT_CGT_Panel" 23 | bl_space_type = "VIEW_3D" 24 | bl_region_type = "UI" 25 | bl_category = "BlendAR" 26 | bl_options = {'DEFAULT_CLOSED'} 27 | 28 | @classmethod 29 | def poll(cls, context): 30 | if context.mode in {'OBJECT', 'POSE'}: 31 | return True 32 | 33 | def quickload_session_folder(self, user): 34 | if user.modal_active: 35 | self.layout.row().operator("wm.cgt_quickload_freemocap_operator", text="Stop Import", icon='CANCEL') 36 | else: 37 | self.layout.row().operator("wm.cgt_quickload_freemocap_operator", text="Quickload Session Folder", icon='IMPORT') 38 | 39 | def load_session_folder(self, user): 40 | if user.modal_active: 41 | self.layout.row().operator("wm.cgt_load_freemocap_operator", text="Stop Import", icon='CANCEL') 42 | else: 43 | self.layout.row().operator("wm.cgt_load_freemocap_operator", text="Load Session Folder", icon='IMPORT') 44 | 45 | def draw(self, context): 46 | layout = self.layout 47 | 48 | user = context.scene.cgtinker_freemocap # noqa 49 | layout.row().prop(user, "freemocap_session_path") 50 | if not user.quickload: 51 | self.load_session_folder(user) 52 | else: 53 | self.quickload_session_folder(user) 54 | 55 | self.layout.row().operator("wm.fmc_load_synchronized_videos", text="Load synchronized videos", icon='IMAGE_PLANE') 56 | row = layout.row() 57 | row.column(align=True).prop(user, "quickload", text="Quickload", toggle=True) 58 | if user.quickload: 59 | row.column(align=True).prop(user, "load_raw", text="Raw", toggle=True) 60 | # layout.separator() 61 | # layout.row().operator("wm.fmc_bind_freemocap_data_to_skeleton", text="Bind to rig (Preview)") 62 | 63 | 64 | classes = [ 65 | UI_PT_CGT_Properties_Freemocap, 66 | UI_PT_CGT_Panel_Freemocap, 67 | ] 68 | 69 | 70 | def register(): 71 | for cls in classes: 72 | bpy.utils.register_class(cls) 73 | bpy.types.Scene.cgtinker_freemocap = bpy.props.PointerProperty(type=UI_PT_CGT_Properties_Freemocap) 74 | 75 | 76 | def unregister(): 77 | for cls in reversed(classes): 78 | bpy.utils.unregister_class(cls) 79 | del bpy.types.Scene.cgtinker_freemocap # noqa 80 | 81 | 82 | if __name__ == '__main__': 83 | try: 84 | unregister() 85 | except RuntimeError: 86 | pass 87 | 88 | register() 89 | 
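
The panel above only toggles scene properties and invokes the registered operators, so the same flow can be scripted. A small sketch using the property and operator names registered in this file (the session path is a placeholder):

```python
import bpy

# The Freemocap panel stores its state on the scene; scripts can set it directly.
user = bpy.context.scene.cgtinker_freemocap
user.freemocap_session_path = "/path/to/freemocap_session/"  # placeholder path
user.quickload = True   # one-call import (freezes Blender while loading)
user.load_raw = False   # keep processing enabled so results can drive rigs

# Same operator the "Quickload Session Folder" button invokes.
bpy.ops.wm.cgt_quickload_freemocap_operator()
```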
--------------------------------------------------------------------------------
/src/cgt_freemocap/fm_paths.py:
--------------------------------------------------------------------------------
1 | __VIDEOS_DIR = 'SyncedVideos'
2 | VIDEOS_DIR = 'annotated_videos'
3 | __DATA_DIR = 'DataArrays'
4 | DATA_DIR = 'output_data'
5 | __SMOOTHED_MEDIAPIPE_DATA = 'mediaPipeSkel_3d_smoothed.npy'
6 | SMOOTHED_MEDIAPIPE_DATA = 'mediaPipeSkel_3d_body_hands_face.npy'
7 | # MEDIAPIPE_DATA_REPROJ_ERR = 'mediaPipeSkel_reprojErr.npy'
--------------------------------------------------------------------------------
/src/cgt_freemocap/fm_registration.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import logging
3 | from . import fm_interface
4 | from . import fm_operators
5 | from ..cgt_core.cgt_utils import cgt_user_prefs
6 | 
7 | 
8 | modules = [
9 |     fm_operators,
10 |     fm_interface
11 | ]
12 | 
13 | 
14 | FM_ATTRS = {
15 |     "load_raw": False,
16 |     "quickload": False,
17 | }
18 | 
19 | 
20 | @bpy.app.handlers.persistent
21 | def save_preferences(*args):
22 |     user = bpy.context.scene.cgtinker_freemocap  # noqa
23 |     cgt_user_prefs.set_prefs(**{attr: getattr(user, attr, default) for attr, default in FM_ATTRS.items()})
24 | 
25 | 
26 | @bpy.app.handlers.persistent
27 | def load_preferences(*args):
28 |     stored_preferences = cgt_user_prefs.get_prefs(**FM_ATTRS)
29 |     user = bpy.context.scene.cgtinker_freemocap  # noqa
30 |     for property_name, value in stored_preferences.items():
31 |         if not hasattr(user, property_name):
32 |             logging.warning(f"{property_name} - not available."); continue
33 |         setattr(user, property_name, value)
34 | 
35 | 
36 | def register():
37 |     for module in modules:
38 |         module.register()
39 | 
40 |     bpy.app.handlers.save_pre.append(save_preferences)
41 |     bpy.app.handlers.load_post.append(load_preferences)
42 | 
43 | 
44 | def unregister():
45 |     for module in reversed(modules):
46 |         module.unregister()
47 | 
--------------------------------------------------------------------------------
/src/cgt_freemocap/fm_subprocess_cmd_receiver.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import logging
3 | import time
4 | from . import fm_utils, fm_session_loader
5 | 
6 | """ Interfaces for freemocap subprocess commands. """
7 | 
8 | 
9 | class LoadFreemocapSession:
10 |     user = freemocap_session_path = timeout = processing_manager = None
11 |     log_step = 25
12 | 
13 |     def __init__(self, session_path: str, timeout: int = None):
14 |         """ Loads Freemocap data from session directory.
15 |             Attention: Do not use the WM_Load_Freemocap_Operator here.
16 |             Modal operators deliver unexpected results when Blender runs in the background. """
17 | 
18 |         # set timeout
19 |         self.timeout = timeout
20 |         if timeout is None:
21 |             self.timeout = float('inf')
22 | 
23 |         # set session path
24 |         self.user = bpy.context.scene.cgtinker_freemocap
25 |         self.user.freemocap_session_path = session_path
26 | 
27 |     def quickload(self):
28 |         self.user.quickload = True
29 |         self.user.load_raw = True
30 |         bpy.ops.wm.cgt_quickload_freemocap_operator()
31 | 
32 |     def quickload_processed(self):
33 |         self.user.quickload = True
34 |         self.user.load_raw = False
35 |         bpy.ops.wm.cgt_quickload_freemocap_operator()
36 | 
37 |     def run_modal(self):
38 |         """ Imports the data, breaks if timeout is reached or import finished.
""" 39 | self.user.load_raw = False 40 | self.user.modal_active = False 41 | self.user.quickload = False 42 | 43 | print("Start running modal") 44 | bpy.ops.wm.cgt_load_freemocap_operator() 45 | 46 | start = time.time() 47 | while time.time() - start <= self.timeout and self.user.modal_active: 48 | pass 49 | 50 | self.user.modal_active = False 51 | logging.info("Stopped importing data.") 52 | 53 | 54 | def import_freemocap_session( 55 | session_directory: str, 56 | bind_to_rig: bool = False, 57 | load_synch_videos: bool = False, 58 | timeout: int = None, 59 | load_raw: bool = False, 60 | load_quick: bool = False): 61 | 62 | logging.debug("Called import freemocap session.") 63 | 64 | if not hasattr(bpy.context.scene, 'cgtinker_freemocap'): 65 | logging.error("Aborted, BlendArMocap Add-On might not be registered.") 66 | return 0 67 | 68 | if not fm_utils.is_valid_session_directory(session_directory): 69 | logging.error("Aborted, session path not valid.") 70 | return 0 71 | 72 | # import data 73 | importer = LoadFreemocapSession(session_directory, timeout) 74 | if load_raw and load_quick: 75 | importer.quickload() 76 | elif load_quick: 77 | importer.quickload_processed() 78 | else: 79 | importer.run_modal() 80 | 81 | if bind_to_rig: 82 | bpy.ops.wm.fmc_bind_freemocap_data_to_skeleton() 83 | 84 | if load_synch_videos: 85 | bpy.ops.wm.fmc_load_synchronized_videos() 86 | 87 | logging.info("Finished freemocap session import.") 88 | return 1 89 | -------------------------------------------------------------------------------- /src/cgt_freemocap/fm_utils.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) cgtinker, cgtinker.com, hello@cgtinker.com 3 | 4 | This program is free software: you can redistribute it and/or modify 5 | it under the terms of the GNU General Public License as published by 6 | the Free Software Foundation, either version 3 of the License, or 7 | (at your option) any later version. 8 | 9 | This program is distributed in the hope that it will be useful, 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | GNU General Public License for more details. 13 | 14 | You should have received a copy of the GNU General Public License 15 | along with this program. If not, see . 16 | ''' 17 | 18 | import logging 19 | import bpy 20 | from pathlib import Path 21 | from . import fm_paths 22 | 23 | 24 | def is_valid_session_directory(path): 25 | freemocap_session_path = Path(bpy.path.abspath(path)) 26 | if not freemocap_session_path.is_dir(): 27 | logging.error( 28 | f"Given path doesn't point to a directory {freemocap_session_path}.") 29 | return False 30 | 31 | data_arrays = freemocap_session_path / fm_paths.DATA_DIR 32 | if not data_arrays.is_dir(): 33 | logging.error( 34 | f"Given path doesn't contain a DataArrays directory {freemocap_session_path}.") 35 | return False 36 | 37 | mediaPipeSkel_3d_smoothed = data_arrays / fm_paths.SMOOTHED_MEDIAPIPE_DATA 38 | if not mediaPipeSkel_3d_smoothed.is_file(): 39 | logging.error( 40 | f"Data Arrays don't contain a mediaPipeSkel_3d_smoothed.npy file. {data_arrays}.") 41 | return False 42 | 43 | # mediaPipeSkel_reprojErr = data_arrays / fm_paths.MEDIAPIPE_DATA_REPROJ_ERR 44 | # if not mediaPipeSkel_reprojErr.is_file(): 45 | # logging.error( 46 | # f"Data Arrays don't contain a mediaPipeSkel_reprojErr.npy file. 
{data_arrays}.") 47 | 48 | logging.debug(f"Path to freemocap session: {freemocap_session_path}") 49 | return True 50 | -------------------------------------------------------------------------------- /src/cgt_imports.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import logging 3 | import sys 4 | from typing import Optional 5 | from pathlib import Path 6 | 7 | # Ensure all modules are reloaded from new files, 8 | # when the addon is removed and a new version is installed in the same session, 9 | # or when Blender's 'Reload Scripts' operator has been called. 10 | 11 | 12 | PACKAGE_PATH = Path(__file__).parent.parent.parent 13 | PACKAGE_NAME = PACKAGE_PATH.name 14 | 15 | 16 | def import_module(module): 17 | importlib.import_module(f"{PACKAGE_NAME}{module}") 18 | 19 | 20 | def reload_module(module): 21 | importlib.reload(sys.modules[f"{PACKAGE_NAME}{module}"]) 22 | 23 | 24 | def get_reload_list(sub_dirs): 25 | reload_list = [] 26 | 27 | for sub_dir in sub_dirs: 28 | files = [p for p in sub_dir.rglob( 29 | "*.py") if not p.stem.startswith('_')] 30 | for file in files: 31 | parents = get_parents(file, []) 32 | imp_path = "" 33 | for parent in reversed(parents): 34 | imp_path += f".{parent}" 35 | imp_path += f".{file.stem}" 36 | reload_list.append(imp_path) 37 | return reload_list 38 | 39 | 40 | def get_parents(file: Path, parents: list): 41 | if file.parent.name != PACKAGE_NAME: 42 | parents.append(file.parent.name) 43 | get_parents(file.parent, parents) 44 | return parents 45 | 46 | 47 | def manage_imports(dirs: Optional[list] = None): 48 | if dirs is None: 49 | s = [PACKAGE_PATH / 'src'] 50 | else: 51 | s = [PACKAGE_PATH / d for d in dirs] 52 | 53 | reload_list = get_reload_list(s) 54 | for module in reload_list: 55 | reload = True 56 | try: 57 | import_module(module) 58 | except (ModuleNotFoundError, ImportError) as e: 59 | reload = False 60 | logging.error(f"Import {module} failed: {e}") 61 | 62 | if reload: 63 | reload_module(module) 64 | -------------------------------------------------------------------------------- /src/cgt_mediapipe/README.md: -------------------------------------------------------------------------------- 1 | # Mediapipe 2 | 3 | ### Purpose
4 | Run Mediapipe within Blender to detect pose, hand, face or holistic features.
5 | Calculate rotation data based on the detection results to drive rigs.
6 | 
7 | ### Setup Instructions
8 | On macOS, Blender has to be started from the terminal, as Blender's plist doesn't contain camera permissions.
9 | To run Mediapipe, you need to install the required dependencies [_opencv_](https://opencv.org) and [_mediapipe_](https://google.github.io/mediapipe/) via the add-on's preferences.
10 | An internet connection is required to install the packages. It's recommended to disable VPNs during the installation process.
11 | Blender may also have to be restarted during the installation process. To access the webcam feed, Blender usually has to be started via the terminal.
12 | 
13 | ### Detection
14 | **Type**
15 | Select the data type you want to use as input: 16 | - Stream 17 | - Movie 18 | 19 | **Webcam Device Slot**
20 | If you have multiple webcam devices, you may have to change the integer value until you find the device you want to use.
21 | By default, the Webcam Device Slot should be **0**.
22 | 
23 | **File Path**
24 | Select the path to your movie file. Preferably, reduce it in size before starting the detection.
25 | 
26 | **Key Step**
27 | The Key Step determines the frequency of keyframes made in Blender.
28 | Adjust the Keyframe Step so the detection results in Blender match the recording speed; for example, if the detection runs at roughly 6 FPS while the scene plays back at 24 FPS, a Key Step of 4 keeps the recording in sync.
29 | 30 | **Target**
31 | Select the detection target: 32 | - Hands 33 | - Face 34 | - Pose 35 | - Holistic 36 | 37 | **Model Complexity**
38 | Complexity of the landmark model: `0 or 1`.
39 | Landmark accuracy as well as inference latency generally go up with the model complexity.
40 | Defaults to `1`. The complexity level 2 for pose landmarks is not available due to Google's packaging.
41 | 
42 | **Min Detection Confidence**
43 | Minimum confidence value `[0.0, 1.0]` from the detection model for the detection to be considered successful. Defaults to `0.5`.
44 | 
45 | **Start Detection**
46 | When pressing the _Start Detection_ button, a window will open which contains the webcam or movie feed and detection results.
47 | The detection results are recorded in Blender at runtime. You can modify the recording starting point by changing the keyframe start in Blender.
48 | You may want to deactivate the rig while detecting if you have transferred animation results previously.
49 | To finish the recording, press 'Q' or the "Stop Detection" button.
50 | 
51 | 
--------------------------------------------------------------------------------
/src/cgt_mediapipe/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_mediapipe/__init__.py
--------------------------------------------------------------------------------
/src/cgt_mediapipe/cgt_mp_core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_mediapipe/cgt_mp_core/__init__.py
--------------------------------------------------------------------------------
/src/cgt_mediapipe/cgt_mp_core/cv_stream.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from typing import Union, Tuple
3 | import time
4 | import cv2
5 | import logging
6 | import numpy as np
7 | 
8 | 
9 | class Stream:
10 |     updated: bool = None
11 |     frame: np.ndarray = None
12 |     input_type: int = None
13 |     color_spaces = {
14 |         'rgb': cv2.COLOR_BGR2RGB,
15 |         'bgr': cv2.COLOR_RGB2BGR
16 |     }
17 |     dim: Tuple[int, int]
18 |     is_movie: bool = False
19 |     frame_configured: bool = False
20 | 
21 |     def __init__(self, capture_input: Union[str, int], title: str = "Stream Detection",
22 |                  width: int = 640, height: int = 480, backend: int = 0):
23 |         """ Generates a video stream for webcam or opens a movie file using cv2 """
24 |         self.set_capture(capture_input, backend)
25 | 
26 |         self.dim = (width, height)
27 |         self.frame_configured = False
28 |         if isinstance(capture_input, str):
29 |             self.is_movie = True
30 |         self.set_capture_props(width, height)
31 | 
32 |         time.sleep(.25)
33 |         if not self.capture.isOpened():
34 |             # if backend cannot open capture use random backend
35 |             self.capture = cv2.VideoCapture(capture_input)
36 |             time.sleep(.25)
37 | 
38 |         if not self.capture.isOpened():
39 |             raise IOError("Cannot open webcam")
40 |         self.title = title
41 | 
42 |     def update(self):
43 |         self.updated, frame = self.capture.read()
44 |         self.frame = cv2.flip(frame, 1)
45 | 
46 |     def set_color_space(self, space):
47 |         self.frame = cv2.cvtColor(self.frame, self.color_spaces[space])
48 | 
49 |     def resize_movie_frame(self):
50 |         if not self.frame_configured:
51 |             (h, w) = self.frame.shape[:2]
52 |             (tar_w, tar_h) = self.dim
53 | 
54 |             if h < w:  # landscape
55 |                 aspect = tar_w / float(w)
56 |                 self.dim = (tar_w, int(h*aspect))
57 |             elif h > w:  # portrait
58 |                 aspect = tar_h / float(h)
59 |                 self.dim = (int(w*aspect), tar_h)
60 |             else:
61 |                 self.dim = (tar_w, tar_w)
62 | 
63 |             self.frame_configured = True
64 | 
65 |         return cv2.resize(self.frame, self.dim, interpolation=cv2.INTER_AREA)
66 | 
67 |     def draw(self):
68 |         f = self.frame
69 |         if self.is_movie:
70 |             f = self.resize_movie_frame()
71 |         cv2.imshow(self.title, f)
72 | 
73 |     def exit_stream(self):
74 |         if cv2.waitKey(1) & 0xFF == ord('q'):
75 |             logging.debug("ATTEMPT TO EXIT STREAM")
76 |             return True
77 |         else:
78 |             return False
79 | 
80 |     def set_capture(self, capture_input, backend):
81 |         if isinstance(capture_input, int):
82 |             self.input_type = 0
83 |         elif isinstance(capture_input, str):
84 |             self.input_type = 1
85 | 
86 |         if backend == 0:
87 |             self.capture = cv2.VideoCapture(capture_input)
88 | 
elif backend == 1: 89 | try: 90 | self.capture = cv2.VideoCapture(capture_input, cv2.CAP_DSHOW) 91 | except EOFError: 92 | self.capture = cv2.VideoCapture(capture_input) 93 | 94 | def set_capture_props(self, width, height): 95 | self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, width) 96 | self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height) 97 | self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 1) 98 | 99 | def __del__(self): 100 | logging.debug("DEL STREAM") 101 | self.capture.release() 102 | cv2.destroyAllWindows() 103 | 104 | 105 | def main(): 106 | stream = Stream(0) 107 | while stream.capture.isOpened(): 108 | stream.update() 109 | stream.set_color_space('rgb') 110 | stream.set_color_space('bgr') 111 | stream.draw() 112 | if cv2.waitKey(1) & 0xFF == ord('q'): 113 | break 114 | del stream 115 | 116 | 117 | if __name__ == "__main__": 118 | main() 119 | -------------------------------------------------------------------------------- /src/cgt_mediapipe/cgt_mp_core/mp_detector_node.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from mediapipe import solutions 4 | from abc import abstractmethod 5 | 6 | from . import cv_stream 7 | from ...cgt_core.cgt_patterns import cgt_nodes 8 | 9 | 10 | class DetectorNode(cgt_nodes.InputNode): 11 | stream: cv_stream.Stream = None 12 | solution = None 13 | 14 | def __init__(self, stream: cv_stream.Stream = None): 15 | self.stream = stream 16 | self.drawing_utils = solutions.drawing_utils 17 | self.drawing_style = solutions.drawing_styles 18 | 19 | @abstractmethod 20 | def update(self, *args): 21 | pass 22 | 23 | @abstractmethod 24 | def contains_features(self, mp_res): 25 | pass 26 | 27 | @abstractmethod 28 | def draw_result(self, s, mp_res, mp_drawings): 29 | pass 30 | 31 | @abstractmethod 32 | def empty_data(self): 33 | pass 34 | 35 | @abstractmethod 36 | def detected_data(self, mp_res): 37 | pass 38 | 39 | def exec_detection(self, mp_lib): 40 | """ Runs mediapipe detection on frame: 41 | -> detected_data: Detection Results. 42 | -> empty_data: No features detected. 43 | -> None: EOF or Finish. 
""" 44 | self.stream.update() 45 | updated = self.stream.updated 46 | 47 | if not updated and self.stream.input_type == 0: 48 | # ignore if an update fails while stream detection 49 | return self.empty_data() 50 | 51 | elif not updated and self.stream.input_type == 1: 52 | # stop detection if update fails while movie detection 53 | return None 54 | 55 | if self.stream.frame is None: 56 | # ignore frame if not available 57 | return self.empty_data() 58 | 59 | # detect features in frame 60 | self.stream.frame.flags.writeable = False 61 | self.stream.set_color_space('rgb') 62 | mp_res = mp_lib.process(self.stream.frame) 63 | self.stream.set_color_space('bgr') 64 | 65 | # proceed if contains features 66 | if not self.contains_features(mp_res): 67 | self.stream.draw() 68 | if self.stream.exit_stream(): 69 | return None 70 | return self.empty_data() 71 | 72 | # draw results 73 | self.draw_result(self.stream, mp_res, self.drawing_utils) 74 | self.stream.draw() 75 | 76 | # exit stream 77 | if self.stream.exit_stream(): 78 | return None 79 | 80 | return self.detected_data(mp_res) 81 | 82 | def cvt2landmark_array(self, landmark_list): 83 | """landmark_list: A normalized landmark list proto message to be annotated on the image.""" 84 | return [[idx, [landmark.x, landmark.y, landmark.z]] for idx, landmark in enumerate(landmark_list.landmark)] 85 | 86 | def __del__(self): 87 | if self.stream is not None: 88 | del self.stream 89 | -------------------------------------------------------------------------------- /src/cgt_mediapipe/cgt_mp_core/mp_face_detector.py: -------------------------------------------------------------------------------- 1 | import mediapipe as mp 2 | from .mp_detector_node import DetectorNode 3 | from typing import Mapping, Tuple 4 | from mediapipe.python.solutions import face_mesh_connections 5 | from mediapipe.python.solutions.drawing_utils import DrawingSpec 6 | 7 | 8 | class FaceDetector(DetectorNode): 9 | def __init__(self, stream, refine_face_landmarks: bool = False, min_detection_confidence: float = 0.7): 10 | DetectorNode.__init__(self, stream) 11 | self.solution = mp.solutions.face_mesh 12 | self.refine_face_landmarks = refine_face_landmarks 13 | self.min_detection_confidence = min_detection_confidence 14 | 15 | def update(self, data, frame): 16 | with self.solution.FaceMesh( 17 | max_num_faces=1, 18 | static_image_mode=False, 19 | refine_landmarks=self.refine_face_landmarks, 20 | min_detection_confidence=self.min_detection_confidence) as mp_lib: 21 | return self.exec_detection(mp_lib), frame 22 | 23 | def empty_data(self): 24 | return [[[]]] 25 | 26 | def detected_data(self, mp_res): 27 | return [self.cvt2landmark_array(landmark) for landmark in mp_res.multi_face_landmarks] 28 | 29 | def contains_features(self, mp_res): 30 | if not mp_res.multi_face_landmarks: 31 | return False 32 | return True 33 | 34 | @staticmethod 35 | def get_custom_face_mesh_contours_style() -> Mapping[Tuple[int, int], DrawingSpec]: 36 | _THICKNESS_CONTOURS = 2 37 | 38 | _RED = (48, 48, 255) 39 | _WHITE = (224, 224, 224) 40 | 41 | _FACEMESH_CONTOURS_CONNECTION_STYLE = { 42 | face_mesh_connections.FACEMESH_LIPS: 43 | DrawingSpec(color=_WHITE, thickness=_THICKNESS_CONTOURS), 44 | face_mesh_connections.FACEMESH_LEFT_EYE: 45 | DrawingSpec(color=_WHITE, thickness=_THICKNESS_CONTOURS), 46 | face_mesh_connections.FACEMESH_LEFT_EYEBROW: 47 | DrawingSpec(color=_WHITE, thickness=_THICKNESS_CONTOURS), 48 | face_mesh_connections.FACEMESH_RIGHT_EYE: 49 | DrawingSpec(color=_WHITE, thickness=_THICKNESS_CONTOURS), 
50 | face_mesh_connections.FACEMESH_RIGHT_EYEBROW: 51 | DrawingSpec(color=_WHITE, thickness=_THICKNESS_CONTOURS), 52 | face_mesh_connections.FACEMESH_FACE_OVAL: 53 | DrawingSpec(color=_WHITE, thickness=_THICKNESS_CONTOURS), 54 | } 55 | 56 | face_mesh_contours_connection_style = {} 57 | for k, v in _FACEMESH_CONTOURS_CONNECTION_STYLE.items(): 58 | for connection in k: 59 | face_mesh_contours_connection_style[connection] = v 60 | return face_mesh_contours_connection_style 61 | 62 | def draw_result(self, s, mp_res, mp_drawings): 63 | """Draws the landmarks and the connections on the image.""" 64 | for face_landmarks in mp_res.multi_face_landmarks: 65 | self.drawing_utils.draw_landmarks( 66 | image=self.stream.frame, 67 | landmark_list=face_landmarks, 68 | connections=self.solution.FACEMESH_CONTOURS, 69 | connection_drawing_spec=self.get_custom_face_mesh_contours_style(), 70 | # connection_drawing_spec=self.drawing_style.get_default_face_mesh_contours_style(), 71 | landmark_drawing_spec=None) 72 | 73 | # image=self.stream.frame, 74 | # landmark_list=face_landmarks, 75 | # connections=self.solution.FACEMESH_IRISES, 76 | # landmark_drawing_spec=None, 77 | # connection_drawing_spec=self.drawing_style.get_default_face_mesh_iris_connections_style()) 78 | 79 | 80 | # region manual tests 81 | if __name__ == '__main__': 82 | from . import cv_stream 83 | from ...cgt_core.cgt_calculators_nodes import mp_calc_face_rot 84 | detector = FaceDetector(cv_stream.Stream(0)) 85 | calc = mp_calc_face_rot.FaceRotationCalculator() 86 | frame = 0 87 | for _ in range(50): 88 | frame += 1 89 | data, frame = detector.update(None, frame) 90 | data, frame = calc.update(data, frame) 91 | 92 | del detector 93 | # endregion 94 | -------------------------------------------------------------------------------- /src/cgt_mediapipe/cgt_mp_core/mp_hand_detector.py: -------------------------------------------------------------------------------- 1 | import mediapipe as mp 2 | from mediapipe.framework.formats import classification_pb2 3 | 4 | from .mp_detector_node import DetectorNode 5 | from . 
import cv_stream 6 | from ...cgt_core.cgt_utils import cgt_timers 7 | 8 | 9 | class HandDetector(DetectorNode): 10 | def __init__(self, stream, hand_model_complexity: int = 1, min_detection_confidence: float = .7): 11 | DetectorNode.__init__(self, stream) 12 | self.solution = mp.solutions.hands 13 | self.hand_model_complexity = hand_model_complexity 14 | self.min_detection_confidence = min_detection_confidence 15 | 16 | # https://google.github.io/mediapipe/solutions/hands#python-solution-api 17 | def update(self, data, frame): 18 | with self.solution.Hands( 19 | static_image_mode=True, 20 | max_num_hands=2, 21 | model_complexity=self.hand_model_complexity, 22 | min_detection_confidence=self.min_detection_confidence) as mp_lib: 23 | return self.exec_detection(mp_lib), frame 24 | 25 | @staticmethod 26 | def separate_hands(hand_data): 27 | left_hand = [data[0] for data in hand_data if data[1][1] is False] 28 | right_hand = [data[0] for data in hand_data if data[1][1] is True] 29 | return left_hand, right_hand 30 | 31 | @staticmethod 32 | def cvt_hand_orientation(orientation: classification_pb2): 33 | if not orientation: 34 | return None 35 | 36 | return [[idx, "Right" in str(o)] for idx, o in enumerate(orientation)] 37 | 38 | def empty_data(self): 39 | return [[], []] 40 | 41 | def detected_data(self, mp_res): 42 | data = [self.cvt2landmark_array(hand) for hand in mp_res.multi_hand_world_landmarks] 43 | left_hand_data, right_hand_data = self.separate_hands( 44 | list(zip(data, self.cvt_hand_orientation(mp_res.multi_handedness)))) 45 | return [left_hand_data, right_hand_data] 46 | 47 | def contains_features(self, mp_res): 48 | if not mp_res.multi_hand_landmarks and not mp_res.multi_handedness: 49 | return False 50 | return True 51 | 52 | def draw_result(self, s, mp_res, mp_drawings): 53 | for hand in mp_res.multi_hand_landmarks: 54 | mp_drawings.draw_landmarks(s.frame, hand, self.solution.HAND_CONNECTIONS) 55 | 56 | 57 | if __name__ == '__main__': 58 | import logging 59 | from ...cgt_core.cgt_calculators_nodes import mp_calc_hand_rot 60 | from ...cgt_core.cgt_patterns import cgt_nodes 61 | logging.getLogger().setLevel(logging.DEBUG) 62 | 63 | chain = cgt_nodes.NodeChain() 64 | 65 | # Get detector and premade chain 66 | detector = HandDetector(cv_stream.Stream(0)) 67 | calc = mp_calc_hand_rot.HandRotationCalculator() 68 | 69 | chain.append(detector) 70 | chain.append(calc) 71 | 72 | frame, data = 0, [] 73 | for _ in range(50): 74 | frame += 1 75 | data, frame = chain.update(data, frame) 76 | del detector 77 | # endregion 78 | -------------------------------------------------------------------------------- /src/cgt_mediapipe/cgt_mp_core/mp_holistic_detector.py: -------------------------------------------------------------------------------- 1 | import mediapipe as mp 2 | 3 | from . 
import cv_stream, mp_detector_node 4 | import ssl 5 | ssl._create_default_https_context = ssl._create_unverified_context 6 | 7 | 8 | class HolisticDetector(mp_detector_node.DetectorNode): 9 | def __init__(self, stream, model_complexity: int = 1, 10 | min_detection_confidence: float = .7, refine_face_landmarks: bool = False): 11 | 12 | self.solution = mp.solutions.holistic 13 | mp_detector_node.DetectorNode.__init__(self, stream) 14 | self.model_complexity = model_complexity 15 | self.min_detection_confidence = min_detection_confidence 16 | self.refine_face_landmarks = refine_face_landmarks 17 | 18 | # https://google.github.io/mediapipe/solutions/holistic#python-solution-api 19 | def update(self, data, frame): 20 | with self.solution.Holistic( 21 | refine_face_landmarks=self.refine_face_landmarks, 22 | model_complexity=self.model_complexity, 23 | min_detection_confidence=self.min_detection_confidence, 24 | static_image_mode=True, 25 | ) as mp_lib: 26 | return self.exec_detection(mp_lib), frame 27 | 28 | def empty_data(self): 29 | return [[[], []], [[[]]], []] 30 | 31 | def detected_data(self, mp_res): 32 | face, pose, l_hand, r_hand = [], [], [], [] 33 | if mp_res.pose_landmarks: 34 | pose = self.cvt2landmark_array(mp_res.pose_landmarks) 35 | if mp_res.face_landmarks: 36 | face = self.cvt2landmark_array(mp_res.face_landmarks) 37 | if mp_res.left_hand_landmarks: 38 | l_hand = [self.cvt2landmark_array(mp_res.left_hand_landmarks)] 39 | if mp_res.right_hand_landmarks: 40 | r_hand = [self.cvt2landmark_array(mp_res.right_hand_landmarks)] 41 | # TODO: recheck every update, mp hands are flipped while detecting holistic. 42 | return [[r_hand, l_hand], [face], pose] 43 | 44 | def contains_features(self, mp_res): 45 | if not mp_res.pose_landmarks: 46 | return False 47 | return True 48 | 49 | def draw_result(self, s, mp_res, mp_drawings): 50 | mp_drawings.draw_landmarks( 51 | s.frame, 52 | mp_res.face_landmarks, 53 | self.solution.FACEMESH_CONTOURS, 54 | landmark_drawing_spec=None, 55 | connection_drawing_spec=self.drawing_style 56 | .get_default_face_mesh_contours_style()) 57 | mp_drawings.draw_landmarks( 58 | s.frame, 59 | mp_res.pose_landmarks, 60 | self.solution.POSE_CONNECTIONS, 61 | landmark_drawing_spec=self.drawing_style 62 | .get_default_pose_landmarks_style()) 63 | mp_drawings.draw_landmarks( 64 | s.frame, mp_res.left_hand_landmarks, self.solution.HAND_CONNECTIONS) 65 | mp_drawings.draw_landmarks( 66 | s.frame, mp_res.right_hand_landmarks, self.solution.HAND_CONNECTIONS) 67 | 68 | 69 | if __name__ == '__main__': 70 | detection_type = "image" 71 | detector = HolisticDetector(cv_stream.Stream(0)) 72 | 73 | frame = 0 74 | for _ in range(15): 75 | frame += 1 76 | detector.update(None, frame) 77 | 78 | del detector 79 | # endregion 80 | -------------------------------------------------------------------------------- /src/cgt_mediapipe/cgt_mp_core/mp_pose_detector.py: -------------------------------------------------------------------------------- 1 | import mediapipe as mp 2 | 3 | from . 
import cv_stream, mp_detector_node 4 | 5 | 6 | class PoseDetector(mp_detector_node.DetectorNode): 7 | def __init__(self, stream, pose_model_complexity: int = 1, min_detection_confidence: float = 0.7): 8 | mp_detector_node.DetectorNode.__init__(self, stream) 9 | self.pose_model_complexity = pose_model_complexity 10 | self.min_detection_confidence = min_detection_confidence 11 | self.solution = mp.solutions.pose 12 | 13 | # https://google.github.io/mediapipe/solutions/pose#python-solution-api 14 | def update(self, data, frame): 15 | # BlazePose GHUM 3D 16 | with self.solution.Pose( 17 | static_image_mode=True, 18 | model_complexity=self.pose_model_complexity, 19 | min_detection_confidence=self.min_detection_confidence) as mp_lib: 20 | return self.exec_detection(mp_lib), frame 21 | 22 | def detected_data(self, mp_res): 23 | return self.cvt2landmark_array(mp_res.pose_world_landmarks) 24 | 25 | def empty_data(self): 26 | return [] 27 | 28 | def contains_features(self, mp_res): 29 | if not mp_res.pose_world_landmarks: 30 | return False 31 | return True 32 | 33 | def draw_result(self, s, mp_res, mp_drawings): 34 | mp_drawings.draw_landmarks( 35 | s.frame, 36 | mp_res.pose_landmarks, 37 | self.solution.POSE_CONNECTIONS, 38 | landmark_drawing_spec=self.drawing_style.get_default_pose_landmarks_style()) 39 | 40 | 41 | # region manual tests 42 | if __name__ == '__main__': 43 | from . import cv_stream 44 | from ...cgt_core.cgt_calculators_nodes import mp_calc_pose_rot 45 | detector = PoseDetector(cv_stream.Stream(0)) 46 | calc = mp_calc_pose_rot.PoseRotationCalculator() 47 | frame = 0 48 | for _ in range(50): 49 | frame += 1 50 | data, frame = detector.update(None, frame) 51 | data, frame = calc.update(data, frame) 52 | print(data) 53 | 54 | del detector 55 | # endregion 56 | -------------------------------------------------------------------------------- /src/cgt_mediapipe/cgt_mp_core/test.py: -------------------------------------------------------------------------------- 1 | import mediapipe as mp 2 | import mediapipe_rotations as mpr 3 | import cv2 4 | 5 | mp_drawing = mp.solutions.drawing_utils 6 | mp_drawing_styles = mp.solutions.drawing_styles 7 | mp_holistic = mp.solutions.holistic 8 | 9 | 10 | def cvt2array(landmark_list): 11 | """ converts landmark list to list. """ 12 | return [[landmark.x, landmark.y, landmark.z] for landmark in landmark_list.landmark] 13 | 14 | 15 | cap = cv2.VideoCapture(0) 16 | with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic: 17 | while cap.isOpened(): 18 | success, image = cap.read() 19 | if not success: 20 | print("Ignoring empty camera frame.") 21 | # If loading a video, use 'break' instead of 'continue'. 22 | continue 23 | 24 | # To improve performance, optionally mark the image as not writeable to 25 | # pass by reference. 
26 | image.flags.writeable = False 27 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 28 | results = holistic.process(image) 29 | 30 | # Get Detection Data 31 | pose, face, l_hand, r_hand = [], [], [], [] 32 | 33 | if results.pose_landmarks: 34 | pose = cvt2array(results.pose_landmarks) 35 | pose_rotation_quaternion = mpr.pose(pose) 36 | 37 | if results.face_landmarks: 38 | face = cvt2array(results.face_landmarks) 39 | 40 | if results.left_hand_landmarks and results.right_hand_landmarks: 41 | l_hand = cvt2array(results.left_hand_landmarks) 42 | r_hand = cvt2array(results.right_hand_landmarks) 43 | elif results.left_hand_landmarks: 44 | l_hand = cvt2array(results.left_hand_landmarks) 45 | elif results.right_hand_landmarks: 46 | r_hand = cvt2array(results.right_hand_landmarks) 47 | 48 | # Calculate rotations 49 | l_hand_rotation_quaternion = mpr.hand(l_hand) 50 | r_hand_rotation_quaternion = mpr.hand(r_hand) 51 | hands_rotation_quaternion = mpr.hands([l_hand, r_hand]) 52 | face_rotation_quaternion = mpr.face(face) 53 | holistic_rotation_quaternion = mpr.holistic(pose, face, [l_hand, r_hand]) 54 | 55 | # Draw landmark annotation on the image. 56 | image.flags.writeable = True 57 | image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) 58 | mp_drawing.draw_landmarks( 59 | image, 60 | results.face_landmarks, 61 | mp_holistic.FACEMESH_CONTOURS, 62 | landmark_drawing_spec=None, 63 | connection_drawing_spec=mp_drawing_styles 64 | .get_default_face_mesh_contours_style()) 65 | 66 | mp_drawing.draw_landmarks( 67 | image, 68 | results.pose_landmarks, 69 | mp_holistic.POSE_CONNECTIONS, 70 | landmark_drawing_spec=mp_drawing_styles 71 | .get_default_pose_landmarks_style()) 72 | 73 | # Flip the image horizontally for a selfie-view display. 74 | cv2.imshow('MediaPipe Holistic', cv2.flip(image, 1)) 75 | if cv2.waitKey(5) & 0xFF == 27: 76 | break 77 | cap.release() 78 | -------------------------------------------------------------------------------- /src/cgt_mediapipe/cgt_mp_interface.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | from . 
import cgt_dependencies 4 | from ..cgt_core.cgt_interface import cgt_core_panel 5 | 6 | 7 | class CGT_PT_MP_Detection(cgt_core_panel.DefaultPanel, bpy.types.Panel): 8 | bl_label = "Mediapipe" 9 | bl_parent_id = "UI_PT_CGT_Panel" 10 | bl_idname="UI_PT_CGT_Detection" 11 | bl_options = {'DEFAULT_CLOSED'} 12 | 13 | @classmethod 14 | def poll(cls, context): 15 | return context.mode in {'OBJECT', 'POSE'} and all(cgt_dependencies.dependencies_installed) 16 | 17 | def movie_panel(self, user): 18 | layout = self.layout 19 | layout.row().prop(user, "mov_data_path") 20 | layout.row().prop(user, "key_frame_step") 21 | layout.row().prop(user, "enum_detection_type") 22 | if user.modal_active: 23 | layout.row().operator("wm.cgt_feature_detection_operator", text="Stop Detection", icon='CANCEL') 24 | else: 25 | layout.row().operator("wm.cgt_feature_detection_operator", text="Detect Clip", icon='IMPORT') 26 | 27 | def webcam_panel(self, user): 28 | layout = self.layout 29 | layout.row().prop(user, "webcam_input_device") 30 | layout.row().prop(user, "key_frame_step") 31 | layout.row().prop(user, "enum_detection_type") 32 | if user.modal_active: 33 | layout.row().operator("wm.cgt_feature_detection_operator", text="Stop Detection", icon='RADIOBUT_ON') 34 | else: 35 | layout.row().operator("wm.cgt_feature_detection_operator", text="Start Detection", icon='RADIOBUT_OFF') 36 | 37 | def draw(self, context): 38 | user = context.scene.cgtinker_mediapipe # noqa 39 | layout = self.layout 40 | layout.label(text='Detect') 41 | layout.row().prop(user, "detection_input_type") 42 | 43 | if user.detection_input_type == "movie": 44 | self.movie_panel(user) 45 | else: 46 | self.webcam_panel(user) 47 | 48 | 49 | class CGT_PT_MP_DetectorProperties(cgt_core_panel.DefaultPanel, bpy.types.Panel): 50 | bl_label = "Advanced" 51 | bl_parent_id = "UI_PT_CGT_Detection" 52 | bl_options = {'DEFAULT_CLOSED'} 53 | 54 | def draw(self, context): 55 | user = context.scene.cgtinker_mediapipe # noqa 56 | layout = self.layout 57 | 58 | if user.enum_detection_type == 'HAND': 59 | layout.row().prop(user, "hand_model_complexity") 60 | elif user.enum_detection_type == 'FACE': 61 | # layout.row().prop(user, "refine_face_landmarks") 62 | pass 63 | elif user.enum_detection_type == 'POSE': 64 | layout.row().prop(user, "pose_model_complexity") 65 | elif user.enum_detection_type == 'HOLISTIC': 66 | layout.row().prop(user, "holistic_model_complexity") 67 | 68 | layout.row().prop(user, "min_detection_confidence", slider=True) 69 | 70 | 71 | class CGT_PT_MP_Warning(cgt_core_panel.DefaultPanel, bpy.types.Panel): 72 | bl_label = "Mediapipe" 73 | bl_parent_id = "UI_PT_CGT_Panel" 74 | bl_options = {'DEFAULT_CLOSED'} 75 | bl_idname="UI_PT_CGT_Detection_Warning" 76 | 77 | @classmethod 78 | def poll(cls, context): 79 | return not all(cgt_dependencies.dependencies_installed) 80 | 81 | def draw(self, context): 82 | layout = self.layout 83 | 84 | lines = [f"Please install the missing dependencies for BlendArMocap.", 85 | f"1. Open the preferences (Edit > Preferences > Add-ons).", 86 | f"2. Search for the BlendArMocap add-on.", 87 | f"3. Open the details section of the add-on.", 88 | f"4. 
Click on the 'install dependencies' button."]
89 | 
90 |     for line in lines:
91 |         layout.label(text=line)
92 | 
93 | 
94 | classes = [
95 |     CGT_PT_MP_Warning,
96 |     CGT_PT_MP_Detection,
97 |     CGT_PT_MP_DetectorProperties
98 | ]
99 | 
100 | 
101 | def register():
102 |     for cls in classes:
103 |         bpy.utils.register_class(cls)
104 | 
105 | 
106 | def unregister():
107 |     for cls in reversed(classes):
108 |         bpy.utils.unregister_class(cls)
109 | 
--------------------------------------------------------------------------------
/src/cgt_mediapipe/cgt_mp_preferences.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import bpy
3 | 
4 | from ..cgt_core.cgt_interface import cgt_core_panel
5 | from ..cgt_mediapipe import cgt_dependencies
6 | from ..cgt_core.cgt_utils import cgt_user_prefs
7 | 
8 | 
9 | class PREFERENCES_OT_CGT_install_dependencies_button(bpy.types.Operator):
10 |     bl_idname = "button.cgt_install_dependencies"
11 |     bl_label = "Install dependencies"
12 |     bl_description = ("Downloads and installs the required python packages for this add-on. "
13 |                       "Internet connection is required. Blender may have to be started with "
14 |                       "elevated permissions in order to install the package")
15 |     bl_options = {"REGISTER", "INTERNAL"}
16 | 
17 |     @classmethod
18 |     def poll(cls, context):
19 |         return not all(cgt_dependencies.dependencies_installed)
20 | 
21 |     def execute(self, context):
22 |         try:
23 |             cgt_dependencies.ensure_pip(self)
24 |             for i, dependency in enumerate(cgt_dependencies.required_dependencies):
25 |                 if cgt_dependencies.dependencies_installed[i]:
26 |                     continue
27 |                 user = context.scene.cgtinker_mediapipe  # noqa
28 |                 success = cgt_dependencies.install_dependency(self, dependency, user.local_user)
29 |                 cgt_dependencies.dependencies_installed[i] = success
30 | 
31 |         except (subprocess.CalledProcessError, ImportError) as err:
32 |             self.report({"ERROR"}, str(err))
33 |             return {"CANCELLED"}
34 |         return {"FINISHED"}
35 | 
36 | 
37 | class PREFERENCES_OT_CGT_uninstall_dependencies_button(bpy.types.Operator):
38 |     bl_idname = "button.cgt_uninstall_dependencies"
39 |     bl_label = "Uninstall dependencies and shutdown"
40 |     bl_description = "Removes installed dependencies from site-packages " \
41 |                      "and deletes them on start up."
42 |     bl_options = {"REGISTER", "INTERNAL"}
43 | 
44 |     @classmethod
45 |     def poll(cls, context):
46 |         return any(cgt_dependencies.dependencies_installed)
47 | 
48 |     def execute(self, context):
49 |         for i, dependency in enumerate(cgt_dependencies.required_dependencies):
50 |             if not cgt_dependencies.is_installed(dependency):
51 |                 continue
52 |             success = cgt_dependencies.uninstall_dependency(self, dependency)
53 |             cgt_dependencies.dependencies_installed[i] = success
54 | 
55 |         import time
56 |         time.sleep(1)
57 |         bpy.ops.wm.quit_blender()
58 |         return {"FINISHED"}
59 | 
60 | 
61 | class PREFERENCES_OT_CGT_save_preferences(bpy.types.Operator):
62 |     bl_idname = "button.cgt_save_preferences"
63 |     bl_label = "Save Preferences"
64 |     bl_description = "Save BlendArMocap's User Preferences"
65 |     bl_options = {"REGISTER", "INTERNAL"}
66 | 
67 |     def execute(self, context):
68 |         from .cgt_mp_registration import MP_ATTRS
69 |         user = bpy.context.scene.cgtinker_mediapipe  # noqa
70 |         cgt_user_prefs.set_prefs(**{attr: getattr(user, attr, default) for attr, default in MP_ATTRS.items()})
71 |         self.report({'INFO'}, "Saved user preferences.")
72 |         return {"FINISHED"}
73 | 
74 | 
75 | def draw(self, context):
76 | 
77 |     """ Dependency layout for user preferences.
""" 78 | layout = self.layout 79 | user = context.scene.cgtinker_mediapipe # noqa 80 | 81 | # dependency box 82 | dependency_box = layout.box() 83 | dependency_box.label(text="Mediapipe Dependencies") 84 | 85 | def draw_dependency(dependency, dependency_box): 86 | """ Draws package name, version, path and if a dependency has been installed. """ 87 | _d_box = dependency_box.box() 88 | box_split = _d_box.split() 89 | cols = [box_split.column(align=False) for _ in range(4)] 90 | cols[3].label(text=f"{cgt_dependencies.is_installed(dependency)}") 91 | if not cgt_dependencies.is_installed(dependency): 92 | cols[0].label(text=f"{dependency.name}") 93 | cols[1].label(text=f"NaN") 94 | cols[2].label(text=f"NaN") 95 | 96 | else: 97 | version, path = cgt_dependencies.get_package_info(dependency) 98 | cols[0].label(text=f"{dependency.name}") 99 | cols[1].label(text=f"{version}") 100 | cols[2].label(text=f"{path}") 101 | 102 | # pip headers 103 | pip_headers = dependency_box.split() 104 | for name in ["Installer", "Version", "Path", "Installed"]: 105 | col = pip_headers.column() 106 | col.label(text=name) 107 | 108 | # draw dependencies individually 109 | draw_dependency(cgt_dependencies.Dependency("pip", "pip", "pip", None), dependency_box) 110 | dependency_box.row().separator() 111 | 112 | dependency_header = dependency_box.row() 113 | dependency_header.label(text="Dependencies") 114 | 115 | # dependency headers 116 | headers = dependency_box.split() 117 | for name in ["Module", "Version", "Path", "Installed"]: 118 | col = headers.column() 119 | col.label(text=name) 120 | for m_dependency in cgt_dependencies.required_dependencies: 121 | draw_dependency(m_dependency, dependency_box) 122 | 123 | # user settings 124 | dependency_box.row().separator() 125 | settings_box = layout.box() 126 | if all(cgt_dependencies.dependencies_installed): 127 | # cam settings 128 | settings_box.label(text="Camera Settings") 129 | settings_box.row().prop(user, "enum_stream_dim") 130 | settings_box.row().prop(user, "enum_stream_type") 131 | settings_box.row().separator() 132 | settings_box.label(text="Dependency Settings") 133 | else: 134 | # install dependencies button 135 | settings_box.label(text="Dependency Settings") 136 | settings_box.row().label(text="Make sure to have elevated privileges.") 137 | settings_box.row().operator(PREFERENCES_OT_CGT_install_dependencies_button.bl_idname, icon="CONSOLE") 138 | deps_col = settings_box.row() 139 | deps_col.row(align=True).prop(user, "local_user") 140 | deps_col.row().operator(PREFERENCES_OT_CGT_save_preferences.bl_idname) 141 | 142 | 143 | classes = [ 144 | PREFERENCES_OT_CGT_save_preferences, 145 | PREFERENCES_OT_CGT_install_dependencies_button, 146 | PREFERENCES_OT_CGT_uninstall_dependencies_button 147 | ] 148 | 149 | 150 | def register(): 151 | for cls in classes: 152 | bpy.utils.register_class(cls) 153 | cgt_core_panel.addon_prefs.add(draw) 154 | 155 | 156 | def unregister(): 157 | for cls in reversed(classes): 158 | bpy.utils.unregister_class(cls) 159 | -------------------------------------------------------------------------------- /src/cgt_mediapipe/cgt_mp_properties.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | 4 | class MP_PG_Properties(bpy.types.PropertyGroup): 5 | # region mediapipe props 6 | enum_detection_type: bpy.props.EnumProperty( 7 | name="Target", 8 | description="Select detection type tracking.", 9 | items=( 10 | ("HAND", "Hands", ""), 11 | ("FACE", "Face", ""), 12 | ("POSE", "Pose", ""), 13 | 
("HOLISTIC", "Holistic", ""), 14 | ) 15 | ) 16 | 17 | refine_face_landmarks: bpy.props.BoolProperty( 18 | name="Refine Face Landmarks", default=False, 19 | description="Whether to further refine the landmark coordinates " 20 | "around the eyes and lips, and output additional landmarks " 21 | "around the irises by applying the Attention Mesh Model. " 22 | "Default to false.") 23 | 24 | # downloading during session seem inappropriate (therefor max 1) 25 | holistic_model_complexity: bpy.props.IntProperty( 26 | name="Model Complexity", default=1, min=0, max=1, 27 | description="Complexity of the pose landmark model: " 28 | "0, 1 or 1. Landmark accuracy as well as inference " 29 | "latency generally go up with the model complexity. " 30 | "Default to 1.") 31 | 32 | # downloading during session seem inappropriate (therefor max 1) 33 | pose_model_complexity: bpy.props.IntProperty( 34 | name="Model Complexity", default=1, min=0, max=1, 35 | description="Complexity of the pose landmark model: " 36 | "0, 1 or 1. Landmark accuracy as well as inference " 37 | "latency generally go up with the model complexity. " 38 | "Default to 1.") 39 | 40 | hand_model_complexity: bpy.props.IntProperty( 41 | name="Model Complexity", default=1, min=0, max=1, 42 | description="Complexity of the hand landmark model: " 43 | "0 or 1. Landmark accuracy as well as inference " 44 | "latency generally go up with the model complexity. " 45 | "Default to 1.") 46 | 47 | min_detection_confidence: bpy.props.FloatProperty( 48 | name="Min Tracking Confidence", default=0.5, min=0.0, max=1.0, 49 | description="Minimum confidence value ([0.0, 1.0]) from the detection " 50 | "model for the detection to be considered successful. Default to 0.5.") 51 | # endregion 52 | 53 | # region stream props 54 | mov_data_path: bpy.props.StringProperty( 55 | name="File Path", 56 | description="File path to .mov file.", 57 | default='*.mov;*mp4', 58 | options={'HIDDEN'}, 59 | maxlen=1024, 60 | subtype='FILE_PATH' 61 | ) 62 | 63 | enum_stream_type: bpy.props.EnumProperty( 64 | name="Stream Backend", 65 | description="Sets Stream backend.", 66 | items=( 67 | ("0", "default", ""), 68 | ("1", "capdshow", "") 69 | ) 70 | ) 71 | 72 | enum_stream_dim: bpy.props.EnumProperty( 73 | name="Stream Dimensions", 74 | description="Dimensions for video Stream input.", 75 | items=( 76 | ("sd", "720x480 - recommended", ""), 77 | ("hd", "1240x720 - experimental", ""), 78 | ("fhd", "1920x1080 - experimental", ""), 79 | ) 80 | ) 81 | 82 | detection_input_type: bpy.props.EnumProperty( 83 | name="Type", 84 | description="Select input type.", 85 | items=( 86 | ("movie", "Movie", ""), 87 | ("stream", "Webcam", ""), 88 | ) 89 | ) 90 | 91 | webcam_input_device: bpy.props.IntProperty( 92 | name="Webcam Device Slot", 93 | description="Select Webcam device.", 94 | min=0, 95 | max=4, 96 | default=0 97 | ) 98 | 99 | key_frame_step: bpy.props.IntProperty( 100 | name="Key Step", 101 | description="Select keyframe step rate.", 102 | min=1, 103 | max=12, 104 | default=4 105 | ) 106 | # endregion 107 | 108 | modal_active: bpy.props.BoolProperty( 109 | name="modal_active", 110 | description="Check if operator is running", 111 | default=False 112 | ) 113 | 114 | local_user: bpy.props.BoolProperty( 115 | name="Local user", 116 | description="Install to local user and not to blenders python site packages.", 117 | default=False, 118 | ) 119 | 120 | 121 | classes = [ 122 | MP_PG_Properties, 123 | ] 124 | 125 | 126 | def register(): 127 | for cls in classes: 128 | bpy.utils.register_class(cls) 
129 |     bpy.types.Scene.cgtinker_mediapipe = bpy.props.PointerProperty(type=MP_PG_Properties)
130 | 
131 | 
132 | def unregister():
133 |     for cls in reversed(classes):
134 |         bpy.utils.unregister_class(cls)
135 |     del bpy.types.Scene.cgtinker_mediapipe
136 | 
--------------------------------------------------------------------------------
/src/cgt_mediapipe/cgt_mp_registration.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import bpy
3 | from ..cgt_core.cgt_utils import cgt_user_prefs
4 | from . import cgt_mp_interface, cgt_mp_preferences, cgt_mp_detection_operator, cgt_mp_properties
5 | 
6 | 
7 | classes = [
8 |     cgt_mp_properties,
9 |     cgt_mp_detection_operator,
10 |     cgt_mp_interface,
11 |     cgt_mp_preferences
12 | ]
13 | 
14 | MP_ATTRS = {
15 |     "local_user": False,
16 |     "key_frame_step": 4,
17 |     "webcam_input_device": 0,
18 |     "detection_input_type": "movie",
19 |     "enum_detection_type": "HAND",
20 |     "enum_stream_dim": "sd",
21 |     "enum_stream_type": "0",
22 |     "min_detection_confidence": 0.5,
23 |     "hand_model_complexity": 1,
24 |     "pose_model_complexity": 1,
25 |     "holistic_model_complexity": 1,
26 |     "refine_face_landmarks": False
27 | }
28 | 
29 | 
30 | @bpy.app.handlers.persistent
31 | def save_preferences(*args):
32 |     user = bpy.context.scene.cgtinker_mediapipe  # noqa
33 |     cgt_user_prefs.set_prefs(**{attr: getattr(user, attr, default) for attr, default in MP_ATTRS.items()})
34 | 
35 | 
36 | @bpy.app.handlers.persistent
37 | def load_preferences(*args):
38 |     stored_preferences = cgt_user_prefs.get_prefs(**MP_ATTRS)
39 |     user = bpy.context.scene.cgtinker_mediapipe  # noqa
40 |     for property_name, value in stored_preferences.items():
41 |         if not hasattr(user, property_name):
42 |             logging.warning(f"{property_name} - not available."); continue
43 |         setattr(user, property_name, value)
44 | 
45 | 
46 | def register():
47 |     for cls in classes:
48 |         if cls is None:
49 |             continue
50 |         cls.register()
51 | 
52 |     bpy.app.handlers.save_pre.append(save_preferences)
53 |     bpy.app.handlers.load_post.append(load_preferences)
54 | 
55 | 
56 | def unregister():
57 |     for cls in reversed(classes):
58 |         if cls is None:
59 |             continue
60 |         cls.unregister()
61 | 
62 | 
--------------------------------------------------------------------------------
/src/cgt_registration.py:
--------------------------------------------------------------------------------
1 | # BlendArMocap is split into separate modules which may access cgt_core.
2 | # Every module has to be registered to be active.
3 | 
4 | from .cgt_core.cgt_interface import cgt_core_registration
5 | from .cgt_mediapipe import cgt_mp_registration
6 | from .cgt_transfer import cgt_transfer_registration
7 | # from .cgt_socket_ipc import cgt_ipc_registration
8 | from .cgt_freemocap import fm_registration
9 | 
10 | 
11 | modules = [
12 |     # cgt_ipc_registration,
13 |     cgt_core_registration,
14 |     cgt_mp_registration,
15 |     fm_registration,
16 |     cgt_transfer_registration,
17 | ]
18 | 
19 | 
20 | def register():
21 |     for module in modules:
22 |         module.register()
23 | 
24 | 
25 | def unregister():
26 |     for module in modules:
27 |         module.unregister()
28 | 
--------------------------------------------------------------------------------
/src/cgt_socket_ipc/__init__.py:
--------------------------------------------------------------------------------
1 | from .BlendPyNet.b3dnet.src.b3dnet.connection import CACHE
2 | from . import cgt_ipc_persistent_fns as fns
3 | 
4 | 
5 | init_fns = [
6 |     (fns.process_holisitic, fns.HOLI_FN_ID),
7 |     (fns.process_face, fns.FACE_FN_ID),
8 |     (fns.process_hand, fns.HAND_FN_ID),
9 |     (fns.process_pose, fns.POSE_FN_ID)
10 | ]
11 | 
12 | for _fn, _id in init_fns:
13 |     CACHE[_id] = _fn
14 | 
--------------------------------------------------------------------------------
/src/cgt_socket_ipc/cgt_ipc_persistent_fns.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from ..cgt_core.cgt_core_chains import (
3 |     FaceNodeChain, PoseNodeChain, HandNodeChain, HolisticNodeChainGroup
4 | )
5 | from .BlendPyNet.b3dnet.src.b3dnet.connection import CACHE
6 | 
7 | 
8 | # dict slots for mediapipe result processing
9 | POSE_CHAIN_ID = "_CGT_LOCAL_CHAIN_POSE"
10 | FACE_CHAIN_ID = "_CGT_LOCAL_CHAIN_FACE"
11 | HOLI_CHAIN_ID = "_CGT_LOCAL_CHAIN_HOLISTIC"
12 | HAND_CHAIN_ID = "_CGT_LOCAL_CHAIN_HAND"
13 | 
14 | # persistent fns which only get removed when
15 | # CLEAR_CACHE is actively called
16 | POSE_FN_ID = "PERSISTENT_FN_POSE"
17 | FACE_FN_ID = "PERSISTENT_FN_FACE"
18 | HAND_FN_ID = "PERSISTENT_FN_HAND"
19 | HOLI_FN_ID = "PERSISTENT_FN_HOLISTIC"
20 | 
21 | 
22 | def process_holisitic(data: Optional[list], frame: int):
23 |     # Input -> data: List[List[pose], List[face], List[l_hand], List[r_hand]], int
24 |     if not data:
25 |         return False
26 | 
27 |     if CACHE.get(HOLI_CHAIN_ID) is None:
28 |         CACHE[HOLI_CHAIN_ID] = HolisticNodeChainGroup()
29 | 
30 |     data = [[[i, p] for i, p in enumerate(model)] for model in data]
31 |     pose, face, lhand, rhand = data
32 |     hands = [[lhand], [rhand]]
33 |     CACHE[HOLI_CHAIN_ID].update([hands, [face], pose], frame)
34 |     return True
35 | 
36 | 
37 | def process_pose(data: Optional[list], frame: int):
38 |     # Input: List[List[float, float, float]], int
39 |     if not data:
40 |         return False
41 | 
42 |     if CACHE.get(POSE_CHAIN_ID) is None:
43 |         CACHE[POSE_CHAIN_ID] = PoseNodeChain()
44 | 
45 |     data = [[i, p] for i, p in enumerate(data)]
46 |     CACHE[POSE_CHAIN_ID].update(data, frame)
47 |     return True
48 | 
49 | 
50 | def process_hand(data: Optional[list], frame: int):
51 |     # Input: List[List[float, float, float], List[float, float, float]], int
52 |     if not data:
53 |         return False
54 | 
55 |     if CACHE.get(HAND_CHAIN_ID) is None:
56 |         CACHE[HAND_CHAIN_ID] = HandNodeChain()
57 | 
58 |     data = [[[[i, p] for i, p in enumerate(hand)]] for hand in data]
59 |     CACHE[HAND_CHAIN_ID].update(data, frame)
60 |     return True
61 | 
62 | 
63 | def process_face(data: Optional[list], frame: int):
64 |     # Input: List[List[float, float, float]], int
65 |     if not data:
66 |         return False
67 | 
68 |     if CACHE.get(FACE_CHAIN_ID) is None:
69 |         CACHE[FACE_CHAIN_ID] = FaceNodeChain()
70 | 
71 |     data = [[i, p] for i, p in enumerate(data)]
72 |     CACHE[FACE_CHAIN_ID].update([data], frame)
73 |     return True
74 | 
--------------------------------------------------------------------------------
/src/cgt_socket_ipc/cgt_ipc_registration.py:
--------------------------------------------------------------------------------
1 | from .
import BlendPyNet 2 | import addon_utils # type: ignore 3 | 4 | 5 | def register(): 6 | if addon_utils.check('BlendPySock') == (False, False): 7 | BlendPyNet.register() 8 | 9 | 10 | def unregister(): 11 | if addon_utils.check('BlendPySock') == (False, False): 12 | BlendPyNet.unregister() 13 | -------------------------------------------------------------------------------- /src/cgt_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_tests/__init__.py -------------------------------------------------------------------------------- /src/cgt_tests/test_cgt_json.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from src.cgt_core.cgt_utils.cgt_json import * 3 | 4 | 5 | class TestJson(unittest.TestCase): 6 | def test_json_dict(self): 7 | path = "data/json_dict.json" 8 | data = JsonData(path=path) 9 | data.save(path) 10 | 11 | def test_json_list(self): 12 | path = "data/json_list.json" 13 | data = JsonData(path=path) 14 | data.save(path) 15 | 16 | 17 | if __name__ == '__main__': 18 | unittest.main() 19 | -------------------------------------------------------------------------------- /src/cgt_transfer/README.md: -------------------------------------------------------------------------------- 1 | # Transfer 2 | 3 | 4 | ### Concept 5 | **Object Properties**
6 | An object stores mapping instructions as object properties.
7 | Based on these properties, a driver object gets generated.
8 | The object may also have constraints, which get applied to the target to copy values from the driver.
9 |
10 | ````
11 | mapping_object: object with instructions and constraints
12 | driver_object: generated driver based on instructions
13 | target_object: copies values from driver_object via constraints
14 | ````
15 |
16 | ### Setup Helper
17 | There are setup helper scripts to generate mapping configs with less effort.
18 | Just copy the raw file into Blender's scripting workspace and modify it (see the sketch below).
19 |
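A minimal usage sketch, assuming the hand helper `setup_helper/tf_hand_mapping_helper.py` (reproduced further below) has been pasted into the scripting workspace; the rig name is an assumption about your scene:

````
RIGNAME = 'rig'            # assumed name of the Rigify armature in your scene
refs["wrist"] = "hand_ik"  # optionally remap a driver empty to another bone
main()                     # assigns cgt_props and constraints for both hands
````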
20 | ### Specific Info
21 | **Transfer Management**
22 | Mapping takes place here: at first, information about the passed object properties gets gathered.
23 | Based on the instructions, driver objects get populated.
24 | Afterwards, constraints based on the input object's constraints get added to the target object to copy values from the driver (a rough sketch follows below).
25 |
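A rough sketch of that flow; `get_properties_from_object` exists in `core_transfer.tf_get_object_properties`, while the other two helpers are placeholders for illustration, not the actual API:

````
for obj in mapping_objects:                   # objects holding mapping instructions
    props = get_properties_from_object(obj)   # 1. gather the object properties
    driver = generate_driver(props)           # 2. placeholder: populate a driver object
    constrain_target(obj, driver)             # 3. placeholder: copy values via constraints
````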
26 | **Object Properties**
27 | Mapping instructions get stored on objects, for example as shown below.
28 | Check `cgt_tf_object_properties` for more information about the properties.
29 | As the object properties are stored in the active scene, they don't have to be saved separately.
30 |
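For example, a mapping object pointing at a Rigify finger bone stores instructions like these (object and bone names are illustrative; the property paths match the setup helpers further below):

````
import bpy

rig = bpy.data.objects['rig']                      # illustrative armature name
ob = bpy.data.objects['cgt_index_finger_pip.L']    # illustrative mapping object

ob.cgt_props.target.obj_type = 'ARMATURE'          # drive an armature target
ob.cgt_props.target.target = rig
ob.cgt_props.target.armature_type = 'BONE'
ob.cgt_props.target.target_bone = 'f_index.02.L'   # bone receiving the values
ob.cgt_props.driver_type = 'REMAP'                 # remap values onto the driver
````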
31 | **Reflect Properties**
32 | All instructions from the mapping object, and also the object's drivers, can be gathered to generate drivers and populate constraints (see the call below).
33 | Sadly, support for runtime reflection of registered classes stops at `Blender 3.0+`. Check `cgt_tf_object_properties` and `core_transfer.tf_reflect_object_properties`.
34 |
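Gathering boils down to copying the annotated property group into a plain runtime object, as done in `tf_get_object_properties.get_properties_from_object` (import paths assumed for use outside the addon):

````
from src.cgt_transfer import cgt_tf_object_properties
from src.cgt_transfer.core_transfer import tf_reflect_object_properties

# copies obj.cgt_props into a detached RuntimeClass instance,
# so reading values can't modify the scene by accident
props = tf_reflect_object_properties.get_object_attributes(
    cgt_tf_object_properties.TransferPropertiesProto,  # annotated template
    obj.cgt_props,                                     # source property group
    tf_reflect_object_properties.RuntimeClass()
)
````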
35 | **Driver Generation**
36 | Based on the properties, new driver objects get generated in `tf_set_object_properties`.
37 | To understand driver setup in Blender, check `cgt_core.cgt_bpy.cgt_drivers`.
38 |
39 | **Saving and Loading properties**
40 | The object properties and object constraints can be stored in and loaded from .json files, check the `data folder`. 41 | -------------------------------------------------------------------------------- /src/cgt_transfer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_transfer/__init__.py -------------------------------------------------------------------------------- /src/cgt_transfer/cgt_tf_3dview_panel.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from pathlib import Path 3 | from bpy.types import Panel 4 | from ..cgt_core.cgt_interface import cgt_core_panel 5 | from bpy.props import PointerProperty 6 | 7 | 8 | class CgtRigifyTransferProperties(bpy.types.PropertyGroup): 9 | advanced_features: bpy.props.BoolProperty(default=True) 10 | save_object_properties_bool: bpy.props.BoolProperty( 11 | default=False, 12 | description="Save data for transferring animation data (located in the object constraint properties) toggle." 13 | ) 14 | delete_object_properties_bool: bpy.props.BoolProperty( 15 | default=False, 16 | description="Delete a configuration file toggle." 17 | ) 18 | save_object_properties_name: bpy.props.StringProperty( 19 | default="", 20 | description="Insert name for the new mocap transfer configuration file." 21 | ) 22 | 23 | def is_armature(self, object): 24 | if object.type == 'ARMATURE': 25 | return True 26 | return False 27 | 28 | selected_rig: bpy.props.PointerProperty( 29 | type=bpy.types.Object, 30 | description="Select an armature for animation transfer.", 31 | name="Armature", 32 | poll=is_armature) 33 | 34 | def json_files(self, context): 35 | path = Path(__file__).parent / 'data' 36 | 37 | files = [x for x in path.glob('**/*') if x.is_file()] 38 | if len(files) == 0: 39 | return [('None', 'None', "")] 40 | return [(str(x.name)[:-5], str(x.name)[:-5], "") for x in files] 41 | 42 | transfer_types: bpy.props.EnumProperty( 43 | name="Target Type", 44 | items=json_files 45 | ) 46 | 47 | def cgt_collection_poll(self, col): 48 | return col.name.startswith('cgt_') 49 | 50 | selected_driver_collection: bpy.props.PointerProperty( 51 | name="", 52 | type=bpy.types.Collection, 53 | description="Select a collection of Divers.", 54 | poll=cgt_collection_poll 55 | ) 56 | 57 | 58 | class PT_CGT_Main_Transfer(cgt_core_panel.DefaultPanel, Panel): 59 | bl_label = "Transfer" 60 | bl_parent_id = "UI_PT_CGT_Panel" 61 | bl_idname = "UI_PT_Transfer_Panel" 62 | bl_options = {'DEFAULT_CLOSED'} 63 | 64 | @classmethod 65 | def poll(cls, context): 66 | if context.mode in {'OBJECT', 'POSE'}: 67 | return True 68 | 69 | def draw(self, context): 70 | user = getattr(context.scene, "cgtinker_transfer") 71 | layout = self.layout 72 | 73 | row = layout.row(align=True) 74 | row.prop_search(data=user, property="selected_rig", search_data=bpy.data, 75 | search_property="objects", text="Armature", icon="ARMATURE_DATA") 76 | row.label(icon='BLANK1') 77 | 78 | row = layout.row(align=True) 79 | row.prop_search(data=user, property="selected_driver_collection", search_data=bpy.data, 80 | search_property="collections", text="Drivers") 81 | row.label(icon='BLANK1') 82 | 83 | row = layout.row(align=True) 84 | row.prop(user, "transfer_types", text="Transfer Type") 85 | 86 | if not user.advanced_features: 87 | row.label(icon='BLANK1') 88 | row = layout.row(align=True) 89 | row.use_property_decorate = False 90 
| row.operator("button.cgt_object_apply_properties", 91 | text="Transfer Animation", icon="DRIVER") 92 | return 93 | 94 | row.prop(user, "delete_object_properties_bool", text="", icon='TRASH') 95 | 96 | # flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=True, align=True) 97 | # col = flow.column(align=True) 98 | row = layout.row(align=True) 99 | col = row.column(align=True) 100 | 101 | if user.delete_object_properties_bool: 102 | row = col.row(align=True) 103 | row.use_property_decorate = False 104 | row.label(text="Deletion is permanent. Proceed?") 105 | row.operator("button.cgt_object_delete_properties", 106 | text="", icon='CHECKMARK') 107 | row.prop(user, "delete_object_properties_bool", 108 | text="", icon='CANCEL', invert_checkbox=True) 109 | col.separator() 110 | 111 | row = col.row(align=True) 112 | row.use_property_decorate = False 113 | sub = row.row(align=True) 114 | sub.operator("button.cgt_object_load_properties", 115 | text="Load", icon='FILE_TICK') 116 | sub.prop(user, "save_object_properties_bool", 117 | text="Save Config", icon='FILE_NEW') 118 | 119 | if user.save_object_properties_bool: 120 | row = col.row(align=True) 121 | row.use_property_decorate = False 122 | row.prop(user, "save_object_properties_name", text="") 123 | row.operator("button.cgt_object_save_properties", 124 | text="", icon='CHECKMARK') 125 | row.prop(user, "save_object_properties_bool", text="", 126 | toggle=True, icon='CANCEL', invert_checkbox=True) 127 | 128 | row = col.row(align=True) 129 | row.use_property_decorate = False 130 | row.operator("button.cgt_object_apply_properties", 131 | text="Transfer Animation", icon="DRIVER") 132 | 133 | 134 | class PT_CGT_Advanced_Transfer(cgt_core_panel.DefaultPanel, Panel): 135 | bl_label = "Advanced" 136 | bl_parent_id = "UI_PT_Transfer_Panel" 137 | bl_idname = "UI_PT_CGT_Transfer_Tools" 138 | 139 | def draw(self, context): 140 | user = getattr(context.scene, "cgtinker_transfer") 141 | layout = self.layout 142 | row = layout.row(align=True) 143 | row.prop(user, "advanced_features", 144 | text="Use Advanced Features", toggle=True) 145 | 146 | 147 | classes = [ 148 | PT_CGT_Main_Transfer, 149 | CgtRigifyTransferProperties, 150 | # PT_CGT_Advanced_Transfer, 151 | ] 152 | 153 | 154 | def register(): 155 | for cls in classes: 156 | bpy.utils.register_class(cls) 157 | bpy.types.Scene.cgtinker_transfer = PointerProperty( 158 | type=CgtRigifyTransferProperties) 159 | 160 | 161 | def unregister(): 162 | for cls in reversed(classes): 163 | bpy.utils.unregister_class(cls) 164 | del bpy.types.Scene.cgtinker_transfer 165 | -------------------------------------------------------------------------------- /src/cgt_transfer/cgt_tf_io_config.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | from pathlib import Path 3 | import shutil 4 | import logging 5 | 6 | 7 | class OT_CGT_Import_Transfer_Config(bpy.types.Operator): 8 | """ Imports transfer configuration file.""" 9 | bl_idname = "wm.import_rig_transfer_config" 10 | bl_label = "Import BlendArMocap Transfer Configuration" 11 | bl_options = {'REGISTER'} 12 | 13 | filename_ext = ".json" 14 | filter_glob: bpy.props.StringProperty(default="*.json;", options={'HIDDEN'}, ) 15 | filepath: bpy.props.StringProperty(maxlen=1024, subtype='FILE_PATH', default="*.json;", 16 | options={'HIDDEN', 'SKIP_SAVE'}) 17 | 18 | def draw(self, context): 19 | layout = self.layout 20 | 21 | def invoke(self, context, event): 22 | 
context.window_manager.fileselect_add(self) 23 | return {'RUNNING_MODAL'} 24 | 25 | def execute(self, context): 26 | def current_files(): 27 | path = Path(__file__).parent / 'data' 28 | files = [x for x in path.glob('**/*') if x.is_file()] 29 | if len(files) == 0: 30 | return ["None"] 31 | return [str(x.name) for x in files] 32 | 33 | from_path = Path(self.filepath) 34 | name = from_path.name 35 | 36 | if name in current_files(): 37 | self.report({'ERROR'}, "Overwriting is not supported. " 38 | "Consider to change the configuration filename. ") 39 | return {'CANCELLED'} 40 | 41 | to_path = Path(__file__).parent / "data" / name 42 | shutil.copy(str(from_path), str(to_path)) 43 | self.report({'INFO'}, f"Import Configuration from {self.filepath} to {str(to_path)}") 44 | return {'FINISHED'} 45 | 46 | 47 | class OT_CGT_Export_Transfer_Config(bpy.types.Operator): 48 | """ Export transfer configuration file. """ 49 | bl_idname = "wm.export_rig_transfer_config" 50 | bl_label = "Export BlendArMocap Transfer Configuration" 51 | bl_options = {'REGISTER'} 52 | 53 | filename_ext = ".json" 54 | filter_glob: bpy.props.StringProperty(default="*.json;", options={'HIDDEN'}, ) 55 | filepath: bpy.props.StringProperty(maxlen=1024, subtype='DIR_PATH', options={'HIDDEN', 'SKIP_SAVE'}) 56 | directory: bpy.props.StringProperty(maxlen=1024, subtype='DIR_PATH', options={'HIDDEN', 'SKIP_SAVE'}) 57 | 58 | def draw(self, context): 59 | layout = self.layout 60 | user = context.scene.cgtinker_transfer # noqa 61 | row = layout.row(align=True) 62 | row.prop(user, "transfer_types", text="Export Configuration") 63 | 64 | def invoke(self, context, event): 65 | context.window_manager.fileselect_add(self) 66 | return {'RUNNING_MODAL'} 67 | 68 | def execute(self, context): 69 | user = context.scene.cgtinker_transfer # noqa 70 | config = user.transfer_types 71 | config += '.json' 72 | 73 | from_path = Path(__file__).parent / "data" / config 74 | to_path = Path(self.directory) / config 75 | 76 | shutil.copy(str(from_path), str(to_path)) 77 | self.report({'INFO'}, f"Exported Configuration {str(from_path)} to {str(to_path)}") 78 | return {'FINISHED'} 79 | 80 | 81 | def import_config_button(self, context): 82 | self.layout.operator(OT_CGT_Import_Transfer_Config.bl_idname, text="BlendArMocap Config (.json)") 83 | 84 | 85 | def export_config_button(self, context): 86 | self.layout.operator(OT_CGT_Export_Transfer_Config.bl_idname, text="BlendArMocap Config (.json)") 87 | 88 | 89 | classes = [ 90 | OT_CGT_Export_Transfer_Config, 91 | OT_CGT_Import_Transfer_Config 92 | ] 93 | 94 | 95 | def register(): 96 | for cls in classes: 97 | bpy.utils.register_class(cls) 98 | 99 | bpy.types.TOPBAR_MT_file_import.append(import_config_button) 100 | bpy.types.TOPBAR_MT_file_export.append(export_config_button) 101 | 102 | 103 | def unregister(): 104 | bpy.types.TOPBAR_MT_file_export.remove(export_config_button) 105 | bpy.types.TOPBAR_MT_file_import.remove(import_config_button) 106 | 107 | for cls in classes: 108 | bpy.utils.unregister_class(cls) 109 | 110 | 111 | if __name__ == '__main__': 112 | register() 113 | -------------------------------------------------------------------------------- /src/cgt_transfer/cgt_transfer_registration.py: -------------------------------------------------------------------------------- 1 | from . 
import ( 2 | cgt_tf_object_properties, 3 | cgt_tf_3dview_panel, 4 | cgt_tf_operators, 5 | cgt_tf_properties_panel, 6 | cgt_tf_io_config 7 | ) 8 | 9 | modules = [ 10 | cgt_tf_object_properties, 11 | cgt_tf_3dview_panel, 12 | cgt_tf_operators, 13 | cgt_tf_properties_panel, 14 | cgt_tf_io_config, 15 | ] 16 | 17 | 18 | def register(): 19 | for module in modules: 20 | module.register() 21 | 22 | 23 | def unregister(): 24 | for module in reversed(modules): 25 | module.unregister() 26 | -------------------------------------------------------------------------------- /src/cgt_transfer/core_transfer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_transfer/core_transfer/__init__.py -------------------------------------------------------------------------------- /src/cgt_transfer/core_transfer/tf_check_object_properties.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import List 4 | 5 | from .. import cgt_tf_object_properties 6 | import logging 7 | 8 | 9 | def check_value_mapping_generic_props(props: List[cgt_tf_object_properties.OBJECT_PGT_CGT_ValueMapping]) -> List[ 10 | cgt_tf_object_properties.OBJECT_PGT_CGT_ValueMapping]: 11 | """ Copy values from first container and set axis explicit. """ 12 | main_prop = props[0] 13 | if main_prop.remap_default == 'DEFAULT': 14 | main_prop.remap_default = 'XYZ' 15 | target_axis = [axis for axis in main_prop.remap_default] 16 | 17 | for tar_axis, prop in zip(target_axis, props): 18 | if not prop.active: 19 | continue 20 | prop.remap_details = tar_axis 21 | prop.factor = main_prop.factor 22 | prop.offset = main_prop.offset 23 | prop.from_min = main_prop.from_min 24 | prop.from_max = main_prop.from_max 25 | prop.to_min = main_prop.to_min 26 | prop.to_max = main_prop.to_max 27 | return props 28 | 29 | 30 | def check_value_mapping_detail_props(props: List[cgt_tf_object_properties.OBJECT_PGT_CGT_ValueMapping]) -> List[ 31 | cgt_tf_object_properties.OBJECT_PGT_CGT_ValueMapping]: 32 | """ Sets explicit axis names and checks for overlaps. """ 33 | axis_d = {0: 'X', 1: 'Y', 2: 'Z'} 34 | 35 | for i, prop in enumerate(props): 36 | if prop.remap_details in ['X', 'Y', 'Z']: 37 | continue 38 | 39 | prop.remap_details = axis_d[i] 40 | 41 | active_props = [prop for prop in props if prop.active] 42 | 43 | if not len(set(active_props)) == len(active_props): 44 | logging.error(f"Internal Error, active properties don't match expected properties. {props[0].id_data}") 45 | raise RuntimeError 46 | return props 47 | 48 | 49 | def check_distance_mapping_object_props(props: cgt_tf_object_properties.OBJECT_PGT_CGT_TransferProperties) -> cgt_tf_object_properties.OBJECT_PGT_CGT_TransferProperties: 50 | """ Checks if required objects assigned and updates mapping props. """ 51 | objects = [ 52 | # props.by_obj, 53 | props.to_obj, 54 | props.from_obj, 55 | props.remap_from_obj, 56 | props.remap_to_obj 57 | ] 58 | 59 | if not all([True if ob is not None else False for ob in objects]): 60 | logging.error(f"All object pointers for distance remapping have to be set. 
{props.id_data}") 61 | raise RuntimeError 62 | return props 63 | 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /src/cgt_transfer/core_transfer/tf_get_object_properties.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Tuple, Any, Optional, List 4 | import bpy 5 | import numpy as np 6 | from ...cgt_core.cgt_calculators_nodes import cgt_math 7 | from .. import cgt_tf_object_properties 8 | from . import tf_check_object_properties, tf_reflect_object_properties 9 | 10 | 11 | # driver_prop_cls_dict = None 12 | def get_properties_from_object(obj: bpy.types.Object) -> tf_reflect_object_properties.RuntimeClass(): 13 | """ Get properties from object as Runtime Class to not modify values in Blender by accident. """ 14 | # global driver_prop_cls_dict 15 | # if driver_prop_cls_dict is None: 16 | # driver_prop_cls_dict = object_prop_reflection.copy_ptr_prop_cls(object_prop_reflection.cls_type_dict) 17 | 18 | properties = tf_reflect_object_properties.get_object_attributes( 19 | # driver_prop_cls_dict["OBJECT_PGT_CGT_TransferProperties"], 20 | cgt_tf_object_properties.TransferPropertiesProto, 21 | obj.cgt_props, 22 | tf_reflect_object_properties.RuntimeClass() 23 | ) 24 | 25 | return properties 26 | 27 | 28 | def get_constraint_props(c: bpy.types.Constraint): 29 | pool = {'target', 'type', 'subtarget', 'is_valid', 'active', 'bl_rna', 'error_location', 'error_rotation', 30 | 'head_tail', 'is_proxy_local', 'mute', 'rna_type', 'show_expanded', 'use_bbone_shape', 31 | 'is_override_data'} 32 | props = {key: getattr(c, key, None) for key in dir(c) if key not in pool and not key.startswith('_')} 33 | return props 34 | 35 | 36 | def get_target(tar_props: cgt_tf_object_properties.OBJECT_PGT_CGT_TransferTarget) -> Tuple[Optional[bpy.types.Object], Optional[Any], str]: 37 | """ Get target property and set appropriate Pointers. """ 38 | if tar_props.target is None: 39 | return None, None, 'ABORT' 40 | 41 | if tar_props.obj_type == 'ANY': 42 | return tar_props.target, None, 'ANY' 43 | 44 | elif tar_props.obj_type == 'MESH': 45 | if tar_props.object_type == 'OBJECT': 46 | return tar_props.target, None, 'OBJECT' 47 | 48 | elif tar_props.object_type == 'SHAPE_KEY': 49 | if tar_props.target_shape_key not in tar_props.target.data.shape_keys.key_blocks: 50 | return None, None, 'ABORT' 51 | return tar_props.target, tar_props.target.data.shape_keys.key_blocks[ 52 | tar_props.target_shape_key], 'SHAPE_KEY' 53 | 54 | elif tar_props.obj_type == 'ARMATURE': 55 | if tar_props.armature_type == 'ARMATURE': 56 | return tar_props.target, None, 'ARMATURE' 57 | 58 | elif tar_props.armature_type == 'BONE': 59 | if tar_props.target_bone not in tar_props.target.pose.bones: 60 | return None, None, 'ABORT' 61 | return tar_props.target, tar_props.target.pose.bones[tar_props.target_bone], 'BONE' 62 | assert RuntimeError, f'Type not defined. \n{tar_props}' 63 | 64 | 65 | def get_value_by_distance_properties(cgt_props: cgt_tf_object_properties.OBJECT_PGT_CGT_TransferProperties): 66 | # todo: unpacking? improve check (less harsh one?) 67 | cgt_props = tf_check_object_properties.check_distance_mapping_object_props(cgt_props) 68 | return cgt_props 69 | 70 | 71 | def get_remapping_properties(cgt_props: cgt_tf_object_properties.OBJECT_PGT_CGT_TransferProperties) -> List[List[ 72 | cgt_tf_object_properties.OBJECT_PGT_CGT_ValueMapping]]: 73 | """ Validates, updates and returns remapping properties. 
""" 74 | loc_xyz = [cgt_props.loc_details, [cgt_props.use_loc_x, cgt_props.use_loc_y, cgt_props.use_loc_z]] 75 | rot_xyz = [cgt_props.rot_details, [cgt_props.use_rot_x, cgt_props.use_rot_y, cgt_props.use_rot_z]] 76 | sca_xyz = [cgt_props.sca_details, [cgt_props.use_sca_x, cgt_props.use_sca_y, cgt_props.use_sca_z]] 77 | 78 | updated_props = [] 79 | for details, props in [loc_xyz, rot_xyz, sca_xyz]: 80 | if details: 81 | props = tf_check_object_properties.check_value_mapping_detail_props(props) 82 | else: 83 | props = tf_check_object_properties.check_value_mapping_generic_props(props) 84 | updated_props.append(props) 85 | 86 | return updated_props 87 | 88 | 89 | def get_distance(cur_props): 90 | """ Returns 'remap by' dist either from bones or the bone len... """ 91 | if cur_props.by_obj.target is None or cur_props.by_obj.target_bone in ["NONE", None]: 92 | return None 93 | 94 | armature = cur_props.by_obj.target 95 | m_dist = None 96 | 97 | if cur_props.by_obj.target_type == 'BONE_LEN': 98 | m_dist = armature.pose.bones[cur_props.by_obj.target_bone].length 99 | 100 | elif cur_props.by_obj.target_type == 'BONE_DIST': 101 | assert cur_props.by_obj.target_bone is not None and cur_props.by_obj.other_bone is not None 102 | 103 | if cur_props.by_obj.target_bone_type == 'HEAD': 104 | v1 = armature.pose.bones[cur_props.by_obj.target_bone].head 105 | elif cur_props.by_obj.target_bone_type == 'TAIL': 106 | v1 = armature.pose.bones[cur_props.by_obj.target_bone].tail 107 | elif cur_props.by_obj.target_bone_type == 'LOCATION': 108 | v1 = armature.pose.bones[cur_props.by_obj.target_bone].location 109 | 110 | if cur_props.by_obj.other_bone_type == 'HEAD': 111 | v2 = armature.pose.bones[cur_props.by_obj.other_bone].head 112 | elif cur_props.by_obj.other_bone_type == 'TAIL': 113 | v2 = armature.pose.bones[cur_props.by_obj.other_bone].tail 114 | elif cur_props.by_obj.other_bone_type == 'LOCATION': 115 | v2 = armature.pose.bones[cur_props.by_obj.other_bone].location 116 | 117 | m_dist = cgt_math.get_vector_distance(np.array(v1), np.array(v2)) 118 | return m_dist 119 | 120 | 121 | -------------------------------------------------------------------------------- /src/cgt_transfer/core_transfer/tf_load_object_properties.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import bpy 3 | from typing import Union, Any 4 | import logging 5 | from ...cgt_core.cgt_utils import cgt_json 6 | from ...cgt_core.cgt_bpy import cgt_bpy_utils, cgt_object_prop, cgt_collection 7 | 8 | 9 | def idle_object_props(props): 10 | """ Set CGT_Object_Properties to Idle State. 
""" 11 | def value_mapping(values): 12 | values.active = False 13 | for val in ['factor', 'from_max', 'to_max']: 14 | setattr(values, val, 1.0) 15 | for val in ['offset', 'from_min', 'to_min']: 16 | setattr(values, val, 0.0) 17 | 18 | props.active = False 19 | props.driver_type = 'NONE' 20 | for details in ['loc_details', 'rot_details', 'sca_details']: 21 | setattr(props, details, False) 22 | for target in ['to_obj', 'from_obj', 'remap_from_obj', 'remap_to_obj']: 23 | setattr(props, target, None) 24 | 25 | for transform in ['rot', 'loc', 'sca']: 26 | paths = [f"use_{transform}_{axis}" for axis in ['x', 'y', 'z']] 27 | for path in paths: 28 | values = getattr(props, path, None) 29 | if values is None: 30 | continue 31 | value_mapping(values) 32 | 33 | props.target.target = None 34 | props.by_obj.target = None 35 | 36 | 37 | def apply_props2obj(props: dict, obj: Union[bpy.types.Object, bpy.types.Constraint], target_armature: bpy.types.Object = None): 38 | """ Apply CGT_Object_Properties stored state. """ 39 | if obj == {} or props == {} or target_armature is None: 40 | return 41 | 42 | for key, value in props.items(): 43 | if isinstance(value, dict): # recv 44 | apply_props2obj(value, getattr(obj, key, {}), target_armature) 45 | 46 | elif isinstance(value, list): 47 | # obj types are declared at 2nd idx in a list 48 | if len(value) != 2: 49 | setattr(obj, key, value) 50 | else: 51 | if value[1] == 'ARMATURE': 52 | setattr(obj, key, target_armature) 53 | 54 | elif value[1] in ['EMPTY', 'MESH', 'CURVE', 'SURFACE', 'META', 'FONT', 55 | 'POINTCLOUD', 'VOLUME', 'GPENCIL', 'LATTICE', 'LIGHT', 56 | 'LIGHT_PROBE', 'CAMERA', 'SPEAKER', 'CURVES']: 57 | # handling default objects and other kinds of ptrs 58 | if cgt_bpy_utils.get_object_by_name(value[0]) is None: 59 | logging.warning(f"Object of type {value[1]} doesn't exist - creating {value[1]} as EMPTY.") 60 | 61 | # adding id as it might be required in some cases and hopefully doesn't matter in others 62 | target = cgt_bpy_utils.add_empty(0.25, value[0]) 63 | cgt_object_prop.set_custom_property(target, 'cgt_id', '11b1fb41-1349-4465-b3aa-78db80e8c761') 64 | 65 | try: 66 | setattr(obj, key, target) 67 | except AttributeError as err: 68 | logging.warning(err) 69 | 70 | else: 71 | logging.error(f"{value[1]} - Type not supported: {value[1]}.") 72 | 73 | else: 74 | try: 75 | setattr(obj, key, value) 76 | except (AttributeError, TypeError) as err: 77 | logging.warning(err) 78 | 79 | 80 | def apply_constraints(constraints: list, obj: bpy.types.Object, target_armature: bpy.types.Object): 81 | """ Add stored constraints to objects. """ 82 | if obj == {} or len(constraints) == 0: 83 | return 84 | 85 | # storing constraints as list of [constraint_name, constraint_properties] 86 | for name, props in constraints: 87 | constraint = obj.constraints.new(name) 88 | apply_props2obj(props, constraint, target_armature) 89 | 90 | 91 | # TODO: Col polling unused 92 | def load(objects: Any, path: str = None, target_armature: bpy.types.Object = None): 93 | """ Load CGT_Object_Properties and Constraints from json and apply the data. 
""" 94 | assert path is not None 95 | if target_armature is None: 96 | _objs = bpy.context.selected_objects 97 | assert len(_objs) != 0 98 | assert _objs[0].type == 'ARMATURE' 99 | target_armature = _objs[0] 100 | 101 | json_data = cgt_json.JsonData(path) 102 | 103 | # clean existing objs 104 | for ob in objects: 105 | if cgt_object_prop.get_custom_property(ob, 'cgt_id') is None: 106 | continue 107 | ob.constraints.clear() 108 | idle_object_props(ob.cgt_props) 109 | 110 | for key, d in json_data.__dict__.items(): 111 | # only link if collection exists 112 | if bpy.data.collections.get(d['collection'], None) is None: 113 | continue 114 | 115 | # get object target 116 | obj = objects.get(key, None) 117 | if obj is None: 118 | obj = cgt_bpy_utils.add_empty(0.01, key) 119 | 120 | if cgt_object_prop.get_custom_property(obj, 'cgt_id') is None: 121 | # TODO: cgt id shouldn't be that long tag - maybe switch to key in the future and just check if any id 122 | cgt_object_prop.set_custom_property(obj, 'cgt_id', '11b1fb41-1349-4465-b3aa-78db80e8c761') 123 | cgt_collection.add_object_to_collection(d['collection'], obj) 124 | 125 | # apply data 126 | apply_props2obj(d['cgt_props'], obj.cgt_props, target_armature) 127 | apply_constraints(d['constraints'], obj, target_armature) -------------------------------------------------------------------------------- /src/cgt_transfer/core_transfer/tf_reflect_object_properties.py: -------------------------------------------------------------------------------- 1 | import typing 2 | from typing import Dict 3 | from .. import cgt_tf_object_properties 4 | import bpy 5 | 6 | 7 | class RuntimeClass: 8 | """ 9 | Class for copying internal registered properties. 10 | Pointer Properties to other classes has to be set at generation. 11 | """ 12 | pass 13 | 14 | def __str__(self): 15 | s = ["{"] 16 | for k, v in self.__dict__.items(): 17 | if isinstance(v, RuntimeClass): 18 | s.append(f"\n\t{k}: ") 19 | s.append("{") 20 | for nk, nv in v.__dict__.items(): 21 | s.append(f"\n\t\t{nk}: {nv},") 22 | s.append("\n\t},") 23 | 24 | else: 25 | s.append(f"\n\t{k}: {v},") 26 | s.append("\n}") 27 | return "".join(s) 28 | 29 | # region DEPRECIATED 30 | # DEPRECIATED - dict to reflect registered property groups in Blender (2.9.3) 31 | cls_type_dict = { 32 | "OBJECT_PGT_CGT_TransferTarget": RuntimeClass(), 33 | "OBJECT_PGT_CGT_RemapDistance": RuntimeClass(), 34 | "OBJECT_PGT_CGT_ValueMapping": RuntimeClass(), 35 | "OBJECT_PGT_CGT_TransferProperties": RuntimeClass(), 36 | "Object": bpy.types.Object 37 | } 38 | 39 | 40 | # DEPRECIATED 41 | def copy_ptr_prop_cls(class_name_dict: Dict[str, RuntimeClass]) -> Dict[str, RuntimeClass]: 42 | """ Uses cls names to copy slots from pointer property groups to flat classes. 43 | Helper cls improves usage of internal registered types. """ 44 | import warnings 45 | warnings.warn("DEPRECIATED - Function may only be used in Blender (2.9.0) - (2.9,3)") 46 | 47 | for cls_name in class_name_dict: 48 | """ Get all registered PropertyGroup properties. 
""" 49 | cls = getattr(cgt_tf_object_properties, cls_name, None) 50 | if cls is None: 51 | continue 52 | # TODO: static props 53 | 54 | type_hints = typing.get_type_hints(cls) 55 | id_data = getattr(cls, "id_data", None) 56 | setattr(class_name_dict[cls_name], 'id_data', id_data) 57 | 58 | for hint in type_hints: 59 | property_type = type_hints[hint][0].__name__ 60 | 61 | # if prop is pointing to sub_cls 62 | if property_type == 'PointerProperty': 63 | cls_type_name = type_hints[hint][1]['type'].__name__ 64 | setattr(class_name_dict[cls_name], hint, class_name_dict[cls_type_name]) 65 | 66 | else: # mimic property type 67 | default_val = type_hints[hint][1].get("default", None) 68 | enum = type_hints[hint][1].get("items", None) 69 | if isinstance(enum, typing.Callable): 70 | # TODO: static classes for reflection to avoid hacky solution for dynamic enums 71 | # dynamic enum -> lf str 72 | setattr(class_name_dict[cls_name], hint, "dynamic_enum") 73 | 74 | elif isinstance(enum, typing.Tuple): 75 | # tuple of enum elements 76 | setattr(class_name_dict[cls_name], hint, enum) 77 | 78 | else: 79 | # default val (int / float etc) 80 | setattr(class_name_dict[cls_name], hint, type(default_val)) 81 | return class_name_dict 82 | # endregion 83 | 84 | 85 | def get_runtime_object_attributes(cls_template, obj, cls_out): 86 | """ Use the runtime dict to get all properties from Object required for remapping. """ 87 | import warnings 88 | warnings.warn("DEPRECIATED - Function may only be used in Blender (2.9.0) - (2.9,3)") 89 | 90 | for key, value in cls_template.__dict__.items(): 91 | if value == "dynamic_enum": 92 | if not hasattr(obj, key): 93 | continue 94 | # TODO: static classes for reflection to avoid hacky solution for dynamic enums 95 | # regular a dynamic enum will have a target ob 96 | if obj.target is not None: 97 | obj_value = getattr(obj, key, None) 98 | else: 99 | obj_value = getattr(obj, key, None) 100 | 101 | if type(value) == RuntimeClass: 102 | # creating new empty cls and recv 103 | setattr(cls_out, key, RuntimeClass()) 104 | recv_next_cls = getattr(cls_out, key, RuntimeClass()) 105 | get_object_attributes(value, getattr(obj, key, None), recv_next_cls) 106 | else: 107 | setattr(cls_out, key, obj_value) 108 | return cls_out 109 | 110 | 111 | def get_object_attributes(cls_template, obj, cls_out): 112 | """ Use the runtime dict to get all properties from Object required for remapping. 
""" 113 | for key, value in cls_template.__dict__.get('__annotations__', {}).items(): 114 | obj_value = getattr(obj, key, None) 115 | 116 | if value in (cgt_tf_object_properties.TransferPropertiesProto, 117 | cgt_tf_object_properties.ValueMappingProto, 118 | cgt_tf_object_properties.RemapDistanceProto, 119 | cgt_tf_object_properties.TransferTargetProto): 120 | 121 | # creating new empty cls and recv 122 | setattr(cls_out, key, RuntimeClass()) 123 | recv_next_cls = getattr(cls_out, key, RuntimeClass()) 124 | get_object_attributes(value, getattr(obj, key, None), recv_next_cls) 125 | else: 126 | setattr(cls_out, key, obj_value) 127 | return cls_out 128 | 129 | 130 | if __name__ == '__main__': 131 | ob = bpy.context.selected_objects[0] 132 | copy_ptr_prop_cls(cls_type_dict) 133 | res_1 = get_runtime_object_attributes(cgt_tf_object_properties.TransferPropertiesProto, ob.cgt_props, RuntimeClass()) 134 | res_2 = get_runtime_object_attributes(cls_type_dict["OBJECT_PGT_CGT_TransferProperties"], ob.cgt_props, RuntimeClass()) 135 | print("TEMPLATE:", cls_type_dict["OBJECT_PGT_CGT_TransferProperties"], "\n\nCOPY:", res_1) 136 | -------------------------------------------------------------------------------- /src/cgt_transfer/core_transfer/tf_save_object_properties.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import List 3 | import bpy 4 | import logging 5 | from . import tf_get_object_properties, tf_reflect_object_properties 6 | from ...cgt_core.cgt_utils import cgt_json 7 | 8 | armature_name = None 9 | 10 | 11 | def convert_object_ptrs2str(cls) -> None: 12 | """ Pointers to objects to strs (inplace). """ 13 | for key, value in cls.__dict__.items(): 14 | if isinstance(value, tf_reflect_object_properties.RuntimeClass): 15 | convert_object_ptrs2str(value) 16 | elif isinstance(value, bpy.types.Object): 17 | 18 | # inform user about target armature 19 | if value.type == 'ARMATURE': 20 | global armature_name 21 | if armature_name is None: 22 | armature_name = value.name 23 | 24 | if armature_name != value.name: 25 | logging.warning( 26 | f"Armature targets don't match in cls which may lead to errors when importing: \n{cls}") 27 | 28 | setattr(cls, key, [value.name, value.type]) 29 | else: 30 | pass 31 | 32 | 33 | def convert_cls2dict(cls, d: dict) -> None: 34 | """ Convert cls and subcls to dict """ 35 | for key, value in cls.__dict__.items(): 36 | if isinstance(value, tf_reflect_object_properties.RuntimeClass): 37 | d[key] = {} 38 | convert_cls2dict(value, d[key]) 39 | else: 40 | d[key] = value 41 | 42 | 43 | def delete_typeof_none(cls) -> None: 44 | """ Remove ptrs to None (inplace) """ 45 | removable_attrs = [] 46 | 47 | for key, value in cls.__dict__.items(): 48 | if isinstance(value, tf_reflect_object_properties.RuntimeClass): 49 | delete_typeof_none(value) 50 | elif value is None: 51 | removable_attrs.append((cls, key)) 52 | else: 53 | pass 54 | 55 | for cls, key in removable_attrs: 56 | delattr(cls, key) 57 | 58 | 59 | def delete_id_data(cls) -> None: 60 | """ Remove id_data props (internal bpy data used for proper prints) """ 61 | removable_attrs = [] 62 | 63 | for key, value in cls.__dict__.items(): 64 | if key == 'id_data': 65 | removable_attrs.append((cls, key)) 66 | if isinstance(value, tf_reflect_object_properties.RuntimeClass): 67 | delete_id_data(value) 68 | else: 69 | pass 70 | 71 | for cls, key in removable_attrs: 72 | delattr(cls, key) 73 | 74 | 75 | def save(objs: List[bpy.types.Object]) -> 
cgt_json.JsonData: 76 | """ Saves all available remapping objects, boils down transfer properties to the required minimum. """ 77 | # TODO: SAVING MUST CHECK X AXIS WHEN USING DEFAULT REMAP VALUES 78 | # armature name as helper to check only one armature is used 79 | global armature_name 80 | armature_name = None 81 | properties = {} 82 | 83 | for obj in objs: 84 | if obj.get('cgt_id') != '11b1fb41-1349-4465-b3aa-78db80e8c761': 85 | continue 86 | 87 | props = tf_get_object_properties.get_properties_from_object(obj) 88 | 89 | if props.target.target is None: 90 | continue 91 | 92 | if props.driver_type == 'NONE': 93 | continue 94 | 95 | # Remove unused remap properties 96 | remap_props = [ 97 | ["use_loc_x", "use_loc_y", "use_loc_z"], 98 | ["use_rot_x", "use_rot_y", "use_rot_z"], 99 | ["use_sca_x", "use_sca_y", "use_sca_z"] 100 | ] 101 | 102 | detail_toggles = [ 103 | "loc_details", "rot_details", "sca_details" 104 | ] 105 | 106 | remap_defaults = { 107 | "factor": 1.0, 108 | "offset": 0.0, 109 | "from_min": 0.0, 110 | "from_max": 1.0, 111 | "to_min": 0.0, 112 | "to_max": 1.0 113 | } 114 | 115 | # keep x values if details aren't used, remove inactive remap props 116 | for remap_prop, details in zip(remap_props, detail_toggles): 117 | use_details = getattr(props, details, False) 118 | if not use_details: 119 | delattr(props, details) 120 | 121 | for i, axis in enumerate(remap_prop): 122 | # remove default values 123 | sub_cls = getattr(props, axis, None) 124 | for key, value in remap_defaults.items(): 125 | if not getattr(sub_cls, key, value) == value: 126 | continue 127 | delattr(sub_cls, key) 128 | 129 | # keep x-remap-props if not use details 130 | if not use_details and i == 0: 131 | continue 132 | 133 | # check cls 134 | if sub_cls is None: 135 | continue 136 | 137 | if getattr(sub_cls, "active", False): 138 | continue 139 | 140 | # del cls 141 | delattr(props, axis) 142 | 143 | # remove properties set to None 144 | if props.by_obj.target is None: 145 | del props.by_obj 146 | delete_typeof_none(props) 147 | 148 | # convert remaining ptrs to str and cls to dict, then remove id_props 149 | convert_object_ptrs2str(props) 150 | id_name = props.id_data 151 | delete_id_data(props) 152 | 153 | # convert cls to dict 154 | cls_dict = dict() 155 | convert_cls2dict(props, cls_dict) 156 | 157 | # get constraints 158 | constraints = [(c.type, tf_get_object_properties.get_constraint_props(c)) for c in obj.constraints] 159 | 160 | # id_name contains 'object name' and 'object type', get in first lvl depth for easier loading 161 | properties[id_name[0]] = {} 162 | properties[id_name[0]]['cgt_props'] = cls_dict 163 | properties[id_name[0]]['constraints'] = constraints 164 | 165 | if obj.users_collection: 166 | properties[id_name[0]]['collection'] = obj.users_collection[0].name 167 | else: 168 | properties[id_name[0]]['collection'] = "cgt_DRIVERS" 169 | 170 | json_data = cgt_json.JsonData(**properties) 171 | return json_data 172 | 173 | 174 | def test(): 175 | objs = bpy.data.objects 176 | json_data = save(objs) 177 | 178 | 179 | if __name__ == '__main__': 180 | test() 181 | -------------------------------------------------------------------------------- /src/cgt_transfer/setup_helper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cgtinker/BlendArMocap/ecb4c91c01befbe29b75196eb72321867810ddf0/src/cgt_transfer/setup_helper/__init__.py -------------------------------------------------------------------------------- 
/src/cgt_transfer/setup_helper/tf_hand_mapping_helper.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | 3 | 4 | RIGNAME = 'rig' 5 | 6 | ################################################################ 7 | ###################### TARGET-BONES ############################ 8 | ################################################################ 9 | 10 | # expecting .L / .R suffixes for left and right hand 11 | refs = { 12 | "thumb_cmc": "thumb.01", 13 | "thumb_mcp": "thumb.02", 14 | "thumb_ip": "thumb.03", 15 | 16 | "index_finger_mcp": "f_index.01", 17 | "index_finger_pip": "f_index.02", 18 | "index_finger_dip": "f_index.03", 19 | 20 | "middle_finger_mcp": "f_middle.01", 21 | "middle_finger_pip": "f_middle.02", 22 | "middle_finger_dip": "f_middle.03", 23 | 24 | "ring_finger_mcp": "f_ring.01", 25 | "ring_finger_pip": "f_ring.02", 26 | "ring_finger_dip": "f_ring.03", 27 | 28 | "pinky_mcp": "f_pinky.01", 29 | "pinky_pip": "f_pinky.02", 30 | "pinky_dip": "f_pinky.03", 31 | 32 | "wrist": "hand_ik", 33 | } 34 | 35 | ################################################################## 36 | ####################### MAPPING VALUES ########################### 37 | ################################################################## 38 | 39 | # x angles overcast when the hand is rotated, limits help to mitigate the issue 40 | constraint_x_limits = [ 41 | [-0.261, 3.14159], [-0.261, 3.1415926], [-0.349, 3.1415926], # thumb 42 | [-0.261, 1.22634], [-0.136, 1.3962634], [-0.136, 1.3962634], # index 43 | [-0.349, 1.22634], [-0.136, 1.3962634], [-0.136, 1.3962634], # middle 44 | [-0.436, 1.22634], [-0.136, 1.3962634], [-0.136, 1.3962634], # ring 45 | [-0.698, 1.22634], [-0.136, 1.3962634], [-0.136, 1.3962634], # pinky 46 | ] 47 | 48 | # x angles determine the curling and effect mcps, dips and pips 49 | x_inputs = [ 50 | [0.011, 0.630], [0.010, 0.536], [0.008, 1.035], # thumb 51 | [0.105, 1.331], [0.014, 1.858], [0.340, 1.523], # index 52 | [0.046, 1.326], [0.330, 1.803], [0.007, 1.911], # middle 53 | [0.012, 1.477], [0.244, 1.674], [0.021, 1.614], # ring 54 | [0.120, 1.322], [0.213, 1.584], [0.018, 1.937], # pinky 55 | ] 56 | 57 | x_outputs = [ 58 | [-.60, 0.63], [-.30, 0.54], [-.15, 1.03], # thumb 59 | [-.50, 1.33], [-.20, 1.86], [-.55, 1.52], # index 60 | [-.50, 1.33], [-.30, 1.80], [-.15, 1.91], # middle 61 | [-.60, 1.48], [-.30, 1.67], [-.30, 1.61], # ring 62 | [-.80, 1.32], [-.50, 1.58], [-.30, 1.94], # pinky 63 | ] 64 | 65 | # z angles determine the spreading range and only effect MCPs 66 | z_inputs = [ 67 | [0.349, 1.047], # thumb 68 | [-0.43, 1.047], # index 69 | [-0.61, 0.698], # middle 70 | [-0.43, 0.698], # ring 71 | [-0.69, 0.872], # pinky 72 | ] 73 | 74 | z_outputs = [ 75 | [-0.436, 0.4363], # thumb 76 | [0.4363, -0.698], # index 77 | [0.6108, -0.436], # middle 78 | [0.1745, -0.523], # ring 79 | [0.3490, -0.523], # pinky 80 | ] 81 | 82 | 83 | ############################################################ 84 | ###################### TRANSFER ############################ 85 | ############################################################ 86 | 87 | def set_hand_properties(rig: bpy.types.Armature, prefix: str = '.L'): 88 | def set_remap_properties(remap_prop, from_min, from_max, to_min, to_max, factor, offset): 89 | remap_prop.from_min = from_min 90 | remap_prop.from_max = from_max 91 | remap_prop.to_min = to_min 92 | remap_prop.to_max = to_max 93 | remap_prop.factor = factor 94 | remap_prop.offset = offset 95 | 96 | def set_wrist_properties(ob): 97 | 
        ob.cgt_props.use_rot_x.active = True
98 |         ob.cgt_props.use_rot_y.active = True
99 |         ob.cgt_props.use_rot_z.active = True
100 |         ob.cgt_props.remap_details = 'DEFAULT'
101 |
102 |         set_remap_properties(ob.cgt_props.use_rot_x, 0, 1, 0, 1, 1, 0)
103 |         set_remap_properties(ob.cgt_props.use_rot_y, 0, 1, 0, 1, 1, 0)
104 |         set_remap_properties(ob.cgt_props.use_rot_z, 0, 1, 0, 1, 1, 0)
105 |
106 |         c = ob.constraints.new(type='COPY_ROTATION')
107 |         c.mix_mode = 'ADD'
108 |         c.owner_space = 'LOCAL'
109 |
110 |     for i, entry in enumerate(refs.items()):
111 |         # reference object names
112 |         k, v = entry
113 |         ob_name = 'cgt_' + k + prefix
114 |         bone = v + prefix
115 |
116 |         # get ob and clear constraints
117 |         ob = bpy.data.objects.get(ob_name, None)
118 |         assert ob is not None
119 |         ob.constraints.clear()
120 |
121 |         # target
122 |         ob.cgt_props.target.obj_type = 'ARMATURE'
123 |         ob.cgt_props.target.target = rig
124 |         ob.cgt_props.target.armature_type = 'BONE'
125 |         ob.cgt_props.target.target_bone = bone
126 |
127 |         # driver
128 |         ob.cgt_props.driver_type = 'REMAP'
129 |         ob.cgt_props.by_obj.target_type = 'NONE'
130 |
131 |         if k == 'wrist':
132 |             set_wrist_properties(ob)
133 |             continue
134 |
135 |         ob.cgt_props.rot_details = True
136 |
137 |         # x_vals
138 |         ob.cgt_props.use_rot_x.remap_details = 'DEFAULT'
139 |         x_in_min, x_in_max = x_inputs[i]
140 |         x_out_min, x_out_max = x_outputs[i]
141 |         ob.cgt_props.use_rot_x.active = True
142 |         set_remap_properties(ob.cgt_props.use_rot_x, x_in_min, x_in_max, x_out_min, x_out_max, 1, 0)
143 |
144 |         # x limits
145 |         limit_min, limit_max = constraint_x_limits[i]
146 |
147 |         c = ob.constraints.new(type='LIMIT_ROTATION')
148 |         c.use_limit_x = True
149 |         c.min_x = limit_min
150 |         c.max_x = limit_max
151 |         c.influence = 1
152 |         c.owner_space = 'LOCAL'
153 |
154 |         # z_vals: spread mapping for the mcps (reads the z_inputs range, writes the z rotation)
155 |         if i % 3 == 0:
156 |             z_in_min, z_in_max = z_inputs[i // 3]
157 |             z_out_min, z_out_max = z_outputs[i // 3]
158 |             ob.cgt_props.use_rot_z.active = True
159 |             set_remap_properties(ob.cgt_props.use_rot_z, z_in_min, z_in_max, z_out_min, z_out_max, 1, 0)
160 |
161 |             c = ob.constraints.new(type='COPY_ROTATION')
162 |             c.use_y = False
163 |             c.mix_mode = 'ADD'
164 |             c.owner_space = 'LOCAL'
165 |
166 |         else:
167 |             c = ob.constraints.new(type='COPY_ROTATION')
168 |             c.use_y = False
169 |             c.use_z = False
170 |             c.mix_mode = 'ADD'
171 |             c.owner_space = 'LOCAL'
172 |
173 |
174 | def main():
175 |     rig = bpy.data.objects.get(RIGNAME, None)
176 |     if rig is None:
177 |         objs = bpy.context.selected_objects
178 |         assert len(objs) > 0
179 |         assert objs[0].type == 'ARMATURE'
180 |         rig = objs[0]
181 |
182 |     assert rig is not None
183 |
184 |     set_hand_properties(rig, '.L')
185 |     set_hand_properties(rig, '.R')
186 |
187 |
188 | if __name__ == '__main__':
189 |     main()
190 |
--------------------------------------------------------------------------------