├── pykinect_recorder ├── cli │ ├── __init__.py │ └── command.py ├── pyrealsense │ └── __init__.py ├── pyzenmuse │ └── __init__.py ├── renderer │ ├── components │ │ ├── __init__.py │ │ ├── sidebar_solutions.py │ │ ├── viewer_solution.py │ │ ├── viewer_3dsensors.py │ │ ├── sidebar_control.py │ │ ├── viewer_control.py │ │ ├── viewer_audio.py │ │ ├── statusbar.py │ │ ├── playback_sensors.py │ │ ├── viewer_imu_sensors.py │ │ ├── record_sensors.py │ │ ├── sidebar_menu.py │ │ ├── topbar.py │ │ ├── sidebar_explorer.py │ │ ├── viewer_sensors.py │ │ ├── viewer_playback.py │ │ └── viewer_video_clipping.py │ ├── __init__.py │ ├── public │ │ └── kinect-sensor.ico │ ├── signals.py │ ├── logger.py │ ├── synology_utils.py │ ├── split_data.py │ └── common_widgets.py ├── __init__.py ├── pyk4a │ ├── k4arecord │ │ ├── __init__.py │ │ ├── datablock.py │ │ ├── record_configuration.py │ │ ├── record.py │ │ ├── _k4arecordTypes.py │ │ └── playback.py │ ├── k4abt │ │ ├── __init__.py │ │ ├── body.py │ │ ├── joint2d.py │ │ ├── joint.py │ │ ├── body2d.py │ │ ├── tracker.py │ │ ├── frame.py │ │ └── _k4abtTypes.py │ ├── k4a │ │ ├── __init__.py │ │ ├── imu_sample.py │ │ ├── configuration.py │ │ ├── capture.py │ │ ├── transformation.py │ │ ├── image.py │ │ ├── device.py │ │ └── calibration.py │ ├── __init__.py │ ├── pykinect.py │ └── utils.py └── main_window.py ├── poetry.toml ├── CODE_OF_CONDUCT.md ├── docs ├── source │ ├── k4abt.rst │ ├── k4a │ │ └── calibration.rst │ ├── k4arecord.rst │ ├── pyrealsense.rst │ ├── pyk4a.rst │ ├── k4a.rst │ ├── index.rst │ └── conf.py ├── Makefile └── make.bat ├── CONTRIBUTING.md ├── .gitignore ├── .readthedocs.yaml ├── CITATION.cff ├── main.py ├── .github └── workflows │ └── publish.yml ├── tests ├── test_dependency.py └── test_ctype_types.py ├── LICENSE ├── pyproject.toml └── README.md /pykinect_recorder/cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pykinect_recorder/pyrealsense/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pykinect_recorder/pyzenmuse/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /poetry.toml: -------------------------------------------------------------------------------- 1 | [virtualenvs] 2 | in-project = true 3 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/__init__.py: -------------------------------------------------------------------------------- 1 | from . 
import * 2 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge -------------------------------------------------------------------------------- /pykinect_recorder/__init__.py: -------------------------------------------------------------------------------- 1 | # __all__ = [ 2 | #     "main", "renderer" 3 | # ] 4 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/__init__.py: -------------------------------------------------------------------------------- 1 | from ..main_window import * 2 | from .components import * 3 | -------------------------------------------------------------------------------- /docs/source/k4abt.rst: -------------------------------------------------------------------------------- 1 | K4ABT 2 | ===== 3 | 4 | .. note:: 5 | 6 |    This wrapper is under active development. 7 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/public/kinect-sensor.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unerue/pykinect-recorder/HEAD/pykinect_recorder/renderer/public/kinect-sensor.ico -------------------------------------------------------------------------------- /docs/source/k4a/calibration.rst: -------------------------------------------------------------------------------- 1 | Calibration 2 | =========== 3 | 4 | .. automodule:: pykinect_recorder.pyk4a.k4a.calibration 5 |    :members: 6 |    :undoc-members: 7 |    :show-inheritance: 8 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4arecord/__init__.py: -------------------------------------------------------------------------------- 1 | from .datablock import Datablock 2 | from .record import Record 3 | from .record_configuration import RecordConfiguration 4 | from .playback import Playback 5 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/sidebar_solutions.py: -------------------------------------------------------------------------------- 1 | from PySide6.QtWidgets import QFrame 2 | 3 | 4 | class SolutionSidebar(QFrame): 5 |     def __init__(self) -> None: 6 |         super().__init__() 7 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4abt/__init__.py: -------------------------------------------------------------------------------- 1 | from .body import Body 2 | from .body2d import Body2d 3 | from .frame import Frame 4 | from .joint import Joint 5 | from .joint2d import Joint2d 6 | from .tracker import Tracker 7 | -------------------------------------------------------------------------------- /docs/source/k4arecord.rst: -------------------------------------------------------------------------------- 1 | K4A Record 2 | ========== 3 | 4 | .. note:: 5 | 6 |    This wrapper is under active development. 7 | 8 | .. .. automodule:: pykinect_recorder.pyk4a.k4arecord 9 | .. 
:members: 10 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribute to PyKinect Recorder 2 | 3 | ## Ways to contribute 4 | 5 | Set up a development environment and run the app: 6 | 7 | ``` 8 | poetry install 9 | poetry run python main.py 10 | ``` 11 | 12 | Format the code: 13 | 14 | ``` 15 | ufmt format 16 | ``` 17 | 18 | Type-check: 19 | 20 | ``` 21 | mypy 22 | ``` 23 | 24 | Run the tests: 25 | 26 | ``` 27 | pytest tests 28 | ``` -------------------------------------------------------------------------------- /docs/source/pyrealsense.rst: -------------------------------------------------------------------------------- 1 | .. _pyrealsense: 2 | 3 | Python wrapper for Intel RealSense SDK API 4 | ########################################## 5 | 6 | We wrapped the C/C++ API of the Intel RealSense SDK in Python. 7 | 8 | .. note:: 9 | 10 |    This wrapper is under active development. 11 | -------------------------------------------------------------------------------- /docs/source/pyk4a.rst: -------------------------------------------------------------------------------- 1 | .. _pyk4a: 2 | 3 | Python wrapper for K4A 4 | ###################### 5 | 6 | We wrapped the K4A C/C++ API of the Azure Kinect Sensor SDK in Python. 7 | 8 | 9 | .. currentmodule:: pykinect_recorder.pyk4a 10 | .. toctree:: 11 | 12 |    k4a 13 |    k4abt 14 |    k4arecord 15 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4a/__init__.py: -------------------------------------------------------------------------------- 1 | from .calibration import Calibration 2 | from .capture import Capture 3 | from .configuration import Configuration, default_configuration 4 | from .device import Device 5 | from .image import Image 6 | from .imu_sample import ImuSample 7 | from .transformation import Transformation 8 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/viewer_solution.py: -------------------------------------------------------------------------------- 1 | from PySide6.QtWidgets import QFrame 2 | 3 | 4 | class ViewerSolution(QFrame): 5 |     def __init__(self): 6 |         super().__init__() 7 |         self.setFixedSize(1200, 1000) 8 |         self.setStyleSheet("background-color: black;") 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .venv 3 | __pycache__ 4 | dist 5 | build 6 | main.spec 7 | pykinect_recorder.exe 8 | main/synology_utils.py 9 | pykinect_recorder.egg-info 10 | *.exe 11 | datas/* 12 | pykinect_recorder/main/synology_utils.py 13 | dataset_list.txt 14 | .pytest_cache 15 | metadata.csv 16 | .vscode 17 | *.mkv 18 | examples 19 | pykinect_recorder/dev 20 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 |   os: ubuntu-22.04 5 |   tools: 6 |     python: "3.10" 7 |   jobs: 8 |     post_create_environment: 9 |       - pip install poetry 10 |       - poetry config virtualenvs.create false 11 |     post_install: 12 |       - poetry install --with docs 13 | 14 | sphinx: 15 |   fail_on_warning: true 16 |   configuration: docs/source/conf.py 17 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/__init__.py: -------------------------------------------------------------------------------- 1 | # import logging 2 | # 
import sys 3 | 4 | __appname__ = "AzureKinectCamera" 5 | 6 | # from . import pyk4a 7 | from .k4a import * 8 | from .k4arecord import * 9 | from .utils import * 10 | from .pykinect import * 11 | 12 | __all__ = [ 13 |     "Calibration", "Device", "Capture", "Image", "ImuSample", "Transformation", 14 |     "Configuration", "default_configuration", "initialize_libraries", "start_device", 15 |     "start_playback", "colorize", 16 | ] -------------------------------------------------------------------------------- /docs/source/k4a.rst: -------------------------------------------------------------------------------- 1 | K4A 2 | === 3 | 4 | Hello. 5 | 6 | Functions 7 | --------- 8 | 9 | Hello. 10 | 11 | .. automodule:: pykinect_recorder.pyk4a.k4a._k4a 12 |    :members: 13 |    :undoc-members: 14 |    :show-inheritance: 15 | 16 | Structures (Types) 17 | ------------------ 18 | 19 | Hello. 20 | 21 | .. automodule:: pykinect_recorder.pyk4a.k4a._k4atypes 22 |    :members: 23 |    :undoc-members: 24 |    :show-inheritance: 25 | 26 | Wrappers 27 | -------- 28 | 29 | Hello. 30 | 31 | .. toctree:: 32 | 33 |    k4a/calibration 34 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to pykinect-recorder's documentation! 2 | ============================================= 3 | 4 | **pykinect-recorder** is an educational/industrial library 5 | that provides sensor recording (including audio), playback, 6 | and computer vision solutions through a Python wrapper of 7 | the Azure Kinect Sensor SDK. 8 | 9 | 10 | .. note:: 11 | 12 |    This project is under active development. 13 | 14 | 15 | .. toctree:: 16 |    :maxdepth: 2 17 |    :caption: Package Reference 18 | 19 |    pyk4a 20 |    pyrealsense 21 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: "1.2.0" 2 | date-released: "2023-07-16" 3 | message: "If you use this software, please cite it using these metadata." 
4 | authors: 5 | - family-names: Kang 6 | given-names: Kyung-Su 7 | orcid: https://orcid.org/0000-0002-6955-578X 8 | - family-names: Kim 9 | given-names: Young-Il 10 | orcid: https://orcid.org/0000-0002-7371-6101 11 | - family-names: Lee 12 | given-names: YeoReum 13 | title: "PyKinect Recorder: Azure Kinect Sensor with Audio, Vision Solutions" 14 | version: 0.9.3 15 | url: "https://github.com/unerue/pykinect-recorder" -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import qdarktheme 4 | from pykinect_recorder.main_window import MainWindow 5 | from PySide6.QtGui import QIcon 6 | from PySide6.QtWidgets import QApplication, QSystemTrayIcon 7 | 8 | 9 | if __name__ == "__main__": 10 | app = QApplication(sys.argv) 11 | qdarktheme.setup_theme() 12 | # tray_icon = QSystemTrayIcon(QIcon("pykinect_recorder/renderer/public/kinect-sensor.ico")) 13 | # tray_icon.setToolTip("Pykinect Recorder") 14 | # tray_icon.show() 15 | 16 | screen_size = app.primaryScreen().size() 17 | width, height = screen_size.width(), screen_size.height() 18 | main_window = MainWindow(width, height) 19 | main_window.show() 20 | app.exec() 21 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /pykinect_recorder/cli/command.py: -------------------------------------------------------------------------------- 1 | import click 2 | from pykinect_recorder.main_window import MainWindow 3 | import qdarktheme 4 | from PySide6.QtWidgets import QApplication 5 | 6 | 7 | CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) 8 | 9 | 10 | @click.group(context_settings=CONTEXT_SETTINGS) 11 | def cli(): 12 | pass 13 | 14 | 15 | # @click.command(help="pykinect") 16 | # @click.option("--open", is_flag=True) 17 | # def check(open): 18 | # if open: 19 | # # run() 20 | 21 | 22 | def main(): 23 | app = QApplication() 24 | qdarktheme.setup_theme() 25 | screen_rect = app.primaryScreen().size() 26 | width, height = screen_rect.width(), screen_rect.height() 27 | main_window = MainWindow(width, height) 28 | main_window.show() 29 | app.exec() 30 | 31 | 32 | if __name__ == "__main__": 33 | main() 34 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | # name: Python package 2 | 3 | # on: [push] 4 | 5 | # jobs: 6 | # build: 7 | 8 | # runs-on: ubuntu-latest 9 | # strategy: 10 | # matrix: 11 | # python-version: ["3.9", "3.10"] 12 | 13 | # steps: 14 | # - name: Set up Python ${{ matrix.python-version }} 15 | # uses: JRubics/poetry-publish@v1.16 16 | # with: 17 | # python_version: "3.10" 18 | # poetry_version: "==1.4" # (PIP version specifier syntax) 19 | # pypi_token: ${{ secrets.PYPI_TOKEN }} 20 | # build_format: "sdist" 21 | # allow_poetry_pre_release: "yes" 22 | # ignore_dev_requirements: "yes" 23 | # repository_name: "pykinect-recorder" 24 | # repository_url: "https://github.com/unerue/pykinect-recorder" 25 | # repository_username: "unerue" 26 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | 	exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd -------------------------------------------------------------------------------- /tests/test_dependency.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import pytest 3 | from pykinect_recorder.pyk4a import pykinect 4 | from pykinect_recorder.pyk4a.k4a import _k4a 5 | from pykinect_recorder.pyk4a.k4a import _k4atypes 6 | 7 | 8 | def test_setup_library(): 9 |     """ 10 |     Check that the Azure Kinect SDK is installed in the execution environment. 11 | 12 |     The Azure Kinect SDK can be installed on Linux and Windows. 13 | 14 |     The function adapts automatically to the execution environment. 15 |     """ 16 |     assert pykinect.initialize_libraries() 17 | 18 | 19 | def test_device_open(): 20 |     """ 21 |     Check the connection between the desktop and the Azure Kinect camera. 22 |     """ 23 |     device_handle = _k4atypes.k4a_device_t() 24 |     assert _k4a.k4a_device_open(0, device_handle) == _k4atypes.K4A_RESULT_SUCCEEDED 25 | 26 | 27 | if __name__ == "__main__": 28 |     test_setup_library() 29 |     test_device_open() 30 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/viewer_3dsensors.py: -------------------------------------------------------------------------------- 1 | from PySide6.QtCore import QSize 2 | from PySide6.QtWidgets import QFrame, QHBoxLayout, QRadioButton 3 | from ..common_widgets import Frame 4 | 5 | 6 | class Viewer3DSensors(QFrame): 7 |     def __init__(self): 8 |         super().__init__() 9 |         self.setContentsMargins(0, 0, 0, 0) 10 |         self.setMinimumSize(QSize(920, 670)) 11 |         self.setMaximumSize(QSize(1190, 1030)) 12 |         self.setStyleSheet("background-color: black;") 13 | 14 |         self.main_layout = QHBoxLayout() 15 |         self.main_layout.setSpacing(0) 16 |         self.main_layout.setContentsMargins(0, 0, 0, 0) 17 | 18 |         self.frame_sensor = QFrame() 19 |         self.frame_sensor.setFixedHeight(670)  # assumed placeholder height: the original call was missing its required int argument 20 | 21 |         # self.btn_frame = QFrame() 22 | 23 |         self.btn_rgb = QRadioButton("RGB") 24 |         self.btn_depth = QRadioButton("Depth") 25 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4arecord/datablock.py: -------------------------------------------------------------------------------- 1 | from . 
import _k4arecord 2 | from ..k4a import _k4a 3 | 4 | 5 | class Datablock: 6 |     def __init__(self, modulePath): 7 |         self._handle = _k4arecord.k4a_playback_data_block_t() 8 | 9 |     def __del__(self): 10 |         self.reset() 11 | 12 |     def is_valid(self): 13 |         return self._handle is not None  # was self.datablock_handle, an attribute that is never set 14 | 15 |     def handle(self): 16 |         return self._handle 17 | 18 |     def reset(self): 19 |         if self.is_valid(): 20 |             _k4arecord.k4a_playback_data_block_release(self._handle) 21 |             self._handle = None 22 | 23 |     def get_device_timestamp_usec(self): 24 |         return int(_k4arecord.k4a_playback_data_block_get_device_timestamp_usec(self._handle)) 25 | 26 |     def get_buffer_size(self): 27 |         return int(_k4arecord.k4a_playback_data_block_get_buffer_size(self._handle)) 28 | 29 |     def get_buffer(self): 30 |         if not self.is_valid(): 31 |             return None 32 | 33 |         return _k4arecord.k4a_playback_data_block_get_buffer(self._handle) 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 teamvisual 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
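A minimal end-to-end sketch of the pyk4a pieces in this dump (initialize_libraries() and start_device() from pykinect.py), placed here before the k4abt wrappers. This is not a file from the repo: device.update() and capture.get_color_image() returning an (ok, image) pair mirror the Playback usage in playback_sensors.py and split_data.py and are assumed to carry over to Device, whose source is not shown in this dump.

```python
import cv2

from pykinect_recorder.pyk4a import pykinect

# Load the Azure Kinect SDK shared libraries before touching the device.
pykinect.initialize_libraries()

# Open device 0 with default_configuration; start_device() also accepts
# record=True and record_filepath="output.mkv" to write an .mkv while streaming.
device = pykinect.start_device()

while True:
    capture = device.update()               # assumed Device API (see note above)
    ret, color = capture.get_color_image()  # (ok flag, BGR ndarray)
    if ret:
        cv2.imshow("Azure Kinect - color", color)
    if cv2.waitKey(1) == ord("q"):
        break
```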
-------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4abt/body.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._k4abtTypes import K4ABT_JOINT_COUNT 4 | from .joint import Joint 5 | 6 | 7 | class Body: 8 |     def __init__(self, skeleton_handle): 9 |         if skeleton_handle: 10 |             self._handle = skeleton_handle 11 |             self.initialize() 12 | 13 |     def json(self): 14 |         return self._handle.__iter__() 15 | 16 |     def numpy(self): 17 |         return np.array([joint.numpy() for joint in self.joints]) 18 | 19 |     def __del__(self): 20 |         self.destroy() 21 | 22 |     def is_valid(self): 23 |         return self._handle 24 | 25 |     def handle(self): 26 |         return self._handle 27 | 28 |     def destroy(self): 29 |         if self.is_valid(): 30 |             self._handle = None 31 | 32 |     def initialize(self): 33 |         joints = np.ndarray((K4ABT_JOINT_COUNT,), dtype=np.object_) 34 | 35 |         for i in range(K4ABT_JOINT_COUNT): 36 |             joints[i] = Joint(self._handle.skeleton.joints[i], i) 37 | 38 |         self.joints = joints 39 | 40 |     def __str__(self): 41 |         """Print the current settings and a short explanation""" 42 |         message = "\nBody:\n" + "".join(f"{joint.__str__()}" for joint in self.joints) 43 | 44 |         return message 45 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -- General configuration 2 | 3 | extensions = [ 4 |     "sphinx.ext.duration", 5 |     "sphinx.ext.doctest", 6 |     "sphinx.ext.autodoc", 7 |     "sphinx.ext.autosummary", 8 |     "sphinx.ext.intersphinx", 9 |     "sphinx.ext.napoleon", 10 |     "sphinx_search.extension", 11 | ] 12 | 13 | intersphinx_mapping = { 14 |     "python": ("https://docs.python.org/3/", None), 15 |     "sphinx": ("https://www.sphinx-doc.org/en/master/", None), 16 | } 17 | intersphinx_disabled_domains = ["std"] 18 | 19 | templates_path = ["_templates"] 20 | 21 | # -- Options for HTML output 22 | 23 | html_theme = "sphinx_rtd_theme" 24 | 25 | # -- Options for EPUB output 26 | epub_show_urls = "footnote" 27 | 28 | # sphinx apidoc 29 | apidoc_module_dir = "../../pykinect_recorder" 30 | apidoc_excluded_paths = ["tests"] 31 | 32 | # Napoleon settings 33 | napoleon_google_docstring = True 34 | napoleon_numpy_docstring = True 35 | napoleon_include_init_with_doc = False 36 | napoleon_include_private_with_doc = False 37 | napoleon_include_special_with_doc = True 38 | napoleon_use_admonition_for_examples = False 39 | napoleon_use_admonition_for_notes = False 40 | napoleon_use_admonition_for_references = False 41 | napoleon_use_ivar = False 42 | napoleon_use_param = True 43 | napoleon_use_rtype = True 44 | napoleon_preprocess_types = False 45 | napoleon_type_aliases = None 46 | napoleon_attr_annotations = True 47 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4abt/joint2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ._k4abtTypes import K4ABT_JOINT_NAMES 3 | 4 | 5 | class Joint2d: 6 |     def __init__(self, joint2d_handle, id): 7 |         if joint2d_handle: 8 |             self._handle = joint2d_handle 9 |             self.position = joint2d_handle.position.xy 10 |             self.confidence_level = joint2d_handle.confidence_level 11 |             self.id = id 12 |             self.name = self.get_name() 13 | 14 |     def __del__(self): 15 |         self.destroy() 16 | 17 |     def numpy(self): 18 |         return 
np.array([self.position.x, self.position.y]) 19 | 20 | def is_valid(self): 21 | return self._handle 22 | 23 | def handle(self): 24 | return self._handle 25 | 26 | def destroy(self): 27 | if self.is_valid(): 28 | self._handle = None 29 | 30 | def get_coordinates(self): 31 | return (int(self.position.x), int(self.position.y)) 32 | 33 | def get_name(self): 34 | return K4ABT_JOINT_NAMES[self.id] 35 | 36 | def __str__(self): 37 | """Print the current settings and a short explanation""" 38 | message = ( 39 | f"{self.name} Joint 2d info: \n" 40 | f"\tPixel: [{self.position.x},{self.position.y}]\n" 41 | f"\tconfidence: {self.confidence_level} \n\n" 42 | ) 43 | return message 44 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/sidebar_control.py: -------------------------------------------------------------------------------- 1 | from PySide6.QtCore import Slot 2 | from PySide6.QtWidgets import QStackedLayout, QFrame 3 | 4 | from .sidebar_solutions import SolutionSidebar 5 | from .sidebar_record_control import ViewerSidebar 6 | from .sidebar_explorer import ExplorerSidebar 7 | from ..signals import all_signals 8 | 9 | 10 | class StackedSidebar(QFrame): 11 | def __init__(self) -> None: 12 | super().__init__() 13 | self.setMaximumWidth(330) 14 | self.setContentsMargins(0, 0, 0, 0) 15 | self.main_layout = QStackedLayout() 16 | self.sidebar_viewer = ViewerSidebar() 17 | self.sidebar_explorer = ExplorerSidebar() 18 | self.sidebar_solution = SolutionSidebar() 19 | 20 | self.main_layout.addWidget(self.sidebar_viewer) 21 | self.main_layout.addWidget(self.sidebar_explorer) 22 | self.main_layout.addWidget(self.sidebar_solution) 23 | 24 | self.main_layout.setCurrentIndex(0) 25 | self.setLayout(self.main_layout) 26 | 27 | all_signals.option_signals.stacked_sidebar_status.connect(self.set_current_widget) 28 | 29 | @Slot(str) 30 | def set_current_widget(self, value): 31 | if value == "explorer": 32 | self.main_layout.setCurrentWidget(self.sidebar_explorer) 33 | elif value == "solution": 34 | self.main_layout.setCurrentWidget(self.sidebar_solution) 35 | else: 36 | self.main_layout.setCurrentWidget(self.sidebar_viewer) 37 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/viewer_control.py: -------------------------------------------------------------------------------- 1 | from PySide6.QtCore import Slot, Qt 2 | from PySide6.QtWidgets import QStackedLayout, QFrame, QHBoxLayout, QWidget 3 | 4 | from .viewer_sensors import SensorViewer 5 | from .viewer_playback import PlaybackViewer 6 | from .viewer_solution import ViewerSolution 7 | from ..signals import all_signals 8 | 9 | 10 | class StackedViewer(QFrame): 11 | def __init__(self) -> None: 12 | super().__init__() 13 | 14 | self.setMaximumHeight(2080) 15 | self.setMaximumWidth(2300) 16 | self.setContentsMargins(0, 0, 0, 0) 17 | self.main_layout = QStackedLayout() 18 | # self.main_layout.setAlignment(Qt.AlignTop | Qt.AlignLeft) 19 | self.main_viewer = SensorViewer() 20 | self.main_explorer = PlaybackViewer() 21 | self.main_solution = ViewerSolution() 22 | 23 | self.main_layout.addWidget(self.main_viewer) 24 | self.main_layout.addWidget(self.main_explorer) 25 | self.main_layout.addWidget(self.main_solution) 26 | 27 | self.main_layout.setCurrentIndex(0) 28 | self.setLayout(self.main_layout) 29 | 30 | all_signals.option_signals.stacked_sidebar_status.connect(self.set_current_widget) 31 | 32 | @Slot(str) 33 | def 
set_current_widget(self, value): 34 |         if value == "explorer": 35 |             self.main_layout.setCurrentWidget(self.main_explorer) 36 |         elif value == "solution": 37 |             self.main_layout.setCurrentWidget(self.main_solution) 38 |         else: 39 |             self.main_layout.setCurrentWidget(self.main_viewer) 40 |             all_signals.option_signals.clear_frame.emit(True) 41 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "pykinect-recorder" 3 | version = "0.9.4" 4 | description = "" 5 | license = "MIT" 6 | authors = [ 7 |     "Kyung-Su Kang ", 8 |     "Young-Il Kim ", 9 |     "Yeo-Reum Lee ", 10 | ] 11 | readme = ["README.md", "LICENSE"] 12 | packages = [{include = "pykinect_recorder"}] 13 | 14 | repository = "https://github.com/unerue/pykinect-recorder" 15 | keywords = ["azure", "kinect", "deep-learning", "computer-vision"] 16 | 17 | [tool.poetry.scripts] 18 | pykinect = "pykinect_recorder.cli.command:main" 19 | 20 | [tool.poetry.dependencies] 21 | python = "<3.12,>=3.9.1" 22 | pyside6 = "^6.4.2" 23 | open3d = "^0.17.0" 24 | pyqtdarktheme = "^2.1.0" 25 | opencv-python = "^4.7.0.72" 26 | matplotlib = "^3.7.1" 27 | sounddevice = "^0.4.6" 28 | soundfile = "^0.12.1" 29 | scikit-learn = "^1.2.2" 30 | qtawesome = "^1.2.3" 31 | pandas = "^2.0.2" 32 | superqt = "^0.4.1" 33 | 34 | [tool.poetry.group.dev.dependencies] 35 | pytest = "^7.2.2" 36 | flake8 = "^6.0.0" 37 | black = "^23.1.0" 38 | isort = "^5.12.0" 39 | mypy = "^1.1.1" 40 | pyinstaller = "^5.10.1" 41 | 42 | [tool.poetry.group.docs.dependencies] 43 | sphinx = "^4.5.0" 44 | sphinx-rtd-theme = "^1.0.0" 45 | readthedocs-sphinx-search = "^0.3.1" 46 | pyyaml = "^6.0" 47 | addict = "^2.4.0" 48 | tqdm = "^4.65.0" 49 | 50 | [build-system] 51 | requires = ["poetry-core"] 52 | build-backend = "poetry.core.masonry.api" 53 | 54 | [tool.pytest.ini_options] 55 | testpaths = ["tests"] 56 | 57 | [tool.black] 58 | line-length = 120 59 | target-version = ["py39", "py310"] 60 | exclude = """.venv|venv|.env""" 61 | 62 | [tool.isort] 63 | skip = [".gitignore"] 64 | 65 | [tool.flake8] 66 | exclude = ".venv" 67 | max-line-length = 120 68 | 69 | [tool.mypy] 70 | exclude = ["tests"] 71 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4abt/joint.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ._k4abtTypes import K4ABT_JOINT_NAMES 3 | 4 | 5 | class Joint: 6 |     def __init__(self, joint_handle, id): 7 |         if joint_handle: 8 |             self._handle = joint_handle 9 |             self.position = joint_handle.position.xyz 10 |             self.orientation = joint_handle.orientation.wxyz 11 |             self.confidence_level = joint_handle.confidence_level 12 |             self.id = id 13 |             self.name = self.get_name() 14 | 15 |     def __del__(self): 16 |         self.destroy() 17 | 18 |     def numpy(self): 19 |         return np.array( 20 |             [ 21 |                 self.position.x, 22 |                 self.position.y, 23 |                 self.position.z, 24 |                 self.orientation.w, 25 |                 self.orientation.x, 26 |                 self.orientation.y, 27 |                 self.orientation.z, 28 |                 self.confidence_level, 29 |             ] 30 |         ) 31 | 32 |     def is_valid(self): 33 |         return self._handle 34 | 35 |     def handle(self): 36 |         return self._handle 37 | 38 |     def destroy(self): 39 |         if self.is_valid(): 40 |             self._handle = None 41 | 42 |     def get_name(self): 43 |         return K4ABT_JOINT_NAMES[self.id] 44 | 45 |     def __str__(self): 46 |         """Print the current settings and a short explanation""" 
message = ( 48 | f"{self.name} Joint info: \n" 49 | f"\tposition: [{self.position.x},{self.position.y},{self.position.z}]\n" 50 | f"\torientation: [{self.orientation.w},{self.orientation.x},{self.orientation.y},{self.orientation.z}]\n" 51 | f"\tconfidence: {self.confidence_level} \n\n" 52 | ) 53 | return message 54 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4a/imu_sample.py: -------------------------------------------------------------------------------- 1 | from ..utils import get_dict 2 | 3 | 4 | class ImuSample: 5 | def __init__(self, imu_sample_struct): 6 | self._struct = imu_sample_struct 7 | self.parse_data() 8 | 9 | def __del__(self): 10 | self.reset() 11 | 12 | def is_valid(self): 13 | return self._struct 14 | 15 | def struct(self): 16 | return self._struct 17 | 18 | def reset(self): 19 | if self.is_valid(): 20 | self._struct = None 21 | 22 | def parse_data(self): 23 | self.imu_sample_dict = get_dict(self._struct) 24 | 25 | # Convert the acc and gyro dicts to numpy array 26 | self.imu_sample_dict["acc_sample"] = self.imu_sample_dict["acc_sample"]["v"] 27 | self.imu_sample_dict["gyro_sample"] = self.imu_sample_dict["gyro_sample"]["v"] 28 | 29 | @property 30 | def temp(self): 31 | return self.get_temp() 32 | 33 | @property 34 | def acc(self): 35 | return self.get_acc() 36 | 37 | @property 38 | def acc_time(self): 39 | return self.get_acc_time() 40 | 41 | @property 42 | def gyro(self): 43 | return self.get_gyro() 44 | 45 | @property 46 | def gyro_time(self): 47 | return self.get_gyro_time() 48 | 49 | def get_temp(self): 50 | return self.imu_sample_dict["temperature"] 51 | 52 | def get_acc(self): 53 | return self.imu_sample_dict["acc_sample"] 54 | 55 | def get_acc_time(self): 56 | return self.imu_sample_dict["acc_timestamp_usec"] 57 | 58 | def get_gyro(self): 59 | return self.imu_sample_dict["gyro_sample"] 60 | 61 | def get_gyro_time(self): 62 | return self.imu_sample_dict["gyro_timestamp_usec"] 63 | 64 | def get_sample(self): 65 | return self.imu_sample_dict 66 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/signals.py: -------------------------------------------------------------------------------- 1 | from PySide6.QtCore import Signal, QObject 2 | from PySide6.QtGui import QImage 3 | 4 | 5 | class OptionSignals(QObject): 6 | # Sidebar stacked widget signals 7 | stacked_sidebar_status = Signal(str) 8 | camera_connect_status = Signal(bool) 9 | 10 | # Sidebar button signals 11 | sidebar_toggle = Signal(bool) 12 | camera_option = Signal(dict) 13 | device_option = Signal(str) 14 | color_option = Signal(str) 15 | 16 | device_serial_number = Signal(str) 17 | save_filepath = Signal(str) 18 | clear_frame = Signal(bool) 19 | 20 | 21 | class RecorderSignals(QObject): 22 | rgb_image = Signal(QImage) 23 | depth_image = Signal(QImage) 24 | ir_image = Signal(QImage) 25 | record_time = Signal(float) 26 | video_fps = Signal(int) 27 | imu_acc_data = Signal(list) 28 | imu_gyro_data = Signal(list) 29 | audio_data = Signal(list) 30 | is_sidebar_enable = Signal(bool) 31 | 32 | 33 | class PlaybackSignals(QObject): 34 | rgb_image = Signal(QImage) 35 | depth_image = Signal(QImage) 36 | ir_image = Signal(QImage) 37 | record_time = Signal(float) 38 | video_fps = Signal(int) 39 | imu_acc_data = Signal(list) 40 | imu_gyro_data = Signal(list) 41 | 42 | playback_filepath = Signal(str) 43 | time_control = Signal(int) 44 | time_value = Signal(int) 45 | 46 | # Video clipping signals 47 | 
clip_option = Signal(str) 48 |     video_total_frame = Signal(int) 49 |     current_frame_cnt = Signal(int) 50 | 51 | 52 | class AllSignals(QObject): 53 |     """ 54 |     This class manages all signals used throughout this project. 55 |     """ 56 |     window_control = Signal(str) 57 |     option_signals = OptionSignals() 58 |     record_signals = RecorderSignals() 59 |     playback_signals = PlaybackSignals() 60 | 61 | 62 | all_signals = AllSignals() 63 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/logger.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | 4 | import termcolor 5 | 6 | from ..pyk4a import __appname__ 7 | 8 | COLORS = { 9 |     "WARNING": "yellow", 10 |     "INFO": "white", 11 |     "DEBUG": "blue", 12 |     "CRITICAL": "red", 13 |     "ERROR": "red", 14 | } 15 | 16 | 17 | class ColoredFormatter(logging.Formatter): 18 |     def __init__(self, fmt, use_color=True): 19 |         logging.Formatter.__init__(self, fmt) 20 |         self.use_color = use_color 21 | 22 |     def format(self, record): 23 |         levelname = record.levelname 24 |         if self.use_color and levelname in COLORS: 25 | 26 |             def colored(text): 27 |                 return termcolor.colored( 28 |                     text, 29 |                     color=COLORS[levelname], 30 |                     attrs=["bold"], 31 |                 ) 32 | 33 |             record.levelname2 = colored("{:<7}".format(record.levelname)) 34 |             record.message2 = colored(record.msg) 35 | 36 |             asctime2 = datetime.datetime.fromtimestamp(record.created) 37 |             record.asctime2 = termcolor.colored(asctime2, color="green") 38 | 39 |             record.module2 = termcolor.colored(record.module, color="cyan") 40 |             record.funcName2 = termcolor.colored(record.funcName, color="cyan") 41 |             record.lineno2 = termcolor.colored(record.lineno, color="cyan") 42 |         return logging.Formatter.format(self, record) 43 | 44 | 45 | class ColoredLogger(logging.Logger): 46 |     FORMAT = "[%(levelname2)s] %(module2)s:%(funcName2)s:%(lineno2)s - %(message2)s" 47 | 48 |     def __init__(self, name): 49 |         logging.Logger.__init__(self, name, logging.INFO) 50 | 51 |         color_formatter = ColoredFormatter(self.FORMAT) 52 | 53 |         console = logging.StreamHandler() 54 |         console.setFormatter(color_formatter) 55 | 56 |         self.addHandler(console) 57 | 58 | 59 | logger = logging.getLogger(__appname__) 60 | logger.__class__ = ColoredLogger 61 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/viewer_audio.py: -------------------------------------------------------------------------------- 1 | from PySide6.QtCore import Qt, QSize 2 | from PySide6.QtWidgets import QFrame, QVBoxLayout, QSizePolicy 3 | from PySide6.QtCharts import QChart, QChartView, QLineSeries, QValueAxis 4 | 5 | from ..common_widgets import Label 6 | 7 | SAMPLE_COUNT = 10000 8 | 9 | 10 | class AudioSensor(QFrame): 11 |     def __init__(self, min_size: tuple[int, int], max_size: tuple[int, int]) -> None: 12 |         super().__init__() 13 |         self.setMinimumSize(QSize(min_size[0], min_size[1])) 14 |         self.setMaximumSize(QSize(max_size[0], max_size[1])) 15 |         self.setContentsMargins(0, 0, 0, 0) 16 |         self.setObjectName("AudioSensor") 17 |         self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) 18 | 19 |         self.main_layout = QVBoxLayout() 20 |         self.main_layout.setSpacing(0) 21 |         self.main_layout.setContentsMargins(0, 0, 0, 0) 22 |         self.main_layout.setAlignment(Qt.AlignTop) 23 | 24 |         self.label_title = Label("Audio Sensor", orientation=Qt.AlignCenter) 25 |         self.label_title.setMinimumHeight(30) 26 | 
self.label_title.setMaximumHeight(50) 27 | 28 |         self.series = QLineSeries() 29 |         self.chart = QChart() 30 |         self.chart.setTheme(QChart.ChartThemeDark) 31 |         self.chart.addSeries(self.series) 32 |         self.axis_x = QValueAxis() 33 |         self.axis_x.setRange(0, SAMPLE_COUNT) 34 |         self.axis_x.setLabelFormat("%g") 35 |         self.axis_y = QValueAxis() 36 |         self.axis_y.setRange(-1, 1) 37 |         self.chart.setAxisX(self.axis_x, self.series) 38 |         self.chart.setAxisY(self.axis_y, self.series) 39 |         self.chart.legend().hide() 40 | 41 |         self.chart_view = QChartView(self.chart) 42 |         self.chart_view.setContentsMargins(0, 0, 0, 0) 43 |         self.chart_view.setMinimumSize(QSize(min_size[0], min_size[1] - 30)) 44 |         self.chart_view.setMaximumSize(QSize(max_size[0], (max_size[1] - 50))) 45 | 46 |         self.main_layout.addWidget(self.label_title) 47 |         self.main_layout.addWidget(self.chart_view) 48 |         self.setLayout(self.main_layout) 49 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4arecord/record_configuration.py: -------------------------------------------------------------------------------- 1 | from ..k4a import _k4a 2 | 3 | 4 | class RecordConfiguration: 5 |     def __init__(self, configuration_handle=None): 6 |         self._handle = configuration_handle 7 | 8 |     def handle(self): 9 |         return self._handle 10 | 11 |     def __getattr__(self, name): 12 |         """Pass the handle parameter, when asked""" 13 | 14 |         if name == "_handle": 15 |             return self.__dict__[name] 16 |         else: 17 |             return self._handle.__dict__[name] 18 | 19 |     def __str__(self): 20 |         """Print the current settings and a short explanation""" 21 |         message = ( 22 |             "Record configuration: \n" 23 |             f"\tcolor_format: {self._handle.color_format} \n\t(0:JPG, 1:NV12, 2:YUY2, 3:BGRA32)\n\n" 24 |             f"\tcolor_resolution: {self._handle.color_resolution} \n\t(0:OFF, 1:720p, 2:1080p, 3:1440p, 4:1536p, 5:2160p, 6:3072p)\n\n" 25 |             f"\tdepth_mode: {self._handle.depth_mode} \n\t(0:OFF, 1:NFOV_2X2BINNED, 2:NFOV_UNBINNED, 3:WFOV_2X2BINNED, 4:WFOV_UNBINNED, 5:Passive IR)\n\n" 26 |             f"\tcamera_fps: {self._handle.camera_fps} \n\t(0:5 FPS, 1:15 FPS, 2:30 FPS)\n\n" 27 |             f"\tcolor_track_enabled: {self._handle.color_track_enabled} \n\t(True or False). If Color camera images exist\n\n" 28 |             f"\tdepth_track_enabled: {self._handle.depth_track_enabled} \n\t(True or False). If Depth camera images exist\n\n" 29 |             f"\tir_track_enabled: {self._handle.ir_track_enabled} \n\t(True or False). If IR camera images exist\n\n" 30 |             f"\timu_track_enabled: {self._handle.imu_track_enabled} \n\t(True or False). If IMU samples exist\n\n" 31 |             f"\tdepth_delay_off_color_usec: {self._handle.depth_delay_off_color_usec} us. \n\tDelay between the color image and the depth image\n\n" 32 |             f"\twired_sync_mode: {self._handle.wired_sync_mode}\n\t(0:Standalone mode, 1:Master mode, 2:Subordinate mode)\n\n" 33 |             f"\tsubordinate_delay_off_master_usec: {self._handle.subordinate_delay_off_master_usec} us.\n\tThe external synchronization timing.\n\n" 34 |             f"\tstart_timestamp_offset_usec: {self._handle.start_timestamp_offset_usec} us. 
\n\tStart timestamp offset.\n\n" 35 |         ) 36 |         return message 37 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/pykinect.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | 3 | from .k4a import _k4a, Device, default_configuration 4 | from .k4arecord import _k4arecord 5 | from .k4arecord.playback import Playback 6 | from .utils import ( 7 |     get_k4a_module_path, 8 |     get_k4abt_module_path, 9 |     get_k4arecord_module_path, 10 | ) 11 | 12 | 13 | def initialize_libraries(module_k4a_path=None, module_k4abt_path=None, track_body=False) -> bool: 14 |     # Search the module path for k4a if not available 15 |     if module_k4a_path is None: 16 |         module_k4a_path = get_k4a_module_path() 17 | 18 |     module_k4arecord_path = get_k4arecord_module_path(module_k4a_path) 19 |     _flag = True 20 |     try: 21 |         # Initialize k4a related wrappers 22 |         init_k4a(module_k4a_path) 23 | 24 |         # Initialize k4arecord related wrappers 25 |         init_k4arecord(module_k4arecord_path) 26 | 27 |         # if track_body: 28 |         #     # Search the module path for k4abt if not available 29 |         #     if module_k4abt_path is None: 30 |         #         module_k4abt_path = get_k4abt_module_path() 31 | 32 |         #     # Initialize k4abt related wrappers 33 |         #     init_k4abt(module_k4abt_path) 34 |     except Exception: 35 |         print("Could not load the Azure Kinect SDK libraries (.dll/.so)") 36 |         _flag = False 37 | 38 |     finally: 39 |         return _flag 40 | 41 | 42 | def init_k4a(module_k4a_path): 43 |     _k4a.setup_library(module_k4a_path) 44 | 45 | 46 | # def init_k4abt(module_k4abt_path): 47 | #     _k4abt.setup_library(module_k4abt_path) 48 | 49 | 50 | def init_k4arecord(module_k4arecord_path): 51 |     _k4arecord.setup_library(module_k4arecord_path) 52 | 53 | 54 | def start_device( 55 |     device_index=0, 56 |     config=default_configuration, 57 |     record=False, 58 |     record_filepath="output.mkv", 59 | ): 60 |     # Create device object 61 |     device = Device(device_index) 62 | 63 |     # Start device 64 |     device.start(config, record, record_filepath) 65 | 66 |     return device 67 | 68 | # def start_body_tracker(model_type=_k4abt.K4ABT_DEFAULT_MODEL, calibration=None): 69 | #     if calibration: 70 | #         return Tracker(calibration, model_type) 71 | #     else: 72 | #         return Tracker(Device.calibration, model_type) 73 | 74 | 75 | def start_playback(filepath): 76 |     return Playback(filepath) 77 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/statusbar.py: -------------------------------------------------------------------------------- 1 | import os 2 | import platform 3 | 4 | from PySide6.QtCore import QSize, Qt, Slot 5 | from PySide6.QtWidgets import QFrame, QHBoxLayout, QSizeGrip 6 | 7 | from ..common_widgets import Label 8 | from ..signals import all_signals 9 | 10 | class StatusBar(QFrame): 11 |     def __init__(self): 12 |         super().__init__() 13 |         self.setFixedHeight(30) 14 |         self.setContentsMargins(0, 0, 0, 0) 15 |         self.setStyleSheet(" background-color: #007acc; border-radius: 0px;") 16 | 17 |         self.main_layout = QHBoxLayout() 18 |         self.main_layout.setSpacing(0) 19 |         self.main_layout.setContentsMargins(0, 0, 0, 0) 20 | 21 |         self.user_name = os.getenv("USERNAME") 22 |         if platform.system().startswith("Windows"): 23 |             self.base_path = os.path.join("C:\\", "Users", self.user_name, "Videos") 24 |         else: 25 |             self.base_path = os.path.join("/home", self.user_name, "Videos") 26 | 27 |         self.sub_data_layout = QHBoxLayout() 28 |         self.label_save_path = Label(f"Save dir: {self.base_path}", fontsize=12) 29 | 
self.sub_data_layout.addWidget(self.label_save_path) 30 |         self.sub_data_layout.setAlignment(Qt.AlignLeft) 31 |         self.main_layout.addLayout(self.sub_data_layout) 32 | 33 |         self.frame_size_layout = QHBoxLayout() 34 |         self.frame_size_grip = QFrame() 35 |         self.frame_size_grip.setFixedWidth(30) 36 |         self.frame_size_grip.setObjectName("frame_size_grip") 37 |         self.frame_size_grip.setMinimumSize(QSize(30, 10)) 38 |         self.frame_size_grip.setMaximumSize(QSize(30, 16777215)) 39 |         self.frame_size_grip.setFrameShape(QFrame.NoFrame) 40 |         self.frame_size_grip.setFrameShadow(QFrame.Raised) 41 |         self.frame_size_layout.addWidget(self.frame_size_grip) 42 |         self.frame_size_layout.setAlignment(Qt.AlignRight) 43 | 44 |         self.sizegrip = QSizeGrip(self.frame_size_grip) 45 |         self.sizegrip.setStyleSheet("width: 30px; height: 30px; margin: 0px; padding: 0px;") 46 |         self.main_layout.addLayout(self.frame_size_layout) 47 |         self.setLayout(self.main_layout) 48 | 49 |         all_signals.option_signals.save_filepath.connect(self.set_save_path) 50 | 51 |     @Slot(str) 52 |     def set_save_path(self, value): 53 |         self.label_save_path.setText("Save dir: " + value) -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4arecord/_k4arecordTypes.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | 3 | 4 | # K4A_DECLARE_HANDLE(k4a_record_t); 5 | class 
_handle_k4a_record_t(ctypes.Structure): 6 |     _fields_ = [ 7 |         ("_rsvd", ctypes.c_size_t), 8 |     ] 9 | 10 | 11 | # handle to an active recording (streams frames to file) 12 | k4a_record_t = ctypes.POINTER(_handle_k4a_record_t) 13 | 14 | 15 | # K4A_DECLARE_HANDLE(k4a_playback_t); 16 | class _handle_k4a_playback_t(ctypes.Structure): 17 |     _fields_ = [ 18 |         ("_rsvd", ctypes.c_size_t), 19 |     ] 20 | 21 | 22 | # handle for playing back video recorded by k4a_record 23 | k4a_playback_t = ctypes.POINTER(_handle_k4a_playback_t) 24 | 25 | 26 | # K4A_DECLARE_HANDLE(k4a_playback_data_block_t); 27 | class _handle_k4a_playback_data_block_t(ctypes.Structure): 28 |     _fields_ = [ 29 |         ("_rsvd", ctypes.c_size_t), 30 |     ] 31 | 32 | 33 | # Handle to a block of data read from k4a_playback 34 | k4a_playback_data_block_t = ctypes.POINTER(_handle_k4a_playback_data_block_t) 35 | 36 | 37 | # class k4a_stream_result_t(CtypeIntEnum): 38 | k4a_stream_result_t = ctypes.c_int 39 | K4A_STREAM_RESULT_SUCCEEDED = 0 40 | K4A_STREAM_RESULT_FAILED = 1 41 | K4A_STREAM_RESULT_EOF = 2 42 | 43 | # seek origins for navigating a recorded video 44 | k4a_playback_seek_origin_t = ctypes.c_int 45 | K4A_PLAYBACK_SEEK_BEGIN = 0 46 | K4A_PLAYBACK_SEEK_END = 1 47 | K4A_PLAYBACK_SEEK_DEVICE_TIME = 2 48 | 49 | 50 | class _k4a_record_configuration_t(ctypes.Structure): 51 |     _fields_ = [ 52 |         ("color_format", ctypes.c_int), 53 |         ("color_resolution", ctypes.c_int), 54 |         ("depth_mode", ctypes.c_int), 55 |         ("camera_fps", ctypes.c_int), 56 |         ("color_track_enabled", ctypes.c_bool), 57 |         ("depth_track_enabled", ctypes.c_bool), 58 |         ("ir_track_enabled", ctypes.c_bool), 59 |         ("imu_track_enabled", ctypes.c_bool), 60 |         ("depth_delay_off_color_usec", ctypes.c_int32), 61 |         ("wired_sync_mode", ctypes.c_int), 62 |         ("subordinate_delay_off_master_usec", ctypes.c_uint32), 63 |         ("start_timestamp_offset_usec", ctypes.c_uint32), 64 |     ] 65 | 66 | 67 | k4a_record_configuration_t = _k4a_record_configuration_t 68 | 69 | 70 | class _k4a_record_video_settings_t(ctypes.Structure): 71 |     _fields_ = [ 72 |         ("width", ctypes.c_ulong), 73 |         ("height", ctypes.c_ulong), 74 |         ("frame_rate", ctypes.c_ulong), 75 |     ] 76 | 77 | 78 | k4a_record_video_settings_t = _k4a_record_video_settings_t 79 | 80 | 81 | class _k4a_record_subtitle_settings_t(ctypes.Structure): 82 |     _fields_ = [ 83 |         ("high_freq_data", ctypes.c_bool), 84 |     ] 85 | 86 | 87 | k4a_record_subtitle_settings_t = _k4a_record_subtitle_settings_t 88 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 

# pykinect-recorder 2 | 3 | <!-- project logo and badges --> 4 | 5 | ## Description 6 | The pykinect-recorder is an educational/industrial library that provides sensor recording (including audio), playback, and computer vision solutions through a Python wrapper of the Azure Kinect Sensor SDK. 7 | 8 | Recording and playback example below. 9 | 10 | <!-- recording and playback demo --> 11 | 12 | ## Documentation 13 | 14 | You can find the API documentation on our website: https://pykinect-recorder.readthedocs.io/en/latest/index.html. 15 | 16 | For details about the Azure Kinect SDK API, please see the Azure Kinect Sensor SDK GitHub: https://github.com/microsoft/Azure-Kinect-Sensor-SDK. 17 | 18 | 19 | ## Features 20 | 21 | - [x] See RGB, IR, Depth, IMU and Audio data when recording. 22 | - [x] Control recording options (FPS, brightness, ...). 23 | - [x] Change layout with drag and drop. 24 | - [x] Playback recorded video. 25 | - [ ] 3D reconstruction viewer with streaming/recorded video. 26 | - [ ] Sync devices 27 | - [ ] Screen zoom in/out 28 | - [ ] IMU and microphone panel redesign 29 | - [ ] Recording audio. 30 | - [ ] Deep learning inference (mediapipe and native) with streaming/recorded video. 31 | - [ ] Intel RealSense 32 | - [ ] Zenmuse SDK for Python 33 | 34 | 35 | ## Prerequisites 36 | 37 | ### Environment 38 | - Windows 10 (Recommended) 39 | - Windows 11 40 | 41 | ### Install Azure Kinect SDK 42 | - Make sure you download the Azure Kinect SDK before using this repo. 43 | - SDK version 1.4.1 is supported in release 0.9.3. 44 | - You can download the Azure Kinect SDK [here](https://github.com/microsoft/Azure-Kinect-Sensor-SDK/blob/develop/docs/usage.md). 45 | 46 | 47 | ## Installation 48 | 49 | ### Using pip 50 | ```bash 51 | python -m venv .venv 52 | .venv/Scripts/activate.ps1 53 | pip install pykinect-recorder 54 | pykinect 55 | ``` 56 | 57 | ### Using .exe 58 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/synology_utils.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | 4 | from synology_api import downloadstation, filestation 5 | 6 | 7 | def upload(args: argparse.Namespace): 8 |     try: 9 |         f1 = filestation.FileStation( 10 |             args.Synology_Ip, 11 |             args.Synology_Port, 12 |             args.Username, 13 |             args.Password, 14 |             args.secure, 15 |             args.cert_verify, 16 |             args.dsm_version, 17 |             args.debug, 18 |             args.otp_code, 19 |         ) 20 | 21 |         success = f1.upload_file(args.dest_path, args.file_path, verify=args.cert_verify) 22 |         if isinstance(success, tuple): 23 |             raise ConnectionAbortedError 24 |         else: 25 |             print(success) 26 | 27 |     except ValueError: 28 |         print("Connection failed! Please check your IP, port, username, password, and secure settings.") 29 | 30 |     except FileNotFoundError: 31 |         print("file path does not exist! Please check it before running.") 32 | 33 |     except ConnectionAbortedError: 34 |         print("dest path does not match! 
Please Check before run") 35 | 36 | 37 | def download(args: argparse.ArgumentParser): 38 | pass 39 | 40 | 41 | if __name__ == "__main__": 42 | parser = argparse.ArgumentParser( 43 | prog="Synology Nas Upload/Download", 44 | description="Uploading video and audio which is recoded by Azure Kinect", 45 | ) 46 | 47 | ## Synology nas 48 | parser.add_argument( 49 | "--Synology_Ip", 50 | type=str, 51 | default="hnvlab.synology.me", 52 | help="Set Ip address such as xxx.synology.me or 111.111.1.1", 53 | ) 54 | parser.add_argument("--Synology_Port", type=str, default="", help="") 55 | parser.add_argument("--Username", type=str, default="", help="") 56 | parser.add_argument("--Password", type=str, default="", help="") 57 | parser.add_argument("--secure", type=bool, default=True, help="Set True if https is required") 58 | parser.add_argument( 59 | "--cert_verify", 60 | type=bool, 61 | default=True, 62 | help="Set True if you want to verify your certificate", 63 | ) 64 | parser.add_argument("--dsm_version", type=int, default=7) 65 | parser.add_argument("--debug", type=bool, default=True) 66 | parser.add_argument("--otp_code", type=str, default=None) 67 | 68 | parser.add_argument( 69 | "--dest_path", 70 | type=str, 71 | default="/dataset/test", 72 | help="Destination path in synology nas", 73 | ) 74 | parser.add_argument("--file_path", type=str, default="ecord_with_mic.py", help="Upload file path") 75 | 76 | args = parser.parse_args() 77 | # upload(args) 78 | download(args) 79 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/playback_sensors.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | 3 | from PySide6.QtCore import Qt, QTimer, QThread 4 | from PySide6.QtGui import QImage 5 | from ...pyk4a.k4arecord.playback import Playback 6 | from ...pyk4a.utils import colorize 7 | from ..signals import all_signals 8 | 9 | 10 | class PlaybackSensors(QThread): 11 | def __init__(self, playback: Playback) -> None: 12 | super().__init__() 13 | self.playback = playback 14 | dict_fps = {0: "5", 1: "15", 2: "30"} 15 | self.device_fps = int(dict_fps[self.playback.get_record_configuration()._handle.camera_fps]) 16 | 17 | self.timer = QTimer() 18 | self.timer.setInterval(1000 / self.device_fps) 19 | self.timer.timeout.connect(self.run) 20 | all_signals.playback_signals.time_control.connect(self.change_timestamp) 21 | 22 | def change_timestamp(self, time: int): 23 | self.playback.seek_timestamp(time) 24 | self.update_next_frame() 25 | 26 | def update_next_frame(self): 27 | try: 28 | _, current_frame = self.playback.update() 29 | current_imu_data = self.playback.get_next_imu_sample() 30 | current_rgb_frame = current_frame.get_color_image() 31 | current_depth_frame = current_frame.get_colored_depth_image() 32 | current_ir_frame = current_frame.get_ir_image() 33 | 34 | if current_rgb_frame[0]: 35 | rgb_frame = cv2.cvtColor(current_rgb_frame[1], cv2.COLOR_BGR2RGB) 36 | h, w, ch = rgb_frame.shape 37 | rgb_frame = QImage(rgb_frame, w, h, ch * w, QImage.Format_RGB888) 38 | all_signals.playback_signals.rgb_image.emit(rgb_frame) 39 | 40 | if current_depth_frame[0]: 41 | depth_frame = colorize(current_depth_frame[1], (None, 5000), cv2.COLORMAP_HSV) 42 | h, w, ch = depth_frame.shape 43 | depth_frame = QImage(depth_frame, w, h, w * ch, QImage.Format_RGB888) 44 | all_signals.playback_signals.depth_image.emit(depth_frame) 45 | 46 | if current_ir_frame[0]: 47 | ir_frame = colorize(current_ir_frame[1], (None, 
5000), cv2.COLORMAP_BONE) 48 | h, w, ch = ir_frame.shape 49 | ir_frame = QImage(ir_frame, w, h, w * ch, QImage.Format_RGB888) 50 | all_signals.playback_signals.ir_image.emit(ir_frame) 51 | 52 | acc_time = current_imu_data.acc_time 53 | acc_data = current_imu_data.acc 54 | gyro_data = current_imu_data.gyro 55 | 56 | all_signals.playback_signals.video_fps.emit(int(self.device_fps)) 57 | all_signals.playback_signals.record_time.emit(acc_time / 1e6) 58 | all_signals.playback_signals.imu_acc_data.emit(acc_data) 59 | all_signals.playback_signals.imu_gyro_data.emit(gyro_data) 60 | except Exception:  # stop polling once playback runs past the end of the recording 61 | self.timer.stop() 62 | 63 | def run(self): 64 | all_signals.playback_signals.time_value.emit(int(1e6 // self.device_fps)) 65 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/split_data.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import sys 4 | import pandas as pd 5 | from glob import glob 6 | from pathlib import Path 7 | from pykinect_recorder.pyk4a.pykinect import initialize_libraries 8 | from pykinect_recorder.pyk4a.k4arecord import Playback 9 | from pykinect_recorder.pyk4a.k4arecord._k4arecord import K4A_PLAYBACK_SEEK_BEGIN 10 | 11 | 12 | def colorize( 13 | image, 14 | clipping_range, 15 | colormap, 16 | ): 17 | if clipping_range[0] or clipping_range[1]: 18 | img = image.clip(clipping_range[0], clipping_range[1]) # type: ignore 19 | else: 20 | img = image.copy() 21 | img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U) 22 | img = cv2.applyColorMap(img, colormap) 23 | return img 24 | 25 | 26 | if __name__ == "__main__": 27 | initialize_libraries() 28 | # with open("dataset_list.txt", "r", encoding="utf-8") as f: 29 | # file_paths = f.readlines() 30 | file_paths = glob(os.path.join(Path.home(), "Desktop/baby", "*.mkv")) 31 | root_path = "datas" 32 | if not os.path.exists(root_path): 33 | os.mkdir(root_path) 34 | print(len(file_paths)) 35 | 36 | for i, file_path in enumerate(file_paths): 37 | print(i, file_path) 38 | file_name = file_path.split("\\")[-1][:-4] 39 | os.makedirs(os.path.join(root_path, file_name, "rgb"), exist_ok=True) 40 | os.makedirs(os.path.join(root_path, file_name, "ir"), exist_ok=True) 41 | playback = Playback(file_path) 42 | playback.seek_timestamp(offset=333555, origin=K4A_PLAYBACK_SEEK_BEGIN) 43 | cnt = 0 44 | # frame = 100 45 | color_h, color_w = None, None 46 | depth_h, depth_w = None, None 47 | 48 | while True: 49 | ret, current_frame = playback.update() 50 | if ret: 51 | current_rgb_frame = current_frame.get_color_image() 52 | current_ir_frame = current_frame.get_ir_image() 53 | if current_ir_frame[0]: 54 | ir_frame = colorize(current_ir_frame[1], (None, 5000), cv2.COLORMAP_BONE) 55 | depth_h, depth_w, _ = ir_frame.shape 56 | cv2.imwrite( 57 | os.path.join(root_path, file_name, "ir", f"{file_name}_ir_{str(cnt).zfill(6)}.png"), 58 | ir_frame, 59 | ) 60 | 61 | if current_rgb_frame[0]: 62 | rgb_frame = current_rgb_frame[1] 63 | color_h, color_w, _ = rgb_frame.shape 64 | cv2.imwrite( 65 | os.path.join(root_path, file_name, "rgb", f"{file_name}_rgb_{str(cnt).zfill(6)}.jpg"), 66 | rgb_frame, 67 | [cv2.IMWRITE_JPEG_QUALITY, 100], 68 | ) 69 | cnt += 1 70 | else: 71 | break 72 | 73 | df = pd.read_csv("metadata.csv") 74 | length = df.shape[0] 75 | df.loc[length] = [file_name, 333555, cnt, f"'({color_h}, {color_w})'", f"'({depth_h}, {depth_w})'"] 76 | df.to_csv("metadata.csv", index=False) 77 | print(file_name) 78 |
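Note on split_data.py: the script appends rows to a metadata.csv that must already exist, since pd.read_csv("metadata.csv") raises FileNotFoundError otherwise. A minimal bootstrap sketch is below; the column names are assumptions inferred from the five-field rows the script writes (file name, seek offset in usec, frame count, color shape, depth shape); the script itself never names them.

```python
# Hypothetical bootstrap for the metadata.csv that split_data.py appends to.
# Column names here are illustrative assumptions; only the five-field row
# layout is visible in the script itself.
import pandas as pd

columns = ["file_name", "offset_usec", "num_frames", "color_shape", "depth_shape"]
pd.DataFrame(columns=columns).to_csv("metadata.csv", index=False)
```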
-------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/viewer_imu_sensors.py: -------------------------------------------------------------------------------- 1 | from PySide6.QtCore import Qt, QSize 2 | from PySide6.QtWidgets import QFrame, QVBoxLayout, QHBoxLayout, QGridLayout, QSizePolicy 3 | 4 | from ..common_widgets import Label 5 | 6 | 7 | class ImuSensors(QFrame): 8 | def __init__(self, min_size: tuple[int, int], max_size: tuple[int, int]) -> None: 9 | super().__init__() 10 | self.setMinimumSize(QSize(min_size[0], min_size[1])) 11 | self.setMaximumSize(QSize(max_size[0], max_size[1])) 12 | self.setContentsMargins(0, 0, 0, 0) 13 | self.setObjectName("IMUSensor") 14 | self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) 15 | 16 | self.main_layout = QVBoxLayout() 17 | self.main_layout.setSpacing(0) 18 | self.main_layout.setContentsMargins(0, 0, 0, 0) 19 | self.main_layout.setAlignment(Qt.AlignTop) 20 | 21 | self.label_title = Label("IMU Sensor", orientation=Qt.AlignCenter) 22 | self.label_title.setMinimumHeight(30) 23 | self.label_title.setMaximumHeight(50) 24 | 25 | self.grid_layout = QGridLayout() 26 | self.speed_layout = QHBoxLayout() 27 | self.speed_layout.setSpacing(5) 28 | self.speed_layout.setContentsMargins(0, 0, 0, 0) 29 | 30 | self.label_time = Label("Time(s) : ") 31 | self.label_time.setMinimumHeight(30) 32 | self.label_time.setMaximumHeight(50) 33 | 34 | self.label_fps = Label("FPS : ") 35 | self.label_fps.setMinimumHeight(30) 36 | self.label_fps.setMaximumHeight(50) 37 | 38 | self.speed_layout.addWidget(self.label_time) 39 | self.speed_layout.addWidget(self.label_fps) 40 | 41 | self.acc_layout = QVBoxLayout() 42 | self.acc_layout.setSpacing(5) 43 | self.acc_layout.setContentsMargins(0, 0, 0, 0) 44 | 45 | self.label_acc_title = Label("Accelerometer") 46 | self.label_acc_x = Label("X : ") 47 | self.label_acc_y = Label("Y : ") 48 | self.label_acc_z = Label("Z : ") 49 | 50 | self.gyro_layout = QVBoxLayout() 51 | self.gyro_layout.setSpacing(5) 52 | self.gyro_layout.setContentsMargins(0, 0, 0, 0) 53 | 54 | self.label_gyro_title = Label("Gyroscope") 55 | self.label_gyro_x = Label("X : ") 56 | self.label_gyro_y = Label("Y : ") 57 | self.label_gyro_z = Label("Z : ") 58 | 59 | self.acc_layout.addWidget(self.label_acc_title) 60 | self.acc_layout.addWidget(self.label_acc_x) 61 | self.acc_layout.addWidget(self.label_acc_y) 62 | self.acc_layout.addWidget(self.label_acc_z) 63 | 64 | self.gyro_layout.addWidget(self.label_gyro_title) 65 | self.gyro_layout.addWidget(self.label_gyro_x) 66 | self.gyro_layout.addWidget(self.label_gyro_y) 67 | self.gyro_layout.addWidget(self.label_gyro_z) 68 | 69 | self.main_layout.addWidget(self.label_title) 70 | self.main_layout.addLayout(self.speed_layout) 71 | self.main_layout.addWidget(self.label_time) 72 | self.main_layout.addLayout(self.acc_layout) 73 | self.main_layout.addWidget(Label()) 74 | self.main_layout.addLayout(self.gyro_layout) 75 | 76 | self.setLayout(self.main_layout) 77 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4abt/body2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | 4 | from pykinect_azure.k4abt.joint2d import Joint2d 5 | from pykinect_azure.k4abt._k4abtTypes import K4ABT_JOINT_COUNT, K4ABT_SEGMENT_PAIRS 6 | from pykinect_azure.k4abt._k4abtTypes import ( 7 | k4abt_skeleton2D_t, 8 | k4abt_body2D_t, 9 | body_colors, 10 | 
) 11 | from pykinect_azure.k4a._k4atypes import K4A_CALIBRATION_TYPE_DEPTH 12 | 13 | 14 | class Body2d: 15 | def __init__(self, body2d_handle): 16 | if body2d_handle: 17 | self._handle = body2d_handle 18 | self.id = body2d_handle.id 19 | self.initialize_skeleton() 20 | 21 | def __del__(self): 22 | self.destroy() 23 | 24 | def json(self): 25 | return self._handle.__iter__() 26 | 27 | def numpy(self): 28 | return np.array([joint.numpy() for joint in self.joints]) 29 | 30 | def is_valid(self): 31 | return self._handle 32 | 33 | def handle(self): 34 | return self._handle 35 | 36 | def destroy(self): 37 | if self.is_valid(): 38 | self._handle = None 39 | 40 | def initialize_skeleton(self): 41 | joints = np.ndarray((K4ABT_JOINT_COUNT,), dtype=np.object_) 42 | 43 | for i in range(K4ABT_JOINT_COUNT): 44 | joints[i] = Joint2d(self._handle.skeleton.joints2D[i], i) 45 | 46 | self.joints = joints 47 | 48 | def draw(self, image, only_segments=False): 49 | color = ( 50 | int(body_colors[self.id][0]), 51 | int(body_colors[self.id][1]), 52 | int(body_colors[self.id][2]), 53 | ) 54 | 55 | for segmentId in range(len(K4ABT_SEGMENT_PAIRS)): 56 | segment_pair = K4ABT_SEGMENT_PAIRS[segmentId] 57 | point1 = self.joints[segment_pair[0]].get_coordinates() 58 | point2 = self.joints[segment_pair[1]].get_coordinates() 59 | 60 | if (point1[0] == 0 and point1[1] == 0) or (point2[0] == 0 and point2[1] == 0): 61 | continue 62 | image = cv2.line(image, point1, point2, color, 2) 63 | 64 | if only_segments: 65 | return image 66 | 67 | for joint in self.joints: 68 | image = cv2.circle(image, joint.get_coordinates(), 3, color, 3) 69 | 70 | return image 71 | 72 | @staticmethod 73 | def create(body_handle, calibration, bodyIdx, dest_camera): 74 | skeleton2d_handle = k4abt_skeleton2D_t() 75 | body2d_handle = k4abt_body2D_t() 76 | 77 | for jointID, joint in enumerate(body_handle.skeleton.joints): 78 | skeleton2d_handle.joints2D[jointID].position = calibration.convert_3d_to_2d( 79 | joint.position, K4A_CALIBRATION_TYPE_DEPTH, dest_camera 80 | ) 81 | skeleton2d_handle.joints2D[jointID].confidence_level = joint.confidence_level 82 | 83 | body2d_handle.skeleton = skeleton2d_handle 84 | body2d_handle.id = bodyIdx 85 | 86 | return Body2d(body2d_handle) 87 | 88 | def __str__(self): 89 | """Print the current settings and a short explanation""" 90 | message = f"Body Id: {self.id}\n\n" 91 | 92 | for joint in self.joints: 93 | message += str(joint) 94 | 95 | return message 96 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4abt/tracker.py: -------------------------------------------------------------------------------- 1 | from pykinect_azure.k4abt import _k4abt 2 | from pykinect_azure.k4abt.frame import Frame 3 | from pykinect_azure.k4abt._k4abtTypes import k4abt_tracker_default_configuration 4 | from pykinect_azure.k4a.device import Device 5 | from pykinect_azure.k4a._k4atypes import K4A_WAIT_INFINITE 6 | from pykinect_azure.utils import get_k4abt_lite_model_path 7 | 8 | 9 | class Tracker: 10 | def __init__(self, calibration, model_type): 11 | self.calibration = calibration 12 | self._handle = self.create(model_type) 13 | self.frame = None 14 | 15 | def __del__(self): 16 | self.destroy() 17 | 18 | def is_valid(self): 19 | return self._handle 20 | 21 | def is_frame_initialized(self): 22 | return self.frame 23 | 24 | def handle(self): 25 | return self._handle 26 | 27 | def destroy(self): 28 | if self.is_valid(): 29 | _k4abt.k4abt_tracker_destroy(self._handle) 30 | self._handle = 
None 31 | 32 | def update(self, capture=None, timeout_in_ms=K4A_WAIT_INFINITE): 33 | # Add capture to the body tracker processing queue 34 | if capture: 35 | self.enqueue_capture(capture.handle(), timeout_in_ms) 36 | else: 37 | self.enqueue_capture(Device.capture.handle(), timeout_in_ms) 38 | 39 | return self.pop_result(timeout_in_ms) 40 | 41 | def enqueue_capture(self, capture_handle, timeout_in_ms=K4A_WAIT_INFINITE): 42 | _k4abt.VERIFY( 43 | _k4abt.k4abt_tracker_enqueue_capture(self._handle, capture_handle, timeout_in_ms), 44 | "Body tracker capture enqueue failed!", 45 | ) 46 | 47 | def pop_result(self, timeout_in_ms=K4A_WAIT_INFINITE): 48 | if self.is_frame_initialized(): 49 | self.frame.release() 50 | _k4abt.VERIFY( 51 | _k4abt.k4abt_tracker_pop_result(self._handle, self.frame.handle(), timeout_in_ms), 52 | "Body tracker get body frame failed!", 53 | ) 54 | else: 55 | frame_handle = _k4abt.k4abt_frame_t() 56 | _k4abt.VERIFY( 57 | _k4abt.k4abt_tracker_pop_result(self._handle, frame_handle, timeout_in_ms), 58 | "Body tracker get body frame failed!", 59 | ) 60 | self.frame = Frame(frame_handle, self.calibration) 61 | 62 | return self.frame 63 | 64 | def set_temporal_smoothing(self, smoothing_factor): 65 | _k4abt.k4abt_tracker_set_temporal_smoothing(self._handle, smoothing_factor) 66 | 67 | def shutdown(self): 68 | _k4abt.k4abt_tracker_shutdown(self._handle) 69 | 70 | def create(self, model_type): 71 | tracker_config = self.get_tracker_configuration(model_type) 72 | 73 | tracker_handle = _k4abt.k4abt_tracker_t() 74 | _k4abt.VERIFY( 75 | _k4abt.k4abt_tracker_create(self.calibration.handle(), tracker_config, tracker_handle), 76 | "Body tracker initialization failed!", 77 | ) 78 | 79 | return tracker_handle 80 | 81 | def get_tracker_configuration(self, model_type): 82 | tracker_config = k4abt_tracker_default_configuration 83 | 84 | if model_type == _k4abt.K4ABT_LITE_MODEL: 85 | tracker_config.model_path = get_k4abt_lite_model_path() 86 | 87 | return tracker_config 88 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/record_sensors.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import cv2 4 | from PySide6.QtCore import Qt, QThread, QTimer 5 | from PySide6.QtGui import QImage 6 | from PySide6.QtMultimedia import ( 7 | QAudioFormat, 8 | QAudioSource, 9 | QMediaDevices, 10 | ) 11 | 12 | from ..signals import all_signals 13 | from ...pyk4a import Device 14 | from ...pyk4a.utils import colorize 15 | 16 | 17 | RESOLUTION = 4 18 | 19 | 20 | class RecordSensors(QThread): 21 | def __init__(self, device: Device) -> None: 22 | super().__init__() 23 | self.device = device 24 | self.audio_input = None 25 | self.input_devices = QMediaDevices.audioInputs() 26 | 27 | dict_fps = {0: "5", 1: "15", 2: "30"} 28 | self.device_fps = int(dict_fps[self.device.configuration.camera_fps]) 29 | 30 | self.timer = QTimer() 31 | self.timer.setInterval(1000 / self.device_fps) 32 | self.timer.timeout.connect(self.update_next_frame) 33 | 34 | def update_next_frame(self): 35 | current_frame = self.device.update() 36 | current_imu_data = self.device.update_imu() 37 | current_rgb_frame = current_frame.get_color_image() 38 | current_depth_frame = current_frame.get_colored_depth_image() 39 | current_ir_frame = current_frame.get_ir_image() 40 | 41 | if current_rgb_frame[0]: 42 | rgb_frame = cv2.cvtColor(current_rgb_frame[1], cv2.COLOR_BGR2RGB) 43 | h, w, ch = rgb_frame.shape 44 | rgb_frame = 
QImage(rgb_frame, w, h, ch * w, QImage.Format_RGB888) 45 | all_signals.record_signals.rgb_image.emit(rgb_frame) 46 | 47 | if current_depth_frame[0]: 48 | depth_frame = colorize(current_depth_frame[1], (None, 5000), cv2.COLORMAP_HSV) 49 | h, w, ch = depth_frame.shape 50 | depth_frame = QImage(depth_frame, w, h, w * ch, QImage.Format_RGB888) 51 | all_signals.record_signals.depth_image.emit(depth_frame) 52 | 53 | if current_ir_frame[0]: 54 | ir_frame = colorize(current_ir_frame[1], (None, 5000), cv2.COLORMAP_BONE) 55 | h, w, ch = ir_frame.shape 56 | ir_frame = QImage(ir_frame, w, h, w * ch, QImage.Format_RGB888) 57 | all_signals.record_signals.ir_image.emit(ir_frame) 58 | 59 | end_time = time.time() 60 | acc_data = current_imu_data.acc 61 | gyro_data = current_imu_data.gyro 62 | 63 | # audio 64 | data = self.io_device.readAll() 65 | available_samples = data.size() // RESOLUTION 66 | 67 | all_signals.record_signals.video_fps.emit(int(self.device_fps)) 68 | all_signals.record_signals.record_time.emit((end_time-self.start_time)) 69 | all_signals.record_signals.imu_acc_data.emit(acc_data) 70 | all_signals.record_signals.imu_gyro_data.emit(gyro_data) 71 | all_signals.record_signals.audio_data.emit([data, available_samples]) 72 | 73 | def start_audio(self): 74 | self.ready_audio() 75 | self.io_device = self.audio_input.start() 76 | self.start_time = time.time() 77 | 78 | def stop_audio(self): 79 | self.audio_input.stop() 80 | self.io_device = None 81 | 82 | def ready_audio(self) -> None: 83 | # https://github.com/ShadarRim/opencvpythonvideoplayer/blob/master/player.py 84 | format_audio = QAudioFormat() 85 | format_audio.setSampleRate(44200) 86 | format_audio.setChannelCount(3) 87 | format_audio.setSampleFormat(QAudioFormat.SampleFormat.UInt8) 88 | self.audio_input = QAudioSource(self.input_devices[0], format_audio) 89 | -------------------------------------------------------------------------------- /pykinect_recorder/main_window.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from PySide6.QtCore import Qt, QSize, Slot 4 | from PySide6.QtGui import QIcon, QScreen 5 | from PySide6.QtWidgets import ( 6 | QHBoxLayout, QMainWindow, QVBoxLayout, QWidget, 7 | QSizePolicy, QApplication 8 | ) 9 | 10 | from .renderer.components.topbar import Topbar 11 | from .renderer.components.sidebar_menu import SidebarMenus 12 | from .renderer.components.sidebar_control import StackedSidebar 13 | from .renderer.components.viewer_control import StackedViewer 14 | from .renderer.components.statusbar import StatusBar 15 | from .renderer.signals import all_signals 16 | from .pyk4a.utils import get_root 17 | 18 | 19 | class MainWindow(QMainWindow): 20 | def __init__(self, width, height) -> None: 21 | super().__init__() 22 | self.setWindowTitle("pykinect recorder") 23 | self.setWindowIcon(QIcon(os.path.join(get_root(), "renderer/public/kinect-sensor.ico"))) 24 | self.initial_window() 25 | 26 | def initial_window(self) -> None: 27 | self.setMinimumSize(QSize(1310, 740)) 28 | self.setMaximumSize(QSize(1580, 1080)) 29 | self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) 30 | self.setWindowFlags(Qt.FramelessWindowHint) 31 | self.is_maximize = False 32 | 33 | main_widget = QWidget() 34 | main_layout = QVBoxLayout(main_widget) 35 | main_layout.setSpacing(0) 36 | main_layout.setContentsMargins(0, 0, 0, 0) 37 | main_layout.setAlignment(Qt.AlignTop | Qt.AlignmentFlag.AlignLeft) 38 | 39 | self.topbar = Topbar() 40 | main_layout.addWidget(self.topbar) 41 | 42 | 
main_sub_layout = QHBoxLayout() 43 | # main_sub_layout.setSpacing(0) 44 | # main_sub_layout.setContentsMargins(0, 0, 0, 0) 45 | main_sub_layout.setAlignment(Qt.AlignLeft) 46 | 47 | self.sidebar_menus = SidebarMenus() 48 | self.stacked_sidebar = StackedSidebar() 49 | # self.stacked_sidebar.setStyleSheet("border: 1px solid blue;") 50 | 51 | content_layout = QHBoxLayout() 52 | self.stacked_viewer = StackedViewer() 53 | # self.stacked_viewer.setStyleSheet("border: 1px solid red;") 54 | content_layout.addWidget(self.stacked_viewer, Qt.AlignmentFlag.AlignLeft) 55 | content_layout.setAlignment(Qt.AlignTop | Qt.AlignLeft) 56 | 57 | main_sub_layout.addWidget(self.sidebar_menus) 58 | main_sub_layout.addWidget(self.stacked_sidebar) 59 | main_sub_layout.addLayout(content_layout) 60 | main_layout.addLayout(main_sub_layout) 61 | 62 | self.status_bar = StatusBar() 63 | main_layout.addWidget(self.status_bar) 64 | self.setCentralWidget(main_widget) 65 | 66 | self.topbar.mouseMoveEvent = self.moveWindow 67 | all_signals.window_control.connect(self.window_control) 68 | 69 | @Slot(str) 70 | def window_control(self, value): 71 | if value == "minimize": 72 | self.showMinimized() 73 | elif value == "maximize": 74 | if self.is_maximize is False: 75 | self.showFullScreen() 76 | self.is_maximize = True 77 | else: 78 | self.resize(QSize(1280, 740)) 79 | center = QScreen.availableGeometry(QApplication.primaryScreen()).center() 80 | geo = self.frameGeometry() 81 | geo.moveCenter(center) 82 | self.move(geo.topLeft()) 83 | self.is_maximize = False 84 | else: 85 | self.close() 86 | 87 | def mousePressEvent(self, event) -> None: 88 | self.dragPos = event.globalPos() 89 | 90 | def moveWindow(self, event) -> None: 91 | if event.buttons() == Qt.LeftButton: 92 | self.move(self.pos() + event.globalPos() - self.dragPos) 93 | self.dragPos = event.globalPos() 94 | event.accept() 95 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/sidebar_menu.py: -------------------------------------------------------------------------------- 1 | from PySide6.QtCore import QSize, Qt 2 | from PySide6.QtWidgets import QVBoxLayout, QFrame, QPushButton 3 | 4 | import qtawesome as qta 5 | from ..signals import all_signals 6 | 7 | 8 | class SidebarMenus(QFrame): 9 | def __init__(self) -> None: 10 | super().__init__() 11 | self.setFixedWidth(55) 12 | self.setMinimumHeight(670) 13 | self.setMaximumHeight(2160) 14 | self.setStyleSheet("background-color: #333333; border-radius: 0px") 15 | 16 | main_layout = QVBoxLayout() 17 | main_layout.setSpacing(0) 18 | main_layout.setContentsMargins(0, 0, 0, 0) 19 | main_layout.setAlignment(Qt.AlignLeft) 20 | menu_layout = QVBoxLayout() 21 | menu_layout.setSpacing(0) 22 | menu_layout.setContentsMargins(0, 0, 0, 0) 23 | menu_layout.setAlignment(Qt.AlignTop | Qt.AlignCenter) 24 | 25 | self.btn_recorder_menu = self.make_icons( 26 | qta.icon("fa.video-camera", color="#d7d7d7"), 27 | "Recording Viewer" 28 | ) 29 | self.btn_explorer_menu = self.make_icons( 30 | qta.icon("ph.monitor-play-fill", color="#777777"), 31 | "Explorer & Playback" 32 | ) 33 | self.btn_deeplearning_menu = self.make_icons( 34 | qta.icon("fa.crosshairs", color="#777777"), 35 | "Deep Learning Solution" 36 | ) 37 | 38 | menu_layout.addWidget(self.btn_recorder_menu) 39 | menu_layout.addWidget(self.btn_explorer_menu) 40 | menu_layout.addWidget(self.btn_deeplearning_menu) 41 | main_layout.addLayout(menu_layout) 42 | 43 | option_layout = QVBoxLayout() 44 | btn_option = 
self.make_icons(qta.icon("fa.gear"), "Pykinect Recorder Option") 45 | option_layout.addWidget(btn_option) 46 | main_layout.addLayout(option_layout) 47 | self.setLayout(main_layout) 48 | 49 | self.btn_recorder_menu.clicked.connect(self.clicked_recorder) 50 | self.btn_explorer_menu.clicked.connect(self.clicked_explorer) 51 | self.btn_deeplearning_menu.clicked.connect(self.clicked_solution) 52 | 53 | def make_icons(self, icon: qta, tooltip: str, scale: float = 0.8) -> QPushButton: 54 | w, h = int(45 * scale), int(45 * scale) 55 | _btn = QPushButton(icon, "") 56 | _btn.setFixedSize(55, 55) 57 | _btn.setIconSize(QSize(w, h)) 58 | _btn.setToolTip(f"{tooltip}") 59 | _btn.setStyleSheet( 60 | """ 61 | QPushButton { 62 | border: 0px solid #ffffff; 63 | } 64 | QPushButton:hover { 65 | background-color: #252526; 66 | } 67 | QToolTip { 68 | font:"Arial"; font-size: 15px; color: #ffffff; border: 1px solid #ffffff; 69 | } 70 | """ 71 | ) 72 | return _btn 73 | 74 | def clicked_recorder(self): 75 | self.btn_recorder_menu.setIcon(qta.icon("fa.video-camera", color="#d7d7d7")) 76 | self.btn_explorer_menu.setIcon(qta.icon("ph.monitor-play-fill", color="#777777")) 77 | self.btn_deeplearning_menu.setIcon(qta.icon("fa.crosshairs", color="#777777")) 78 | all_signals.option_signals.stacked_sidebar_status.emit("recorder") 79 | 80 | def clicked_explorer(self): 81 | self.btn_recorder_menu.setIcon(qta.icon("fa.video-camera", color="#777777")) 82 | self.btn_explorer_menu.setIcon(qta.icon("ph.monitor-play-fill", color="#d7d7d7")) 83 | self.btn_deeplearning_menu.setIcon(qta.icon("fa.crosshairs", color="#777777")) 84 | all_signals.option_signals.stacked_sidebar_status.emit("explorer") 85 | 86 | def clicked_solution(self): 87 | self.btn_recorder_menu.setIcon(qta.icon("fa.video-camera", color="#777777")) 88 | self.btn_explorer_menu.setIcon(qta.icon("ph.monitor-play-fill", color="#777777")) 89 | self.btn_deeplearning_menu.setIcon(qta.icon("fa.crosshairs", color="#d7d7d7")) 90 | all_signals.option_signals.stacked_sidebar_status.emit("solution") 91 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4a/configuration.py: -------------------------------------------------------------------------------- 1 | from . import _k4a 2 | 3 | 4 | class Configuration: 5 | def __init__(self, configuration_handle=None): 6 | if configuration_handle: 7 | self._handle = configuration_handle 8 | else: 9 | self.create() 10 | 11 | def handle(self): 12 | return self._handle 13 | 14 | def __setattr__(self, name, value): 15 | """Run on change function when configuration parameters are changed""" 16 | 17 | if hasattr(self, name): 18 | if name != "_handle": 19 | if int(self.__dict__[name]) != value: 20 | self.__dict__[name] = value 21 | self.on_value_change() 22 | else: 23 | self.__dict__[name] = value 24 | else: 25 | self.__dict__[name] = value 26 | 27 | def __str__(self): 28 | """Print the current settings and a short explanation""" 29 | message = ( 30 | "Device configuration: \n" 31 | f"\tcolor_format: {self.color_format} \n\t(0:JPG, 1:NV12, 2:YUY2, 3:BGRA32)\n\n" 32 | f"\tcolor_resolution: {self.color_resolution} \n\t(0:OFF, 1:720p, 2:1080p, 3:1440p, 4:1536p, 5:2160p, 6:3072p)\n\n" 33 | f"\tdepth_mode: {self.depth_mode} \n\t(0:OFF, 1:NFOV_2X2BINNED, 2:NFOV_UNBINNED,3:WFOV_2X2BINNED, 4:WFOV_UNBINNED, 5:Passive IR)\n\n" 34 | f"\tcamera_fps: {self.camera_fps} \n\t(0:5 FPS, 1:15 FPS, 2:30 FPS)\n\n" 35 | f"\tsynchronized_images_only: {self.synchronized_images_only} \n\t(True of False). 
Drop images if the color and depth are not synchronized\n\n" 36 | f"\tdepth_delay_off_color_usec: {self.depth_delay_off_color_usec} us. \n\tDelay between the color image and the depth image\n\n" 37 | f"\twired_sync_mode: {self.wired_sync_mode}\n\t(0:Standalone mode, 1:Master mode, 2:Subordinate mode)\n\n" 38 | f"\tsubordinate_delay_off_master_usec: {self.subordinate_delay_off_master_usec} us.\n\tThe external synchronization timing.\n\n" 39 | f"\tdisable_streaming_indicator: {self.disable_streaming_indicator} \n\t(True or False). Streaming indicator automatically turns on when the color or depth camera's are in use.\n\n" 40 | ) 41 | return message 42 | 43 | def create(self): 44 | self.color_format = _k4a.K4A_IMAGE_FORMAT_COLOR_MJPG 45 | self.color_resolution = _k4a.K4A_COLOR_RESOLUTION_720P 46 | self.depth_mode = _k4a.K4A_DEPTH_MODE_NFOV_UNBINNED 47 | self.camera_fps = _k4a.K4A_FRAMES_PER_SECOND_30 48 | self.synchronized_images_only = False 49 | self.depth_delay_off_color_usec = 0 50 | self.wired_sync_mode = _k4a.K4A_WIRED_SYNC_MODE_STANDALONE 51 | self.subordinate_delay_off_master_usec = 0 52 | self.disable_streaming_indicator = False 53 | self.on_value_change() 54 | 55 | def create_from_handle(self, configuration_handle): 56 | self.color_format = configuration_handle.color_format 57 | self.color_resolution = configuration_handle.color_resolution 58 | self.depth_mode = configuration_handle.depth_mode 59 | self.camera_fps = configuration_handle.camera_fps 60 | self.synchronized_images_only = configuration_handle.synchronized_images_only 61 | self.depth_delay_off_color_usec = configuration_handle.depth_delay_off_color_usec 62 | self.wired_sync_mode = configuration_handle.wired_sync_mode 63 | self.subordinate_delay_off_master_usec = configuration_handle.subordinate_delay_off_master_usec 64 | self.disable_streaming_indicator = configuration_handle.disable_streaming_indicator 65 | 66 | self._handle = configuration_handle 67 | 68 | def on_value_change(self): 69 | self._handle = _k4a.k4a_device_configuration_t( 70 | self.color_format, 71 | self.color_resolution, 72 | self.depth_mode, 73 | self.camera_fps, 74 | self.synchronized_images_only, 75 | self.depth_delay_off_color_usec, 76 | self.wired_sync_mode, 77 | self.subordinate_delay_off_master_usec, 78 | self.disable_streaming_indicator, 79 | ) 80 | 81 | 82 | default_configuration = Configuration() 83 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4a/capture.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | 3 | from . 
import _k4a 4 | from .image import Image 5 | from .transformation import Transformation 6 | from ..utils import smooth_depth_image 7 | 8 | 9 | class Capture: 10 | def __init__(self, capture_handle, calibration): 11 | self._handle = capture_handle 12 | self.calibration = calibration 13 | self.camera_transform = Transformation(calibration) 14 | 15 | def __del__(self): 16 | self.reset() 17 | 18 | def is_valid(self): 19 | return self._handle 20 | 21 | def handle(self): 22 | return self._handle 23 | 24 | def reset(self): 25 | if self.is_valid(): 26 | self.release_handle() 27 | self._handle = None 28 | 29 | def release_handle(self): 30 | if self.is_valid(): 31 | _k4a.k4a_capture_release(self._handle) 32 | 33 | @staticmethod 34 | def create(): 35 | handle = _k4a.k4a_capture_t 36 | _k4a.VERIFY(Capture._k4a.k4a_capture_create(handle), "Create capture failed!") 37 | 38 | return Capture(handle) 39 | 40 | def get_color_image_object(self): 41 | return Image(_k4a.k4a_capture_get_color_image(self._handle)) 42 | 43 | def get_depth_image_object(self): 44 | return Image(_k4a.k4a_capture_get_depth_image(self._handle)) 45 | 46 | def get_ir_image_object(self): 47 | return Image(_k4a.k4a_capture_get_ir_image(self._handle)) 48 | 49 | def get_transformed_depth_object(self): 50 | return self.camera_transform.depth_image_to_color_camera(self.get_depth_image_object()) 51 | 52 | def get_transformed_color_object(self): 53 | return self.camera_transform.color_image_to_depth_camera( 54 | self.get_depth_image_object(), self.get_color_image_object() 55 | ) 56 | 57 | def get_pointcloud_object(self, calibration_type=_k4a.K4A_CALIBRATION_TYPE_DEPTH): 58 | return self.camera_transform.depth_image_to_point_cloud(self.get_depth_image_object(), calibration_type) 59 | 60 | def get_transformed_pointcloud_object(self): 61 | return self.camera_transform.depth_image_to_point_cloud( 62 | self.get_transformed_depth_object(), _k4a.K4A_CALIBRATION_TYPE_COLOR 63 | ) 64 | 65 | def get_color_image(self): 66 | return self.get_color_image_object().to_numpy() 67 | 68 | def get_depth_image(self): 69 | return self.get_depth_image_object().to_numpy() 70 | 71 | def get_colored_depth_image(self): 72 | ret, depth_image = self.get_depth_image() 73 | if not ret: 74 | return ret, None 75 | 76 | return ret, self.color_depth_image(depth_image) 77 | 78 | def get_ir_image(self): 79 | return self.get_ir_image_object().to_numpy() 80 | 81 | def get_transformed_depth_image(self): 82 | return self.get_transformed_depth_object().to_numpy() 83 | 84 | def get_transformed_colored_depth_image(self): 85 | ret, transformed_depth_image = self.get_transformed_depth_image() 86 | 87 | return ret, self.color_depth_image(transformed_depth_image) 88 | 89 | def get_transformed_color_image(self): 90 | return self.get_transformed_color_object().to_numpy() 91 | 92 | def get_smooth_depth_image(self, maximum_hole_size=10): 93 | ret, depth_image = self.get_depth_image() 94 | return ret, smooth_depth_image(depth_image, maximum_hole_size) 95 | 96 | def get_smooth_colored_depth_image(self, maximum_hole_size=10): 97 | ret, smooth_depth_image = self.get_smooth_depth_image(maximum_hole_size) 98 | return ret, self.color_depth_image(smooth_depth_image) 99 | 100 | def get_pointcloud(self, calibration_type=_k4a.K4A_CALIBRATION_TYPE_DEPTH): 101 | ret, points = self.get_pointcloud_object(calibration_type).to_numpy() 102 | points = points.reshape((-1, 3)) 103 | return ret, points 104 | 105 | def get_transformed_pointcloud(self): 106 | ret, points = 
self.get_transformed_pointcloud_object().to_numpy() 107 | points = points.reshape((-1, 3)) 108 | return ret, points 109 | 110 | @staticmethod 111 | def color_depth_image(depth_image): 112 | depth_color_image = cv2.convertScaleAbs( 113 | depth_image, alpha=0.05 114 | ) # alpha is fitted by visual comparison with Azure k4aviewer results 115 | depth_color_image = cv2.applyColorMap(depth_color_image, cv2.COLORMAP_JET) 116 | 117 | return depth_color_image 118 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4a/transformation.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | 3 | from . import _k4a 4 | from .image import Image 5 | 6 | 7 | class Transformation: 8 | def __init__(self, calibration): 9 | self.calibration = calibration 10 | self._handle = _k4a.k4a_transformation_create(calibration.handle()) 11 | self.color_resolution = Resolution( 12 | calibration.handle().color_camera_calibration.resolution_width, 13 | calibration.handle().color_camera_calibration.resolution_height, 14 | ) 15 | self.depth_resolution = Resolution( 16 | calibration.handle().depth_camera_calibration.resolution_width, 17 | calibration.handle().depth_camera_calibration.resolution_height, 18 | ) 19 | 20 | def __del__(self): 21 | self.destroy() 22 | 23 | def is_valid(self): 24 | return self._handle 25 | 26 | def handle(self): 27 | return self._handle 28 | 29 | def destroy(self): 30 | if self.is_valid(): 31 | _k4a.k4a_transformation_destroy(self._handle) 32 | self._handle = None 33 | 34 | def depth_image_to_color_camera(self, depth_image): 35 | if not depth_image.is_valid(): 36 | return Image() 37 | 38 | transformed_depth_image = Image.create( 39 | depth_image.format, 40 | self.color_resolution.width, 41 | self.color_resolution.height, 42 | self.color_resolution.width * 2, 43 | ) 44 | 45 | _k4a.k4a_transformation_depth_image_to_color_camera( 46 | self._handle, depth_image.handle(), transformed_depth_image.handle() 47 | ) 48 | 49 | return transformed_depth_image 50 | 51 | def depth_image_to_color_camera_custom( 52 | self, 53 | depth_image, 54 | custom_image, 55 | interpolation=_k4a.K4A_TRANSFORMATION_INTERPOLATION_TYPE_LINEAR, 56 | ): 57 | if not depth_image.is_valid() or not custom_image.is_valid(): 58 | return Image() 59 | 60 | transformed_custom_image = Image.create( 61 | custom_image.format, 62 | self.color_resolution.width, 63 | self.color_resolution.height, 64 | self.color_resolution.width * self.get_custom_bytes_per_pixel(custom_image), 65 | ) 66 | 67 | transformed_depth_image = Image.create( 68 | _k4a.K4A_IMAGE_FORMAT_DEPTH16, 69 | self.color_resolution.width, 70 | self.color_resolution.height, 71 | self.color_resolution.width * 2, 72 | ) 73 | 74 | invalid_custom_value = ctypes.c_uint32() 75 | 76 | _k4a.k4a_transformation_depth_image_to_color_camera_custom( 77 | self._handle, 78 | depth_image.handle(), 79 | custom_image.handle(), 80 | transformed_depth_image.handle(), 81 | transformed_custom_image.handle(), 82 | interpolation, 83 | invalid_custom_value, 84 | ) 85 | 86 | return transformed_custom_image 87 | 88 | def color_image_to_depth_camera(self, depth_image, color_image): 89 | if not depth_image.is_valid() or not color_image.is_valid(): 90 | return Image() 91 | 92 | transformed_color_image = Image.create( 93 | _k4a.K4A_IMAGE_FORMAT_COLOR_BGRA32, 94 | self.depth_resolution.width, 95 | self.depth_resolution.height, 96 | self.depth_resolution.width * 4, 97 | ) 98 | 99 | 
_k4a.k4a_transformation_color_image_to_depth_camera( 100 | self._handle, 101 | depth_image.handle(), 102 | color_image.handle(), 103 | transformed_color_image.handle(), 104 | ) 105 | 106 | return transformed_color_image 107 | 108 | def depth_image_to_point_cloud(self, depth_image, calibration_type=_k4a.K4A_CALIBRATION_TYPE_DEPTH): 109 | if not depth_image.is_valid(): 110 | return Image() 111 | 112 | xyz_image = Image.create( 113 | _k4a.K4A_IMAGE_FORMAT_CUSTOM, 114 | depth_image.get_width_pixels(), 115 | depth_image.get_height_pixels(), 116 | depth_image.get_width_pixels() * 3 * 2, 117 | ) 118 | 119 | _k4a.k4a_transformation_depth_image_to_point_cloud( 120 | self._handle, depth_image.handle(), calibration_type, xyz_image.handle() 121 | ) 122 | 123 | return xyz_image 124 | 125 | def get_custom_bytes_per_pixel(self, custom_image): 126 | custom_image_format = custom_image.format 127 | 128 | if custom_image_format == _k4a.K4A_IMAGE_FORMAT_CUSTOM8: 129 | return 1 130 | else: 131 | return 2 132 | 133 | 134 | class Resolution: 135 | def __init__(self, width, height): 136 | self.width = width 137 | self.height = height 138 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4abt/frame.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | 4 | from pykinect_azure.k4abt import _k4abt 5 | from pykinect_azure.k4abt.body import Body 6 | from pykinect_azure.k4abt.body2d import Body2d 7 | from pykinect_azure.k4abt._k4abtTypes import k4abt_body_t, body_colors 8 | from pykinect_azure.k4a import Image, Capture, Transformation 9 | from pykinect_azure.k4a._k4atypes import K4A_CALIBRATION_TYPE_DEPTH 10 | 11 | 12 | class Frame: 13 | def __init__(self, frame_handle, calibration): 14 | if frame_handle: 15 | self._handle = frame_handle 16 | self.calibration = calibration 17 | self.transformation = Transformation(self.calibration) 18 | _k4abt.k4abt_frame_reference(self._handle) 19 | 20 | def __del__(self): 21 | self.reset() 22 | 23 | def json(self): 24 | bodies = self.get_bodies() 25 | 26 | if not bodies: 27 | return "" 28 | 29 | return [body.json() for body in bodies] 30 | 31 | def is_valid(self): 32 | return self._handle 33 | 34 | def handle(self): 35 | return self._handle 36 | 37 | def reset(self): 38 | if self.is_valid(): 39 | self.release() 40 | self._handle = None 41 | 42 | def release(self): 43 | if self.is_valid(): 44 | _k4abt.k4abt_frame_release(self._handle) 45 | 46 | def get_num_bodies(self): 47 | return _k4abt.k4abt_frame_get_num_bodies(self._handle) 48 | 49 | def get_body_skeleton(self, index=0): 50 | skeleton = _k4abt.k4abt_skeleton_t() 51 | 52 | _k4abt.VERIFY( 53 | _k4abt.k4abt_frame_get_body_skeleton(self._handle, index, skeleton), 54 | "Body tracker get body skeleton failed!", 55 | ) 56 | 57 | return skeleton 58 | 59 | def get_body_id(self, index=0): 60 | return _k4abt.k4abt_frame_get_body_id(self._handle, index) 61 | 62 | def get_bodies(self): 63 | bodies = [] 64 | 65 | # Get the number of people in the frame 66 | num_bodies = self.get_num_bodies() 67 | 68 | # Extract the skeleton of each person 69 | if num_bodies: 70 | for bodyIdx in range(num_bodies): 71 | bodies.append(self.get_body(bodyIdx)) 72 | 73 | return bodies 74 | 75 | def get_body(self, bodyIdx=0): 76 | body_handle = k4abt_body_t() 77 | body_handle.id = self.get_body_id(bodyIdx) 78 | body_handle.skeleton = self.get_body_skeleton(bodyIdx) 79 | 80 | return Body(body_handle) 81 | 82 | def get_body2d(self, 
bodyIdx=0, dest_camera=K4A_CALIBRATION_TYPE_DEPTH): 83 | body_handle = self.get_body(bodyIdx).handle() 84 | 85 | return Body2d.create(body_handle, self.calibration, bodyIdx, dest_camera) 86 | 87 | def draw_bodies( 88 | self, 89 | destination_image, 90 | dest_camera=K4A_CALIBRATION_TYPE_DEPTH, 91 | only_segments=False, 92 | ): 93 | num_bodies = self.get_num_bodies() 94 | 95 | for body_id in range(num_bodies): 96 | destination_image = self.draw_body2d(destination_image, body_id, dest_camera, only_segments) 97 | 98 | return destination_image 99 | 100 | def draw_body2d( 101 | self, 102 | destination_image, 103 | bodyIdx=0, 104 | dest_camera=K4A_CALIBRATION_TYPE_DEPTH, 105 | only_segments=False, 106 | ): 107 | return self.get_body2d(bodyIdx, dest_camera).draw(destination_image, only_segments) 108 | 109 | def get_device_timestamp_usec(self): 110 | return _k4abt.k4abt_frame_get_device_timestamp_usec(self._handle) 111 | 112 | def get_body_index_map(self): 113 | return Image(_k4abt.k4abt_frame_get_body_index_map(self._handle)) 114 | 115 | def get_body_index_map_image(self): 116 | return self.get_body_index_map().to_numpy() 117 | 118 | def get_transformed_body_index_map(self): 119 | depth_image = self.get_capture().get_depth_image_object() 120 | return self.transformation.depth_image_to_color_camera_custom(depth_image, self.get_body_index_map()) 121 | 122 | def get_transformed_body_index_map_image(self): 123 | transformed_body_index_map = self.get_transformed_body_index_map() 124 | return transformed_body_index_map.to_numpy() 125 | 126 | def get_segmentation_image(self): 127 | ret, body_index_map = self.get_body_index_map_image() 128 | return ret, np.dstack([cv2.LUT(body_index_map, body_colors[:, i]) for i in range(3)]) 129 | 130 | def get_transformed_segmentation_image(self): 131 | ret, transformed_body_index_map = self.get_transformed_body_index_map_image() 132 | return ret, np.dstack([cv2.LUT(transformed_body_index_map, body_colors[:, i]) for i in range(3)]) 133 | 134 | def get_capture(self): 135 | return Capture(_k4abt.k4abt_frame_get_capture(self._handle), self.calibration._handle) 136 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/topbar.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from PySide6.QtGui import QAction, QIcon 4 | from PySide6.QtCore import Qt, QSize 5 | from PySide6.QtWidgets import ( 6 | QHBoxLayout, QFrame, QPushButton, 7 | QMenuBar, QMenu, QApplication, QToolBar 8 | ) 9 | import qtawesome as qta 10 | 11 | from ..signals import all_signals 12 | from ..common_widgets import Label 13 | from ...pyk4a.utils import get_root 14 | 15 | 16 | class Topbar(QFrame): 17 | def __init__(self) -> None: 18 | super().__init__() 19 | self.setFixedHeight(40) 20 | self.setMaximumWidth(4000) 21 | self.setContentsMargins(0, 0, 0, 0) 22 | self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint) 23 | 24 | self.setObjectName("Topbar") 25 | self.setStyleSheet(""" 26 | QFrame#Topbar { 27 | background-color: #323233; 28 | padding: 0px; margin: 0px; 29 | border-radius: 0px; 30 | } 31 | """) 32 | 33 | main_layout = QHBoxLayout() 34 | main_layout.setSpacing(0) 35 | main_layout.setContentsMargins(0, 0, 0, 0) 36 | main_layout.setAlignment(Qt.AlignLeft) 37 | 38 | self.label_icon = Label() 39 | self.label_icon.setStyleSheet("margin-left: 6px;") 40 | self.label_icon.setFixedSize(40, 40) 41 | self.icon = QIcon(os.path.join(get_root(), "renderer/public/kinect-sensor.ico")) 
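# The window icon is rasterized to a 25x25 pixmap and shown inside the fixed 40x40 title-bar label.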
42 | self.pixmap = self.icon.pixmap(25, 25) 43 | self.label_icon.setPixmap(self.pixmap) 44 | main_layout.addWidget(self.label_icon) 45 | 46 | self.menubar = QMenuBar() 47 | self.menubar.setFixedWidth(100) 48 | self.menubar.setContentsMargins(0, 0, 0, 0) 49 | self.menubar.setStyleSheet(""" 50 | QMenuBar { 51 | font-size: 15px; 52 | background-color: #323233; 53 | border: none; 54 | } 55 | QMenuBar:item:selected { 56 | border-color: white; 57 | border-radius: 0px; 58 | } 59 | """) 60 | 61 | self.file_menu = QMenu("File") 62 | self.help_menu = QMenu("Help") 63 | self.file_menu.setContentsMargins(0, 0, 0, 0) 64 | self.help_menu.setContentsMargins(0, 0, 0, 0) 65 | 66 | self.exit_action = QAction("Exit") 67 | self.info_action = QAction("Info") 68 | 69 | self.file_menu.addAction(self.exit_action) 70 | self.help_menu.addAction(self.info_action) 71 | self.menubar.addMenu(self.file_menu) 72 | self.menubar.addMenu(self.help_menu) 73 | main_layout.addWidget(self.menubar) 74 | 75 | self.title_layout = QHBoxLayout() 76 | self.title_layout.setAlignment(Qt.AlignCenter) 77 | self.label_title = Label("pykinect-recorder", font="Arial", fontsize=12, orientation=Qt.AlignCenter) 78 | self.title_layout.addWidget(self.label_title) 79 | main_layout.addLayout(self.title_layout) 80 | 81 | self.right_btn_layout = QHBoxLayout() 82 | self.right_btn_layout.setSpacing(0) 83 | self.right_btn_layout.setContentsMargins(0, 0, 0, 0) 84 | self.btn_minimize = self.make_icons(qta.icon("msc.chrome-minimize"), "minimize") 85 | self.btn_maximize = self.make_icons(qta.icon("msc.chrome-maximize"), "maximize") 86 | self.btn_close = self.make_icons(qta.icon("msc.chrome-close"), "close") 87 | self.right_btn_layout.addWidget(self.btn_minimize) 88 | self.right_btn_layout.addWidget(self.btn_maximize) 89 | self.right_btn_layout.addWidget(self.btn_close) 90 | main_layout.addLayout(self.right_btn_layout) 91 | self.setLayout(main_layout) 92 | 93 | self.exit_action.triggered.connect(self.quit_window) 94 | self.info_action.triggered.connect(self.get_window_info) 95 | self.btn_minimize.clicked.connect(self.right_btn_clicked) 96 | self.btn_maximize.clicked.connect(self.right_btn_clicked) 97 | self.btn_close.clicked.connect(self.right_btn_clicked) 98 | 99 | def make_icons(self, icon: qta, tooltip: str, scale: float = 0.8) -> QPushButton: 100 | w, h = int(35 * scale), int(35 * scale) 101 | btn = QPushButton(icon, "") 102 | btn.setObjectName(tooltip) 103 | btn.setFixedSize(38, 38) 104 | btn.setIconSize(QSize(w, h)) 105 | btn.setToolTip(f"{tooltip}") 106 | btn.setStyleSheet(""" 107 | QPushButton { 108 | border: none; border-radius: 0px; 109 | } 110 | QToolTip { 111 | font:"Arial"; font-size: 15px; color: #ffffff; border: 1px solid #ffffff; 112 | } 113 | """) 114 | return btn 115 | 116 | def right_btn_clicked(self): 117 | all_signals.window_control.emit(self.sender().objectName()) 118 | 119 | def quit_window(self): 120 | QApplication.instance().quit() 121 | 122 | def get_window_info(self): 123 | print("pass") -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4a/image.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | 4 | from . 
import _k4a 5 | 6 | 7 | class Image: 8 | _handle = None 9 | buffer_pointer = None 10 | 11 | def __init__(self, image_handle=None): 12 | self._handle = image_handle 13 | # Get the pointer to the buffer containing the image data 14 | self.buffer_pointer = self.get_buffer() if self.is_valid() else None 15 | 16 | def __del__(self): 17 | self.reset() 18 | 19 | def is_valid(self): 20 | return self._handle or self.buffer_pointer is not None 21 | 22 | def handle(self): 23 | return self._handle 24 | 25 | def reset(self): 26 | if self.is_valid(): 27 | _k4a.k4a_image_release(self._handle) 28 | self._handle = None 29 | 30 | @staticmethod 31 | def create(image_format, width_pixels, height_pixels, stride_bytes): 32 | handle = _k4a.k4a_image_t() 33 | _k4a.VERIFY( 34 | _k4a.k4a_image_create(image_format, width_pixels, height_pixels, stride_bytes, handle), 35 | "Create image failed!", 36 | ) 37 | 38 | return Image(handle) 39 | 40 | @property 41 | def width(self): 42 | return self.get_width_pixels() 43 | 44 | @property 45 | def height(self): 46 | return self.get_height_pixels() 47 | 48 | @property 49 | def stride(self): 50 | return self.get_stride_bytes() 51 | 52 | @property 53 | def format(self): 54 | return self.get_format() 55 | 56 | @property 57 | def size(self): 58 | return self.get_size() 59 | 60 | def get_buffer(self): 61 | if not self._handle: 62 | return None 63 | 64 | return _k4a.k4a_image_get_buffer(self._handle) 65 | 66 | def get_size(self): 67 | if not self.is_valid(): 68 | return None 69 | 70 | return int(_k4a.k4a_image_get_size(self._handle)) 71 | 72 | def get_format(self): 73 | if not self.is_valid(): 74 | return None 75 | 76 | return int(_k4a.k4a_image_get_format(self._handle)) 77 | 78 | def get_width_pixels(self): 79 | if not self.is_valid(): 80 | return None 81 | 82 | return int(_k4a.k4a_image_get_width_pixels(self._handle)) 83 | 84 | def get_height_pixels(self): 85 | if not self.is_valid(): 86 | return None 87 | 88 | return int(_k4a.k4a_image_get_height_pixels(self._handle)) 89 | 90 | def get_stride_bytes(self): 91 | return int(_k4a.k4a_image_get_stride_bytes(self._handle)) 92 | 93 | def to_numpy(self): 94 | if not self.is_valid(): 95 | return False, None 96 | 97 | # Get the size of the buffer 98 | image_size = self.get_size() 99 | image_width = self.get_width_pixels() 100 | image_height = self.get_height_pixels() 101 | 102 | # Get the image format 103 | image_format = self.get_format() 104 | 105 | # Read the data in the buffer 106 | buffer_array = np.ctypeslib.as_array(self.buffer_pointer, shape=(image_size,)) 107 | 108 | # Parse buffer based on image formats 109 | if image_format == _k4a.K4A_IMAGE_FORMAT_COLOR_MJPG: 110 | return True, cv2.imdecode(np.frombuffer(buffer_array, dtype=np.uint8).copy(), -1) 111 | elif image_format == _k4a.K4A_IMAGE_FORMAT_COLOR_NV12: 112 | yuv_image = np.frombuffer(buffer_array, dtype=np.uint8).copy().reshape(int(image_height * 1.5), image_width) 113 | return True, cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR_NV12) 114 | elif image_format == _k4a.K4A_IMAGE_FORMAT_COLOR_YUY2: 115 | yuv_image = np.frombuffer(buffer_array, dtype=np.uint8).copy().reshape(image_height, image_width, 2) 116 | return True, cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR_YUY2) 117 | elif image_format == _k4a.K4A_IMAGE_FORMAT_COLOR_BGRA32: 118 | return True, np.frombuffer(buffer_array, dtype=np.uint8).copy().reshape(image_height, image_width, 4) 119 | elif image_format == _k4a.K4A_IMAGE_FORMAT_DEPTH16: 120 | return True, np.frombuffer(buffer_array, dtype=" NDArray: 17 | """Colorize image with 
OpenCV colormap. 18 | Args: 19 | image (NDArray[H,W]): Image to colorize. 20 | clipping_range (Optional[tuple[int, int]], optional): Clipping range for image. Defaults to (None, None). 21 | colormap (int, optional): OpenCV colormap. Defaults to cv2.COLORMAP_HSV. 22 | Returns: 23 | NDArray: Colorized image. 24 | """ 25 | if clipping_range[0] or clipping_range[1]: 26 | image = image.clip(clipping_range[0], clipping_range[1]) # type: ignore 27 | else: 28 | image = image.copy() 29 | image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U) 30 | image = cv2.applyColorMap(image, colormap) 31 | return image 32 | 33 | def get_root() -> Path: 34 | """Get root path for load assets. 35 | Args: 36 | None. 37 | Returns: 38 | Path: Root Directory. 39 | """ 40 | return Path(__file__).parent.parent 41 | 42 | class Open3dVisualizer: 43 | def __init__(self): 44 | self.point_cloud = o3d.geometry.PointCloud() 45 | self.o3d_started = False 46 | 47 | self.vis = o3d.visualization.Visualizer() 48 | self.vis.create_window() 49 | 50 | def __call__(self, points_3d, rgb_image=None): 51 | self.update(points_3d, rgb_image) 52 | 53 | def update(self, points_3d, rgb_image=None): 54 | # Add values to vectors 55 | self.point_cloud.points = o3d.utility.Vector3dVector(points_3d) 56 | if rgb_image is not None: 57 | colors = cv2.cvtColor(rgb_image, cv2.COLOR_BGRA2RGB).reshape(-1, 3) / 255 58 | self.point_cloud.colors = o3d.utility.Vector3dVector(colors) 59 | 60 | self.point_cloud.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) 61 | 62 | # Add geometries if it is the first time 63 | if not self.o3d_started: 64 | self.vis.add_geometry(self.point_cloud) 65 | self.o3d_started = True 66 | 67 | else: 68 | self.vis.update_geometry(self.point_cloud) 69 | 70 | self.vis.poll_events() 71 | self.vis.update_renderer() 72 | 73 | 74 | def smooth_depth_image(depth_image, max_hole_size=10): 75 | """Smoothes depth image by filling the holes using inpainting method 76 | 77 | Parameters: 78 | depth_image(Image): Original depth image 79 | max_hole_size(int): Maximum size of hole to fill 80 | 81 | Returns: 82 | Image: Smoothed depth image 83 | 84 | Remarks: 85 | Bigger maximum hole size will try to fill bigger holes but requires longer time 86 | """ 87 | mask = np.zeros(depth_image.shape, dtype=np.uint8) 88 | mask[depth_image == 0] = 1 89 | 90 | # Do not include in the mask the holes bigger than the maximum hole size 91 | kernel = np.ones((max_hole_size, max_hole_size), np.uint8) 92 | erosion = cv2.erode(mask, kernel, iterations=1) 93 | mask = mask - erosion 94 | 95 | smoothed_depth_image = cv2.inpaint(depth_image.astype(np.uint16), mask, max_hole_size, cv2.INPAINT_NS) 96 | 97 | return smoothed_depth_image 98 | 99 | 100 | def get_k4a_module_path(): 101 | # Check if running in Jetson Nano or similar ARM chips 102 | if platform.machine().lower() == "aarch64": 103 | return r"/usr/lib/aarch64-linux-gnu/libk4a.so" 104 | 105 | # For non-Arm chips, first check if it is running linux 106 | if platform.system().lower() == "linux": 107 | return r"/usr/lib/x86_64-linux-gnu/libk4a.so" 108 | 109 | # In Windows check the architecture 110 | if platform.machine().lower() == "amd64": 111 | return "C:\\Program Files\\Azure Kinect SDK v1.4.1\\sdk\\windows-desktop\\amd64\\release\\bin\\k4a.dll" 112 | 113 | # Otherwise return the x86 Windows version 114 | return "C:\\Program Files\\Azure Kinect SDK v1.4.1\\sdk\\windows-desktop\\x86\\release\\bin\\k4a.dll" 115 | 116 | 117 | def get_k4abt_module_path(): 118 | # Check if running in 
Jetson Nano or similar ARM chips 119 | if platform.machine().lower() == "aarch64": 120 | print( 121 | "Kinect Body Tracking is not implemented yet in ARM. Check https://feedback.azure.com/forums/920053 for more info." 122 | ) 123 | sys.exit(1) 124 | 125 | # For non-Arm chips, first check if it is running linux 126 | if platform.system().lower() == "linux": 127 | return "libk4abt.so" 128 | 129 | # Otherwise return the Windows version 130 | return "C:\\Program Files\\Azure Kinect Body Tracking SDK\\sdk\\windows-desktop\\amd64\\release\\bin\\k4abt.dll" 131 | 132 | 133 | def get_k4arecord_module_path(modulePath): 134 | return modulePath.replace("k4a", "k4arecord") 135 | 136 | 137 | def get_k4abt_lite_model_path(): 138 | # Check if it is a Linux system 139 | if platform.system().lower() == "linux": 140 | return None 141 | 142 | # Return the Windows version 143 | return "C:/Program Files/Azure Kinect Body Tracking SDK/sdk/windows-desktop/amd64/release/bin/dnn_model_2_0_lite_op11.onnx".encode( 144 | "utf-8" 145 | ) 146 | 147 | 148 | def get_dict(struct): 149 | result = {} 150 | for field, _ in struct._fields_: 151 | value = getattr(struct, field) 152 | # if the type is not a primitive and it evaluates to False ... 153 | if (type(value) not in [int, float, bool]) and not bool(value): 154 | # it's a null pointer 155 | value = None 156 | elif hasattr(value, "_length_") and hasattr(value, "_type_"): 157 | # Probably an array 158 | value = np.array(list(value)) 159 | elif hasattr(value, "_fields_"): 160 | # Probably another struct 161 | value = get_dict(value) 162 | result[field] = value 163 | return result 164 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4arecord/playback.py: -------------------------------------------------------------------------------- 1 | from . 
import _k4arecord 2 | from .datablock import Datablock 3 | from .record_configuration import RecordConfiguration 4 | from ..k4a import _k4a 5 | from ..k4a.capture import Capture 6 | from ..k4a.calibration import Calibration 7 | from ..k4a.imu_sample import ImuSample 8 | from .record import Record 9 | from ..k4a.configuration import Configuration 10 | 11 | 12 | class Playback: 13 | def __init__(self, filepath): 14 | self._handle = _k4arecord.k4a_playback_t() 15 | self._capture = None 16 | self._datablock = None 17 | self.clipping = None 18 | 19 | self.open(filepath) 20 | self.calibration = self.get_calibration() 21 | 22 | def __del__(self): 23 | self.close() 24 | 25 | def open(self, filepath): 26 | _k4arecord.VERIFY( 27 | _k4arecord.k4a_playback_open(filepath.encode("utf-8"), self._handle), 28 | "Failed to open recording!", 29 | ) 30 | 31 | def update(self): 32 | return self.get_next_capture() if self.clipping is None else self.get_next_capture_with_record() 33 | 34 | def is_valid(self): 35 | return self._handle != None 36 | 37 | def is_capture_initialized(self): 38 | return self._capture 39 | 40 | def is_datablock_initialized(self): 41 | return self._datablock 42 | 43 | def close(self): 44 | if self.is_valid(): 45 | _k4arecord.k4a_playback_close(self._handle) 46 | self._handle = None 47 | 48 | def get_calibration(self): 49 | calibration_handle = _k4arecord.k4a_calibration_t() 50 | if self.is_valid(): 51 | _k4arecord.VERIFY( 52 | _k4arecord.k4a_playback_get_calibration(self._handle, calibration_handle), 53 | "Failed to read device calibration from recording!", 54 | ) 55 | 56 | return Calibration(calibration_handle) 57 | 58 | def get_record_configuration(self): 59 | config = _k4arecord.k4a_record_configuration_t() 60 | 61 | if self.is_valid(): 62 | _k4arecord.VERIFY( 63 | _k4arecord.k4a_playback_get_record_configuration(self._handle, config), 64 | "Failed to read record configuration!", 65 | ) 66 | 67 | return RecordConfiguration(config) 68 | 69 | def get_next_capture(self): 70 | capture_handle = _k4a.k4a_capture_t() 71 | 72 | if self.is_capture_initialized(): 73 | self._capture.release_handle() 74 | self._capture._handle = capture_handle 75 | else: 76 | self._capture = Capture(capture_handle, self.calibration) 77 | 78 | ret = _k4arecord.k4a_playback_get_next_capture(self._handle, capture_handle) != _k4arecord.K4A_STREAM_RESULT_EOF 79 | 80 | return ret, self._capture 81 | 82 | def get_next_capture_with_record(self): 83 | if self.is_capture_initialized(): 84 | self._capture.release_handle() 85 | 86 | capture_handle = _k4a.k4a_capture_t() 87 | ret = _k4arecord.k4a_playback_get_next_capture(self._handle, capture_handle) != _k4arecord.K4A_STREAM_RESULT_EOF 88 | 89 | if self.is_capture_initialized(): 90 | self._capture._handle = capture_handle 91 | else: 92 | self._capture = Capture(capture_handle, self.calibration) 93 | 94 | return ret, self._capture 95 | 96 | def get_previous_capture(self): 97 | capture_handle = _k4a.k4a_capture_t() 98 | 99 | if self.is_capture_initialized(): 100 | self._capture.release_handle() 101 | self._capture._handle = capture_handle 102 | else: 103 | self._capture = Capture(capture_handle, self.calibration) 104 | 105 | ret = ( 106 | _k4arecord.k4a_playback_get_previous_capture(self._handle, capture_handle) 107 | != _k4arecord.K4A_STREAM_RESULT_EOF 108 | ) 109 | 110 | return ret, self._capture 111 | 112 | def get_next_imu_sample(self): 113 | imu_sample_struct = _k4a.k4a_imu_sample_t() 114 | _k4a.VERIFY( 115 | _k4arecord.k4a_playback_get_next_imu_sample(self._handle, 
imu_sample_struct), 116 | "Get next imu sample failed!", 117 | ) 118 | 119 | # Convert the structure into a dictionary 120 | _imu_sample = ImuSample(imu_sample_struct) 121 | 122 | return _imu_sample 123 | 124 | def get_previous_imu_sample(self): 125 | imu_sample_struct = _k4a.k4a_imu_sample_t() 126 | _k4a.VERIFY( 127 | _k4arecord.k4a_playback_get_previous_imu_sample(self._handle, imu_sample_struct), 128 | "Get previous imu sample failed!", 129 | ) 130 | 131 | # Convert the structure into a dictionary 132 | _imu_sample = ImuSample(imu_sample_struct) 133 | 134 | return _imu_sample 135 | 136 | def seek_timestamp(self, offset=0, origin=_k4arecord.K4A_PLAYBACK_SEEK_BEGIN): 137 | _k4a.VERIFY( 138 | _k4arecord.k4a_playback_seek_timestamp(self._handle, offset, origin), 139 | "Seek recording failed!", 140 | ) 141 | 142 | def get_recording_length(self): 143 | return int(_k4arecord.k4a_playback_get_recording_length_usec(self._handle)) 144 | 145 | def set_color_conversion(self, format=_k4a.K4A_IMAGE_FORMAT_DEPTH16): 146 | _k4a.VERIFY( 147 | _k4arecord.k4a_playback_set_color_conversion(self._handle, format), 148 | "Set color conversion failed!", 149 | ) 150 | 151 | def get_next_data_block(self, track): 152 | block_handle = _k4arecord.k4a_playback_data_block_t() 153 | _k4a.VERIFY( 154 | _k4arecord.k4a_playback_get_next_data_block(self._handle, track, block_handle), 155 | "Get next data block failed!", 156 | ) 157 | 158 | if self.is_datablock_initialized(): 159 | self._datablock._handle = block_handle 160 | else: 161 | self._datablock = Datablock(block_handle) 162 | 163 | return self._datablock 164 | 165 | def get_previous_data_block(self, track): 166 | block_handle = _k4arecord.k4a_playback_data_block_t() 167 | _k4a.VERIFY( 168 | _k4arecord.k4a_playback_get_previous_data_block(self._handle, track, block_handle), 169 | "Get previous data block failed!", 170 | ) 171 | 172 | if self.is_datablock_initialized(): 173 | self._datablock._handle = block_handle 174 | else: 175 | self._datablock = Datablock(block_handle) 176 | 177 | return self._datablock 178 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/sidebar_explorer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import datetime 4 | from pathlib import Path 5 | 6 | import qtawesome as qta 7 | from PySide6.QtGui import QImage, QPixmap 8 | from PySide6.QtCore import Qt, Signal, Slot, QSize 9 | from PySide6.QtWidgets import ( 10 | QFrame, QWidget, QVBoxLayout, QScrollArea, 11 | QPushButton, QHBoxLayout, QFileDialog 12 | ) 13 | 14 | from ..common_widgets import Label 15 | from ..signals import all_signals 16 | from ...pyk4a.pykinect import start_playback, initialize_libraries 17 | 18 | 19 | class ExplorerSidebar(QFrame): 20 | def __init__(self) -> None: 21 | super().__init__() 22 | initialize_libraries() 23 | self.setStyleSheet(" background-color: #252526; border-radius: 0px; ") 24 | 25 | self.main_layout = QVBoxLayout() 26 | self.title_layout = QHBoxLayout() 27 | self.base_path = os.path.join(Path.home(), "Videos") 28 | 29 | self.label_dirpath = Label(self.base_path) 30 | self.label_dirpath.setFixedSize(180, 50) 31 | 32 | self.btn_reload = self.make_icons(qta.icon("mdi6.reload"), "Reload", scale=0.7) 33 | self.btn_search = self.make_icons(qta.icon("ri.search-line"), "Search directory", scale=0.7) 34 | 35 | self.title_layout.addWidget(self.label_dirpath) 36 | self.title_layout.addWidget(self.btn_reload) 37 |
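# The title row pairs the directory label with the reload/search buttons; the file list below it is rebuilt by set_scrollarea().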
self.title_layout.addWidget(self.btn_search) 38 | 39 | self.layout_scroll = QScrollArea() 40 | self.layout_scroll.setWidgetResizable(True) 41 | self.layout_scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff) 42 | self.layout_scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) 43 | 44 | self.main_layout.addLayout(self.title_layout) 45 | self.main_layout.addWidget(self.layout_scroll) 46 | self.setMaximumHeight(1080) 47 | self.setFixedWidth(300) 48 | self.setLayout(self.main_layout) 49 | self.set_scrollarea(self.base_path) 50 | 51 | self.btn_reload.clicked.connect(self.reload_dir) 52 | self.btn_search.clicked.connect(self.search_dir) 53 | 54 | def reload_dir(self) -> None: 55 | self.set_scrollarea(self.base_path) 56 | 57 | def search_dir(self) -> None: 58 | self.base_path = QFileDialog.getExistingDirectory(self, "Open Data Files", ".", QFileDialog.ShowDirsOnly) or self.base_path  # keep the previous path if the dialog is cancelled 59 | self.label_dirpath.setText(self.base_path) 60 | self.set_scrollarea(self.base_path) 61 | 62 | def set_scrollarea(self, filedirs: str) -> None: 63 | self._widget = QWidget() 64 | layout_file = QVBoxLayout() 65 | layout_file.setAlignment(Qt.AlignmentFlag.AlignTop) 66 | 67 | for filedir in Path(filedirs).iterdir(): 68 | _filename = filedir.name  # portable across OSes, unlike splitting on "\\" 69 | if _filename.endswith(".mkv"): 70 | try: 71 | fileinfo = _FileInfo() 72 | playback = start_playback(str(filedir)) 73 | handle = playback.get_record_configuration()._handle 74 | start_time = handle.start_timestamp_offset_usec 75 | 76 | # Thumbnail 77 | # playback.seek_timestamp(start_time) 78 | # _, current_frame = playback.update() 79 | # rgb_frame = current_frame.get_color_image() 80 | # thumbnail = cv2.cvtColor(rgb_frame[1], cv2.COLOR_BGR2RGB) 81 | # thumbnail = QImage(thumbnail, 30, 30, 30*3, QImage.Format_RGB888) 82 | # fileinfo.label_thumbnail.setPixmap(QPixmap.fromImage(thumbnail)) 83 | 84 | record_length = (playback.get_recording_length()-start_time) // 1e6 85 | fsize = os.path.getsize(filedir) / (2**30) 86 | record_time = str(datetime.timedelta(seconds=record_length)) 87 | playback.close() 88 | 89 | font_metrics = fileinfo.label_file_name.fontMetrics() 90 | elided_text = font_metrics.elidedText(_filename, Qt.ElideRight, fileinfo.label_file_name.width()) 91 | fileinfo.label_file_name.setText(elided_text) 92 | fileinfo.label_metadata.setText(f"{record_time} ({fsize:.2f}GB)") 93 | layout_file.addWidget(fileinfo) 94 | fileinfo.Filename.connect(self.emit_file_path) 95 | except Exception:  # skip files that cannot be opened as valid recordings 96 | pass 97 | 98 | self._widget.setLayout(layout_file) 99 | self.layout_scroll.setWidget(self._widget) 100 | 101 | def make_icons(self, icon: qta, tooltip: str, scale: float = 0.8) -> QPushButton: 102 | w, h = int(35 * scale), int(35 * scale) 103 | btn = QPushButton(icon, "") 104 | btn.setObjectName(tooltip) 105 | btn.setFixedSize(40, 40) 106 | btn.setIconSize(QSize(w, h)) 107 | btn.setToolTip(f"{tooltip}") 108 | btn.setStyleSheet(""" 109 | QPushButton { 110 | border-radius: 0px; 111 | } 112 | QPushButton:hover { 113 | border-color: white; 114 | } 115 | QToolTip { 116 | font:"Arial"; font-size: 15px; color: #ffffff; border: 1px solid #ffffff; 117 | } 118 | """) 119 | return btn 120 | 121 | @Slot(str) 122 | def emit_file_path(self, filename) -> None: 123 | tmp = os.path.join(self.base_path, filename) 124 | all_signals.playback_signals.playback_filepath.emit(tmp) 125 | 126 | 127 | class _FileInfo(QPushButton): 128 | Filename = Signal(str) 129 | 130 | def __init__(self) -> None: 131 | super().__init__() 132 | self.setFixedSize(240, 60) 133 |
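# Each _FileInfo entry is a clickable button showing an elided file name plus "duration (size GB)" metadata; clicking it emits the Filename signal, which ExplorerSidebar turns into a full playback path.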
self.setObjectName("FileInfo") 134 | self.setStyleSheet( 135 | """ 136 | QPushButton#FileInfo { 137 | border-color: white; 138 | border-radius: 0px; 139 | } 140 | QPushButton#FileInfo:hover { 141 | border-color: red; 142 | } 143 | """ 144 | ) 145 | 146 | main_layout = QVBoxLayout() 147 | self.thumbnail_layout = QHBoxLayout() 148 | # self.label_thumbnail = Label() 149 | self.label_file_name = Label("File name: ", "Arial", 10, Qt.AlignLeft) 150 | self.label_file_name.setWordWrap(True) 151 | # self.label_file_name.setFixedWidth(240) 152 | # self.thumbnail_layout.addWidget(self.label_thumbnail) 153 | self.thumbnail_layout.addWidget(self.label_file_name) 154 | main_layout.addLayout(self.thumbnail_layout) 155 | 156 | self.metadata_layout = QHBoxLayout() 157 | self.label_metadata = Label("", "Arial", 10, Qt.AlignLeft) 158 | self.metadata_layout.addWidget(self.label_metadata) 159 | main_layout.addLayout(self.metadata_layout) 160 | self.setLayout(main_layout) 161 | 162 | self.clicked.connect(self.emitName) 163 | 164 | def emitName(self) -> None: 165 | self.Filename.emit(self.label_file_name.text()) 166 | -------------------------------------------------------------------------------- /tests/test_ctype_types.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | 4 | rng = np.random.default_rng() 5 | 6 | sys.path.insert(1, "../") 7 | import pykinect_azure as pykinect 8 | 9 | 10 | def rand_int(size): 11 | return rng.integers(0, 100, size=size) 12 | 13 | 14 | def check_float2_t(float_val, real_val): 15 | assert float_val.xy.x == real_val[0] 16 | assert float_val.xy.y == real_val[1] 17 | assert float_val.v[0] == real_val[0] 18 | assert float_val.v[1] == real_val[1] 19 | 20 | 21 | def check_float3_t(float_val, real_val): 22 | assert float_val.xyz.x == real_val[0] 23 | assert float_val.xyz.y == real_val[1] 24 | assert float_val.xyz.z == real_val[2] 25 | assert float_val.v[0] == real_val[0] 26 | assert float_val.v[1] == real_val[1] 27 | assert float_val.v[2] == real_val[2] 28 | 29 | 30 | def check_quat_t(quat_val, real_val): 31 | assert quat_val.wxyz.w == real_val[0] 32 | assert quat_val.wxyz.x == real_val[1] 33 | assert quat_val.wxyz.y == real_val[2] 34 | assert quat_val.wxyz.z == real_val[3] 35 | assert quat_val.v[0] == real_val[0] 36 | assert quat_val.v[1] == real_val[1] 37 | assert quat_val.v[2] == real_val[2] 38 | assert quat_val.v[3] == real_val[3] 39 | 40 | 41 | def check_joint_t(joint_val, real_pos, real_quat, real_conf): 42 | check_float3_t(joint_val.position, real_pos) 43 | check_quat_t(joint_val.orientation, real_quat) 44 | assert joint_val.confidence_level == real_conf 45 | 46 | 47 | def check_joint2d_t(joint_val, real_pos, real_conf): 48 | check_float2_t(joint_val.position, real_pos) 49 | assert joint_val.confidence_level == real_conf 50 | 51 | 52 | def check_skeleton_t(skeleton_val, real_pos_array, real_quat_array, real_conf_array): 53 | for i in range(pykinect.K4ABT_JOINT_COUNT): 54 | check_joint_t( 55 | skeleton_val.joints[i], 56 | real_pos_array[i], 57 | real_quat_array[i], 58 | real_conf_array[i], 59 | ) 60 | 61 | 62 | def check_skeleton2d_t(skeleton_val, real_pos_array, real_conf_array): 63 | for i in range(pykinect.K4ABT_JOINT_COUNT): 64 | check_joint2d_t(skeleton_val.joints2D[i], real_pos_array[i], real_conf_array[i]) 65 | 66 | 67 | def test_float2_t(): 68 | pos_xy = rand_int(2) 69 | float2_val = pykinect.k4a_float2_t(pos_xy) 70 | check_float2_t(float2_val, pos_xy) 71 | 72 | 73 | def test_float3_t(): 74 | pos_xyz = 
rand_int(3) 75 | float3_val = pykinect.k4a_float3_t(pos_xyz) 76 | check_float3_t(float3_val, pos_xyz) 77 | 78 | 79 | def test_quaternion_t(): 80 | quat = rand_int(4) 81 | quat_val = pykinect.k4a_quaternion_t(quat) 82 | check_quat_t(quat_val, quat) 83 | 84 | 85 | def test_joint_t(): 86 | joint_pos = rand_int(3) 87 | joint_quat = rand_int(4) 88 | joint_conf = rand_int(1)[0] 89 | joint_val = pykinect.k4abt_joint_t(joint_pos, joint_quat, joint_conf) 90 | check_joint_t(joint_val, joint_pos, joint_quat, joint_conf) 91 | 92 | 93 | def test_joint2d_t(): 94 | joint2d_pos = rand_int(2) 95 | joint2d_conf = rand_int(1)[0] 96 | joint2d_val = pykinect.k4abt_joint2D_t(joint2d_pos, joint2d_conf) 97 | check_joint2d_t(joint2d_val, joint2d_pos, joint2d_conf) 98 | 99 | 100 | def test_skeleton_t(): 101 | skeleton_pos_array = [rand_int(3) for i in range(pykinect.K4ABT_JOINT_COUNT)] 102 | skeleton_quat_array = [rand_int(4) for i in range(pykinect.K4ABT_JOINT_COUNT)] 103 | skeleton_conf_array = rand_int(pykinect.K4ABT_JOINT_COUNT) 104 | joints = [ 105 | pykinect.k4abt_joint_t(skeleton_pos_array[i], skeleton_quat_array[i], skeleton_conf_array[i]) 106 | for i in range(pykinect.K4ABT_JOINT_COUNT) 107 | ] 108 | skeleton_val = pykinect.k4abt_skeleton_t(joints) 109 | check_skeleton_t(skeleton_val, skeleton_pos_array, skeleton_quat_array, skeleton_conf_array) 110 | 111 | 112 | def test_skeleton2d_t(): 113 | skeleton2d_pos_array = [rand_int(2) for i in range(pykinect.K4ABT_JOINT_COUNT)] 114 | skeleton2d_conf_array = rand_int(pykinect.K4ABT_JOINT_COUNT) 115 | joints = [ 116 | pykinect.k4abt_joint2D_t(skeleton2d_pos_array[i], skeleton2d_conf_array[i]) 117 | for i in range(pykinect.K4ABT_JOINT_COUNT) 118 | ] 119 | skeleton2d_val = pykinect.k4abt_skeleton2D_t(joints) 120 | check_skeleton2d_t(skeleton2d_val, skeleton2d_pos_array, skeleton2d_conf_array) 121 | 122 | 123 | def test_body_t(): 124 | body_id = rand_int(1)[0] 125 | skeleton_pos_array = [rand_int(3) for i in range(pykinect.K4ABT_JOINT_COUNT)] 126 | skeleton_quat_array = [rand_int(4) for i in range(pykinect.K4ABT_JOINT_COUNT)] 127 | skeleton_conf_array = rand_int(pykinect.K4ABT_JOINT_COUNT) 128 | joints = [ 129 | pykinect.k4abt_joint_t(skeleton_pos_array[i], skeleton_quat_array[i], skeleton_conf_array[i]) 130 | for i in range(pykinect.K4ABT_JOINT_COUNT) 131 | ] 132 | skeleton_val = pykinect.k4abt_skeleton_t(joints) 133 | body_val = pykinect.k4abt_body_t(body_id, skeleton_val) 134 | assert body_val.id == body_id 135 | check_skeleton_t(body_val.skeleton, skeleton_pos_array, skeleton_quat_array, skeleton_conf_array) 136 | 137 | 138 | def test_body2d_t(): 139 | body_id = rand_int(1)[0] 140 | skeleton2d_pos_array = [rand_int(2) for i in range(pykinect.K4ABT_JOINT_COUNT)] 141 | skeleton2d_conf_array = rand_int(pykinect.K4ABT_JOINT_COUNT) 142 | joints = [ 143 | pykinect.k4abt_joint2D_t(skeleton2d_pos_array[i], skeleton2d_conf_array[i]) 144 | for i in range(pykinect.K4ABT_JOINT_COUNT) 145 | ] 146 | skeleton2d_val = pykinect.k4abt_skeleton2D_t(joints) 147 | body2d_val = pykinect.k4abt_body2D_t(body_id, skeleton2d_val) 148 | assert body2d_val.id == body_id 149 | check_skeleton2d_t(body2d_val.skeleton, skeleton2d_pos_array, skeleton2d_conf_array) 150 | 151 | 152 | def test_empty_values(): 153 | float2_val = pykinect.k4a_float2_t() 154 | check_float2_t(float2_val, [0, 0]) 155 | 156 | float3_val = pykinect.k4a_float3_t() 157 | check_float3_t(float3_val, [0, 0, 0]) 158 | 159 | quat_val = pykinect.k4a_quaternion_t() 160 | check_quat_t(quat_val, [0, 0, 0, 0]) 161 | 162 | joint_val = 
pykinect.k4abt_joint_t() 163 | check_joint_t(joint_val, [0, 0, 0], [0, 0, 0, 0], 0) 164 | 165 | joint2d_val = pykinect.k4abt_joint2D_t() 166 | check_joint2d_t(joint2d_val, [0, 0], 0) 167 | 168 | skeleton_val = pykinect.k4abt_skeleton_t() 169 | check_skeleton_t( 170 | skeleton_val, 171 | [[0, 0, 0] for i in range(pykinect.K4ABT_JOINT_COUNT)], 172 | [[0, 0, 0, 0] for i in range(pykinect.K4ABT_JOINT_COUNT)], 173 | [0 for i in range(pykinect.K4ABT_JOINT_COUNT)], 174 | ) 175 | 176 | skeleton2d_val = pykinect.k4abt_skeleton2D_t() 177 | check_skeleton2d_t( 178 | skeleton2d_val, 179 | [[0, 0] for i in range(pykinect.K4ABT_JOINT_COUNT)], 180 | [0 for i in range(pykinect.K4ABT_JOINT_COUNT)], 181 | ) 182 | 183 | 184 | if __name__ == "__main__": 185 | test_float2_t() 186 | test_float3_t() 187 | test_quaternion_t() 188 | test_joint_t() 189 | test_joint2d_t() 190 | test_skeleton_t() 191 | test_skeleton2d_t() 192 | test_body_t() 193 | test_body2d_t() 194 | 195 | test_empty_values() 196 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4a/device.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import ctypes 4 | import datetime 5 | from pathlib import Path 6 | 7 | from . import _k4a 8 | from .capture import Capture 9 | from .imu_sample import ImuSample 10 | from .calibration import Calibration 11 | from .configuration import Configuration 12 | from ..k4arecord.record import Record 13 | from ..k4a._k4atypes import K4A_WAIT_INFINITE 14 | from ..k4arecord._k4arecord import k4a_playback_get_next_capture, K4A_STREAM_RESULT_EOF 15 | 16 | 17 | class Device: 18 | calibration = None 19 | capture = None 20 | imu_sample = None 21 | filename_video = None 22 | 23 | def __init__(self, index: int = 0) -> None: 24 | self._handle = None 25 | self._handle = self.open(index) 26 | self.recording = False 27 | self.record = False 28 | self.is_imu = True 29 | 30 | def __del__(self) -> None: 31 | self.close() 32 | 33 | def is_valid(self) -> None: 34 | return self._handle 35 | 36 | def is_capture_initialized(self) -> None: 37 | return Device.capture 38 | 39 | def is_imu_sample_initialized(self) -> None: 40 | return Device.imu_sample 41 | 42 | def handle(self) -> None: 43 | return self._handle 44 | 45 | def start(self, configuration: Configuration, record=False, record_filepath="output.mkv") -> None: 46 | self.configuration = configuration 47 | self.start_cameras(configuration) 48 | self.start_imu() 49 | 50 | if record: 51 | self.record = Record(self._handle, self.configuration.handle(), record_filepath) 52 | self.record.add_imu_track() 53 | self.recording = True 54 | 55 | def close(self) -> None: 56 | if self.is_valid(): 57 | self.stop_cameras() 58 | self.stop_imu() 59 | _k4a.k4a_device_close(self._handle) 60 | 61 | # Clear members 62 | self._handle = None 63 | self.record = None 64 | self.recording = False 65 | 66 | def update(self, timeout_in_ms: int = K4A_WAIT_INFINITE) -> Capture: 67 | # Get cameras capture 68 | capture_handle = self.get_capture(timeout_in_ms) 69 | 70 | if self.is_capture_initialized(): 71 | Device.capture._handle = capture_handle 72 | else: 73 | Device.capture = Capture(capture_handle, Device.calibration) 74 | 75 | # Write capture if recording 76 | if self.recording: 77 | self.record.write_capture(Device.capture.handle()) 78 | 79 | return Device.capture 80 | 81 | def update_imu(self, timeout_in_ms: int = K4A_WAIT_INFINITE) -> ImuSample: 82 | # Get imu sample 83 | imu_sample = 
self.get_imu_sample(timeout_in_ms) 84 | 85 | if self.is_imu_sample_initialized(): 86 | Device.imu_sample._struct = imu_sample 87 | Device.imu_sample.parse_data() 88 | else: 89 | Device.imu_sample = ImuSample(imu_sample) 90 | 91 | if self.recording: 92 | self.record.write_imu(imu_sample) 93 | 94 | return Device.imu_sample 95 | 96 | def get_capture(self, timeout_in_ms: int = K4A_WAIT_INFINITE) -> _k4a.ctypes.POINTER: 97 | # Release current handle 98 | if self.is_capture_initialized(): 99 | Device.capture.release_handle() 100 | 101 | capture_handle = _k4a.k4a_capture_t() 102 | _k4a.VERIFY( 103 | _k4a.k4a_device_get_capture(self._handle, capture_handle, timeout_in_ms), 104 | "Get capture failed!", 105 | ) 106 | 107 | return capture_handle 108 | 109 | def get_imu_sample(self, timeout_in_ms: int = K4A_WAIT_INFINITE) -> _k4a.k4a_imu_sample_t: 110 | imu_sample = _k4a.k4a_imu_sample_t() 111 | 112 | _k4a.VERIFY( 113 | _k4a.k4a_device_get_imu_sample(self._handle, imu_sample, timeout_in_ms), 114 | "Get IMU failed!", 115 | ) 116 | 117 | return imu_sample 118 | 119 | def start_cameras(self, device_config: Configuration) -> None: 120 | Device.calibration = self.get_calibration(device_config.depth_mode, device_config.color_resolution) 121 | 122 | _k4a.VERIFY( 123 | _k4a.k4a_device_start_cameras(self._handle, device_config.handle()), 124 | "Start K4A cameras failed!", 125 | ) 126 | 127 | def stop_cameras(self) -> None: 128 | _k4a.k4a_device_stop_cameras(self._handle) 129 | 130 | def start_imu(self) -> None: 131 | _k4a.VERIFY(_k4a.k4a_device_start_imu(self._handle), "Start K4A IMU failed!") 132 | 133 | def stop_imu(self) -> None: 134 | _k4a.k4a_device_stop_imu(self._handle) 135 | 136 | # get device serial number 137 | def get_serialnum(self) -> str: 138 | serial_number_size = ctypes.c_size_t() 139 | result = _k4a.k4a_device_get_serialnum(self._handle, None, serial_number_size) 140 | 141 | if result == _k4a.K4A_BUFFER_RESULT_TOO_SMALL: 142 | serial_number = ctypes.create_string_buffer(serial_number_size.value) 143 | 144 | _k4a.VERIFY( 145 | _k4a.k4a_device_get_serialnum(self._handle, serial_number, serial_number_size), 146 | "Read serial number failed!", 147 | ) 148 | 149 | return serial_number.value.decode("utf-8") 150 | 151 | # ctypes.c_int => enum type defined in Configuration 152 | def get_calibration(self, depth_mode: ctypes.c_int, color_resolution: ctypes.c_int) -> Calibration: 153 | calibration_handle = _k4a.k4a_calibration_t() 154 | 155 | _k4a.VERIFY( 156 | _k4a.k4a_device_get_calibration(self._handle, depth_mode, color_resolution, calibration_handle), 157 | "Get calibration failed!", 158 | ) 159 | 160 | return Calibration(calibration_handle) 161 | 162 | def get_version(self): 163 | version = _k4a.k4a_hardware_version_t() 164 | 165 | _k4a.VERIFY(_k4a.k4a_device_get_version(self._handle, version), "Get version failed!") 166 | 167 | return version 168 | 169 | @staticmethod 170 | def open(index=0): 171 | device_handle = _k4a.k4a_device_t() 172 | 173 | _k4a.VERIFY(_k4a.k4a_device_open(index, device_handle), "Open K4A Device failed!") 174 | 175 | return device_handle 176 | 177 | @staticmethod 178 | def device_get_installed_count(): 179 | return int(_k4a.k4a_device_get_installed_count()) 180 | 181 | def get_playback_capture(self, playback_handle): 182 | capture_handle = _k4a.k4a_capture_t() 183 | ret = k4a_playback_get_next_capture(playback_handle, capture_handle) != K4A_STREAM_RESULT_EOF 184 | if ret: 185 | return capture_handle 186 | else: 187 | return None 188 | 189 | def save_frame_for_clip(self,
playback_handle, playback_calibration): 190 | capture_handle = self.get_playback_capture(playback_handle) 191 | 192 | if self.is_capture_initialized(): 193 | Device.capture.release_handle() 194 | Device.capture._handle = capture_handle 195 | else: 196 | Device.capture = Capture(capture_handle, playback_calibration) 197 | 198 | if self.recording: 199 | self.record.write_capture(Device.capture.handle()) 200 | 201 | 202 | return Device.capture 203 | -------------------------------------------------------------------------------- /pykinect_recorder/pyk4a/k4a/calibration.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | 3 | from . import _k4a 4 | 5 | 6 | class Calibration: 7 | """ 8 | Wrapper for `k4a_calibration_t`. 9 | 10 | Provides member functions for `k4a_calibration_t`. 11 | """ 12 | 13 | def __init__(self, calibration_handle: _k4a.k4a_calibration_t): 14 | self._handle = calibration_handle 15 | self.color_params = self._handle.color_camera_calibration.intrinsics.parameters.param 16 | self.depth_params = self._handle.depth_camera_calibration.intrinsics.parameters.param 17 | 18 | def __del__(self): 19 | self.reset() 20 | 21 | def __str__(self): 22 | message = ( 23 | "Rgb Intrinsic parameters: \n" 24 | f"\tcx: {self.color_params.cx}\n" 25 | f"\tcy: {self.color_params.cy}\n" 26 | f"\tfx: {self.color_params.fx}\n" 27 | f"\tfy: {self.color_params.fy}\n" 28 | f"\tk1: {self.color_params.k1}\n" 29 | f"\tk2: {self.color_params.k2}\n" 30 | f"\tk3: {self.color_params.k3}\n" 31 | f"\tk4: {self.color_params.k4}\n" 32 | f"\tk5: {self.color_params.k5}\n" 33 | f"\tk6: {self.color_params.k6}\n" 34 | f"\tcodx: {self.color_params.codx}\n" 35 | f"\tcody: {self.color_params.cody}\n" 36 | f"\tp2: {self.color_params.p2}\n" 37 | f"\tp1: {self.color_params.p1}\n" 38 | f"\tmetric_radius: {self.color_params.metric_radius}\n" 39 | ) 40 | return message 41 | 42 | def get_matrix(self, camera: _k4a.k4a_calibration_type_t): 43 | if camera == _k4a.K4A_CALIBRATION_TYPE_COLOR: 44 | return [ 45 | [self.color_params.fx, 0, self.color_params.cx], 46 | [0, self.color_params.fy, self.color_params.cy], 47 | [0, 0, 1], 48 | ] 49 | elif camera == _k4a.K4A_CALIBRATION_TYPE_DEPTH: 50 | return [ 51 | [self.depth_params.fx, 0, self.depth_params.cx], 52 | [0, self.depth_params.fy, self.depth_params.cy], 53 | [0, 0, 1], 54 | ] 55 | 56 | def is_valid(self): 57 | return self._handle 58 | 59 | def handle(self): 60 | return self._handle 61 | 62 | def reset(self): 63 | if self.is_valid(): 64 | self._handle = None 65 | 66 | # 3D point of source_camera to 3D point of target_camera 67 | def convert_3d_to_3d( 68 | self, 69 | source_point3d: _k4a.k4a_float3_t, 70 | source_camera: _k4a.k4a_calibration_type_t, 71 | target_camera: _k4a.k4a_calibration_type_t, 72 | ) -> _k4a.k4a_float3_t: 73 | """ 74 | Transform a 3d point of a source coordinate system into a 3d point of the target coordinate system. 75 | 76 | Throws error on failure. 77 | 78 | Note: 79 | See also `k4a_calibration_3d_to_3d()`. 80 | 81 | Args: 82 | source_point3d (k4a_float3_t): The 3D coordinates in millimeters representing a point 83 | in `source_camera`. 84 | source_camera (k4a_calibration_type_t): The current camera. 85 | target_camera (k4a_calibration_type_t): The target camera. 86 | 87 | Returns: 88 | k4a_float3_t: Three dimensional floating point vector. 
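Example:
    A minimal sketch (assuming `calib` is a valid Calibration instance; the
    ctypes float types accept a plain sequence, as the tests exercise):

    point_color = _k4a.k4a_float3_t((0.0, 0.0, 1000.0))  # 1 m in front of the color camera
    point_depth = calib.convert_3d_to_3d(point_color, _k4a.K4A_CALIBRATION_TYPE_COLOR, _k4a.K4A_CALIBRATION_TYPE_DEPTH)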
89 | """ 90 | target_point3d = _k4a.k4a_float3_t() 91 | 92 | _k4a.VERIFY( 93 | _k4a.k4a_calibration_3d_to_3d( 94 | self._handle, 95 | source_point3d, 96 | source_camera, 97 | target_camera, 98 | target_point3d, 99 | ), 100 | "Failed to convert from 3D to 3D", 101 | ) 102 | 103 | return target_point3d 104 | 105 | # 2D depth of source_camera to 3D point of target_camera 106 | def convert_2d_to_3d( 107 | self, 108 | source_point2d: _k4a.k4a_float2_t, 109 | source_depth: float, 110 | source_camera: _k4a.k4a_calibration_type_t, 111 | target_camera: _k4a.k4a_calibration_type_t, 112 | ) -> _k4a.k4a_float3_t: 113 | """ 114 | Transform a 2d pixel coordinate with an associated depth value of the source camera 115 | into a 3d point of the target coordinate system. 116 | 117 | Returns false if the point is invalid in the target coordinate system 118 | (and therefore target_point3d should not be used) Throws error if calibration contains 119 | invalid data. 120 | 121 | Args: 122 | source_point2d (_k4a.k4a_float2_t): The 2D pixel in `source_camera` coordinates. 123 | source_depth (float): The depth of `source_point2d` in millimeters. One way to derive the 124 | depth value in the color camera geometry is to use the function `k4a_transformation_depth_image_to_color_camera()`. 125 | source_camera (_k4a.k4a_calibration_type_t): The current camera. 126 | target_camera (_k4a.k4a_calibration_type_t): The target camera. 127 | 128 | Returns: 129 | _k4a.k4a_float3_t: Three dimensional floating point vector. 130 | """ 131 | target_point3d = _k4a.k4a_float3_t() 132 | valid = ctypes.c_int() 133 | 134 | _k4a.VERIFY( 135 | _k4a.k4a_calibration_2d_to_3d( 136 | self._handle, 137 | source_point2d, 138 | source_depth, 139 | source_camera, 140 | target_camera, 141 | target_point3d, 142 | valid, 143 | ), 144 | "Failed to convert from 2D to 3D", 145 | ) 146 | 147 | return target_point3d 148 | 149 | # 3D point of source_camera to 2D pixel of target_camera 150 | def convert_3d_to_2d( 151 | self, 152 | source_point3d: _k4a.k4a_float3_t, 153 | source_camera: _k4a.k4a_calibration_type_t, 154 | target_camera: _k4a.k4a_calibration_type_t, 155 | ) -> _k4a.k4a_float2_t: 156 | """ 157 | Transform a 3d point of a source coordinate system into a 2d pixel coordinate of the target 158 | camera. 159 | 160 | Returns false if the point is invalid in the target coordinate system 161 | (and therefore target_point2d should not be used) Throws error if calibration contains invalid data. 162 | 163 | Args: 164 | source_point3d (_k4a.k4a_float3_t): The 3D coordinates in millimeters representing 165 | a point in source_camera. 
166 | source_camera (_k4a.k4a_calibration_type_t): The current camera. 167 | target_camera (_k4a.k4a_calibration_type_t): The target camera. 168 | 169 | Returns: 170 | _k4a.k4a_float2_t: Two dimensional floating point vector (pixel coordinates in target_camera). 171 | """ 172 | target_point2d = _k4a.k4a_float2_t() 173 | valid = ctypes.c_int() 174 | 175 | _k4a.VERIFY( 176 | _k4a.k4a_calibration_3d_to_2d( 177 | self._handle, 178 | source_point3d, 179 | source_camera, 180 | target_camera, 181 | target_point2d, 182 | valid, 183 | ), 184 | "Failed to convert from 3D to 2D", 185 | ) 186 | 187 | return target_point2d 188 | 189 | # 2D depth of source_camera to 2D pixel of target_camera 190 | def convert_2d_to_2d( 191 | self, 192 | source_point2d: _k4a.k4a_float2_t, 193 | source_depth: float, 194 | source_camera: _k4a.k4a_calibration_type_t, 195 | target_camera: _k4a.k4a_calibration_type_t, 196 | ) -> _k4a.k4a_float2_t: 197 | """ 198 | Transform a 2d pixel coordinate with an associated depth value of the source camera 199 | into a 2d pixel coordinate of the target camera. 200 | Args: 201 | source_point2d (_k4a.k4a_float2_t): The 2D pixel in source_camera coordinates. 202 | source_depth (float): The depth of source_point2d in millimeters. 203 | source_camera (_k4a.k4a_calibration_type_t): The current camera. 204 | target_camera (_k4a.k4a_calibration_type_t): The target camera. 205 | 206 | Returns: 207 | _k4a.k4a_float2_t: Two dimensional floating point vector (pixel coordinates in target_camera). 208 | """ 209 | target_point2d = _k4a.k4a_float2_t() 210 | valid = ctypes.c_int() 211 | 212 | _k4a.VERIFY( 213 | _k4a.k4a_calibration_2d_to_2d( 214 | self._handle, 215 | source_point2d, 216 | source_depth, 217 | source_camera, 218 | target_camera, 219 | target_point2d, 220 | valid, 221 | ), 222 | "Failed to convert from 2D to 2D", 223 | ) 224 | 225 | return target_point2d 226 | 227 | # 2D pixel of color_camera to 2D pixel of depth camera 228 | def convert_color_2d_to_depth_2d( 229 | self, source_point2d: _k4a.k4a_float2_t, depth_image: _k4a.k4a_image_t 230 | ) -> _k4a.k4a_float2_t: 231 | target_point2d = _k4a.k4a_float2_t() 232 | valid = ctypes.c_int() 233 | 234 | _k4a.VERIFY( 235 | _k4a.k4a_calibration_color_2d_to_depth_2d(self._handle, source_point2d, depth_image, target_point2d, valid), 236 | "Failed to convert from Color 2D to Depth 2D", 237 | ) 238 | 239 | return target_point2d 240 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/common_widgets.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Tuple, Union, List 3 | 4 | from PySide6.QtCore import Qt, QPoint, QRect, QSize, QTimer 5 | from PySide6.QtGui import QFont, QPen, QPainter, QFontMetrics, QColor 6 | from PySide6.QtWidgets import ( 7 | QLabel, QComboBox, QPushButton, QSlider, QFrame, QDialog, 8 | QVBoxLayout, QHBoxLayout, QSizePolicy, QProgressBar, QLineEdit 9 | ) 10 | 11 | from .signals import all_signals 12 | 13 | """ 14 | This module collects the custom widgets that are used frequently throughout this project. 15 | 16 | Keeping them in one place makes the QWidget subclasses easier to manage and reuse.
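Example (a sketch, assuming a QApplication is already running):

    label = Label("RGB Sensor", "Arial", 10, Qt.AlignCenter)
    slider = Slider(Qt.Horizontal, (0, 100), 50)
    toggle = ToggleButton()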
17 | """ 18 | 19 | 20 | class ComboBox(QComboBox): 21 | def __init__(self, items: List[str], current_index: int, stylesheet: Union[str, os.PathLike] = None) -> None: 22 | super().__init__() 23 | self.addItems(items) 24 | self.setCurrentIndex(current_index) 25 | 26 | 27 | class PushButton(QPushButton): 28 | def __init__( 29 | self, text: str = "", font: str = "Arial", fontsize: int = 10, icon_path: str = "" 30 | ) -> None: 31 | super().__init__() 32 | self.setText(text) 33 | self.setFont(QFont(f"{font}", fontsize)) 34 | self.setStyleSheet(""" 35 | QPushButton { 36 | color: white; 37 | } 38 | QPushButton:hover { 39 | border-color: red; 40 | } 41 | """) 42 | 43 | 44 | class Slider(QSlider): 45 | def __init__( 46 | self, 47 | orientation, 48 | set_range_values: Tuple[int], 49 | set_value: int, 50 | ) -> None: 51 | super().__init__(orientation) 52 | self.setRange(*set_range_values) 53 | self.setValue(set_value) 54 | 55 | self.setStyleSheet( 56 | """ 57 | QSlider { 58 | margin: 0px; 59 | border-radius: 4px; 60 | } 61 | QSlider::sub-page:horizontal { 62 | background-color: #3f4042; 63 | height: 12px; 64 | border-radius: 4px; 65 | } 66 | QSlider::groove:horizontal { 67 | height: 12px; 68 | margin: 1px; 69 | border-radius: 4px; 70 | background-color: "#3f4042" 71 | } 72 | QSlider::handle:horizontal { 73 | border: 10px; 74 | margin: 0px; 75 | border-radius: 3px; 76 | background-color: "#00bcf8"; 77 | } 78 | QSlider:handle:horizontal:hover { 79 | background-color: "#4d96FF"; 80 | } 81 | QSlider:handle:horizontal:pressed { 82 | background-color: "#FFFFFF"; 83 | } 84 | """ 85 | ) 86 | 87 | def paintEvent(self, event): 88 | QSlider.paintEvent(self, event) 89 | 90 | curr_value = str(self.value()) 91 | painter = QPainter(self) 92 | painter.setPen(QPen(Qt.white)) 93 | 94 | font_metrics = QFontMetrics(self.font()) 95 | font_width = font_metrics.boundingRect(curr_value).width() 96 | 97 | rect = self.geometry() 98 | if self.orientation() == Qt.Horizontal: 99 | horizontal_x_pos = rect.width() // 2 - font_width // 2 - 5 100 | horizontal_y_pos = rect.height() * 0.67 101 | painter.drawText(QPoint(horizontal_x_pos, horizontal_y_pos), curr_value) 102 | else: 103 | pass 104 | 105 | 106 | class Label(QLabel): 107 | def __init__( 108 | self, 109 | text: str = "", 110 | font: str = "Arial", 111 | fontsize: int = 10, 112 | orientation=None, 113 | stylesheet: Union[str, os.PathLike] = None, 114 | ) -> None: 115 | super().__init__() 116 | self.setText(text) 117 | self.setFont(QFont(f"{font}", fontsize)) 118 | if orientation is not None: 119 | self.setAlignment(orientation) 120 | 121 | if stylesheet is not None: 122 | with open(os.path.join(os.path.split(__file__)[0], stylesheet), "r", encoding="utf-8") as f: 123 | stylesheet = f.read() 124 | print(stylesheet) 125 | self.setStyleSheet(str(stylesheet)) 126 | 127 | 128 | class Frame(QFrame): 129 | def __init__( 130 | self, 131 | text: str, 132 | min_size: Tuple[int, int], 133 | max_size: Tuple[int, int], 134 | layout: Union[QVBoxLayout, QHBoxLayout] = None, 135 | ) -> None: 136 | super().__init__() 137 | self.setMinimumSize(QSize(min_size[0], min_size[1])) 138 | self.setMaximumSize(QSize(max_size[0], max_size[1])) 139 | self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) 140 | self.setContentsMargins(0, 0, 0, 0) 141 | 142 | self.main_layout = QHBoxLayout() 143 | self.main_layout.setSpacing(0) 144 | self.main_layout.setContentsMargins(0, 0, 0, 0) 145 | 146 | self.frame_image = QFrame() 147 | self.frame_image.setObjectName("Frame_image") 148 | 
self.frame_image.setContentsMargins(0, 0, 0, 0) 149 | self.frame_image.setStyleSheet(""" 150 | QFrame#Frame_image { 151 | border: 1px solid white; 152 | border-radius: 0px; 153 | } 154 | """) 155 | self.frame_layout = QVBoxLayout(self.frame_image) 156 | self.frame_layout.setSpacing(0) 157 | self.frame_layout.setContentsMargins(0, 0, 0, 0) 158 | 159 | self.letter_box_frame = QFrame() 160 | self.letter_box_frame.setContentsMargins(0, 0, 0, 0) 161 | self.letter_box_frame.setStyleSheet( """ background-color: #1e1e1e; """ ) 162 | 163 | self.title_layout = QHBoxLayout() 164 | self.title_layout.setSpacing(0) 165 | self.title_layout.setContentsMargins(0, 0, 0, 0) 166 | self.title_name = Label(text, orientation=Qt.AlignCenter) 167 | self.title_name.setFixedHeight(30) 168 | self.title_name.setStyleSheet( 169 | """ 170 | background-color: #2c2e37; 171 | """ 172 | ) 173 | self.title_layout.addWidget(self.title_name) 174 | 175 | if layout is None: 176 | self.label_image = QLabel() 177 | self.frame_layout.addLayout(self.title_layout) 178 | self.frame_layout.addWidget(self.label_image) 179 | self.main_layout.addWidget(self.frame_image) 180 | 181 | if text in ["Depth Sensor", "IR Sensor"]: 182 | self.letter_box_frame.setMinimumSize(min_size[0]-min_size[1]-30, min_size[1]) 183 | self.letter_box_frame.setMaximumSize(max_size[0]-max_size[1]-30, max_size[1]) 184 | self.main_layout.addWidget(self.letter_box_frame) 185 | else: 186 | self.frame_layout.addLayout(self.title_layout) 187 | self.frame_layout.addLayout(layout) 188 | self.main_layout.addWidget(self.frame_image) 189 | 190 | self.setLayout(self.main_layout) 191 | 192 | 193 | class LineEdit(QLineEdit): 194 | def __init__(self, width: int = None, height: int = None, name: str = None) -> None: 195 | super().__init__() 196 | if width: 197 | self.setFixedWidth(width) 198 | if height: 199 | self.setFixedHeight(height) 200 | if name: 201 | self.setObjectName(name) 202 | self.editingFinished.connect(self.emit_objname) 203 | 204 | def emit_objname(self): 205 | all_signals.option_signals.color_option.emit(self.objectName()) 206 | 207 | 208 | class HLine(QFrame): 209 | def __init__(self): 210 | super().__init__() 211 | self.setStyleSheet(" border-color: #808080; ") 212 | self.setFixedHeight(1) 213 | self.setContentsMargins(0, 0, 0, 0) 214 | 215 | 216 | class VLine(QFrame): 217 | def __init__(self): 218 | super().__init__() 219 | self.setStyleSheet(" border-color: #808080; ") 220 | self.setFixedWidth(1) 221 | self.setMaximumHeight(1000) 222 | self.setContentsMargins(0, 0, 0, 0) 223 | 224 | 225 | class ToggleButton(QPushButton): 226 | def __init__(self, parent=None) -> None: 227 | super().__init__(parent) 228 | self.bg_color = QColor(0, 188, 248) 229 | 230 | self.setCheckable(True) 231 | self.setChecked(True) 232 | self.setMinimumWidth(55) 233 | self.setMinimumHeight(22) 234 | self.clicked.connect(self._toggle) 235 | 236 | def paintEvent(self, event) -> None: 237 | if self.isChecked(): 238 | self.bg_color = QColor(255, 40, 40) 239 | else: 240 | self.bg_color = QColor(0, 188, 248) 241 | 242 | radius = 7 243 | width = 2 * radius + 2 244 | center = self.rect().center() 245 | 246 | painter = QPainter(self) 247 | painter.setRenderHint(QPainter.Antialiasing) 248 | painter.translate(center) 249 | painter.setBrush(QColor(0, 0, 0)) 250 | 251 | pen = QPen(Qt.black) 252 | pen.setWidth(2) 253 | pen.setStyle(Qt.PenStyle.SolidLine)  # MPenStyle is the enum's count sentinel, not a drawable style 254 | painter.setPen(pen) 255 | 256 | painter.setBrush(QColor(63, 64, 66)) 257 | painter.drawRoundedRect(QRect(-width - 1,
-radius - 1, 2 * width + 2, 2 * radius + 2), 3, 3) 258 | 259 | painter.setBrush(self.bg_color) 260 | sw_rect = QRect(-width + 2, -radius + 1, 2 * radius - 2, 2 * radius - 2) 261 | if not self.isChecked(): 262 | sw_rect.moveLeft(width - radius * 2) 263 | painter.drawRoundedRect(sw_rect, 3, 3) 264 | 265 | def _toggle(self): 266 | self.toggle() 267 | 268 | 269 | class CustomProgressBarDialog(QDialog): 270 | def __init__(self, msec: int = 1000): 271 | super().__init__() 272 | self.setFixedSize(QSize(500, 200)) 273 | self.setWindowFlags(self.windowFlags() | Qt.WindowStaysOnTopHint) 274 | 275 | self.main_layout = QVBoxLayout() 276 | self.main_layout.setAlignment(Qt.AlignCenter) 277 | self.title_label = QLabel("Extract Frames...") 278 | self.progress_bar = QProgressBar() 279 | self.progress_bar.setFixedSize(QSize(450, 100)) 280 | self.main_layout.addWidget(self.title_label) 281 | self.main_layout.addWidget(self.progress_bar) 282 | self.setLayout(self.main_layout) 283 | 284 | self.cnt, self.boundary = 0, msec 285 | self.timer = QTimer() 286 | self.timer.setInterval(1) 287 | self.timer.timeout.connect(self.set_value) 288 | self.timer.start() 289 | 290 | def set_value(self): 291 | if self.cnt >= self.boundary: 292 | self.close() 293 | return  # stop counting once the bar is full 294 | self.cnt += 1 295 | tmp = (self.cnt / self.boundary) * 100 296 | self.progress_bar.setValue(int(tmp))  # QProgressBar.setValue expects an int 297 | self.progress_bar.setFormat("%.02f %%" % tmp) 298 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/viewer_sensors.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | import ctypes 5 | import datetime 6 | from pathlib import Path 7 | 8 | from PySide6.QtCore import Qt, Slot, QEvent, QMimeData, QPointF, QSize 9 | from PySide6.QtGui import QImage, QPixmap, QDrag 10 | from PySide6.QtWidgets import QHBoxLayout, QVBoxLayout, QFrame, QGridLayout 11 | 12 | from .record_sensors import RecordSensors 13 | from .viewer_imu_sensors import ImuSensors 14 | from .viewer_audio import AudioSensor 15 | from .sidebar_control import config_sidebar  # the components package provides sidebar_control.py (was: sidebar_record_control) 16 | from ..signals import all_signals 17 | from ..common_widgets import Frame, VLine, CustomProgressBarDialog 18 | from ...pyk4a.k4a._k4a import k4a_device_set_color_control, k4a_image_set_exposure_time_usec 19 | from ...pyk4a.k4a._k4atypes import color_command_dict, K4A_COLOR_CONTROL_MODE_MANUAL 20 | from ...pyk4a.k4a.configuration import Configuration 21 | from ...pyk4a.pykinect import start_device 22 | 23 | 24 | SAMPLE_COUNT = 10000 25 | RESOLUTION = 4 26 | 27 | 28 | class SensorViewer(QFrame): 29 | def __init__(self) -> None: 30 | super().__init__() 31 | 32 | self.setMinimumSize(QSize(920, 670)) 33 | self.setMaximumSize(QSize(1190, 1030)) 34 | self.setContentsMargins(0, 0, 0, 0) 35 | self.setStyleSheet("background-color: #1e1e1e; border-radius: 0px;") 36 | 37 | self.device = None 38 | self.config = Configuration() 39 | self.color_control = None 40 | self.base_path = None 41 | self.emit_configs = config_sidebar 42 | 43 | self.main_layout = QGridLayout() 44 | self.main_layout.setSpacing(0) 45 | self.main_layout.setContentsMargins(0, 0, 0, 0) 46 | self.main_layout.setAlignment(Qt.AlignCenter) 47 | self.frame_rgb = Frame("RGB Sensor", min_size=(460, 330), max_size=(595, 510)) 48 | self.frame_depth = Frame("Depth Sensor", min_size=(460, 330), max_size=(595, 510)) 49 | self.frame_ir = Frame("IR Sensor", min_size=(460, 330), max_size=(595, 510)) 50 | 51 | self.sensor_data_layout =
QHBoxLayout() 52 | self.sensor_data_layout.setSpacing(0) 53 | self.sensor_data_layout.setContentsMargins(0, 0, 0, 0) 54 | self.imu_senser = ImuSensors(min_size=(225, 300), max_size=(440, 480)) 55 | self.audio_sensor = AudioSensor(min_size=(225, 300), max_size=(440, 480)) 56 | 57 | self.v_line = QVBoxLayout() 58 | self.v_line.setSpacing(0) 59 | self.v_line.setContentsMargins(0, 0, 0, 0) 60 | self.v_line.addWidget(VLine()) 61 | 62 | self.sensor_data_layout.addWidget(self.imu_senser) 63 | self.sensor_data_layout.addLayout(self.v_line) 64 | self.sensor_data_layout.addWidget(self.audio_sensor) 65 | self.frame_subdata = Frame( 66 | "IMU & Audio Sensor", 67 | layout=self.sensor_data_layout, 68 | min_size=(460, 330), 69 | max_size=(595, 510) 70 | ) 71 | 72 | self.buffer = [QPointF(x, 0) for x in range(SAMPLE_COUNT)] 73 | self.main_layout.addWidget(self.frame_ir, 0, 0) 74 | self.main_layout.addWidget(self.frame_depth, 0, 1) 75 | self.main_layout.addWidget(self.frame_rgb, 1, 0) 76 | self.main_layout.addWidget(self.frame_subdata, 1, 1) 77 | 78 | self.setAcceptDrops(True) 79 | self.setLayout(self.main_layout) 80 | 81 | self.is_play = True 82 | self.is_record = True 83 | self.setLayout(self.main_layout) 84 | 85 | # UI option signals 86 | all_signals.option_signals.save_filepath.connect(self.set_base_path) 87 | all_signals.option_signals.sidebar_toggle.connect(self.set_config) 88 | all_signals.option_signals.device_option.connect(self.select_option) 89 | all_signals.option_signals.camera_option.connect(self.set_config) 90 | all_signals.option_signals.clear_frame.connect(self.clear_frame) 91 | 92 | # Recording signals 93 | all_signals.record_signals.rgb_image.connect(self.set_rgb_image) 94 | all_signals.record_signals.depth_image.connect(self.set_depth_image) 95 | all_signals.record_signals.ir_image.connect(self.set_ir_image) 96 | all_signals.record_signals.record_time.connect(self.set_time) 97 | all_signals.record_signals.video_fps.connect(self.set_fps) 98 | all_signals.record_signals.imu_acc_data.connect(self.set_acc_data) 99 | all_signals.record_signals.imu_gyro_data.connect(self.set_gyro_data) 100 | all_signals.record_signals.audio_data.connect(self.set_audio_data) 101 | 102 | def select_option(self, value): 103 | if value == "viewer": 104 | self.streaming() 105 | else: 106 | self.recording() 107 | 108 | def streaming(self) -> None: 109 | self.is_record = False 110 | self.play() 111 | 112 | def recording(self) -> None: 113 | self.is_record = True 114 | self.play() 115 | 116 | def play(self) -> None: 117 | if self.is_play: 118 | self.set_filename() 119 | for k, v in self.emit_configs["color"].items(): 120 | setattr(self.config, k, v) 121 | 122 | self.device = start_device( 123 | config=self.config, 124 | record=self.is_record, 125 | record_filepath=self.filename_video 126 | ) 127 | setattr(self.config, "depth_mode", self.emit_configs["depth_mode"]) 128 | for k, v in self.emit_configs["color_option"].items(): 129 | k4a_device_set_color_control( 130 | self.device._handle, color_command_dict[k], K4A_COLOR_CONTROL_MODE_MANUAL, ctypes.c_int32(int(v)) 131 | ) 132 | 133 | self.viewer = RecordSensors(device=self.device) 134 | self.viewer.start_audio() 135 | self.viewer.timer.start() 136 | self.is_play = False 137 | else: 138 | self.viewer.timer.stop() 139 | self.viewer.stop_audio() 140 | self.viewer.quit() 141 | self.device.close() 142 | self.is_play = True 143 | 144 | if self.is_record: 145 | wait_dialog = CustomProgressBarDialog(msec=500) 146 | wait_dialog.show() 147 | 148 | def set_filename(self) -> None: 
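"""Build the output path <base_path>/<YYYY_MM_DD_HH_MM_SS>.mkv, falling back to ~/Videos when no save directory was chosen."""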
149 | if self.base_path is None: 150 | self.base_path = os.path.join(Path.home(), "Videos") 151 | 152 | filename = datetime.datetime.now() 153 | filename = filename.strftime("%Y_%m_%d_%H_%M_%S") 154 | 155 | self.filename_video = os.path.join(self.base_path, f"{filename}.mkv") 156 | if sys.flags.debug: 157 | print(self.base_path, self.filename_video) 158 | 159 | @Slot(dict) 160 | def set_config(self, value: dict) -> None: 161 | self.emit_configs = value 162 | 163 | @Slot(str) 164 | def set_base_path(self, value: str) -> None: 165 | self.base_path = value 166 | 167 | def eventFilter(self, watched, event): 168 | if event.type() == QEvent.MouseButtonPress: 169 | self.mousePressEvent(event) 170 | elif event.type() == QEvent.MouseMove: 171 | self.mouseMoveEvent(event) 172 | elif event.type() == QEvent.MouseButtonRelease: 173 | self.mouseReleaseEvent(event) 174 | return super().eventFilter(watched, event) 175 | 176 | def get_index(self, pos): 177 | for i in range(self.main_layout.count()): 178 | if self.main_layout.itemAt(i).geometry().contains(pos) and i != self.target: 179 | return i 180 | 181 | def mousePressEvent(self, event): 182 | if event.button() == Qt.LeftButton: 183 | self.target = self.get_index(event.windowPos().toPoint()) 184 | else: 185 | self.target = None 186 | 187 | def mouseMoveEvent(self, event): 188 | if event.buttons() & Qt.LeftButton and self.target is not None: 189 | drag = QDrag(self.main_layout.itemAt(self.target).widget()) 190 | pix = self.main_layout.itemAt(self.target).widget().grab() 191 | mimedata = QMimeData() 192 | mimedata.setImageData(pix) 193 | drag.setMimeData(mimedata) 194 | drag.setPixmap(pix) 195 | drag.exec(Qt.DropAction.CopyAction | Qt.DropAction.MoveAction) 196 | 197 | def mouseReleaseEvent(self, event): 198 | self.target = None 199 | 200 | def dragEnterEvent(self, event): 201 | if event.mimeData().hasImage(): 202 | event.acceptProposedAction() 203 | else: 204 | event.ignore() 205 | 206 | def dragMoveEvent(self, event): 207 | event.accept() 208 | 209 | def dropEvent(self, event): 210 | if not event.source().geometry().contains(event.pos()): 211 | source = self.get_index(event.pos()) 212 | if source is None: 213 | return 214 | 215 | i, j = max(self.target, source), min(self.target, source) 216 | p1, p2 = self.main_layout.getItemPosition(i), self.main_layout.getItemPosition(j) 217 | 218 | self.main_layout.addItem(self.main_layout.takeAt(i), *p2) 219 | self.main_layout.addItem(self.main_layout.takeAt(j), *p1) 220 | event.accept() 221 | 222 | @Slot(QImage) 223 | def set_rgb_image(self, image: QImage) -> None: 224 | w, h = self.frame_rgb.label_image.width(), self.frame_rgb.label_image.height() 225 | image = image.scaled(w-5, h-5, Qt.KeepAspectRatio) 226 | self.frame_rgb.label_image.setPixmap(QPixmap.fromImage(image)) 227 | 228 | @Slot(QImage) 229 | def set_depth_image(self, image: QImage) -> None: 230 | w, h = self.frame_depth.label_image.width(), self.frame_depth.label_image.height() 231 | image = image.scaled(w-5, h-5, Qt.KeepAspectRatio) 232 | self.frame_depth.label_image.setPixmap(QPixmap.fromImage(image)) 233 | 234 | @Slot(QImage) 235 | def set_ir_image(self, image: QImage) -> None: 236 | w, h = self.frame_ir.label_image.width(), self.frame_ir.label_image.height() 237 | image = image.scaled(w-5, h-5, Qt.KeepAspectRatio) 238 | self.frame_ir.label_image.setPixmap(QPixmap.fromImage(image)) 239 | 240 | @Slot(float) 241 | def set_time(self, time) -> None: 242 | self.imu_senser.label_time.setText("Time(s) : %.3f" % time) 243 | 244 | @Slot(int) 245 | def 
set_fps(self, value) -> None: 246 | self.imu_senser.label_fps.setText("FPS : %d" % value) 247 | 248 | @Slot(list) 249 | def set_acc_data(self, values) -> None: 250 | self.imu_senser.label_acc_x.setText("X : %.5f" % values[0]) 251 | self.imu_senser.label_acc_y.setText("Y : %.5f" % values[1]) 252 | self.imu_senser.label_acc_z.setText("Z : %.5f" % values[2]) 253 | 254 | @Slot(list) 255 | def set_gyro_data(self, values) -> None: 256 | self.imu_senser.label_gyro_x.setText("X : %.5f" % values[0]) 257 | self.imu_senser.label_gyro_y.setText("Y : %.5f" % values[1]) 258 | self.imu_senser.label_gyro_z.setText("Z : %.5f" % values[2]) 259 | 260 | 261 | @Slot(list) 262 | def set_audio_data(self, values) -> None: 263 | start = 0 264 | if values[1] < SAMPLE_COUNT: 265 | start = SAMPLE_COUNT - values[1] 266 | for s in range(start): 267 | self.buffer[s].setY(self.buffer[s + values[1]].y()) 268 | 269 | data_index = 0 270 | for s in range(start, SAMPLE_COUNT): 271 | value = (ord(values[0][data_index]) - 128) / 128 272 | self.buffer[s].setY(value) 273 | data_index = data_index + RESOLUTION 274 | 275 | self.audio_sensor.series.replace(self.buffer) 276 | 277 | def clear_frame(self): 278 | self.frame_rgb.label_image.clear() 279 | self.frame_depth.label_image.clear() 280 | self.frame_ir.label_image.clear() 281 | self.imu_senser.label_time.setText("Time(s) : ") 282 | self.imu_senser.label_fps.setText("FPS : ") 283 | self.imu_senser.label_acc_x.setText("X : ") 284 | self.imu_senser.label_acc_y.setText("Y : ") 285 | self.imu_senser.label_acc_z.setText("Z : ") 286 | self.imu_senser.label_gyro_x.setText("X : ") 287 | self.imu_senser.label_gyro_y.setText("Y : ") 288 | self.imu_senser.label_gyro_z.setText("Z : ") 289 | -------------------------------------------------------------------------------- /pykinect_recorder/renderer/components/viewer_playback.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from PySide6.QtCore import Qt, Slot, QEvent, QMimeData, QSize, QPointF 4 | from PySide6.QtGui import QImage, QPixmap, QDrag 5 | from PySide6.QtWidgets import ( 6 | QHBoxLayout, QPushButton, QFrame, QGridLayout, 7 | QDialog, QVBoxLayout 8 | ) 9 | import qtawesome as qta 10 | 11 | from .playback_sensors import PlaybackSensors 12 | from .viewer_video_clipping import VideoClippingDialog 13 | from .viewer_imu_sensors import ImuSensors 14 | from ..signals import all_signals 15 | from ..common_widgets import Frame, Slider, Label 16 | from ...pyk4a.pykinect import initialize_libraries, start_playback 17 | 18 | 19 | class PlaybackViewer(QFrame): 20 | def __init__(self) -> None: 21 | super().__init__() 22 | 23 | self.setMinimumSize(QSize(920, 670)) 24 | self.setMaximumSize(QSize(2000, 2000)) 25 | self.setStyleSheet("background-color: #1e1e1e;") 26 | self.viewer = None 27 | self.playback = None 28 | self.file_path = None 29 | self.is_run = False 30 | 31 | self.main_layout = QVBoxLayout() 32 | self.main_layout.setSpacing(0) 33 | self.main_layout.setContentsMargins(0, 0, 0, 0) 34 | 35 | self.captured_viewer_frame = CapturedImageViewer() 36 | self.bottom_layout = QHBoxLayout() 37 | self.bottom_layout.setSpacing(10) 38 | self.bottom_layout.setContentsMargins(0, 0, 0, 0) 39 | self.bottom_layout.setAlignment(Qt.AlignmentFlag.AlignCenter) 40 | 41 | self.btn_stop = self.make_icons(qta.icon("mdi.stop"), "Start & Stop", scale=0.7) 42 | self.btn_stop.setFixedSize(50, 50) 43 | self.btn_stop.setStyleSheet(""" 44 |
QPushButton:hover { 45 | border-color: "white"; 46 | } 47 | QToolTip { 48 | font:"Arial"; font-size: 15px; color: #ffffff; border: 1px solid #ffffff; 49 | } 50 | """) 51 | # self.btn_clip = self.make_icons(qta.icon("mdi6.scissors-cutting"),"Video Clipping", scale=0.7) 52 | # self.btn_clip.setFixedSize(50, 50) 53 | # self.btn_clip.setStyleSheet(""" 54 | # QPushButton:hover { 55 | # border-color: "white"; 56 | # } 57 | # QToolTip { 58 | # font:"Arial"; font-size: 15px; color: #ffffff; border: 1px solid #ffffff; 59 | # } 60 | # """) 61 | 62 | self.slider_time = Slider(Qt.Orientation.Horizontal, (0, 1000000), 0) 63 | self.slider_time.setFixedHeight(40) 64 | self.slider_time.setMinimumWidth(400) 65 | self.slider_time.setMaximumWidth(2000) 66 | self.slider_time.setTickInterval(33333) 67 | 68 | self.bottom_layout.addWidget(self.btn_stop) 69 | # self.bottom_layout.addWidget(self.btn_clip) 70 | self.bottom_layout.addWidget(self.slider_time) 71 | 72 | self.main_layout.addWidget(self.captured_viewer_frame) 73 | self.main_layout.addLayout(self.bottom_layout) 74 | self.setLayout(self.main_layout) 75 | 76 | # playback signals 77 | self.btn_stop.clicked.connect(self.stop_playback) 78 | self.slider_time.valueChanged.connect(self.control_time) 79 | all_signals.playback_signals.time_value.connect(self.set_slider_value) 80 | all_signals.playback_signals.playback_filepath.connect(self.start_playback) 81 | 82 | # video clipping signals 83 | # self.btn_clip.clicked.connect(self.extract_video_to_frame) 84 | 85 | def make_icons(self, icon: qta, tooltip: str, scale: float = 0.8) -> QPushButton: 86 | w, h = int(35 * scale), int(35 * scale) 87 | btn = QPushButton(icon, "") 88 | btn.setFixedSize(40, 40) 89 | btn.setIconSize(QSize(w, h)) 90 | btn.setToolTip(f"{tooltip}") 91 | return btn 92 | 93 | def set_slider_value(self, value): 94 | _time = self.slider_time.value() + value 95 | self.slider_time.setValue(_time) 96 | 97 | @Slot(str) 98 | def start_playback(self, filepath) -> None: 99 | if self.viewer is not None: 100 | time.sleep(0.5) 101 | self.viewer.timer.stop() 102 | self.btn_stop.setIcon(qta.icon("mdi.stop")) 103 | self.playback.close() 104 | try: 105 | self.file_path = filepath 106 | initialize_libraries() 107 | self.playback = start_playback(filepath) 108 | 109 | self.is_run = True 110 | self.viewer = PlaybackSensors(playback=self.playback) 111 | self.start_time = self.playback.get_record_configuration()._handle.start_timestamp_offset_usec 112 | self.slider_time.setRange(self.start_time, self.playback.get_recording_length()-self.start_time) 113 | self.slider_time.setValue(self.start_time) 114 | self.viewer.timer.start() 115 | except Exception:  # opening or parsing the recording failed 116 | modal = QDialog() 117 | layout_modal = QVBoxLayout() 118 | e_message = Label( 119 | "Can't load video.
Please select another video.
", 120 | "Arial", 20, Qt.AlignmentFlag.AlignCenter 121 | ) 122 | layout_modal.addWidget(e_message) 123 | modal.setLayout(layout_modal) 124 | modal.setWindowTitle("Error Message") 125 | modal.resize(400, 200) 126 | modal.exec() 127 | 128 | def stop_playback(self): 129 | if self.is_run is True: 130 | self.is_run = False 131 | self.viewer.timer.stop() 132 | self.btn_stop.setIcon(qta.icon("fa.play")) 133 | else: 134 | self.is_run = True 135 | self.viewer.timer.start() 136 | self.btn_stop.setIcon(qta.icon("mdi.stop")) 137 | 138 | def control_time(self): 139 | if self.viewer is not None: 140 | all_signals.playback_signals.time_control.emit(self.slider_time.value()) 141 | 142 | # def extract_video_to_frame(self): 143 | # self.viewer.timer.stop() 144 | # video_clip_dialog = VideoClippingDialog(self.file_path) 145 | # video_clip_dialog.exec_() 146 | # self.viewer.timer.start() 147 | 148 | 149 | class CapturedImageViewer(QFrame): 150 | def __init__(self): 151 | super().__init__() 152 | self.target = None 153 | self.setContentsMargins(0, 0, 0, 0) 154 | 155 | self.main_layout = QGridLayout() 156 | self.main_layout.setSpacing(0) 157 | self.main_layout.setContentsMargins(0, 0, 0, 0) 158 | self.frame_rgb = Frame("RGB Sensor", min_size=(460, 300), max_size=(595, 510)) 159 | self.frame_depth = Frame("Depth Sensor", min_size=(460, 300), max_size=(595, 510)) 160 | self.frame_ir = Frame("IR Sensor", min_size=(460, 300), max_size=(595, 510)) 161 | 162 | self.sensor_data_layout = QHBoxLayout() 163 | self.sensor_data_layout.setSpacing(0) 164 | self.sensor_data_layout.setContentsMargins(0, 0, 0, 0) 165 | self.imu_senser = ImuSensors(min_size=(450, 270), max_size=(595, 480)) 166 | 167 | self.sensor_data_layout.addWidget(self.imu_senser) 168 | self.frame_subdata = Frame( 169 | "IMU Sensor", 170 | layout=self.sensor_data_layout, 171 | min_size=(460, 300), 172 | max_size=(595, 510) 173 | ) 174 | 175 | # UI option signal 176 | all_signals.option_signals.clear_frame.connect(self.clear_frame) 177 | 178 | # Playback signals 179 | all_signals.playback_signals.rgb_image.connect(self.set_rgb_image) 180 | all_signals.playback_signals.depth_image.connect(self.set_depth_image) 181 | all_signals.playback_signals.ir_image.connect(self.set_ir_image) 182 | all_signals.playback_signals.record_time.connect(self.set_time) 183 | all_signals.playback_signals.video_fps.connect(self.set_fps) 184 | all_signals.playback_signals.imu_acc_data.connect(self.set_acc_data) 185 | all_signals.playback_signals.imu_gyro_data.connect(self.set_gyro_data) 186 | 187 | self.main_layout.addWidget(self.frame_ir, 0, 0) 188 | self.main_layout.addWidget(self.frame_depth, 0, 1) 189 | self.main_layout.addWidget(self.frame_rgb, 1, 0) 190 | self.main_layout.addWidget(self.frame_subdata, 1, 1) 191 | 192 | self.setAcceptDrops(True) 193 | self.setLayout(self.main_layout) 194 | 195 | def eventFilter(self, watched, event): 196 | if event.type() == QEvent.MouseButtonPress: 197 | self.mousePressEvent(event) 198 | elif event.type() == QEvent.MouseMove: 199 | self.mouseMoveEvent(event) 200 | elif event.type() == QEvent.MouseButtonRelease: 201 | self.mouseReleaseEvent(event) 202 | return super().eventFilter(watched, event) 203 | 204 | def get_index(self, pos): 205 | for i in range(self.main_layout.count()): 206 | if self.main_layout.itemAt(i).geometry().contains(pos) and i != self.target: 207 | return i 208 | 209 | def mousePressEvent(self, event): 210 | if event.button() == Qt.LeftButton: 211 | self.target = self.get_index(event.windowPos().toPoint()) 212 | else: 213 | 
self.target = None 214 | 215 | def mouseMoveEvent(self, event): 216 | if event.buttons() & Qt.LeftButton and self.target is not None: 217 | drag = QDrag(self.main_layout.itemAt(self.target).widget()) 218 | pix = self.main_layout.itemAt(self.target).widget().grab() 219 | mimedata = QMimeData() 220 | mimedata.setImageData(pix) 221 | drag.setMimeData(mimedata) 222 | drag.setPixmap(pix) 223 | drag.exec(Qt.DropAction.CopyAction | Qt.DropAction.MoveAction) 224 | 225 | def mouseReleaseEvent(self, event): 226 | self.target = None 227 | 228 | def dragEnterEvent(self, event): 229 | if event.mimeData().hasImage(): 230 | event.acceptProposedAction() 231 | else: 232 | event.ignore() 233 | 234 | def dragMoveEvent(self, event): 235 | event.accept() 236 | 237 | def dropEvent(self, event): 238 | if not event.source().geometry().contains(event.pos()): 239 | source = self.get_index(event.pos()) 240 | if source is None: 241 | return 242 | 243 | i, j = max(self.target, source), min(self.target, source) 244 | p1, p2 = self.main_layout.getItemPosition(i), self.main_layout.getItemPosition(j) 245 | self.main_layout.addItem(self.main_layout.takeAt(i), *p2) 246 | self.main_layout.addItem(self.main_layout.takeAt(j), *p1) 247 | event.accept() 248 | 249 | @Slot(QImage) 250 | def set_rgb_image(self, image: QImage) -> None: 251 | w, h = self.frame_rgb.label_image.width(), self.frame_rgb.label_image.height() 252 | image = image.scaled(w-5, h-5, Qt.KeepAspectRatio) 253 | self.frame_rgb.label_image.setPixmap(QPixmap.fromImage(image)) 254 | 255 | @Slot(QImage) 256 | def set_depth_image(self, image: QImage) -> None: 257 | w, h = self.frame_depth.label_image.width(), self.frame_depth.label_image.height() 258 | image = image.scaled(w-5, h-5, Qt.KeepAspectRatio) 259 | self.frame_depth.label_image.setPixmap(QPixmap.fromImage(image)) 260 | 261 | @Slot(QImage) 262 | def set_ir_image(self, image: QImage) -> None: 263 | w, h = self.frame_ir.label_image.width(), self.frame_ir.label_image.height() 264 | image = image.scaled(w-5, h-5, Qt.KeepAspectRatio) 265 | self.frame_ir.label_image.setPixmap(QPixmap.fromImage(image)) 266 | 267 | @Slot(float) 268 | def set_fps(self, value) -> None: 269 | self.imu_senser.label_fps.setText("FPS : %.2f" % value) 270 | 271 | @Slot(float) 272 | def set_time(self, time) -> None: 273 | self.imu_senser.label_time.setText("Time(s) : %.3f" % time) 274 | 275 | @Slot(list) 276 | def set_acc_data(self, values) -> None: 277 | self.imu_senser.label_acc_x.setText("X : %.5f" % values[0]) 278 | self.imu_senser.label_acc_y.setText("Y : %.5f" % values[1]) 279 | self.imu_senser.label_acc_z.setText("Z : %.5f" % values[2]) 280 | 281 | @Slot(float) 282 | def set_gyro_data(self, values) -> None: 283 | self.imu_senser.label_gyro_x.setText("X : %.5f" % values[0]) 284 | self.imu_senser.label_gyro_y.setText("Y : %.5f" % values[1]) 285 | self.imu_senser.label_gyro_z.setText("Z : %.5f" % values[2]) 286 | 287 | def clear_frame(self): 288 | self.frame_rgb.label_image.clear() 289 | self.frame_depth.label_image.clear() 290 | self.frame_ir.label_image.clear() 291 | self.imu_senser.label_time.setText("Time(s) : ") 292 | self.imu_senser.label_fps.setText("FPS : ") 293 | self.imu_senser.label_acc_x.setText("X : ") 294 | self.imu_senser.label_acc_y.setText("Y : ") 295 | self.imu_senser.label_acc_z.setText("Z : ") 296 | self.imu_senser.label_gyro_x.setText("X : ") 297 | self.imu_senser.label_gyro_y.setText("Y : ") 298 | self.imu_senser.label_gyro_z.setText("Z : ") 299 | 
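# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file). dropEvent() above swaps two
# cells of a QGridLayout, removing the higher index first because
# QLayout.takeAt() shifts the indices of every item that follows. A minimal,
# self-contained version of the same pattern, assuming nothing beyond PySide6:
#
#     import sys
#     from PySide6.QtWidgets import QApplication, QGridLayout, QLabel, QWidget
#
#     def swap_grid_items(layout: QGridLayout, i: int, j: int) -> None:
#         """Swap the items at layout indices i and j, keeping row/col spans."""
#         i, j = max(i, j), min(i, j)  # remove the higher index first
#         pos_i, pos_j = layout.getItemPosition(i), layout.getItemPosition(j)
#         layout.addItem(layout.takeAt(i), *pos_j)
#         layout.addItem(layout.takeAt(j), *pos_i)
#
#     app = QApplication(sys.argv)
#     root = QWidget()
#     grid = QGridLayout(root)
#     for n in range(4):
#         grid.addWidget(QLabel(f"panel {n}"), n // 2, n % 2)
#     swap_grid_items(grid, 0, 3)  # "panel 0" and "panel 3" trade places
#     root.show()
#     sys.exit(app.exec())
# ---------------------------------------------------------------------------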
--------------------------------------------------------------------------------
/pykinect_recorder/pyk4a/k4abt/_k4abtTypes.py:
--------------------------------------------------------------------------------
1 | import ctypes
2 | import numpy as np
3 | 
4 | from ..k4a._k4atypes import k4a_float3_t, k4a_float2_t  # was `pykinect_azure.k4a._k4atypes`, an upstream leftover; the relative module location is assumed
5 | 
6 | 
7 | # K4A_DECLARE_HANDLE(k4abt_tracker_t);
8 | class _handle_k4abt_tracker_t(ctypes.Structure):
9 |     _fields_ = [
10 |         ("_rsvd", ctypes.c_size_t),
11 |     ]
12 | 
13 | 
14 | k4abt_tracker_t = ctypes.POINTER(_handle_k4abt_tracker_t)
15 | 
16 | 
17 | # K4A_DECLARE_HANDLE(k4abt_frame_t);
18 | class _handle_k4abt_frame_t(ctypes.Structure):
19 |     _fields_ = [
20 |         ("_rsvd", ctypes.c_size_t),
21 |     ]
22 | 
23 | 
24 | k4abt_frame_t = ctypes.POINTER(_handle_k4abt_frame_t)
25 | 
26 | k4abt_result_t = ctypes.c_int
27 | K4ABT_RESULT_SUCCEEDED = 0
28 | K4ABT_RESULT_FAILED = 1
29 | 
30 | # class k4abt_joint_id_t(CtypeIntEnum):
31 | K4ABT_JOINT_PELVIS = 0
32 | K4ABT_JOINT_SPINE_NAVEL = 1
33 | K4ABT_JOINT_SPINE_CHEST = 2
34 | K4ABT_JOINT_NECK = 3
35 | K4ABT_JOINT_CLAVICLE_LEFT = 4
36 | K4ABT_JOINT_SHOULDER_LEFT = 5
37 | K4ABT_JOINT_ELBOW_LEFT = 6
38 | K4ABT_JOINT_WRIST_LEFT = 7
39 | K4ABT_JOINT_HAND_LEFT = 8
40 | K4ABT_JOINT_HANDTIP_LEFT = 9
41 | K4ABT_JOINT_THUMB_LEFT = 10
42 | K4ABT_JOINT_CLAVICLE_RIGHT = 11
43 | K4ABT_JOINT_SHOULDER_RIGHT = 12
44 | K4ABT_JOINT_ELBOW_RIGHT = 13
45 | K4ABT_JOINT_WRIST_RIGHT = 14
46 | K4ABT_JOINT_HAND_RIGHT = 15
47 | K4ABT_JOINT_HANDTIP_RIGHT = 16
48 | K4ABT_JOINT_THUMB_RIGHT = 17
49 | K4ABT_JOINT_HIP_LEFT = 18
50 | K4ABT_JOINT_KNEE_LEFT = 19
51 | K4ABT_JOINT_ANKLE_LEFT = 20
52 | K4ABT_JOINT_FOOT_LEFT = 21
53 | K4ABT_JOINT_HIP_RIGHT = 22
54 | K4ABT_JOINT_KNEE_RIGHT = 23
55 | K4ABT_JOINT_ANKLE_RIGHT = 24
56 | K4ABT_JOINT_FOOT_RIGHT = 25
57 | K4ABT_JOINT_HEAD = 26
58 | K4ABT_JOINT_NOSE = 27
59 | K4ABT_JOINT_EYE_LEFT = 28
60 | K4ABT_JOINT_EAR_LEFT = 29
61 | K4ABT_JOINT_EYE_RIGHT = 30
62 | K4ABT_JOINT_EAR_RIGHT = 31
63 | K4ABT_JOINT_COUNT = 32
64 | 
65 | K4ABT_JOINT_NAMES = [
66 |     "pelvis",
67 |     "spine - navel",
68 |     "spine - chest",
69 |     "neck",
70 |     "left clavicle",
71 |     "left shoulder",
72 |     "left elbow",
73 |     "left wrist",
74 |     "left hand",
75 |     "left handtip",
76 |     "left thumb",
77 |     "right clavicle",
78 |     "right shoulder",
79 |     "right elbow",
80 |     "right wrist",
81 |     "right hand",
82 |     "right handtip",
83 |     "right thumb",
84 |     "left hip",
85 |     "left knee",
86 |     "left ankle",
87 |     "left foot",
88 |     "right hip",
89 |     "right knee",
90 |     "right ankle",
91 |     "right foot",
92 |     "head",
93 |     "nose",
94 |     "left eye",
95 |     "left ear",
96 |     "right eye",
97 |     "right ear",
98 | ]
99 | 
100 | K4ABT_SEGMENT_PAIRS = [
101 |     [1, 0],
102 |     [2, 1],
103 |     [3, 2],
104 |     [4, 2],
105 |     [5, 4],
106 |     [6, 5],
107 |     [7, 6],
108 |     [8, 7],
109 |     [9, 8],
110 |     [10, 7],
111 |     [11, 2],
112 |     [12, 11],
113 |     [13, 12],
114 |     [14, 13],
115 |     [15, 14],
116 |     [16, 15],
117 |     [17, 14],
118 |     [18, 0],
119 |     [19, 18],
120 |     [20, 19],
121 |     [21, 20],
122 |     [22, 0],
123 |     [23, 22],
124 |     [24, 23],
125 |     [25, 24],
126 |     [26, 3],
127 |     [27, 26],
128 |     [28, 26],
129 |     [29, 26],
130 |     [30, 26],
131 |     [31, 26],
132 | ]
133 | 
134 | # class k4abt_sensor_orientation_t(CtypeIntEnum):
135 | K4ABT_SENSOR_ORIENTATION_DEFAULT = 0
136 | K4ABT_SENSOR_ORIENTATION_CLOCKWISE90 = 1
137 | K4ABT_SENSOR_ORIENTATION_COUNTERCLOCKWISE90 = 2
138 | K4ABT_SENSOR_ORIENTATION_FLIP180 = 3
139 | 
140 | # class k4abt_tracker_processing_mode_t(CtypeIntEnum):
141 | K4ABT_TRACKER_PROCESSING_MODE_GPU = 0
142 | K4ABT_TRACKER_PROCESSING_MODE_CPU = 1
143 | K4ABT_TRACKER_PROCESSING_MODE_GPU_CUDA = 2
144 | K4ABT_TRACKER_PROCESSING_MODE_GPU_TENSORRT = 3
145 | K4ABT_TRACKER_PROCESSING_MODE_GPU_DIRECTML = 4
146 | 
147 | 
148 | class _k4abt_tracker_configuration_t(ctypes.Structure):
149 |     """
150 |     Configuration parameters for a k4abt body tracker.
151 | 
152 |     Used by k4abt_tracker_create() to specify the configuration of the k4abt tracker.
153 | 
154 |     Attributes:
155 |         sensor_orientation (c_int): The sensor mounting orientation type. Setting the correct
156 |             orientation helps the body tracker achieve more accurate body tracking results.
157 |         processing_mode (c_int): Specifies whether to run the tracker in CPU-only mode or GPU mode.
158 |             CPU-only mode does not require the machine to have a GPU, but it is
159 |             much slower than GPU mode.
160 |         gpu_device_id (c_int32): The GPU device ID on which to run the tracker. The setting has no
161 |             effect if processing_mode is set to K4ABT_TRACKER_PROCESSING_MODE_CPU.
162 |             For the K4ABT_TRACKER_PROCESSING_MODE_GPU_CUDA and K4ABT_TRACKER_PROCESSING_MODE_GPU_TENSORRT
163 |             modes, the ID of the graphics card can be retrieved through the CUDA API. When
164 |             processing_mode is K4ABT_TRACKER_PROCESSING_MODE_GPU_DIRECTML, the device ID corresponds
165 |             to the enumeration order of hardware adapters as given by IDXGIFactory::EnumAdapters.
166 |             A device_id of 0 always corresponds to the default adapter, which is typically the primary
167 |             display GPU installed on the system. More information can be found in the ONNX Runtime
168 |             documentation.
169 |         model_path (c_char_p): The model file name and location used by the tracker. If specified,
170 |             the tracker uses this model instead of the default one.
171 |     """
172 | 
173 |     _fields_ = [
174 |         ("sensor_orientation", ctypes.c_int),
175 |         ("processing_mode", ctypes.c_int),
176 |         ("gpu_device_id", ctypes.c_int32),
177 |         ("model_path", ctypes.c_char_p),
178 |     ]
179 | 
180 | 
181 | k4abt_tracker_configuration_t = _k4abt_tracker_configuration_t
182 | 
183 | 
184 | class _wxyz(ctypes.Structure):
185 |     """
186 |     WXYZ representation of a quaternion.
187 | 
188 |     Attributes:
189 |         w (c_float): W component of a quaternion.
190 |         x (c_float): X component of a quaternion.
191 |         y (c_float): Y component of a quaternion.
192 |         z (c_float): Z component of a quaternion.
193 |     """
194 | 
195 |     _fields_ = [
196 |         ("w", ctypes.c_float),
197 |         ("x", ctypes.c_float),
198 |         ("y", ctypes.c_float),
199 |         ("z", ctypes.c_float),
200 |     ]
201 | 
202 |     def __iter__(self):  # NOTE: returns a dict rather than an iterator; the wrappers below call __iter__() directly and rely on this
203 |         return {"w": self.w, "x": self.x, "y": self.y, "z": self.z}
204 | 
205 |     def __str__(self):
206 |         return f"w:{self.w} x:{self.x} y:{self.y} z:{self.z}"
207 | 
208 | 
209 | class k4a_quaternion_t(ctypes.Union):
210 |     """
211 |     Attributes:
212 |         wxyz (_wxyz): W, X, Y, Z representation of a quaternion.
213 |         v (c_float[4]): Array representation of a quaternion.
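
        Example (illustrative, editor's addition): because this is a ctypes
        Union, the named fields and the array view share memory:

            q = k4a_quaternion_t((1.0, 0.0, 0.0, 0.0))  # identity rotation
            q.wxyz.w == 1.0 and q.v[0] == 1.0           # both views read the same float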
214 |     """
215 | 
216 |     _fields_ = [("wxyz", _wxyz), ("v", ctypes.c_float * 4)]
217 | 
218 |     def __init__(self, q=(0, 0, 0, 0)):
219 |         super().__init__()
220 |         self.wxyz = _wxyz(q[0], q[1], q[2], q[3])
221 | 
222 |     def __iter__(self):
223 |         wxyz = self.wxyz.__iter__()
224 |         wxyz.update({"v": [v for v in self.v]})
225 |         return wxyz
226 | 
227 |     def __str__(self):
228 |         return self.wxyz.__str__()
229 | 
230 | 
231 | # class k4abt_joint_confidence_level_t(CtypeIntEnum):
232 | K4ABT_JOINT_CONFIDENCE_NONE = 0
233 | K4ABT_JOINT_CONFIDENCE_LOW = 1
234 | K4ABT_JOINT_CONFIDENCE_MEDIUM = 2
235 | K4ABT_JOINT_CONFIDENCE_HIGH = 3
236 | K4ABT_JOINT_CONFIDENCE_LEVELS_COUNT = 4
237 | 
238 | 
239 | class _k4abt_joint_t(ctypes.Structure):
240 |     """
241 |     Structure to define a single joint.
242 | 
243 |     The position and orientation together define the coordinate system for the given joint.
244 |     They are defined relative to the sensor global coordinate system.
245 | 
246 |     Attributes:
247 |         position (k4a_float3_t): The position of the joint, specified in millimeters.
248 |         orientation (k4a_quaternion_t): The orientation of the joint as a normalized quaternion.
249 |         confidence_level (c_int): The confidence level of the joint.
250 |     """
251 | 
252 |     _fields_ = [
253 |         ("position", k4a_float3_t),
254 |         ("orientation", k4a_quaternion_t),
255 |         ("confidence_level", ctypes.c_int),
256 |     ]
257 | 
258 |     def __init__(self, position=(0, 0, 0), orientation=(0, 0, 0, 0), confidence_level=0):
259 |         super().__init__()
260 |         self.position = k4a_float3_t(position)
261 |         self.orientation = k4a_quaternion_t(orientation)
262 |         self.confidence_level = confidence_level
263 | 
264 |     def __iter__(self):
265 |         return {
266 |             "position": self.position.__iter__(),
267 |             "orientation": self.orientation.__iter__(),
268 |             "confidence_level": self.confidence_level,
269 |         }
270 | 
271 | 
272 | k4abt_joint_t = _k4abt_joint_t
273 | 
274 | 
275 | class k4abt_skeleton_t(ctypes.Structure):
276 |     """
277 |     Structure to define joints for a skeleton.
278 | 
279 |     Attributes:
280 |         joints (_k4abt_joint_t[K4ABT_JOINT_COUNT]): The joints for the body.
281 |     """
282 | 
283 |     _fields_ = [
284 |         ("joints", _k4abt_joint_t * K4ABT_JOINT_COUNT),
285 |     ]
286 | 
287 |     def __init__(self, joints=(_k4abt_joint_t() for i in range(K4ABT_JOINT_COUNT))):  # NOTE: this generator default is exhausted after the first call; later default constructions fall back to ctypes zero-initialization, which is equivalent here
288 |         super().__init__()
289 |         self.joints = (_k4abt_joint_t * K4ABT_JOINT_COUNT)(*joints)
290 | 
291 |     def __iter__(self):
292 |         return {"joints": [joint.__iter__() for joint in self.joints]}
293 | 
294 | 
295 | class k4abt_body_t(ctypes.Structure):
296 |     """
297 |     Structure to define a body.
298 | 
299 |     Attributes:
300 |         id (c_uint32): An id for the body that can be used for frame-to-frame correlation.
301 |         skeleton (k4abt_skeleton_t): The skeleton information for the body.
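
        Example (illustrative, editor's addition):

            body = k4abt_body_t(id=1)
            head = body.skeleton.joints[K4ABT_JOINT_HEAD]
            head.position, head.confidence_level  # zero-initialized for a fresh body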
302 |     """
303 | 
304 |     _fields_ = [
305 |         ("id", ctypes.c_uint32),
306 |         ("skeleton", k4abt_skeleton_t),
307 |     ]
308 | 
309 |     def __init__(self, id=0, skeleton=k4abt_skeleton_t()):
310 |         super().__init__()
311 |         self.id = id
312 |         self.skeleton = skeleton
313 | 
314 |     def __iter__(self):
315 |         return {"id": self.id, "skeleton": self.skeleton.__iter__()}
316 | 
317 | 
318 | class _k4abt_joint2D_t(ctypes.Structure):
319 |     # https://microsoft.github.io/Azure-Kinect-Body-Tracking/release/1.1.x/struct_microsoft_1_1_azure_1_1_kinect_1_1_body_tracking_1_1_joint.html
320 |     _fields_ = [
321 |         ("position", k4a_float2_t),
322 |         ("confidence_level", ctypes.c_int),
323 |     ]
324 | 
325 |     def __init__(self, position=(0, 0), confidence_level=0):
326 |         super().__init__()
327 |         self.position = k4a_float2_t(position)
328 |         self.confidence_level = confidence_level
329 | 
330 |     def __iter__(self):
331 |         return {
332 |             "position": self.position.__iter__(),
333 |             "confidence_level": self.confidence_level,
334 |         }
335 | 
336 | 
337 | k4abt_joint2D_t = _k4abt_joint2D_t
338 | 
339 | 
340 | class k4abt_skeleton2D_t(ctypes.Structure):
341 |     # https://microsoft.github.io/Azure-Kinect-Body-Tracking/release/1.1.x/struct_microsoft_1_1_azure_1_1_kinect_1_1_body_tracking_1_1_skeleton.html
342 |     _fields_ = [
343 |         ("joints2D", _k4abt_joint2D_t * K4ABT_JOINT_COUNT),
344 |     ]
345 | 
346 |     def __init__(self, joints=(_k4abt_joint2D_t() for i in range(K4ABT_JOINT_COUNT))):
347 |         super().__init__()
348 |         self.joints2D = (_k4abt_joint2D_t * K4ABT_JOINT_COUNT)(*joints)
349 | 
350 |     def __iter__(self):
351 |         return {"joints2D": [joint.__iter__() for joint in self.joints2D]}
352 | 
353 | 
354 | class k4abt_body2D_t(ctypes.Structure):
355 |     # https://microsoft.github.io/Azure-Kinect-Body-Tracking/release/1.1.x/struct_microsoft_1_1_azure_1_1_kinect_1_1_body_tracking_1_1_body.html
356 |     _fields_ = [
357 |         ("id", ctypes.c_uint32),
358 |         ("skeleton", k4abt_skeleton2D_t),
359 |     ]
360 | 
361 |     def __init__(self, id=0, skeleton=k4abt_skeleton2D_t()):
362 |         super().__init__()
363 |         self.id = id
364 |         self.skeleton = skeleton
365 | 
366 |     def __iter__(self):
367 |         return {"id": self.id, "skeleton": self.skeleton.__iter__()}
368 | 
369 | 
370 | K4ABT_BODY_INDEX_MAP_BACKGROUND = 255
371 | K4ABT_INVALID_BODY_ID = 0xFFFFFFFF
372 | K4ABT_DEFAULT_TRACKER_SMOOTHING_FACTOR = 0.0
373 | 
374 | K4ABT_DEFAULT_MODEL = 0
375 | K4ABT_LITE_MODEL = 1
376 | 
377 | k4abt_tracker_default_configuration = k4abt_tracker_configuration_t()
378 | k4abt_tracker_default_configuration.sensor_orientation = K4ABT_SENSOR_ORIENTATION_DEFAULT
379 | k4abt_tracker_default_configuration.processing_mode = K4ABT_TRACKER_PROCESSING_MODE_GPU
380 | k4abt_tracker_default_configuration.gpu_device_id = 0
381 | 
382 | body_colors = np.ones((256, 3), dtype=np.uint8) * K4ABT_BODY_INDEX_MAP_BACKGROUND
383 | body_colors[:7, :] = np.array(
384 |     [
385 |         [202, 183, 42],
386 |         [42, 61, 202],
387 |         [42, 202, 183],
388 |         [202, 42, 61],
389 |         [183, 42, 202],
390 |         [42, 202, 61],
391 |         [141, 202, 42],
392 |     ]
393 | )
--------------------------------------------------------------------------------
/pykinect_recorder/renderer/components/viewer_video_clipping.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | 
4 | from superqt import QLabeledRangeSlider
5 | from PySide6.QtGui import QImage, QPixmap
6 | from PySide6.QtCore import Slot, Qt, QSize, QTimer
7 | from PySide6.QtWidgets import (
8 |     QDialog, QVBoxLayout, QHBoxLayout, QLabel, QPushButton,
9 |     QFrame, QWidget, QFileDialog, QProgressBar
10 | )
11 | 
12 | from ..signals import all_signals
13 | from ...pyk4a.utils import colorize
14 | from ...pyk4a.pykinect import start_playback, start_device, initialize_libraries
15 | from ...pyk4a.k4a.configuration import Configuration
16 | 
17 | 
18 | class VideoClippingDialog(QDialog):
19 |     def __init__(self, file_name: str):
20 |         super().__init__()
21 |         self.setFixedSize(QSize(1120, 920))
22 | 
23 |         self.cnt = 0
24 |         self.root_path = None
25 |         self.file_name = file_name
26 |         self.clip_option = None
27 |         self.save_file_name = self.file_name.split('/')[-1][:-4]  # strip the ".mkv" extension
28 |         self.left, self.right = None, None
29 |         self.progress_dialog = ProgressBarDialog()
30 |         self.fps_dict = {0: "5", 1: "15", 2: "30"}  # K4A camera_fps enum value -> frames per second
31 | 
32 |         self.main_widget = QWidget()
33 |         self.main_layout = QVBoxLayout()
34 |         # top layout
35 |         self.top_layout = QHBoxLayout()
36 |         self.title_layout = QHBoxLayout()
37 |         self.title_layout.setAlignment(Qt.AlignLeft)
38 |         self.title_label = QLabel(f"{file_name.split('/')[-1]}")
39 |         self.title_label.setFixedHeight(40)
40 |         self.title_layout.addWidget(self.title_label)
41 | 
42 |         self.btn_layout = QHBoxLayout()
43 |         self.btn_layout.setAlignment(Qt.AlignRight)
44 |         self.save_btn = QPushButton("extract")
45 |         self.save_btn.setFixedHeight(40)
46 |         self.exit_btn = QPushButton("exit")
47 |         self.exit_btn.setFixedHeight(40)
48 |         self.btn_layout.addWidget(self.save_btn)
49 |         self.btn_layout.addWidget(self.exit_btn)
50 | 
51 |         self.top_layout.addLayout(self.title_layout)
52 |         self.top_layout.addLayout(self.btn_layout)
53 |         self.main_layout.addLayout(self.top_layout)
54 | 
55 |         # media frame
56 |         self.media_frame = QFrame()
57 |         self.media_layout = QVBoxLayout()
58 |         self.media_label = QLabel("Label")
59 |         self.media_label.setFixedSize(QSize(1080, 720))
60 |         self.media_label.setAlignment(Qt.AlignCenter)
61 |         self.media_label.setStyleSheet(" border-color: white; ")
62 |         self.media_layout.addWidget(self.media_label)
63 |         self.media_frame.setLayout(self.media_layout)
64 |         self.main_layout.addWidget(self.media_frame)
65 | 
66 |         # time control layout
67 |         self.time_layout = QHBoxLayout()
68 |         self.time_layout.setAlignment(Qt.AlignCenter)
69 |         self.time_slider = QLabeledRangeSlider(Qt.Horizontal, self)
70 |         self.time_slider.setStyleSheet("""
71 |             QSlider {
72 |                 margin: 0px;
73 |                 border-radius: 4px;
74 |                 background-color: white;
75 |             }
76 |             QSlider::sub-page:horizontal {
77 |                 background-color: #3f4042;
78 |                 height: 12px;
79 |                 border-radius: 4px;
80 |             }
81 |             QSlider::groove:horizontal {
82 |                 height: 12px;
83 |                 margin: 1px;
84 |                 border-radius: 4px;
85 |                 background-color: "#3f4042"
86 |             }
87 |             QSlider::handle:horizontal {
88 |                 border: 10px;
89 |                 margin: 0px;
90 |                 border-radius: 3px;
91 |                 background-color: "#00bcf8";
92 |             }
93 |             QSlider:handle:horizontal:hover {
94 |                 background-color: "#4d96FF";
95 |             }
96 |             QSlider:handle:horizontal:pressed {
97 |                 background-color: "#000000";
98 |             }
99 |         """)
100 |         self.time_slider.setFixedSize(QSize(800, 80))
101 |         self.time_layout.addWidget(self.time_slider)
102 |         self.main_layout.addLayout(self.time_layout)
103 |         self.setLayout(self.main_layout)
104 | 
105 |         self.initialize_playback()
106 |         self.save_btn.clicked.connect(self.select_root_path)
107 |         self.exit_btn.clicked.connect(self.close_dialog)
108 |         self.time_slider.valueChanged.connect(self.control_timestamp)
109 |         all_signals.playback_signals.clip_option.connect(self.set_clip_option)
110 | 
111 |         self.playback.seek_timestamp(self.start_time + self.ticks)
112 |         self.update_next_frame()
113 | 
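    # Editor's note (worked example of the timestamp arithmetic used above and
    # in initialize_playback() below): for a 30-fps recording,
    # ticks = int(1e6 // 30) = 33333 µs between frames, so a capture whose
    # length minus start offset is 10 s exposes 10_000_000 // 33333 - 1 = 299
    # slider positions, and frame k sits at start_time + k * ticks µs.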
114 |     def close_dialog(self):
115 |         self.close()
116 | 
117 |     def initialize_playback(self):
118 |         self.playback = start_playback(self.file_name)
119 |         self.start_time = self.playback.get_record_configuration()._handle.start_timestamp_offset_usec
120 |         self.device_fps = self.playback.get_record_configuration()._handle.camera_fps
121 |         self.ticks = int(1e6 // int(self.fps_dict[self.device_fps]))  # microseconds per frame
122 | 
123 |         self.left = 0
124 |         self.right = (self.playback.get_recording_length() - self.start_time) // self.ticks - 1
125 |         self.total_frame = self.right - self.left
126 | 
127 |         self.time_slider.setTickInterval(1)
128 |         self.time_slider.setRange(self.left, self.right)
129 |         self.time_slider.setValue([self.left, self.right])
130 | 
131 |     def control_timestamp(self):
132 |         cur_left, cur_right = self.time_slider.value()
133 |         if cur_left != self.left:
134 |             self.left = cur_left
135 |             self.playback.seek_timestamp(self.start_time + self.left * self.ticks)
136 |         elif cur_right != self.right:
137 |             self.right = cur_right
138 |             self.playback.seek_timestamp(self.start_time + self.right * self.ticks)
139 |         self.update_next_frame()
140 | 
141 |     def update_next_frame(self):
142 |         _, current_frame = self.playback.update()
143 |         current_rgb_frame = current_frame.get_color_image()
144 |         rgb_frame = current_rgb_frame[1]
145 |         rgb_frame = cv2.cvtColor(rgb_frame, cv2.COLOR_BGR2RGB)
146 | 
147 |         h, w, ch = rgb_frame.shape
148 |         rgb_frame = QImage(rgb_frame, w, h, ch * w, QImage.Format_RGB888)
149 |         scaled_rgb_frame = rgb_frame.scaled(1080, 720, Qt.KeepAspectRatio)
150 |         self.media_label.setPixmap(QPixmap.fromImage(scaled_rgb_frame))
151 | 
152 |     @Slot(str)
153 |     def set_clip_option(self, value):
154 |         self.clip_option = value
155 | 
156 |     def select_root_path(self):
157 |         option_dialog = SelectClipOptionDialog()
158 |         option_dialog.exec()
159 |         self.root_path = QFileDialog.getExistingDirectory(self, "Open Data Files", ".", QFileDialog.ShowDirsOnly)
160 |         if self.clip_option == "mkv":
161 |             self.extract_mkv()
162 |         elif self.clip_option == "jpg":
163 |             self.extract_frame()
164 | 
165 |     def extract_mkv(self):
166 |         self.total_frame = self.right - self.left
167 |         all_signals.playback_signals.video_total_frame.emit(int(self.total_frame))
168 |         config = self.record_config_to_config()
169 | 
170 |         initialize_libraries()
171 |         self.device = start_device(
172 |             config=config,
173 |             record=True,
174 |             record_filepath=os.path.join(self.root_path, self.save_file_name + "_extract.mkv")
175 |         )
176 |         self.playback.seek_timestamp(self.start_time + self.left * self.ticks)  # frame index -> µs offset, assuming the same convention as control_timestamp (was seek_timestamp(self.left))
177 | 
178 |         self.timer = QTimer()
179 |         self.timer.setInterval(1)  # QTimer takes integer milliseconds (was 0.001)
180 |         self.timer.timeout.connect(self.save_to_mkv)
181 |         self.timer.start()
182 |         self.progress_dialog.exec()
183 |         self.device.close()
184 | 
185 |     def record_config_to_config(self):
186 |         record_config = self.playback.get_record_configuration()
187 |         config = Configuration()
188 | 
189 |         setattr(config, "color_format", record_config._handle.color_format)
190 |         setattr(config, "depth_mode", record_config._handle.depth_mode)
191 |         setattr(config, "color_resolution", record_config._handle.color_resolution)
192 |         setattr(config, "camera_fps", record_config._handle.camera_fps)
193 |         return config
194 | 
195 |     def save_to_mkv(self):
196 |         if self.cnt == self.total_frame:
197 |             self.timer.stop()
198 |             return  # don't record past the selected range
199 |         self.cnt += 1
200 |         self.device.save_frame_for_clip(self.playback._handle, self.playback.calibration)
201 |         all_signals.playback_signals.current_frame_cnt.emit(self.cnt)
202 | 
203 |     def extract_frame(self):
204 |         self.total_frame = self.right - self.left
205 |         all_signals.playback_signals.video_total_frame.emit(int(self.total_frame))
206 |         self.playback.seek_timestamp(self.start_time + self.left * self.ticks)  # frame index -> µs offset, assuming the same convention as control_timestamp (was seek_timestamp(self.left))
207 |         os.makedirs(os.path.join(self.root_path, self.save_file_name, "rgb"), exist_ok=True)
208 |         os.makedirs(os.path.join(self.root_path, self.save_file_name, "ir"), exist_ok=True)
209 |         os.makedirs(os.path.join(self.root_path, self.save_file_name, "depth"), exist_ok=True)
210 | 
211 |         self.timer = QTimer()
212 |         self.timer.setInterval(33)  # QTimer takes integer milliseconds; ~30 fps (was 0.033)
213 |         self.timer.timeout.connect(self.save_frame)
214 |         self.timer.start()
215 |         self.progress_dialog.exec()
216 | 
217 |     def save_frame(self):
218 |         if self.cnt == self.total_frame:
219 |             self.timer.stop()
220 |             return  # extraction finished for the selected range
221 |         _, current_frame = self.playback.update()
222 |         current_rgb_frame = current_frame.get_color_image()
223 |         current_depth_frame = current_frame.get_depth_image()
224 |         current_ir_frame = current_frame.get_ir_image()
225 | 
226 |         if current_ir_frame[0]:
227 |             ir_frame = colorize(current_ir_frame[1], (None, 5000), cv2.COLORMAP_BONE)
228 |             cv2.imwrite(os.path.join(
229 |                 self.root_path, self.save_file_name, "ir", f"{self.save_file_name}_ir_{str(self.cnt).zfill(6)}.png"), ir_frame,
230 |             )
231 | 
232 |         if current_depth_frame[0]:
233 |             current_depth_frame = colorize(current_depth_frame[1], (None, 5000), cv2.COLORMAP_HSV)
234 |             cv2.imwrite(os.path.join(
235 |                 self.root_path, self.save_file_name, "depth", f"{self.save_file_name}_depth_{str(self.cnt).zfill(6)}.png"), current_depth_frame,
236 |             )
237 | 
238 |         if current_rgb_frame[0]:
239 |             rgb_frame = current_rgb_frame[1]
240 |             cv2.imwrite(os.path.join(
241 |                 self.root_path, self.save_file_name, "rgb", f"{self.save_file_name}_rgb_{str(self.cnt).zfill(6)}.jpg"), rgb_frame,
242 |                 [cv2.IMWRITE_JPEG_QUALITY, 100]
243 |             )
244 |         self.cnt += 1
245 |         all_signals.playback_signals.current_frame_cnt.emit(self.cnt)
246 | 
247 | 
248 | class ProgressBarDialog(QDialog):
249 |     def __init__(self):
250 |         super().__init__()
251 |         self.setFixedSize(QSize(500, 200))
252 |         self.setWindowFlags(self.windowFlags() | Qt.WindowStaysOnTopHint)
253 |         self.total_frame = None
254 | 
255 |         self.main_layout = QVBoxLayout()
256 |         self.main_layout.setAlignment(Qt.AlignCenter)
257 |         self.title_label = QLabel("Extract Frames...")
258 |         self.progress_bar = QProgressBar()
259 |         self.progress_bar.setFixedSize(QSize(450, 100))
260 |         self.main_layout.addWidget(self.title_label)
261 |         self.main_layout.addWidget(self.progress_bar)
262 | 
263 |         all_signals.playback_signals.current_frame_cnt.connect(self.set_value)
264 |         all_signals.playback_signals.video_total_frame.connect(self.set_total_frame)
265 |         self.setLayout(self.main_layout)
266 | 
267 |     @Slot(int)
268 |     def set_total_frame(self, total):
269 |         self.total_frame = total
270 | 
271 |     @Slot(int)
272 |     def set_value(self, value):
273 |         if value == self.total_frame:
274 |             self.close()
275 |         tmp = (value / self.total_frame) * 100
276 |         self.progress_bar.setValue(round(tmp))  # QProgressBar.setValue() takes an int
277 |         self.progress_bar.setFormat("%.02f %%" % tmp)
278 | 
279 | 
280 | class SelectClipOptionDialog(QDialog):
281 |     def __init__(self):
282 |         super().__init__()
283 |         self.setFixedSize(QSize(500, 200))
284 |         self.setWindowFlags(self.windowFlags() | Qt.WindowStaysOnTopHint)
285 | 
286 |         self.main_layout = QVBoxLayout()
287 |         self.main_layout.setAlignment(Qt.AlignCenter)
288 |         self.title_label = QLabel("Save To")
289 | 
290 |         self.btn_layout = QHBoxLayout()
291 |         self.btn_mkv = QPushButton("video as '.mkv'")
292 |         self.btn_mkv.setObjectName("btn_mkv")
293 |         self.btn_mkv.setDisabled(True)
294 |         self.btn_jpg = QPushButton("rgb/ir/depth frame as '.jpg'")
295 |         self.btn_jpg.setObjectName("btn_jpg")
296 |         self.btn_layout.addWidget(self.btn_mkv)
297 |         self.btn_layout.addWidget(self.btn_jpg)
298 |         self.main_layout.addWidget(self.title_label)
299 |         self.main_layout.addLayout(self.btn_layout)
300 | 
301 |         self.setLayout(self.main_layout)
302 |         self.btn_mkv.clicked.connect(self.emit_status)
303 |         self.btn_jpg.clicked.connect(self.emit_status)
304 | 
305 |     def emit_status(self):
306 |         if self.sender().objectName() == "btn_mkv":
307 |             all_signals.playback_signals.clip_option.emit("mkv")
308 |         else:
309 |             all_signals.playback_signals.clip_option.emit("jpg")
310 |         self.close()
--------------------------------------------------------------------------------
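# ---------------------------------------------------------------------------
# Editor's appendix (not part of the repository). The commented-out
# extract_video_to_frame() in viewer_playback.py shows the intended entry
# point for VideoClippingDialog; a standalone sketch along the same lines,
# with "capture.mkv" as a placeholder recording path:
#
#     import sys
#     from PySide6.QtWidgets import QApplication
#     from pykinect_recorder.renderer.components.viewer_video_clipping import (
#         VideoClippingDialog,
#     )
#
#     app = QApplication(sys.argv)
#     # Depending on how the app was started, the k4a runtime may need to be
#     # loaded first via initialize_libraries() from pyk4a.pykinect.
#     dialog = VideoClippingDialog("capture.mkv")  # placeholder path
#     dialog.exec()
# ---------------------------------------------------------------------------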