├── .gitignore ├── Install-EnvVars.png ├── Install-Path.png ├── LICENSE ├── README.md ├── addon_config.mk ├── example-bodies-projected ├── addons.make └── src │ ├── main.cpp │ ├── ofApp.cpp │ └── ofApp.h ├── example-bodies-world ├── addons.make ├── bin │ └── data │ │ └── shaders │ │ ├── render.frag │ │ └── render.vert └── src │ ├── main.cpp │ ├── ofApp.cpp │ └── ofApp.h ├── example-multi ├── addons.make ├── bin │ └── data │ │ └── .gitkeep └── src │ ├── main.cpp │ ├── ofApp.cpp │ └── ofApp.h ├── example-pointcloud ├── addons.make └── src │ ├── main.cpp │ ├── ofApp.cpp │ └── ofApp.h ├── example-record ├── addons.make ├── bin │ └── data │ │ └── .gitkeep └── src │ ├── main.cpp │ ├── ofApp.cpp │ └── ofApp.h ├── example-scaled-depth ├── addons.make ├── config.make └── src │ ├── main.cpp │ ├── ofApp.cpp │ └── ofApp.h ├── example-shader ├── addons.make ├── bin │ └── data │ │ └── shaders │ │ ├── render.frag │ │ ├── render.geom │ │ └── render.vert └── src │ ├── main.cpp │ ├── ofApp.cpp │ └── ofApp.h ├── example-streams ├── addons.make └── src │ ├── main.cpp │ ├── ofApp.cpp │ └── ofApp.h ├── example-world-coord ├── addons.make ├── config.make └── src │ ├── main.cpp │ ├── ofApp.cpp │ └── ofApp.h ├── libs └── turbojpeg │ ├── include │ └── turbojpeg.h │ └── lib │ └── vs │ └── x64 │ ├── Debug │ └── turbojpeg-static.lib │ └── Release │ └── turbojpeg-static.lib ├── ofxaddons_thumbnail.png └── src ├── ofxAzureKinect.h └── ofxAzureKinect ├── BodyTracker.cpp ├── BodyTracker.h ├── Device.cpp ├── Device.h ├── Playback.cpp ├── Playback.h ├── Recorder.cpp ├── Recorder.h ├── Stream.cpp ├── Stream.h └── Types.h /.gitignore: -------------------------------------------------------------------------------- 1 | ######################### 2 | # general patterns 3 | ######################### 4 | 5 | docs/html 6 | docs/tagfile.xml 7 | 8 | */bin/* 9 | !*/bin/data/ 10 | 11 | # for bin folder in root 12 | /bin/* 13 | !/bin/data/ 14 | 15 | [Bb]uild/ 16 | [Oo]bj/ 17 | *.o 18 | [Dd]ebug*/ 19 | 
[Rr]elease*/ 20 | *.mode* 21 | *.app/ 22 | *.pyc 23 | .svn/ 24 | 25 | ######################### 26 | # IDE 27 | ######################### 28 | 29 | # XCode 30 | *.pbxuser 31 | *.perspective 32 | *.perspectivev3 33 | *.mode1v3 34 | *.mode2v3 35 | #XCode 4 36 | xcuserdata 37 | *.xcworkspace 38 | 39 | # Code::Blocks 40 | *.depend 41 | *.layout 42 | *.cbTemp 43 | 44 | # Visual Studio 45 | *.sdf 46 | *.opensdf 47 | *.suo 48 | *.pdb 49 | *.ilk 50 | *.aps 51 | .vs 52 | ipch/ 53 | 54 | # Eclipse 55 | .metadata 56 | local.properties 57 | .externalToolBuilders 58 | 59 | # Codelite 60 | *.session 61 | *.tags 62 | *.workspace.* 63 | 64 | ######################### 65 | # operating system 66 | ######################### 67 | 68 | # Linux 69 | *~ 70 | # KDE 71 | .directory 72 | .AppleDouble 73 | 74 | # OSX 75 | .DS_Store 76 | *.swp 77 | *~.nib 78 | # Thumbnails 79 | ._* 80 | 81 | # Windows 82 | # Windows image file caches 83 | Thumbs.db 84 | # Folder config file 85 | Desktop.ini 86 | 87 | #Android 88 | .csettings 89 | 90 | ######################### 91 | # packages 92 | ######################### 93 | 94 | # it's better to unpack these files and commit the raw source 95 | # git has its own built in compression methods 96 | *.7z 97 | *.dmg 98 | *.gz 99 | *.iso 100 | *.jar 101 | *.rar 102 | *.tar 103 | *.zip 104 | 105 | # Logs and databases 106 | *.log 107 | *.sql 108 | *.sqlite 109 | 110 | # Project files 111 | *.sln 112 | *.vcxproj 113 | *vcxproj.* 114 | icon.rc 115 | -------------------------------------------------------------------------------- /Install-EnvVars.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prisonerjohn/ofxAzureKinect/0fd9db1fc6ef616482f285e42c933da3471b9d17/Install-EnvVars.png -------------------------------------------------------------------------------- /Install-Path.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/prisonerjohn/ofxAzureKinect/0fd9db1fc6ef616482f285e42c933da3471b9d17/Install-Path.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2019 Elie Zananiri - prisonerjohn.com 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ofxAzureKinect 2 | 3 | ofxAddon that allows you to use [Azure Kinect](https://azure.microsoft.com/en-us/services/kinect-dk/) in [openFrameworks](https://github.com/openframeworks/openFrameworks). 4 | 5 | * Get depth, color, depth to world, and color in depth frames as `ofPixels` or `ofTexture`. 
6 | * Get point cloud VBO with texture coordinates in depth space. 7 | * Get body tracking skeleton and index texture. 8 | * Use multiple sensors per machine (tested up to 4!) 9 | * Set up sync mode (standalone, master, subordinate) with multiple devices when connected with sync cables. 10 | * Record and playback streams. 11 | * More coming soon... 12 | 13 | ## Installation 14 | 15 | The instructions below are based on the [Azure Kinect Sensor SDK Usage](https://github.com/microsoft/Azure-Kinect-Sensor-SDK/blob/develop/docs/usage.md) page. 16 | 17 | ### Windows 18 | 19 | * Install the [Azure Kinect Sensor SDK](https://docs.microsoft.com/en-us/azure/Kinect-dk/sensor-sdk-download). 20 | * Install the [Azure Kinect Body Tracking SDK](https://docs.microsoft.com/en-us/azure/Kinect-dk/body-sdk-download). 21 | * Add an environment variable for `AZUREKINECT_SDK` and set it to the Sensor SDK installation path (no trailing slash). The default is `C:\Program Files\Azure Kinect SDK v1.4.1`. 22 | * Add an environment variable for `AZUREKINECT_BODY_SDK` and set it to the Body SDK installation path (no trailing slash). The default is `C:\Program Files\Azure Kinect Body Tracking SDK`. 23 | 24 | ![Environment Variables](Install-EnvVars.png) 25 | 26 | * Add the path to the Sensor SDK `bin` folder to the `PATH` variable. The default is `%AZUREKINECT_SDK%\sdk\windows-desktop\amd64\release\bin`. 27 | * Add the path to the Body SDK `bin` folder to the `PATH` variable. The default is `%AZUREKINECT_BODY_SDK%\sdk\windows-desktop\amd64\release\bin`. 28 | * Add the path to the Body SDK `tools` folder to the `PATH` variable. The default is `%AZUREKINECT_BODY_SDK%\tools`. 29 | 30 | ![Path](Install-Path.png) 31 | 32 | * Clone this repository in your openFrameworks `addons` folder. 33 | * You can then use the OF Project Generator to generate projects with the appropriate headers and libraries included. 
✌️ 34 | * Note that if you want to use body tracking, you will need to copy the cuDNN model file `dnn_model_2_0.onnx` from the Body SDK `tools` folder into your project's `bin` folder! 35 | 36 | ### Linux 37 | 38 | * Configure the [Linux Software Repository for Microsoft](https://docs.microsoft.com/en-us/windows-server/administration/linux-package-repository-for-microsoft-software). Note that for Ubuntu you'll need to set up the repo for 18.04 even if you're running newer versions. 39 | * Install the Azure Kinect Sensor SDK packages: `libk4a1.3` `libk4a1.3-dev` `k4a-tools` 40 | * Install the Azure Kinect Body Tracking SDK packages: `libk4abt1.0` `libk4abt1.0-dev` 41 | * Setup udev rules by copying [this file](https://github.com/microsoft/Azure-Kinect-Sensor-SDK/blob/develop/scripts/99-k4a.rules) to `/etc/udev/rules.d/99-k4a.rules`. 42 | * Install [libjpeg-turbo](https://sourceforge.net/projects/libjpeg-turbo/). 43 | * Clone this repository in your openFrameworks `addons` folder. 44 | * You can then use the OF Project Generator to generate projects with the appropriate headers and libraries included. 45 | 🐣 46 | 47 | ## Compatibility 48 | 49 | Tested with: 50 | * openFrameworks 0.10.x / 0.11.x 51 | * Windows 10, Visual Studio 2017 / 2019 52 | * Ubuntu 19.10, Qt Creator 53 | 54 | ## Examples 55 | 56 | Use the OF Project Generator to create the example project files. If everything is installed correctly, it should properly locate all required include and library files. 57 | 58 | * `example-streams` demonstrates how to get depth, color, infrared textures from the device. 59 | * `example-scaled-depth` demonstrates how to remap the depth data to a narrower (probably more useful) range. 60 | * `example-pointCloud` demonstrates how to draw the basic point cloud VBO from the device. 61 | * `example-world-coord` demonstrates how to get the world coordinate from the depth and depth to world data sets. 
62 | * `example-shader` demonstrates how to reconstruct a point cloud using LUTs in a shader. 63 | * `example-bodies` demonstrates how to get the body tracking index texture and skeleton joint information in 3D. 64 | * `example-bodies-projected` demonstrates how to get the body tracking index texture and skeleton joint information in 2D. 65 | * `example-multi` demonstrates how to use multiple devices in a single app. 66 | * `example-record` demonstrates how to record and playback device streams. -------------------------------------------------------------------------------- /addon_config.mk: -------------------------------------------------------------------------------- 1 | # All variables and this file are optional, if they are not present the PG and the 2 | # makefiles will try to parse the correct values from the file system. 3 | # 4 | # Variables that specify exclusions can use % as a wildcard to specify that anything in 5 | # that position will match. A partial path can also be specified to, for example, exclude 6 | # a whole folder from the parsed paths from the file system 7 | # 8 | # Variables can be specified using = or += 9 | # = will clear the contents of that variable both specified from the file or the ones parsed 10 | # from the file system 11 | # += will add the values to the previous ones in the file or the ones parsed from the file 12 | # system 13 | # 14 | # The PG can be used to detect errors in this file, just create a new project with this addon 15 | # and the PG will write to the console the kind of error and in which line it is 16 | 17 | meta: 18 | ADDON_NAME = ofxAzureKinect 19 | ADDON_DESCRIPTION = Use Azure Kinect inside openFrameworks. 
20 | ADDON_AUTHOR = Elie Zananiri 21 | ADDON_TAGS = "computer vision" "3D sensing" "kinect" "azure" 22 | ADDON_URL = https://github.com/prisonerjohn/ofxAzureKinect 23 | 24 | common: 25 | # dependencies with other addons, a list of them separated by spaces 26 | # or use += in several lines 27 | # ADDON_DEPENDENCIES = 28 | 29 | # include search paths, this will be usually parsed from the file system 30 | # but if the addon or addon libraries need special search paths they can be 31 | # specified here separated by spaces or one per line using += 32 | # ADDON_INCLUDES = 33 | 34 | # any special flag that should be passed to the compiler when using this 35 | # addon 36 | # ADDON_CFLAGS = 37 | 38 | # any special flag that should be passed to the linker when using this 39 | # addon, also used for system libraries with -lname 40 | # ADDON_LDFLAGS = 41 | 42 | # linux only, any library that should be included in the project using 43 | # pkg-config 44 | # ADDON_PKG_CONFIG_LIBRARIES = 45 | 46 | # osx/iOS only, any framework that should be included in the project 47 | # ADDON_FRAMEWORKS = 48 | 49 | # source files, these will be usually parsed from the file system looking 50 | # in the src folders in libs and the root of the addon. if your addon needs 51 | # to include files in different places or a different set of files per platform 52 | # they can be specified here 53 | # ADDON_SOURCES = 54 | 55 | # some addons need resources to be copied to the bin/data folder of the project 56 | # specify here any files that need to be copied, you can use wildcards like * and ? 
57 | # ADDON_DATA = 58 | 59 | # when parsing the file system looking for libraries exclude this for all or 60 | # a specific platform 61 | # ADDON_LIBS_EXCLUDE = 62 | 63 | vs: 64 | ADDON_INCLUDES += $(AZUREKINECT_SDK)\sdk\include 65 | ADDON_INCLUDES += $(AZUREKINECT_BODY_SDK)\sdk\include 66 | ADDON_LIBS += $(AZUREKINECT_SDK)\sdk\windows-desktop\amd64\release\lib\k4a.lib 67 | ADDON_LIBS += $(AZUREKINECT_SDK)\sdk\windows-desktop\amd64\release\lib\k4arecord.lib 68 | ADDON_LIBS += $(AZUREKINECT_BODY_SDK)\sdk\windows-desktop\amd64\release\lib\k4abt.lib 69 | 70 | linux64: 71 | ADDON_INCLUDES += /usr/include 72 | ADDON_INCLUDES += /usr/include/k4a 73 | ADDON_LIBS += /usr/lib/libk4abt.so 74 | ADDON_LIBS += /usr/lib/x86_64-linux-gnu/libk4a.so 75 | ADDON_LIBS += /usr/lib/x86_64-linux-gnu/libk4arecord.so 76 | ADDON_LIBS += /opt/libjpeg-turbo/lib64/libturbojpeg.a 77 | 78 | linux: 79 | 80 | linuxarmv6l: 81 | #TODO needs EngineGLFW.cpp exclude 82 | 83 | 84 | linuxarmv7l: 85 | #TODO needs EngineGLFW.cpp exclude 86 | 87 | msys2: 88 | 89 | android/armeabi: 90 | 91 | android/armeabi-v7a: 92 | 93 | ios: 94 | # osx/iOS only, any framework that should be included in the project 95 | 96 | 97 | -------------------------------------------------------------------------------- /example-bodies-projected/addons.make: -------------------------------------------------------------------------------- 1 | ofxAzureKinect 2 | -------------------------------------------------------------------------------- /example-bodies-projected/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | int main() 4 | { 5 | ofGLFWWindowSettings settings; 6 | settings.setGLVersion(3, 2); 7 | settings.setSize(1280, 720); 8 | ofCreateWindow(settings); 9 | 10 | ofRunApp(new ofApp()); 11 | } 12 | -------------------------------------------------------------------------------- /example-bodies-projected/src/ofApp.cpp: 
-------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup() 5 | { 6 | //ofSetLogLevel(OF_LOG_VERBOSE); 7 | 8 | ofLogNotice(__FUNCTION__) << "Found " << ofxAzureKinect::Device::getInstalledCount() << " installed devices."; 9 | 10 | if (kinectDevice.open()) 11 | { 12 | auto deviceSettings = ofxAzureKinect::DeviceSettings(); 13 | deviceSettings.syncImages = false; 14 | deviceSettings.depthMode = K4A_DEPTH_MODE_NFOV_UNBINNED; 15 | deviceSettings.updateIr = false; 16 | deviceSettings.updateColor = true; 17 | deviceSettings.colorResolution = K4A_COLOR_RESOLUTION_720P; 18 | deviceSettings.updateWorld = true; 19 | deviceSettings.updateVbo = false; 20 | kinectDevice.startCameras(deviceSettings); 21 | 22 | auto bodyTrackerSettings = ofxAzureKinect::BodyTrackerSettings(); 23 | bodyTrackerSettings.sensorOrientation = K4ABT_SENSOR_ORIENTATION_DEFAULT; 24 | //bodyTrackerSettings.processingMode = K4ABT_TRACKER_PROCESSING_MODE_CPU; 25 | bodyTrackerSettings.imageType = K4A_CALIBRATION_TYPE_COLOR; 26 | bodyTrackerSettings.updateBodiesImage = true; 27 | kinectDevice.startBodyTracker(bodyTrackerSettings); 28 | } 29 | } 30 | 31 | //-------------------------------------------------------------- 32 | void ofApp::exit() 33 | { 34 | kinectDevice.close(); 35 | } 36 | 37 | //-------------------------------------------------------------- 38 | void ofApp::update() 39 | { 40 | 41 | } 42 | 43 | //-------------------------------------------------------------- 44 | void ofApp::draw() 45 | { 46 | ofBackground(0); 47 | 48 | if (kinectDevice.isStreaming()) 49 | { 50 | // Draw the body index texture. 51 | // The pixels are not black, their color equals the body ID which is just a low number. 52 | kinectDevice.getBodyIndexTex().draw(0, 0); 53 | 54 | // Draw the projected joints onto the image. 
55 | const auto& skeletons = kinectDevice.getBodySkeletons(); 56 | for (int i = 0; i < skeletons.size(); ++i) 57 | { 58 | for (int j = 0; j < K4ABT_JOINT_COUNT; ++j) 59 | { 60 | switch (skeletons[i].joints[j].confidenceLevel) 61 | { 62 | case K4ABT_JOINT_CONFIDENCE_MEDIUM: 63 | ofSetColor(ofColor::green); 64 | break; 65 | case K4ABT_JOINT_CONFIDENCE_LOW: 66 | ofSetColor(ofColor::yellow); 67 | break; 68 | case K4ABT_JOINT_CONFIDENCE_NONE: 69 | default: 70 | ofSetColor(ofColor::red); 71 | break; 72 | } 73 | ofDrawCircle(skeletons[i].joints[j].projPos, 5.0f); 74 | } 75 | } 76 | ofSetColor(ofColor::white); 77 | } 78 | 79 | std::ostringstream oss; 80 | oss << ofToString(ofGetFrameRate(), 2) + " FPS"; 81 | ofDrawBitmapStringHighlight(oss.str(), 10, 20); 82 | } 83 | 84 | //-------------------------------------------------------------- 85 | void ofApp::keyPressed(int key){ 86 | 87 | } 88 | 89 | //-------------------------------------------------------------- 90 | void ofApp::keyReleased(int key){ 91 | 92 | } 93 | 94 | //-------------------------------------------------------------- 95 | void ofApp::mouseMoved(int x, int y ){ 96 | 97 | } 98 | 99 | //-------------------------------------------------------------- 100 | void ofApp::mouseDragged(int x, int y, int button){ 101 | 102 | } 103 | 104 | //-------------------------------------------------------------- 105 | void ofApp::mousePressed(int x, int y, int button){ 106 | 107 | } 108 | 109 | //-------------------------------------------------------------- 110 | void ofApp::mouseReleased(int x, int y, int button){ 111 | 112 | } 113 | 114 | //-------------------------------------------------------------- 115 | void ofApp::mouseEntered(int x, int y){ 116 | 117 | } 118 | 119 | //-------------------------------------------------------------- 120 | void ofApp::mouseExited(int x, int y){ 121 | 122 | } 123 | 124 | //-------------------------------------------------------------- 125 | void ofApp::windowResized(int w, int h){ 126 | 127 
| } 128 | 129 | //-------------------------------------------------------------- 130 | void ofApp::gotMessage(ofMessage msg){ 131 | 132 | } 133 | 134 | //-------------------------------------------------------------- 135 | void ofApp::dragEvent(ofDragInfo dragInfo){ 136 | 137 | } 138 | -------------------------------------------------------------------------------- /example-bodies-projected/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | #include "ofxAzureKinect.h" 6 | 7 | class ofApp 8 | : public ofBaseApp 9 | { 10 | public: 11 | void setup(); 12 | void exit(); 13 | 14 | void update(); 15 | void draw(); 16 | 17 | void keyPressed(int key); 18 | void keyReleased(int key); 19 | void mouseMoved(int x, int y); 20 | void mouseDragged(int x, int y, int button); 21 | void mousePressed(int x, int y, int button); 22 | void mouseReleased(int x, int y, int button); 23 | void mouseEntered(int x, int y); 24 | void mouseExited(int x, int y); 25 | void windowResized(int w, int h); 26 | void dragEvent(ofDragInfo dragInfo); 27 | void gotMessage(ofMessage msg); 28 | 29 | private: 30 | ofxAzureKinect::Device kinectDevice; 31 | }; 32 | -------------------------------------------------------------------------------- /example-bodies-world/addons.make: -------------------------------------------------------------------------------- 1 | ofxAzureKinect 2 | -------------------------------------------------------------------------------- /example-bodies-world/bin/data/shaders/render.frag: -------------------------------------------------------------------------------- 1 | #version 150 2 | 3 | // Custom attributes. 
4 | 5 | in vec4 vColor; 6 | 7 | out vec4 fragColor; 8 | 9 | void main() 10 | { 11 | if (vColor.a == 0) 12 | { 13 | discard; 14 | } 15 | 16 | fragColor = vColor; 17 | } -------------------------------------------------------------------------------- /example-bodies-world/bin/data/shaders/render.vert: -------------------------------------------------------------------------------- 1 | #version 150 2 | 3 | // OF built-in attributes. 4 | 5 | uniform mat4 modelViewProjectionMatrix; 6 | 7 | // Custom attributes. 8 | 9 | #define BODY_INDEX_MAP_BACKGROUND 255 10 | 11 | const vec4[6] COLORS = vec4[] 12 | ( 13 | vec4(211 / 255.0, 248 / 255.0, 226 / 255.0, 1.0), 14 | vec4(228 / 255.0, 193 / 255.0, 249 / 255.0, 1.0), 15 | vec4(237 / 255.0, 231 / 255.0, 177 / 255.0, 1.0), 16 | vec4(246 / 255.0, 148 / 255.0, 193 / 255.0, 1.0), 17 | vec4(169 / 255.0, 222 / 255.0, 249 / 255.0, 1.0), 18 | vec4(255 / 255.0, 135 / 255.0, 111 / 255.0, 1.0) 19 | ); 20 | 21 | uniform sampler2DRect uDepthTex; // Sampler for the depth space data 22 | uniform sampler2DRect uBodyIndexTex; // Sampler for the body index data 23 | uniform sampler2DRect uWorldTex; // Transformation from kinect depth space to kinect world space 24 | 25 | uniform ivec2 uFrameSize; 26 | 27 | uniform int[6] uBodyIDs; 28 | 29 | out vec4 vColor; 30 | 31 | void main() 32 | { 33 | vec2 texCoord = vec2(gl_InstanceID % uFrameSize.x, gl_InstanceID / uFrameSize.x); 34 | 35 | float depth = texture(uDepthTex, texCoord).x; 36 | int bodyIndex = int(texture(uBodyIndexTex, texCoord).x * 255); 37 | vec4 ray = texture(uWorldTex, texCoord); 38 | 39 | if (depth != 0 && 40 | bodyIndex != BODY_INDEX_MAP_BACKGROUND && 41 | ray.x != 0 && ray.y != 0) 42 | { 43 | int bodyID = uBodyIDs[bodyIndex]; 44 | vColor = COLORS[bodyID % 6]; 45 | } 46 | else 47 | { 48 | vColor = vec4(0.0); 49 | } 50 | 51 | vec4 posWorld = vec4(1); 52 | posWorld.z = depth * 65535.0; // Remap to float range. 
53 | posWorld.x = ray.x * posWorld.z; 54 | posWorld.y = ray.y * posWorld.z; 55 | 56 | gl_Position = modelViewProjectionMatrix * posWorld; 57 | } -------------------------------------------------------------------------------- /example-bodies-world/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | int main() 4 | { 5 | ofGLFWWindowSettings settings; 6 | settings.setGLVersion(3, 2); 7 | settings.setSize(1280, 720); 8 | ofCreateWindow(settings); 9 | 10 | ofRunApp(new ofApp()); 11 | } 12 | -------------------------------------------------------------------------------- /example-bodies-world/src/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup() 5 | { 6 | //ofSetLogLevel(OF_LOG_VERBOSE); 7 | 8 | ofLogNotice(__FUNCTION__) << "Found " << ofxAzureKinect::Device::getInstalledCount() << " installed devices."; 9 | 10 | if (kinectDevice.open()) 11 | { 12 | auto deviceSettings = ofxAzureKinect::DeviceSettings(); 13 | deviceSettings.syncImages = false; 14 | deviceSettings.depthMode = K4A_DEPTH_MODE_NFOV_UNBINNED; 15 | deviceSettings.updateIr = false; 16 | deviceSettings.updateColor = true; 17 | //deviceSettings.colorResolution = K4A_COLOR_RESOLUTION_1080P; 18 | deviceSettings.updateWorld = true; 19 | deviceSettings.updateVbo = false; 20 | kinectDevice.startCameras(deviceSettings); 21 | 22 | auto bodyTrackerSettings = ofxAzureKinect::BodyTrackerSettings(); 23 | bodyTrackerSettings.sensorOrientation = K4ABT_SENSOR_ORIENTATION_DEFAULT; 24 | //bodyTrackerSettings.processingMode = K4ABT_TRACKER_PROCESSING_MODE_CPU; 25 | kinectDevice.startBodyTracker(bodyTrackerSettings); 26 | } 27 | 28 | // Load shader. 
29 | auto shaderSettings = ofShaderSettings(); 30 | shaderSettings.shaderFiles[GL_VERTEX_SHADER] = "shaders/render.vert"; 31 | shaderSettings.shaderFiles[GL_FRAGMENT_SHADER] = "shaders/render.frag"; 32 | shaderSettings.intDefines["BODY_INDEX_MAP_BACKGROUND"] = K4ABT_BODY_INDEX_MAP_BACKGROUND; 33 | shaderSettings.bindDefaults = true; 34 | if (shader.setup(shaderSettings)) 35 | { 36 | ofLogNotice(__FUNCTION__) << "Success loading shader!"; 37 | } 38 | 39 | // Setup vbo. 40 | std::vector verts(1); 41 | pointsVbo.setVertexData(verts.data(), verts.size(), GL_STATIC_DRAW); 42 | } 43 | 44 | //-------------------------------------------------------------- 45 | void ofApp::exit() 46 | { 47 | kinectDevice.close(); 48 | } 49 | 50 | //-------------------------------------------------------------- 51 | void ofApp::update() 52 | { 53 | 54 | } 55 | 56 | //-------------------------------------------------------------- 57 | void ofApp::draw() 58 | { 59 | ofBackground(0); 60 | 61 | camera.begin(); 62 | { 63 | ofPushMatrix(); 64 | { 65 | ofRotateXDeg(180); 66 | 67 | ofEnableDepthTest(); 68 | 69 | const auto& bodySkeletons = kinectDevice.getBodySkeletons(); 70 | 71 | constexpr int kMaxBodies = 6; 72 | int bodyIDs[kMaxBodies]; 73 | int i = 0; 74 | while (i < bodySkeletons.size()) 75 | { 76 | bodyIDs[i] = bodySkeletons[i].id; 77 | ++i; 78 | } 79 | while (i < kMaxBodies) 80 | { 81 | bodyIDs[i] = 0; 82 | ++i; 83 | } 84 | 85 | shader.begin(); 86 | { 87 | shader.setUniformTexture("uDepthTex", kinectDevice.getDepthTex(), 1); 88 | shader.setUniformTexture("uBodyIndexTex", kinectDevice.getBodyIndexTex(), 2); 89 | shader.setUniformTexture("uWorldTex", kinectDevice.getDepthToWorldTex(), 3); 90 | shader.setUniform2i("uFrameSize", kinectDevice.getDepthTex().getWidth(), kinectDevice.getDepthTex().getHeight()); 91 | shader.setUniform1iv("uBodyIDs", bodyIDs, kMaxBodies); 92 | 93 | int numPoints = kinectDevice.getDepthTex().getWidth() * kinectDevice.getDepthTex().getHeight(); 94 | 
pointsVbo.drawInstanced(GL_POINTS, 0, 1, numPoints); 95 | } 96 | shader.end(); 97 | 98 | ofDisableDepthTest(); 99 | 100 | for (const auto& skeleton : bodySkeletons) 101 | { 102 | // Draw joints. 103 | for (int i = 0; i < K4ABT_JOINT_COUNT; ++i) 104 | { 105 | auto joint = skeleton.joints[i]; 106 | ofPushMatrix(); 107 | { 108 | glm::mat4 transform = glm::translate(joint.position) * glm::toMat4(joint.orientation); 109 | ofMultMatrix(transform); 110 | 111 | ofDrawAxis(50.0f); 112 | 113 | if (joint.confidenceLevel >= K4ABT_JOINT_CONFIDENCE_MEDIUM) 114 | { 115 | ofSetColor(ofColor::green); 116 | } 117 | else if (joint.confidenceLevel >= K4ABT_JOINT_CONFIDENCE_LOW) 118 | { 119 | ofSetColor(ofColor::yellow); 120 | } 121 | else 122 | { 123 | ofSetColor(ofColor::red); 124 | } 125 | 126 | ofDrawSphere(10.0f); 127 | } 128 | ofPopMatrix(); 129 | } 130 | 131 | // Draw connections. 132 | skeletonMesh.setMode(OF_PRIMITIVE_LINES); 133 | auto& vertices = skeletonMesh.getVertices(); 134 | vertices.resize(50); 135 | int vdx = 0; 136 | 137 | // Spine. 138 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_PELVIS].position); 139 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_SPINE_NAVEL].position); 140 | 141 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_SPINE_NAVEL].position); 142 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_SPINE_CHEST].position); 143 | 144 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_SPINE_CHEST].position); 145 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_NECK].position); 146 | 147 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_NECK].position); 148 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_HEAD].position); 149 | 150 | // Head. 
151 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_HEAD].position); 152 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_NOSE].position); 153 | 154 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_NOSE].position); 155 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_EYE_LEFT].position); 156 | 157 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_EYE_LEFT].position); 158 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_EAR_LEFT].position); 159 | 160 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_NOSE].position); 161 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_EYE_RIGHT].position); 162 | 163 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_EYE_RIGHT].position); 164 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_EAR_RIGHT].position); 165 | 166 | // Left Leg. 167 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_PELVIS].position); 168 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_HIP_LEFT].position); 169 | 170 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_HIP_LEFT].position); 171 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_KNEE_LEFT].position); 172 | 173 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_KNEE_LEFT].position); 174 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_ANKLE_LEFT].position); 175 | 176 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_ANKLE_LEFT].position); 177 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_FOOT_LEFT].position); 178 | 179 | // Right leg. 
180 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_PELVIS].position); 181 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_HIP_RIGHT].position); 182 | 183 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_HIP_RIGHT].position); 184 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_KNEE_RIGHT].position); 185 | 186 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_KNEE_RIGHT].position); 187 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_ANKLE_RIGHT].position); 188 | 189 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_ANKLE_RIGHT].position); 190 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_FOOT_RIGHT].position); 191 | 192 | // Left arm. 193 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_NECK].position); 194 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_CLAVICLE_LEFT].position); 195 | 196 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_CLAVICLE_LEFT].position); 197 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_SHOULDER_LEFT].position); 198 | 199 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_SHOULDER_LEFT].position); 200 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_ELBOW_LEFT].position); 201 | 202 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_ELBOW_LEFT].position); 203 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_WRIST_LEFT].position); 204 | 205 | // Right arm. 
206 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_NECK].position); 207 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_CLAVICLE_RIGHT].position); 208 | 209 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_CLAVICLE_RIGHT].position); 210 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_SHOULDER_RIGHT].position); 211 | 212 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_SHOULDER_RIGHT].position); 213 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_ELBOW_RIGHT].position); 214 | 215 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_ELBOW_RIGHT].position); 216 | vertices[vdx++] = toGlm(skeleton.joints[K4ABT_JOINT_WRIST_RIGHT].position); 217 | 218 | skeletonMesh.draw(); 219 | } 220 | } 221 | ofPopMatrix(); 222 | } 223 | camera.end(); 224 | 225 | std::ostringstream oss; 226 | oss << ofToString(ofGetFrameRate(), 2) + " FPS" << std::endl; 227 | oss << "Joint Smoothing: " << kinectDevice.getBodyTracker().jointSmoothing; 228 | ofDrawBitmapStringHighlight(oss.str(), 10, 20); 229 | } 230 | 231 | //-------------------------------------------------------------- 232 | void ofApp::keyPressed(int key){ 233 | 234 | } 235 | 236 | //-------------------------------------------------------------- 237 | void ofApp::keyReleased(int key){ 238 | 239 | } 240 | 241 | //-------------------------------------------------------------- 242 | void ofApp::mouseMoved(int x, int y ){ 243 | 244 | } 245 | 246 | //-------------------------------------------------------------- 247 | void ofApp::mouseDragged(int x, int y, int button) 248 | { 249 | if (button == 1) 250 | { 251 | kinectDevice.getBodyTracker().jointSmoothing = ofMap(x, 0, ofGetWidth(), 0.0f, 1.0f, true); 252 | } 253 | } 254 | 255 | //-------------------------------------------------------------- 256 | void ofApp::mousePressed(int x, int y, int button){ 257 | 258 | } 259 | 260 | //-------------------------------------------------------------- 261 | void ofApp::mouseReleased(int x, int y, int button){ 262 | 
263 | } 264 | 265 | //-------------------------------------------------------------- 266 | void ofApp::mouseEntered(int x, int y){ 267 | 268 | } 269 | 270 | //-------------------------------------------------------------- 271 | void ofApp::mouseExited(int x, int y){ 272 | 273 | } 274 | 275 | //-------------------------------------------------------------- 276 | void ofApp::windowResized(int w, int h){ 277 | 278 | } 279 | 280 | //-------------------------------------------------------------- 281 | void ofApp::gotMessage(ofMessage msg){ 282 | 283 | } 284 | 285 | //-------------------------------------------------------------- 286 | void ofApp::dragEvent(ofDragInfo dragInfo){ 287 | 288 | } 289 | -------------------------------------------------------------------------------- /example-bodies-world/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | #include "ofxAzureKinect.h" 6 | 7 | class ofApp 8 | : public ofBaseApp 9 | { 10 | public: 11 | void setup(); 12 | void exit(); 13 | 14 | void update(); 15 | void draw(); 16 | 17 | void keyPressed(int key); 18 | void keyReleased(int key); 19 | void mouseMoved(int x, int y); 20 | void mouseDragged(int x, int y, int button); 21 | void mousePressed(int x, int y, int button); 22 | void mouseReleased(int x, int y, int button); 23 | void mouseEntered(int x, int y); 24 | void mouseExited(int x, int y); 25 | void windowResized(int w, int h); 26 | void dragEvent(ofDragInfo dragInfo); 27 | void gotMessage(ofMessage msg); 28 | 29 | private: 30 | ofxAzureKinect::Device kinectDevice; 31 | 32 | ofEasyCam camera; 33 | 34 | ofVbo pointsVbo; 35 | ofShader shader; 36 | 37 | ofVboMesh skeletonMesh; 38 | }; 39 | -------------------------------------------------------------------------------- /example-multi/addons.make: -------------------------------------------------------------------------------- 1 | ofxAzureKinect 2 | 
-------------------------------------------------------------------------------- /example-multi/bin/data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prisonerjohn/ofxAzureKinect/0fd9db1fc6ef616482f285e42c933da3471b9d17/example-multi/bin/data/.gitkeep -------------------------------------------------------------------------------- /example-multi/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | int main() 4 | { 5 | ofGLFWWindowSettings settings; 6 | settings.setGLVersion(3, 2); 7 | settings.setSize(640 * 2, 360 + 320); 8 | ofCreateWindow(settings); 9 | 10 | ofRunApp(new ofApp()); 11 | } 12 | -------------------------------------------------------------------------------- /example-multi/src/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup() 5 | { 6 | //ofSetLogLevel(OF_LOG_VERBOSE); 7 | ofSetVerticalSync(false); 8 | 9 | ofLogNotice(__FUNCTION__) << "Found " << ofxAzureKinect::Device::getInstalledCount() << " installed devices."; 10 | 11 | // The following will start all connected devices as standalone (no sync). 12 | //setupStandalone(); 13 | 14 | // The following will assign sync to devices based on serial number. 15 | setupMasterSubordinate(); 16 | 17 | // Add FPS counter for each device. 
18 | fpsCounters.resize(kinectDevices.size()); 19 | } 20 | 21 | //-------------------------------------------------------------- 22 | void ofApp::setupStandalone() 23 | { 24 | int numConnected = ofxAzureKinect::Device::getInstalledCount(); 25 | 26 | auto kinectSettings = ofxAzureKinect::DeviceSettings(); 27 | kinectSettings.colorResolution = K4A_COLOR_RESOLUTION_720P; 28 | kinectSettings.syncImages = true; 29 | kinectSettings.updateWorld = false; 30 | 31 | for (int i = 0; i < numConnected; ++i) 32 | { 33 | auto device = std::make_shared(); 34 | if (device->open(i)) 35 | { 36 | device->startCameras(kinectSettings); 37 | 38 | kinectDevices.push_back(device); 39 | } 40 | } 41 | } 42 | 43 | //-------------------------------------------------------------- 44 | void ofApp::setupMasterSubordinate() 45 | { 46 | // Make sure to replace the following serials by the ones on your devices. 47 | const std::string serialMaster = "000224694712"; 48 | const std::string serialSubordinate = "000569192412"; 49 | 50 | auto kinectSettings = ofxAzureKinect::DeviceSettings(); 51 | kinectSettings.colorResolution = K4A_COLOR_RESOLUTION_720P; 52 | kinectSettings.syncImages = true; 53 | kinectSettings.updateWorld = false; 54 | 55 | // Open Master device. 56 | { 57 | auto device = std::make_shared(); 58 | if (device->open(serialMaster)) 59 | { 60 | kinectSettings.wiredSyncMode = K4A_WIRED_SYNC_MODE_MASTER; 61 | device->startCameras(kinectSettings); 62 | 63 | kinectDevices.push_back(device); 64 | } 65 | } 66 | 67 | // Open Subordinate device. 
68 | { 69 | auto device = std::make_shared(); 70 | if (device->open(serialSubordinate)) 71 | { 72 | kinectSettings.wiredSyncMode = K4A_WIRED_SYNC_MODE_SUBORDINATE; 73 | //kinectSettings.subordinateDelayUsec = 100; 74 | device->startCameras(kinectSettings); 75 | 76 | kinectDevices.push_back(device); 77 | } 78 | } 79 | } 80 | 81 | //-------------------------------------------------------------- 82 | void ofApp::exit() 83 | { 84 | for (auto device : kinectDevices) 85 | { 86 | device->close(); 87 | device.reset(); 88 | } 89 | kinectDevices.clear(); 90 | } 91 | 92 | //-------------------------------------------------------------- 93 | void ofApp::update() 94 | { 95 | for (int i = 0;i < kinectDevices.size(); ++i) 96 | { 97 | if (kinectDevices[i]->isFrameNew()) 98 | { 99 | fpsCounters[i].newFrame(); 100 | } 101 | } 102 | } 103 | 104 | //-------------------------------------------------------------- 105 | void ofApp::draw() 106 | { 107 | ofBackground(128); 108 | 109 | int x = 0; 110 | for (int i = 0; i < kinectDevices.size(); ++i) 111 | { 112 | auto device = kinectDevices[i]; 113 | if (device->isStreaming()) 114 | { 115 | device->getColorTex().draw(x, 0, 640, 360); 116 | device->getDepthTex().draw(x, 360, 320, 320); 117 | device->getIrTex().draw(x + 320, 360, 320, 320); 118 | 119 | ofDrawBitmapStringHighlight(ofToString(fpsCounters[i].getFps(), 2) + " FPS", x + 10, 350, device->isFrameNew() ? 
ofColor::red : ofColor::black); 120 | 121 | x += 640; 122 | } 123 | } 124 | 125 | ofDrawBitmapStringHighlight(ofToString(ofGetFrameRate(), 2) + " FPS", 10, 20); 126 | } 127 | 128 | //-------------------------------------------------------------- 129 | void ofApp::keyPressed(int key){ 130 | 131 | } 132 | 133 | //-------------------------------------------------------------- 134 | void ofApp::keyReleased(int key){ 135 | 136 | } 137 | 138 | //-------------------------------------------------------------- 139 | void ofApp::mouseMoved(int x, int y ){ 140 | 141 | } 142 | 143 | //-------------------------------------------------------------- 144 | void ofApp::mouseDragged(int x, int y, int button){ 145 | 146 | } 147 | 148 | //-------------------------------------------------------------- 149 | void ofApp::mousePressed(int x, int y, int button){ 150 | 151 | } 152 | 153 | //-------------------------------------------------------------- 154 | void ofApp::mouseReleased(int x, int y, int button){ 155 | 156 | } 157 | 158 | //-------------------------------------------------------------- 159 | void ofApp::mouseEntered(int x, int y){ 160 | 161 | } 162 | 163 | //-------------------------------------------------------------- 164 | void ofApp::mouseExited(int x, int y){ 165 | 166 | } 167 | 168 | //-------------------------------------------------------------- 169 | void ofApp::windowResized(int w, int h){ 170 | 171 | } 172 | 173 | //-------------------------------------------------------------- 174 | void ofApp::gotMessage(ofMessage msg){ 175 | 176 | } 177 | 178 | //-------------------------------------------------------------- 179 | void ofApp::dragEvent(ofDragInfo dragInfo){ 180 | 181 | } 182 | -------------------------------------------------------------------------------- /example-multi/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | #include "ofxAzureKinect.h" 6 | 7 | class ofApp 
8 | : public ofBaseApp 9 | { 10 | public: 11 | void setup(); 12 | void exit(); 13 | 14 | void update(); 15 | void draw(); 16 | 17 | void keyPressed(int key); 18 | void keyReleased(int key); 19 | void mouseMoved(int x, int y); 20 | void mouseDragged(int x, int y, int button); 21 | void mousePressed(int x, int y, int button); 22 | void mouseReleased(int x, int y, int button); 23 | void mouseEntered(int x, int y); 24 | void mouseExited(int x, int y); 25 | void windowResized(int w, int h); 26 | void dragEvent(ofDragInfo dragInfo); 27 | void gotMessage(ofMessage msg); 28 | 29 | void setupStandalone(); 30 | void setupMasterSubordinate(); 31 | 32 | private: 33 | std::vector> kinectDevices; 34 | std::vector fpsCounters; 35 | }; 36 | -------------------------------------------------------------------------------- /example-pointcloud/addons.make: -------------------------------------------------------------------------------- 1 | ofxAzureKinect 2 | -------------------------------------------------------------------------------- /example-pointcloud/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | int main() 4 | { 5 | ofGLFWWindowSettings settings; 6 | settings.setGLVersion(3, 2); 7 | settings.setSize(1280, 720); 8 | ofCreateWindow(settings); 9 | 10 | ofRunApp(new ofApp()); 11 | } 12 | -------------------------------------------------------------------------------- /example-pointcloud/src/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | // Uncomment this line to force the VBO to use the depth image size (max num points 512x512). 4 | // By default, it will use the color image size if available (max num points 1920x1080). 
5 | //#define FORCE_VBO_DEPTH_SIZE 1 6 | 7 | //-------------------------------------------------------------- 8 | void ofApp::setup() 9 | { 10 | //ofSetLogLevel(OF_LOG_VERBOSE); 11 | 12 | ofLogNotice(__FUNCTION__) << "Found " << ofxAzureKinect::Device::getInstalledCount() << " installed devices."; 13 | 14 | if (kinectDevice.open()) 15 | { 16 | auto kinectSettings = ofxAzureKinect::DeviceSettings(); 17 | kinectSettings.updateIr = false; 18 | kinectSettings.updateColor = true; 19 | kinectSettings.colorResolution = K4A_COLOR_RESOLUTION_1080P; 20 | kinectSettings.updateVbo = true; 21 | #if FORCE_VBO_DEPTH_SIZE 22 | kinectSettings.forceVboToDepthSize = true; 23 | #endif 24 | kinectDevice.startCameras(kinectSettings); 25 | } 26 | } 27 | 28 | //-------------------------------------------------------------- 29 | void ofApp::exit() 30 | { 31 | kinectDevice.close(); 32 | } 33 | 34 | //-------------------------------------------------------------- 35 | void ofApp::update() 36 | { 37 | 38 | } 39 | 40 | //-------------------------------------------------------------- 41 | void ofApp::draw() 42 | { 43 | ofBackground(128); 44 | 45 | if (kinectDevice.isStreaming()) 46 | { 47 | cam.begin(); 48 | ofEnableDepthTest(); 49 | { 50 | ofDrawAxis(1.0f); 51 | 52 | ofPushMatrix(); 53 | { 54 | ofRotateXDeg(180); 55 | 56 | #if FORCE_VBO_DEPTH_SIZE 57 | const auto& colorTex = kinectDevice.getColorInDepthTex(); 58 | #else 59 | const auto& colorTex = kinectDevice.getColorTex(); 60 | #endif 61 | if (colorTex.isAllocated()) 62 | { 63 | colorTex.bind(); 64 | } 65 | kinectDevice.getPointCloudVbo().draw( 66 | GL_POINTS, 67 | 0, kinectDevice.getPointCloudVbo().getNumVertices()); 68 | if (colorTex.isAllocated()) 69 | { 70 | colorTex.unbind(); 71 | } 72 | } 73 | ofPopMatrix(); 74 | } 75 | ofDisableDepthTest(); 76 | cam.end(); 77 | } 78 | 79 | std::ostringstream oss; 80 | oss << ofToString(ofGetFrameRate(), 2) << " FPS" << std::endl 81 | << kinectDevice.getPointCloudVbo().getNumVertices() << " Points"; 82 
| ofDrawBitmapStringHighlight(oss.str(), 10, 20); 83 | } 84 | 85 | //-------------------------------------------------------------- 86 | void ofApp::keyPressed(int key){ 87 | 88 | } 89 | 90 | //-------------------------------------------------------------- 91 | void ofApp::keyReleased(int key){ 92 | 93 | } 94 | 95 | //-------------------------------------------------------------- 96 | void ofApp::mouseMoved(int x, int y ){ 97 | 98 | } 99 | 100 | //-------------------------------------------------------------- 101 | void ofApp::mouseDragged(int x, int y, int button){ 102 | 103 | } 104 | 105 | //-------------------------------------------------------------- 106 | void ofApp::mousePressed(int x, int y, int button){ 107 | 108 | } 109 | 110 | //-------------------------------------------------------------- 111 | void ofApp::mouseReleased(int x, int y, int button){ 112 | 113 | } 114 | 115 | //-------------------------------------------------------------- 116 | void ofApp::mouseEntered(int x, int y){ 117 | 118 | } 119 | 120 | //-------------------------------------------------------------- 121 | void ofApp::mouseExited(int x, int y){ 122 | 123 | } 124 | 125 | //-------------------------------------------------------------- 126 | void ofApp::windowResized(int w, int h){ 127 | 128 | } 129 | 130 | //-------------------------------------------------------------- 131 | void ofApp::gotMessage(ofMessage msg){ 132 | 133 | } 134 | 135 | //-------------------------------------------------------------- 136 | void ofApp::dragEvent(ofDragInfo dragInfo){ 137 | 138 | } 139 | -------------------------------------------------------------------------------- /example-pointcloud/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | #include "ofxAzureKinect.h" 6 | 7 | class ofApp 8 | : public ofBaseApp 9 | { 10 | public: 11 | void setup(); 12 | void exit(); 13 | 14 | void update(); 15 | void draw(); 
16 | 17 | void keyPressed(int key); 18 | void keyReleased(int key); 19 | void mouseMoved(int x, int y); 20 | void mouseDragged(int x, int y, int button); 21 | void mousePressed(int x, int y, int button); 22 | void mouseReleased(int x, int y, int button); 23 | void mouseEntered(int x, int y); 24 | void mouseExited(int x, int y); 25 | void windowResized(int w, int h); 26 | void dragEvent(ofDragInfo dragInfo); 27 | void gotMessage(ofMessage msg); 28 | 29 | private: 30 | ofxAzureKinect::Device kinectDevice; 31 | 32 | ofEasyCam cam; 33 | }; 34 | -------------------------------------------------------------------------------- /example-record/addons.make: -------------------------------------------------------------------------------- 1 | ofxAzureKinect 2 | -------------------------------------------------------------------------------- /example-record/bin/data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prisonerjohn/ofxAzureKinect/0fd9db1fc6ef616482f285e42c933da3471b9d17/example-record/bin/data/.gitkeep -------------------------------------------------------------------------------- /example-record/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | int main() 4 | { 5 | ofGLFWWindowSettings settings; 6 | settings.setGLVersion(3, 2); 7 | settings.setSize(1280 + 360, 720); 8 | ofCreateWindow(settings); 9 | 10 | ofRunApp(new ofApp()); 11 | } 12 | -------------------------------------------------------------------------------- /example-record/src/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup() 5 | { 6 | //ofSetLogLevel(OF_LOG_VERBOSE); 7 | 8 | ofLogNotice(__FUNCTION__) << "Found " << ofxAzureKinect::Device::getInstalledCount() << " installed devices."; 9 | 10 | 
bRecord = false; 11 | bPlayback = false; 12 | filename = ""; 13 | 14 | openDevice(); 15 | } 16 | 17 | //-------------------------------------------------------------- 18 | void ofApp::exit() 19 | { 20 | kinectDevice.close(); 21 | kinectPlayback.close(); 22 | } 23 | 24 | //-------------------------------------------------------------- 25 | void ofApp::update() 26 | { 27 | if (kinectDevice.isFrameNew()) 28 | { 29 | fpsDevice.newFrame(); 30 | } 31 | if (kinectPlayback.isFrameNew()) 32 | { 33 | fpsPlayback.newFrame(); 34 | } 35 | } 36 | 37 | //-------------------------------------------------------------- 38 | void ofApp::draw() 39 | { 40 | ofBackground(128); 41 | 42 | if (bPlayback) 43 | { 44 | if (kinectPlayback.isStreaming()) 45 | { 46 | kinectPlayback.getColorTex().draw(0, 0, 1280, 720); 47 | kinectPlayback.getDepthTex().draw(1280, 0, 360, 360); 48 | kinectPlayback.getIrTex().draw(1280, 360, 360, 360); 49 | } 50 | } 51 | else 52 | { 53 | if (kinectDevice.isStreaming()) 54 | { 55 | kinectDevice.getColorTex().draw(0, 0, 1280, 720); 56 | kinectDevice.getDepthTex().draw(1280, 0, 360, 360); 57 | kinectDevice.getIrTex().draw(1280, 360, 360, 360); 58 | } 59 | } 60 | 61 | std::ostringstream oss; 62 | oss << std::fixed << std::setprecision(2) 63 | << (bPlayback ? "PLAYBACK" : "DEVICE") << std::endl 64 | << std::endl 65 | << "APP: " << ofGetFrameRate() << " FPS" << std::endl 66 | << "K4A: " << (bPlayback ? fpsPlayback.getFps() : fpsDevice.getFps()) << " FPS" << std::endl 67 | << std::endl 68 | << "[TAB] toggle mode" << std::endl 69 | << "[SPACE] " << (bPlayback ? "open file" : "toggle recording"); 70 | ofDrawBitmapStringHighlight(oss.str(), 10, 20); 71 | } 72 | 73 | //-------------------------------------------------------------- 74 | void ofApp::openDevice() 75 | { 76 | // Close the playback stream. 77 | closePlayback(); 78 | 79 | // Open and start the live device stream. 
80 | if (kinectDevice.open()) 81 | { 82 | auto deviceSettings = ofxAzureKinect::DeviceSettings(); 83 | deviceSettings.colorResolution = ofxAzureKinect::ColorResolution::K4A_COLOR_RESOLUTION_720P; 84 | deviceSettings.syncImages = false; 85 | deviceSettings.updateWorld = false; 86 | kinectDevice.startCameras(deviceSettings); 87 | } 88 | } 89 | 90 | //-------------------------------------------------------------- 91 | void ofApp::closeDevice() 92 | { 93 | kinectDevice.close(); 94 | } 95 | 96 | //-------------------------------------------------------------- 97 | void ofApp::openPlayback() 98 | { 99 | // Select a video file to play. 100 | auto result = ofSystemLoadDialog("Select an MKV Kinect recorder file:"); 101 | if (result.bSuccess) 102 | { 103 | // Close the live device stream. 104 | closeDevice(); 105 | 106 | // Close the playback stream. 107 | closePlayback(); 108 | 109 | // Open and start the playback stream. 110 | filename = result.fileName; 111 | if (kinectPlayback.open(result.filePath)) 112 | { 113 | auto playbackSettings = ofxAzureKinect::PlaybackSettings(); 114 | kinectPlayback.startPlayback(playbackSettings); 115 | } 116 | else 117 | { 118 | ofLogError(__FUNCTION__) << "Could not open file " << filename; 119 | } 120 | } 121 | } 122 | 123 | //-------------------------------------------------------------- 124 | void ofApp::closePlayback() 125 | { 126 | kinectPlayback.close(); 127 | } 128 | 129 | //-------------------------------------------------------------- 130 | void ofApp::keyPressed(int key) 131 | { 132 | if (key == OF_KEY_TAB) 133 | { 134 | bPlayback = !bPlayback; 135 | bPlayback ? openPlayback() : openDevice(); 136 | bRecord = false; 137 | } 138 | if (key == ' ') 139 | { 140 | if (bPlayback) 141 | { 142 | openPlayback(); 143 | } 144 | else 145 | { 146 | bRecord = !bRecord; 147 | bRecord ? 
kinectDevice.startRecording() : kinectDevice.stopRecording(); 148 | } 149 | } 150 | } 151 | 152 | //-------------------------------------------------------------- 153 | void ofApp::keyReleased(int key){ 154 | 155 | } 156 | 157 | //-------------------------------------------------------------- 158 | void ofApp::mouseMoved(int x, int y ){ 159 | 160 | } 161 | 162 | //-------------------------------------------------------------- 163 | void ofApp::mouseDragged(int x, int y, int button){ 164 | 165 | } 166 | 167 | //-------------------------------------------------------------- 168 | void ofApp::mousePressed(int x, int y, int button){ 169 | 170 | } 171 | 172 | //-------------------------------------------------------------- 173 | void ofApp::mouseReleased(int x, int y, int button){ 174 | 175 | } 176 | 177 | //-------------------------------------------------------------- 178 | void ofApp::mouseEntered(int x, int y){ 179 | 180 | } 181 | 182 | //-------------------------------------------------------------- 183 | void ofApp::mouseExited(int x, int y){ 184 | 185 | } 186 | 187 | //-------------------------------------------------------------- 188 | void ofApp::windowResized(int w, int h){ 189 | 190 | } 191 | 192 | //-------------------------------------------------------------- 193 | void ofApp::gotMessage(ofMessage msg){ 194 | 195 | } 196 | 197 | //-------------------------------------------------------------- 198 | void ofApp::dragEvent(ofDragInfo dragInfo){ 199 | 200 | } 201 | -------------------------------------------------------------------------------- /example-record/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | #include "ofxAzureKinect.h" 6 | 7 | class ofApp 8 | : public ofBaseApp 9 | { 10 | public: 11 | void setup(); 12 | void exit(); 13 | 14 | void update(); 15 | void draw(); 16 | 17 | void openDevice(); 18 | void closeDevice(); 19 | 20 | void openPlayback(); 21 | 
void closePlayback(); 22 | 23 | void keyPressed(int key); 24 | void keyReleased(int key); 25 | void mouseMoved(int x, int y); 26 | void mouseDragged(int x, int y, int button); 27 | void mousePressed(int x, int y, int button); 28 | void mouseReleased(int x, int y, int button); 29 | void mouseEntered(int x, int y); 30 | void mouseExited(int x, int y); 31 | void windowResized(int w, int h); 32 | void dragEvent(ofDragInfo dragInfo); 33 | void gotMessage(ofMessage msg); 34 | 35 | private: 36 | ofxAzureKinect::Device kinectDevice; 37 | ofFpsCounter fpsDevice; 38 | 39 | ofxAzureKinect::Playback kinectPlayback; 40 | ofFpsCounter fpsPlayback; 41 | 42 | bool bRecord; 43 | bool bPlayback; 44 | std::string filename; 45 | }; 46 | -------------------------------------------------------------------------------- /example-scaled-depth/addons.make: -------------------------------------------------------------------------------- 1 | ofxAzureKinect 2 | ofxGui 3 | -------------------------------------------------------------------------------- /example-scaled-depth/config.make: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prisonerjohn/ofxAzureKinect/0fd9db1fc6ef616482f285e42c933da3471b9d17/example-scaled-depth/config.make -------------------------------------------------------------------------------- /example-scaled-depth/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | int main() 4 | { 5 | ofGLFWWindowSettings settings; 6 | settings.setGLVersion(3, 2); 7 | settings.setSize(512 * 3, 512); 8 | ofCreateWindow(settings); 9 | 10 | ofRunApp(new ofApp()); 11 | } 12 | -------------------------------------------------------------------------------- /example-scaled-depth/src/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | 
//-------------------------------------------------------------- 4 | void ofApp::setup() 5 | { 6 | //ofSetLogLevel(OF_LOG_VERBOSE); 7 | 8 | ofLogNotice(__FUNCTION__) << "Found " << ofxAzureKinect::Device::getInstalledCount() << " installed devices."; 9 | 10 | nearClip.set("Near Clip", 0.25f, 0.0f, 8.0f); 11 | farClip.set("Far Clip", 3.0f, 0.0f, 8.0f); 12 | 13 | guiPanel.setup("Scaled Depth", "settings.json"); 14 | guiPanel.add(nearClip); 15 | guiPanel.add(farClip); 16 | 17 | if (kinectDevice.open()) 18 | { 19 | auto kinectSettings = ofxAzureKinect::DeviceSettings(); 20 | kinectSettings.syncImages = false; 21 | kinectSettings.updateColor = false; 22 | kinectSettings.updateWorld = false; 23 | kinectDevice.startCameras(kinectSettings); 24 | } 25 | } 26 | 27 | //-------------------------------------------------------------- 28 | void ofApp::exit() 29 | { 30 | kinectDevice.close(); 31 | } 32 | 33 | //-------------------------------------------------------------- 34 | void ofApp::update() 35 | { 36 | if (kinectDevice.isFrameNew()) 37 | { 38 | kinectFps.newFrame(); 39 | } 40 | } 41 | 42 | //-------------------------------------------------------------- 43 | void ofApp::draw() 44 | { 45 | ofBackground(128); 46 | 47 | if (kinectDevice.isStreaming()) 48 | { 49 | const auto& depthPix = kinectDevice.getDepthPix(); 50 | 51 | if (!scaledDepthImage.isAllocated()) 52 | { 53 | scaledDepthImage.allocate(depthPix.getWidth(), depthPix.getHeight(), depthPix.getImageType()); 54 | } 55 | if (!linearDepthImage.isAllocated()) 56 | { 57 | linearDepthImage.allocate(depthPix.getWidth(), depthPix.getHeight(), depthPix.getImageType()); 58 | } 59 | 60 | auto& scaledPix = scaledDepthImage.getPixels(); 61 | auto& linearPix = linearDepthImage.getPixels(); 62 | 63 | // Convert from meters to millimeters. 
64 | const float nearClipMm = nearClip * 1000; 65 | const float farClipMm = farClip * 1000; 66 | 67 | for (int i = 0; i < depthPix.size(); ++i) 68 | { 69 | if (depthPix[i] > farClipMm) 70 | { 71 | scaledPix[i] = linearPix[i] = 0; 72 | } 73 | else 74 | { 75 | scaledPix[i] = ofMap(depthPix[i], nearClipMm, farClipMm, 0, 255, true); 76 | linearPix[i] = ofMap(depthPix[i], nearClipMm, farClipMm, 0.0f, 1.0f, true); 77 | } 78 | } 79 | 80 | scaledDepthImage.update(); 81 | linearDepthImage.update(); 82 | 83 | kinectDevice.getDepthTex().draw(0, 0, 512, 512); 84 | scaledDepthImage.draw(512, 0, 512, 512); 85 | linearDepthImage.draw(1024, 0, 512, 512); 86 | } 87 | 88 | guiPanel.draw(); 89 | } 90 | 91 | //-------------------------------------------------------------- 92 | void ofApp::keyPressed(int key){ 93 | 94 | } 95 | 96 | //-------------------------------------------------------------- 97 | void ofApp::keyReleased(int key){ 98 | 99 | } 100 | 101 | //-------------------------------------------------------------- 102 | void ofApp::mouseMoved(int x, int y ){ 103 | 104 | } 105 | 106 | //-------------------------------------------------------------- 107 | void ofApp::mouseDragged(int x, int y, int button){ 108 | 109 | } 110 | 111 | //-------------------------------------------------------------- 112 | void ofApp::mousePressed(int x, int y, int button){ 113 | 114 | } 115 | 116 | //-------------------------------------------------------------- 117 | void ofApp::mouseReleased(int x, int y, int button){ 118 | 119 | } 120 | 121 | //-------------------------------------------------------------- 122 | void ofApp::mouseEntered(int x, int y){ 123 | 124 | } 125 | 126 | //-------------------------------------------------------------- 127 | void ofApp::mouseExited(int x, int y){ 128 | 129 | } 130 | 131 | //-------------------------------------------------------------- 132 | void ofApp::windowResized(int w, int h){ 133 | 134 | } 135 | 136 | 
//-------------------------------------------------------------- 137 | void ofApp::gotMessage(ofMessage msg){ 138 | 139 | } 140 | 141 | //-------------------------------------------------------------- 142 | void ofApp::dragEvent(ofDragInfo dragInfo){ 143 | 144 | } 145 | -------------------------------------------------------------------------------- /example-scaled-depth/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | #include "ofxAzureKinect.h" 6 | #include "ofxGui.h" 7 | 8 | class ofApp 9 | : public ofBaseApp 10 | { 11 | public: 12 | void setup(); 13 | void exit(); 14 | 15 | void update(); 16 | void draw(); 17 | 18 | void keyPressed(int key); 19 | void keyReleased(int key); 20 | void mouseMoved(int x, int y); 21 | void mouseDragged(int x, int y, int button); 22 | void mousePressed(int x, int y, int button); 23 | void mouseReleased(int x, int y, int button); 24 | void mouseEntered(int x, int y); 25 | void mouseExited(int x, int y); 26 | void windowResized(int w, int h); 27 | void dragEvent(ofDragInfo dragInfo); 28 | void gotMessage(ofMessage msg); 29 | 30 | private: 31 | ofxAzureKinect::Device kinectDevice; 32 | ofFpsCounter kinectFps; 33 | 34 | ofParameter nearClip; 35 | ofParameter farClip; 36 | 37 | ofxPanel guiPanel; 38 | 39 | ofImage scaledDepthImage; 40 | ofFloatImage linearDepthImage; 41 | }; 42 | -------------------------------------------------------------------------------- /example-shader/addons.make: -------------------------------------------------------------------------------- 1 | ofxAzureKinect 2 | -------------------------------------------------------------------------------- /example-shader/bin/data/shaders/render.frag: -------------------------------------------------------------------------------- 1 | #version 150 2 | 3 | // Custom attributes. 
4 | 5 | uniform sampler2DRect uColorTex; // Sampler for the color registered data 6 | 7 | in vec2 gTexCoord; 8 | 9 | out vec4 fragColor; 10 | 11 | void main() 12 | { 13 | fragColor = texture(uColorTex, gTexCoord); 14 | } -------------------------------------------------------------------------------- /example-shader/bin/data/shaders/render.geom: -------------------------------------------------------------------------------- 1 | #version 150 2 | 3 | layout (points) in; 4 | layout (triangle_strip) out; 5 | layout (max_vertices = 4) out; 6 | 7 | // OF handled uniforms and attributes. 8 | uniform mat4 projectionMatrix; 9 | 10 | // App specific uniforms and attributes. 11 | uniform float uSpriteSize; 12 | 13 | in vec4 vPosition[]; 14 | in vec2 vTexCoord[]; 15 | flat in int vValid[]; 16 | 17 | out vec2 gTexCoord; 18 | 19 | void main() 20 | { 21 | if (vValid[0] == 0) return; 22 | 23 | gTexCoord = vTexCoord[0]; 24 | 25 | for (int i = 0; i < gl_in.length(); ++i) 26 | { 27 | gl_Position = projectionMatrix * (vPosition[i] + vec4(1.0, -1.0, 0.0, 0.0) * uSpriteSize); 28 | EmitVertex(); 29 | 30 | gl_Position = projectionMatrix * (vPosition[i] + vec4(1.0, 1.0, 0.0, 0.0) * uSpriteSize); 31 | EmitVertex(); 32 | 33 | gl_Position = projectionMatrix * (vPosition[i] + vec4(-1.0, -1.0, 0.0, 0.0) * uSpriteSize); 34 | EmitVertex(); 35 | 36 | gl_Position = projectionMatrix * (vPosition[i] + vec4(-1.0, 1.0, 0.0, 0.0) * uSpriteSize); 37 | EmitVertex(); 38 | 39 | EndPrimitive(); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /example-shader/bin/data/shaders/render.vert: -------------------------------------------------------------------------------- 1 | #version 150 2 | 3 | // OF built-in attributes. 4 | 5 | uniform mat4 modelViewMatrix; 6 | 7 | // Custom attributes. 
8 | 9 | uniform sampler2DRect uDepthTex; // Sampler for the depth space data 10 | uniform sampler2DRect uWorldTex; // Transformation from kinect depth/color space to kinect world space 11 | 12 | uniform ivec2 uFrameSize; 13 | 14 | out vec4 vPosition; 15 | out vec2 vTexCoord; 16 | flat out int vValid; 17 | 18 | void main() 19 | { 20 | vTexCoord = vec2(gl_InstanceID % uFrameSize.x, gl_InstanceID / uFrameSize.x); 21 | 22 | float depth = texture(uDepthTex, vTexCoord).x; 23 | vec4 ray = texture(uWorldTex, vTexCoord); 24 | 25 | vValid = (depth != 0 && ray.x != 0 && ray.y != 0) ? 1 : 0; 26 | 27 | vec4 posWorld = vec4(1); 28 | posWorld.z = depth * 65535.0; // Remap to float range. 29 | posWorld.x = ray.x * posWorld.z; 30 | posWorld.y = ray.y * posWorld.z; 31 | 32 | // Flip X as OpenGL and K4A have different conventions on which direction is positive. 33 | posWorld.x *= -1; 34 | 35 | vPosition = modelViewMatrix * posWorld; 36 | } -------------------------------------------------------------------------------- /example-shader/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | int main() 4 | { 5 | ofGLFWWindowSettings settings; 6 | settings.setGLVersion(3, 2); 7 | settings.setSize(1280, 720); 8 | ofCreateWindow(settings); 9 | 10 | ofRunApp(new ofApp()); 11 | } 12 | -------------------------------------------------------------------------------- /example-shader/src/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup() 5 | { 6 | //ofSetLogLevel(OF_LOG_VERBOSE); 7 | 8 | ofLogNotice(__FUNCTION__) << "Found " << ofxAzureKinect::Device::getInstalledCount() << " installed devices."; 9 | 10 | // Open Kinect. 
11 | if (kinectDevice.open()) 12 | { 13 | auto kinectSettings = ofxAzureKinect::DeviceSettings(); 14 | kinectSettings.updateIr = false; 15 | kinectSettings.updateColor = true; 16 | kinectSettings.colorResolution = K4A_COLOR_RESOLUTION_1080P; 17 | kinectSettings.updateVbo = false; 18 | kinectDevice.startCameras(kinectSettings); 19 | } 20 | 21 | // Load shader. 22 | auto shaderSettings = ofShaderSettings(); 23 | shaderSettings.shaderFiles[GL_VERTEX_SHADER] = "shaders/render.vert"; 24 | shaderSettings.shaderFiles[GL_GEOMETRY_SHADER] = "shaders/render.geom"; 25 | shaderSettings.shaderFiles[GL_FRAGMENT_SHADER] = "shaders/render.frag"; 26 | shaderSettings.bindDefaults = true; 27 | if (shader.setup(shaderSettings)) 28 | { 29 | ofLogNotice(__FUNCTION__) << "Success loading shader!"; 30 | } 31 | 32 | // Setup vbo. 33 | std::vector<glm::vec3> verts(1); /* single dummy vertex; the point cloud is drawn instanced in draw() */ 34 | vbo.setVertexData(verts.data(), verts.size(), GL_STATIC_DRAW); 35 | 36 | pointSize = 3.0f; 37 | useColorSpace = false; 38 | } 39 | 40 | //-------------------------------------------------------------- 41 | void ofApp::exit() 42 | { 43 | kinectDevice.close(); 44 | } 45 | 46 | //-------------------------------------------------------------- 47 | void ofApp::update(){ 48 | 49 | } 50 | 51 | //-------------------------------------------------------------- 52 | void ofApp::draw() 53 | { 54 | ofBackground(0); 55 | 56 | if (kinectDevice.isStreaming()) 57 | { 58 | cam.begin(); 59 | { 60 | ofEnableDepthTest(); 61 | 62 | ofDrawAxis(100.0f); 63 | 64 | ofPushMatrix(); 65 | { 66 | ofRotateXDeg(180); 67 | 68 | shader.begin(); 69 | { 70 | shader.setUniform1f("uSpriteSize", pointSize); 71 | 72 | int numPoints; 73 | 74 | if (useColorSpace) 75 | { 76 | shader.setUniformTexture("uDepthTex", kinectDevice.getDepthInColorTex(), 1); 77 | shader.setUniformTexture("uWorldTex", kinectDevice.getColorToWorldTex(), 2); 78 | shader.setUniformTexture("uColorTex", kinectDevice.getColorTex(), 3); 79 | shader.setUniform2i("uFrameSize", 
kinectDevice.getColorTex().getWidth(), kinectDevice.getColorTex().getHeight()); 80 | 81 | numPoints = kinectDevice.getColorTex().getWidth() * kinectDevice.getColorTex().getHeight(); 82 | } 83 | else 84 | { 85 | shader.setUniformTexture("uDepthTex", kinectDevice.getDepthTex(), 1); 86 | shader.setUniformTexture("uWorldTex", kinectDevice.getDepthToWorldTex(), 2); 87 | shader.setUniformTexture("uColorTex", kinectDevice.getColorInDepthTex(), 3); 88 | shader.setUniform2i("uFrameSize", kinectDevice.getDepthTex().getWidth(), kinectDevice.getDepthTex().getHeight()); 89 | 90 | numPoints = kinectDevice.getDepthTex().getWidth() * kinectDevice.getDepthTex().getHeight(); 91 | } 92 | 93 | vbo.drawInstanced(GL_POINTS, 0, 1, numPoints); 94 | } 95 | shader.end(); 96 | } 97 | ofPopMatrix(); 98 | } 99 | cam.end(); 100 | } 101 | 102 | ofDrawBitmapStringHighlight(ofToString(ofGetFrameRate(), 2) + " FPS", 10, 20); 103 | } 104 | 105 | //-------------------------------------------------------------- 106 | void ofApp::keyPressed(int key) 107 | { 108 | if (key == OF_KEY_UP) 109 | { 110 | pointSize *= 2; 111 | } 112 | else if (key == OF_KEY_DOWN) 113 | { 114 | pointSize /= 2; 115 | } 116 | else if (key == ' ') 117 | { 118 | useColorSpace ^= 1; 119 | } 120 | } 121 | 122 | //-------------------------------------------------------------- 123 | void ofApp::keyReleased(int key){ 124 | 125 | } 126 | 127 | //-------------------------------------------------------------- 128 | void ofApp::mouseMoved(int x, int y ){ 129 | 130 | } 131 | 132 | //-------------------------------------------------------------- 133 | void ofApp::mouseDragged(int x, int y, int button){ 134 | 135 | } 136 | 137 | //-------------------------------------------------------------- 138 | void ofApp::mousePressed(int x, int y, int button){ 139 | 140 | } 141 | 142 | //-------------------------------------------------------------- 143 | void ofApp::mouseReleased(int x, int y, int button){ 144 | 145 | } 146 | 147 | 
//-------------------------------------------------------------- 148 | void ofApp::mouseEntered(int x, int y){ 149 | 150 | } 151 | 152 | //-------------------------------------------------------------- 153 | void ofApp::mouseExited(int x, int y){ 154 | 155 | } 156 | 157 | //-------------------------------------------------------------- 158 | void ofApp::windowResized(int w, int h){ 159 | 160 | } 161 | 162 | //-------------------------------------------------------------- 163 | void ofApp::gotMessage(ofMessage msg){ 164 | 165 | } 166 | 167 | //-------------------------------------------------------------- 168 | void ofApp::dragEvent(ofDragInfo dragInfo){ 169 | 170 | } 171 | -------------------------------------------------------------------------------- /example-shader/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | #include "ofxAzureKinect.h" 6 | 7 | class ofApp 8 | : public ofBaseApp 9 | { 10 | public: 11 | void setup(); 12 | void exit(); 13 | 14 | void update(); 15 | void draw(); 16 | 17 | void keyPressed(int key); 18 | void keyReleased(int key); 19 | void mouseMoved(int x, int y); 20 | void mouseDragged(int x, int y, int button); 21 | void mousePressed(int x, int y, int button); 22 | void mouseReleased(int x, int y, int button); 23 | void mouseEntered(int x, int y); 24 | void mouseExited(int x, int y); 25 | void windowResized(int w, int h); 26 | void dragEvent(ofDragInfo dragInfo); 27 | void gotMessage(ofMessage msg); 28 | 29 | private: 30 | ofxAzureKinect::Device kinectDevice; 31 | ofEasyCam cam; 32 | ofVbo vbo; 33 | ofShader shader; 34 | 35 | float pointSize; 36 | bool useColorSpace; 37 | }; 38 | -------------------------------------------------------------------------------- /example-streams/addons.make: -------------------------------------------------------------------------------- 1 | ofxAzureKinect 2 | 
-------------------------------------------------------------------------------- /example-streams/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | int main() 4 | { 5 | ofGLFWWindowSettings settings; 6 | settings.setGLVersion(3, 2); 7 | settings.setSize(1280 + 360, 720); 8 | ofCreateWindow(settings); 9 | 10 | ofRunApp(new ofApp()); 11 | } 12 | -------------------------------------------------------------------------------- /example-streams/src/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup() 5 | { 6 | //ofSetLogLevel(OF_LOG_VERBOSE); 7 | 8 | ofLogNotice(__FUNCTION__) << "Found " << ofxAzureKinect::Device::getInstalledCount() << " installed devices."; 9 | 10 | if (kinectDevice.open()) 11 | { 12 | auto kinectSettings = ofxAzureKinect::DeviceSettings(); 13 | kinectSettings.syncImages = false; 14 | kinectSettings.updateWorld = false; 15 | kinectDevice.startCameras(kinectSettings); 16 | } 17 | } 18 | 19 | //-------------------------------------------------------------- 20 | void ofApp::exit() 21 | { 22 | kinectDevice.close(); 23 | } 24 | 25 | //-------------------------------------------------------------- 26 | void ofApp::update() 27 | { 28 | if (kinectDevice.isFrameNew()) 29 | { 30 | kinectFps.newFrame(); 31 | } 32 | } 33 | 34 | //-------------------------------------------------------------- 35 | void ofApp::draw() 36 | { 37 | ofBackground(128); 38 | 39 | if (kinectDevice.isStreaming()) 40 | { 41 | kinectDevice.getColorTex().draw(0, 0, 1280, 720); 42 | kinectDevice.getDepthTex().draw(1280, 0, 360, 360); 43 | kinectDevice.getIrTex().draw(1280, 360, 360, 360); 44 | } 45 | 46 | std::ostringstream oss; 47 | oss << std::fixed << std::setprecision(2) 48 | << "APP: " << ofGetFrameRate() << " FPS" << std::endl 49 | << "K4A: " << 
kinectFps.getFps() << " FPS"; 50 | ofDrawBitmapStringHighlight(oss.str(), 10, 20); 51 | } 52 | 53 | //-------------------------------------------------------------- 54 | void ofApp::keyPressed(int key){ 55 | 56 | } 57 | 58 | //-------------------------------------------------------------- 59 | void ofApp::keyReleased(int key){ 60 | 61 | } 62 | 63 | //-------------------------------------------------------------- 64 | void ofApp::mouseMoved(int x, int y ){ 65 | 66 | } 67 | 68 | //-------------------------------------------------------------- 69 | void ofApp::mouseDragged(int x, int y, int button){ 70 | 71 | } 72 | 73 | //-------------------------------------------------------------- 74 | void ofApp::mousePressed(int x, int y, int button){ 75 | 76 | } 77 | 78 | //-------------------------------------------------------------- 79 | void ofApp::mouseReleased(int x, int y, int button){ 80 | 81 | } 82 | 83 | //-------------------------------------------------------------- 84 | void ofApp::mouseEntered(int x, int y){ 85 | 86 | } 87 | 88 | //-------------------------------------------------------------- 89 | void ofApp::mouseExited(int x, int y){ 90 | 91 | } 92 | 93 | //-------------------------------------------------------------- 94 | void ofApp::windowResized(int w, int h){ 95 | 96 | } 97 | 98 | //-------------------------------------------------------------- 99 | void ofApp::gotMessage(ofMessage msg){ 100 | 101 | } 102 | 103 | //-------------------------------------------------------------- 104 | void ofApp::dragEvent(ofDragInfo dragInfo){ 105 | 106 | } 107 | -------------------------------------------------------------------------------- /example-streams/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | #include "ofxAzureKinect.h" 6 | 7 | class ofApp 8 | : public ofBaseApp 9 | { 10 | public: 11 | void setup(); 12 | void exit(); 13 | 14 | void update(); 15 | void draw(); 16 
| 17 | void keyPressed(int key); 18 | void keyReleased(int key); 19 | void mouseMoved(int x, int y); 20 | void mouseDragged(int x, int y, int button); 21 | void mousePressed(int x, int y, int button); 22 | void mouseReleased(int x, int y, int button); 23 | void mouseEntered(int x, int y); 24 | void mouseExited(int x, int y); 25 | void windowResized(int w, int h); 26 | void dragEvent(ofDragInfo dragInfo); 27 | void gotMessage(ofMessage msg); 28 | 29 | private: 30 | ofxAzureKinect::Device kinectDevice; 31 | ofFpsCounter kinectFps; 32 | }; 33 | -------------------------------------------------------------------------------- /example-world-coord/addons.make: -------------------------------------------------------------------------------- 1 | ofxAzureKinect 2 | -------------------------------------------------------------------------------- /example-world-coord/config.make: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prisonerjohn/ofxAzureKinect/0fd9db1fc6ef616482f285e42c933da3471b9d17/example-world-coord/config.make -------------------------------------------------------------------------------- /example-world-coord/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | int main() 4 | { 5 | ofGLFWWindowSettings settings; 6 | settings.setGLVersion(3, 2); 7 | settings.setSize(512, 512); 8 | ofCreateWindow(settings); 9 | 10 | ofRunApp(new ofApp()); 11 | } 12 | -------------------------------------------------------------------------------- /example-world-coord/src/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup() 5 | { 6 | //ofSetLogLevel(OF_LOG_VERBOSE); 7 | 8 | ofLogNotice(__FUNCTION__) << "Found " << ofxAzureKinect::Device::getInstalledCount() << " installed devices."; 9 
| 10 | if (kinectDevice.open()) 11 | { 12 | auto kinectSettings = ofxAzureKinect::DeviceSettings(); 13 | kinectDevice.startCameras(kinectSettings); 14 | } 15 | } 16 | 17 | //-------------------------------------------------------------- 18 | void ofApp::exit() 19 | { 20 | kinectDevice.close(); 21 | } 22 | 23 | //-------------------------------------------------------------- 24 | void ofApp::update() 25 | { 26 | if (kinectDevice.isFrameNew()) 27 | { 28 | kinectFps.newFrame(); 29 | } 30 | } 31 | 32 | //-------------------------------------------------------------- 33 | void ofApp::draw() 34 | { 35 | ofBackground(128); 36 | 37 | if (kinectDevice.isStreaming() && kinectDevice.getDepthTex().isAllocated()) 38 | { 39 | kinectDevice.getDepthTex().draw(0, 0); 40 | kinectDevice.getColorInDepthTex().draw(0, 0); 41 | 42 | glm::vec3 worldCoord = getWorldCoordinate(mouseX, mouseY); 43 | ofDrawBitmapStringHighlight(ofToString(worldCoord), ofGetMouseX() + 16, ofGetMouseY() + 10); 44 | } 45 | 46 | std::ostringstream oss; 47 | oss << std::fixed << std::setprecision(2) 48 | << "APP: " << ofGetFrameRate() << " FPS" << std::endl 49 | << "K4A: " << kinectFps.getFps() << " FPS"; 50 | ofDrawBitmapStringHighlight(oss.str(), 10, 20); 51 | } 52 | 53 | //-------------------------------------------------------------- 54 | glm::vec3 ofApp::getWorldCoordinate(int x, int y) 55 | { 56 | const auto& depthPixels = kinectDevice.getDepthPix(); 57 | const auto depthData = depthPixels.getData(); 58 | const auto& depthToWorldPixels = kinectDevice.getDepthToWorldPix(); 59 | const auto depthToWorldData = depthToWorldPixels.getData(); 60 | 61 | int sampleX = ofClamp(x, 0, depthPixels.getWidth() - 1); 62 | int sampleY = ofClamp(y, 0, depthPixels.getHeight() - 1); 63 | int idx = sampleY * depthPixels.getWidth() + sampleX; 64 | 65 | if (depthData[idx] != 0 && 66 | depthToWorldData[idx * 2 + 0] != 0 && depthToWorldData[idx * 2 + 1] != 0) 67 | { 68 | float depthVal = static_cast<float>(depthData[idx]); 69 | return 
glm::vec3( 70 | depthToWorldData[idx * 2 + 0] * depthVal, 71 | depthToWorldData[idx * 2 + 1] * depthVal, 72 | depthVal 73 | ); 74 | } 75 | return glm::vec3(0.0f); /* no valid depth/ray at this pixel: fall back to the origin instead of falling off the end (UB) */ 76 | } 77 | 78 | //-------------------------------------------------------------- 79 | void ofApp::keyPressed(int key){ 80 | 81 | } 82 | 83 | //-------------------------------------------------------------- 84 | void ofApp::keyReleased(int key){ 85 | 86 | } 87 | 88 | //-------------------------------------------------------------- 89 | void ofApp::mouseMoved(int x, int y ){ 90 | 91 | } 92 | 93 | //-------------------------------------------------------------- 94 | void ofApp::mouseDragged(int x, int y, int button){ 95 | 96 | } 97 | 98 | //-------------------------------------------------------------- 99 | void ofApp::mousePressed(int x, int y, int button){ 100 | 101 | } 102 | 103 | //-------------------------------------------------------------- 104 | void ofApp::mouseReleased(int x, int y, int button){ 105 | 106 | } 107 | 108 | //-------------------------------------------------------------- 109 | void ofApp::mouseEntered(int x, int y){ 110 | 111 | } 112 | 113 | //-------------------------------------------------------------- 114 | void ofApp::mouseExited(int x, int y){ 115 | 116 | } 117 | 118 | //-------------------------------------------------------------- 119 | void ofApp::windowResized(int w, int h){ 120 | 121 | } 122 | 123 | //-------------------------------------------------------------- 124 | void ofApp::gotMessage(ofMessage msg){ 125 | 126 | } 127 | 128 | //-------------------------------------------------------------- 129 | void ofApp::dragEvent(ofDragInfo dragInfo){ 130 | 131 | } 132 | -------------------------------------------------------------------------------- /example-world-coord/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | #include "ofxAzureKinect.h" 6 | 7 | class ofApp 8 | : public ofBaseApp 9 | { 10 | public: 11 | 
void setup(); 12 | void exit(); 13 | 14 | void update(); 15 | void draw(); 16 | 17 | void keyPressed(int key); 18 | void keyReleased(int key); 19 | void mouseMoved(int x, int y); 20 | void mouseDragged(int x, int y, int button); 21 | void mousePressed(int x, int y, int button); 22 | void mouseReleased(int x, int y, int button); 23 | void mouseEntered(int x, int y); 24 | void mouseExited(int x, int y); 25 | void windowResized(int w, int h); 26 | void dragEvent(ofDragInfo dragInfo); 27 | void gotMessage(ofMessage msg); 28 | 29 | glm::vec3 getWorldCoordinate(int x, int y); 30 | 31 | private: 32 | ofxAzureKinect::Device kinectDevice; 33 | ofFpsCounter kinectFps; 34 | 35 | }; 36 | -------------------------------------------------------------------------------- /libs/turbojpeg/include/turbojpeg.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C)2009-2015, 2017 D. R. Commander. All Rights Reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are met: 6 | * 7 | * - Redistributions of source code must retain the above copyright notice, 8 | * this list of conditions and the following disclaimer. 9 | * - Redistributions in binary form must reproduce the above copyright notice, 10 | * this list of conditions and the following disclaimer in the documentation 11 | * and/or other materials provided with the distribution. 12 | * - Neither the name of the libjpeg-turbo Project nor the names of its 13 | * contributors may be used to endorse or promote products derived from this 14 | * software without specific prior written permission. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS", 17 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 | * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE 20 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 | * POSSIBILITY OF SUCH DAMAGE. 27 | */ 28 | 29 | #ifndef __TURBOJPEG_H__ 30 | #define __TURBOJPEG_H__ 31 | 32 | #if defined(_WIN32) && defined(DLLDEFINE) 33 | #define DLLEXPORT __declspec(dllexport) 34 | #else 35 | #define DLLEXPORT 36 | #endif 37 | #define DLLCALL 38 | 39 | 40 | /** 41 | * @addtogroup TurboJPEG 42 | * TurboJPEG API. This API provides an interface for generating, decoding, and 43 | * transforming planar YUV and JPEG images in memory. 44 | * 45 | * @anchor YUVnotes 46 | * YUV Image Format Notes 47 | * ---------------------- 48 | * Technically, the JPEG format uses the YCbCr colorspace (which is technically 49 | * not a colorspace but a color transform), but per the convention of the 50 | * digital video community, the TurboJPEG API uses "YUV" to refer to an image 51 | * format consisting of Y, Cb, and Cr image planes. 52 | * 53 | * Each plane is simply a 2D array of bytes, each byte representing the value 54 | * of one of the components (Y, Cb, or Cr) at a particular location in the 55 | * image. The width and height of each plane are determined by the image 56 | * width, height, and level of chrominance subsampling. The luminance plane 57 | * width is the image width padded to the nearest multiple of the horizontal 58 | * subsampling factor (2 in the case of 4:2:0 and 4:2:2, 4 in the case of 59 | * 4:1:1, 1 in the case of 4:4:4 or grayscale.) 
Similarly, the luminance plane 60 | * height is the image height padded to the nearest multiple of the vertical 61 | * subsampling factor (2 in the case of 4:2:0 or 4:4:0, 1 in the case of 4:4:4 62 | * or grayscale.) This is irrespective of any additional padding that may be 63 | * specified as an argument to the various YUV functions. The chrominance 64 | * plane width is equal to the luminance plane width divided by the horizontal 65 | * subsampling factor, and the chrominance plane height is equal to the 66 | * luminance plane height divided by the vertical subsampling factor. 67 | * 68 | * For example, if the source image is 35 x 35 pixels and 4:2:2 subsampling is 69 | * used, then the luminance plane would be 36 x 35 bytes, and each of the 70 | * chrominance planes would be 18 x 35 bytes. If you specify a line padding of 71 | * 4 bytes on top of this, then the luminance plane would be 36 x 35 bytes, and 72 | * each of the chrominance planes would be 20 x 35 bytes. 73 | * 74 | * @{ 75 | */ 76 | 77 | 78 | /** 79 | * The number of chrominance subsampling options 80 | */ 81 | #define TJ_NUMSAMP 6 82 | 83 | /** 84 | * Chrominance subsampling options. 85 | * When pixels are converted from RGB to YCbCr (see #TJCS_YCbCr) or from CMYK 86 | * to YCCK (see #TJCS_YCCK) as part of the JPEG compression process, some of 87 | * the Cb and Cr (chrominance) components can be discarded or averaged together 88 | * to produce a smaller image with little perceptible loss of image clarity 89 | * (the human eye is more sensitive to small changes in brightness than to 90 | * small changes in color.) This is called "chrominance subsampling". 91 | */ 92 | enum TJSAMP { 93 | /** 94 | * 4:4:4 chrominance subsampling (no chrominance subsampling). The JPEG or 95 | * YUV image will contain one chrominance component for every pixel in the 96 | * source image. 97 | */ 98 | TJSAMP_444 = 0, 99 | /** 100 | * 4:2:2 chrominance subsampling. 
The JPEG or YUV image will contain one 101 | * chrominance component for every 2x1 block of pixels in the source image. 102 | */ 103 | TJSAMP_422, 104 | /** 105 | * 4:2:0 chrominance subsampling. The JPEG or YUV image will contain one 106 | * chrominance component for every 2x2 block of pixels in the source image. 107 | */ 108 | TJSAMP_420, 109 | /** 110 | * Grayscale. The JPEG or YUV image will contain no chrominance components. 111 | */ 112 | TJSAMP_GRAY, 113 | /** 114 | * 4:4:0 chrominance subsampling. The JPEG or YUV image will contain one 115 | * chrominance component for every 1x2 block of pixels in the source image. 116 | * 117 | * @note 4:4:0 subsampling is not fully accelerated in libjpeg-turbo. 118 | */ 119 | TJSAMP_440, 120 | /** 121 | * 4:1:1 chrominance subsampling. The JPEG or YUV image will contain one 122 | * chrominance component for every 4x1 block of pixels in the source image. 123 | * JPEG images compressed with 4:1:1 subsampling will be almost exactly the 124 | * same size as those compressed with 4:2:0 subsampling, and in the 125 | * aggregate, both subsampling methods produce approximately the same 126 | * perceptual quality. However, 4:1:1 is better able to reproduce sharp 127 | * horizontal features. 128 | * 129 | * @note 4:1:1 subsampling is not fully accelerated in libjpeg-turbo. 130 | */ 131 | TJSAMP_411 132 | }; 133 | 134 | /** 135 | * MCU block width (in pixels) for a given level of chrominance subsampling. 136 | * MCU block sizes: 137 | * - 8x8 for no subsampling or grayscale 138 | * - 16x8 for 4:2:2 139 | * - 8x16 for 4:4:0 140 | * - 16x16 for 4:2:0 141 | * - 32x8 for 4:1:1 142 | */ 143 | static const int tjMCUWidth[TJ_NUMSAMP] = { 8, 16, 16, 8, 8, 32 }; 144 | 145 | /** 146 | * MCU block height (in pixels) for a given level of chrominance subsampling. 
147 | * MCU block sizes: 148 | * - 8x8 for no subsampling or grayscale 149 | * - 16x8 for 4:2:2 150 | * - 8x16 for 4:4:0 151 | * - 16x16 for 4:2:0 152 | * - 32x8 for 4:1:1 153 | */ 154 | static const int tjMCUHeight[TJ_NUMSAMP] = { 8, 8, 16, 8, 16, 8 }; 155 | 156 | 157 | /** 158 | * The number of pixel formats 159 | */ 160 | #define TJ_NUMPF 12 161 | 162 | /** 163 | * Pixel formats 164 | */ 165 | enum TJPF { 166 | /** 167 | * RGB pixel format. The red, green, and blue components in the image are 168 | * stored in 3-byte pixels in the order R, G, B from lowest to highest byte 169 | * address within each pixel. 170 | */ 171 | TJPF_RGB = 0, 172 | /** 173 | * BGR pixel format. The red, green, and blue components in the image are 174 | * stored in 3-byte pixels in the order B, G, R from lowest to highest byte 175 | * address within each pixel. 176 | */ 177 | TJPF_BGR, 178 | /** 179 | * RGBX pixel format. The red, green, and blue components in the image are 180 | * stored in 4-byte pixels in the order R, G, B from lowest to highest byte 181 | * address within each pixel. The X component is ignored when compressing 182 | * and undefined when decompressing. 183 | */ 184 | TJPF_RGBX, 185 | /** 186 | * BGRX pixel format. The red, green, and blue components in the image are 187 | * stored in 4-byte pixels in the order B, G, R from lowest to highest byte 188 | * address within each pixel. The X component is ignored when compressing 189 | * and undefined when decompressing. 190 | */ 191 | TJPF_BGRX, 192 | /** 193 | * XBGR pixel format. The red, green, and blue components in the image are 194 | * stored in 4-byte pixels in the order R, G, B from highest to lowest byte 195 | * address within each pixel. The X component is ignored when compressing 196 | * and undefined when decompressing. 197 | */ 198 | TJPF_XBGR, 199 | /** 200 | * XRGB pixel format. 
The red, green, and blue components in the image are 201 | * stored in 4-byte pixels in the order B, G, R from highest to lowest byte 202 | * address within each pixel. The X component is ignored when compressing 203 | * and undefined when decompressing. 204 | */ 205 | TJPF_XRGB, 206 | /** 207 | * Grayscale pixel format. Each 1-byte pixel represents a luminance 208 | * (brightness) level from 0 to 255. 209 | */ 210 | TJPF_GRAY, 211 | /** 212 | * RGBA pixel format. This is the same as @ref TJPF_RGBX, except that when 213 | * decompressing, the X component is guaranteed to be 0xFF, which can be 214 | * interpreted as an opaque alpha channel. 215 | */ 216 | TJPF_RGBA, 217 | /** 218 | * BGRA pixel format. This is the same as @ref TJPF_BGRX, except that when 219 | * decompressing, the X component is guaranteed to be 0xFF, which can be 220 | * interpreted as an opaque alpha channel. 221 | */ 222 | TJPF_BGRA, 223 | /** 224 | * ABGR pixel format. This is the same as @ref TJPF_XBGR, except that when 225 | * decompressing, the X component is guaranteed to be 0xFF, which can be 226 | * interpreted as an opaque alpha channel. 227 | */ 228 | TJPF_ABGR, 229 | /** 230 | * ARGB pixel format. This is the same as @ref TJPF_XRGB, except that when 231 | * decompressing, the X component is guaranteed to be 0xFF, which can be 232 | * interpreted as an opaque alpha channel. 233 | */ 234 | TJPF_ARGB, 235 | /** 236 | * CMYK pixel format. Unlike RGB, which is an additive color model used 237 | * primarily for display, CMYK (Cyan/Magenta/Yellow/Key) is a subtractive 238 | * color model used primarily for printing. In the CMYK color model, the 239 | * value of each color component typically corresponds to an amount of cyan, 240 | * magenta, yellow, or black ink that is applied to a white background. In 241 | * order to convert between CMYK and RGB, it is necessary to use a color 242 | * management system (CMS.) 
A CMS will attempt to map colors within the 243 | * printer's gamut to perceptually similar colors in the display's gamut and 244 | * vice versa, but the mapping is typically not 1:1 or reversible, nor can it 245 | * be defined with a simple formula. Thus, such a conversion is out of scope 246 | * for a codec library. However, the TurboJPEG API allows for compressing 247 | * CMYK pixels into a YCCK JPEG image (see #TJCS_YCCK) and decompressing YCCK 248 | * JPEG images into CMYK pixels. 249 | */ 250 | TJPF_CMYK, 251 | /** 252 | * Unknown pixel format. Currently this is only used by #tjLoadImage(). 253 | */ 254 | TJPF_UNKNOWN = -1 255 | }; 256 | 257 | /** 258 | * Red offset (in bytes) for a given pixel format. This specifies the number 259 | * of bytes that the red component is offset from the start of the pixel. For 260 | * instance, if a pixel of format TJ_BGRX is stored in char pixel[], 261 | * then the red component will be pixel[tjRedOffset[TJ_BGRX]]. This 262 | * will be -1 if the pixel format does not have a red component. 263 | */ 264 | static const int tjRedOffset[TJ_NUMPF] = { 265 | 0, 2, 0, 2, 3, 1, -1, 0, 2, 3, 1, -1 266 | }; 267 | /** 268 | * Green offset (in bytes) for a given pixel format. This specifies the number 269 | * of bytes that the green component is offset from the start of the pixel. 270 | * For instance, if a pixel of format TJ_BGRX is stored in 271 | * char pixel[], then the green component will be 272 | * pixel[tjGreenOffset[TJ_BGRX]]. This will be -1 if the pixel format 273 | * does not have a green component. 274 | */ 275 | static const int tjGreenOffset[TJ_NUMPF] = { 276 | 1, 1, 1, 1, 2, 2, -1, 1, 1, 2, 2, -1 277 | }; 278 | /** 279 | * Blue offset (in bytes) for a given pixel format. This specifies the number 280 | * of bytes that the Blue component is offset from the start of the pixel. For 281 | * instance, if a pixel of format TJ_BGRX is stored in char pixel[], 282 | * then the blue component will be pixel[tjBlueOffset[TJ_BGRX]]. 
This 283 | * will be -1 if the pixel format does not have a blue component. 284 | */ 285 | static const int tjBlueOffset[TJ_NUMPF] = { 286 | 2, 0, 2, 0, 1, 3, -1, 2, 0, 1, 3, -1 287 | }; 288 | /** 289 | * Alpha offset (in bytes) for a given pixel format. This specifies the number 290 | * of bytes that the Alpha component is offset from the start of the pixel. 291 | * For instance, if a pixel of format TJ_BGRA is stored in 292 | * char pixel[], then the alpha component will be 293 | * pixel[tjAlphaOffset[TJ_BGRA]]. This will be -1 if the pixel format 294 | * does not have an alpha component. 295 | */ 296 | static const int tjAlphaOffset[TJ_NUMPF] = { 297 | -1, -1, -1, -1, -1, -1, -1, 3, 3, 0, 0, -1 298 | }; 299 | /** 300 | * Pixel size (in bytes) for a given pixel format 301 | */ 302 | static const int tjPixelSize[TJ_NUMPF] = { 303 | 3, 3, 4, 4, 4, 4, 1, 4, 4, 4, 4, 4 304 | }; 305 | 306 | 307 | /** 308 | * The number of JPEG colorspaces 309 | */ 310 | #define TJ_NUMCS 5 311 | 312 | /** 313 | * JPEG colorspaces 314 | */ 315 | enum TJCS { 316 | /** 317 | * RGB colorspace. When compressing the JPEG image, the R, G, and B 318 | * components in the source image are reordered into image planes, but no 319 | * colorspace conversion or subsampling is performed. RGB JPEG images can be 320 | * decompressed to any of the extended RGB pixel formats or grayscale, but 321 | * they cannot be decompressed to YUV images. 322 | */ 323 | TJCS_RGB = 0, 324 | /** 325 | * YCbCr colorspace. YCbCr is not an absolute colorspace but rather a 326 | * mathematical transformation of RGB designed solely for storage and 327 | * transmission. YCbCr images must be converted to RGB before they can 328 | * actually be displayed. In the YCbCr colorspace, the Y (luminance) 329 | * component represents the black & white portion of the original image, and 330 | * the Cb and Cr (chrominance) components represent the color portion of the 331 | * original image. 
Originally, the analog equivalent of this transformation 332 | * allowed the same signal to drive both black & white and color televisions, 333 | * but JPEG images use YCbCr primarily because it allows the color data to be 334 | * optionally subsampled for the purposes of reducing bandwidth or disk 335 | * space. YCbCr is the most common JPEG colorspace, and YCbCr JPEG images 336 | * can be compressed from and decompressed to any of the extended RGB pixel 337 | * formats or grayscale, or they can be decompressed to YUV planar images. 338 | */ 339 | TJCS_YCbCr, 340 | /** 341 | * Grayscale colorspace. The JPEG image retains only the luminance data (Y 342 | * component), and any color data from the source image is discarded. 343 | * Grayscale JPEG images can be compressed from and decompressed to any of 344 | * the extended RGB pixel formats or grayscale, or they can be decompressed 345 | * to YUV planar images. 346 | */ 347 | TJCS_GRAY, 348 | /** 349 | * CMYK colorspace. When compressing the JPEG image, the C, M, Y, and K 350 | * components in the source image are reordered into image planes, but no 351 | * colorspace conversion or subsampling is performed. CMYK JPEG images can 352 | * only be decompressed to CMYK pixels. 353 | */ 354 | TJCS_CMYK, 355 | /** 356 | * YCCK colorspace. YCCK (AKA "YCbCrK") is not an absolute colorspace but 357 | * rather a mathematical transformation of CMYK designed solely for storage 358 | * and transmission. It is to CMYK as YCbCr is to RGB. CMYK pixels can be 359 | * reversibly transformed into YCCK, and as with YCbCr, the chrominance 360 | * components in the YCCK pixels can be subsampled without incurring major 361 | * perceptual loss. YCCK JPEG images can only be compressed from and 362 | * decompressed to CMYK pixels. 363 | */ 364 | TJCS_YCCK 365 | }; 366 | 367 | 368 | /** 369 | * The uncompressed source/destination image is stored in bottom-up (Windows, 370 | * OpenGL) order, not top-down (X11) order. 
371 | */ 372 | #define TJFLAG_BOTTOMUP 2 373 | /** 374 | * When decompressing an image that was compressed using chrominance 375 | * subsampling, use the fastest chrominance upsampling algorithm available in 376 | * the underlying codec. The default is to use smooth upsampling, which 377 | * creates a smooth transition between neighboring chrominance components in 378 | * order to reduce upsampling artifacts in the decompressed image. 379 | */ 380 | #define TJFLAG_FASTUPSAMPLE 256 381 | /** 382 | * Disable buffer (re)allocation. If passed to one of the JPEG compression or 383 | * transform functions, this flag will cause those functions to generate an 384 | * error if the JPEG image buffer is invalid or too small rather than 385 | * attempting to allocate or reallocate that buffer. This reproduces the 386 | * behavior of earlier versions of TurboJPEG. 387 | */ 388 | #define TJFLAG_NOREALLOC 1024 389 | /** 390 | * Use the fastest DCT/IDCT algorithm available in the underlying codec. The 391 | * default if this flag is not specified is implementation-specific. For 392 | * example, the implementation of TurboJPEG for libjpeg[-turbo] uses the fast 393 | * algorithm by default when compressing, because this has been shown to have 394 | * only a very slight effect on accuracy, but it uses the accurate algorithm 395 | * when decompressing, because this has been shown to have a larger effect. 396 | */ 397 | #define TJFLAG_FASTDCT 2048 398 | /** 399 | * Use the most accurate DCT/IDCT algorithm available in the underlying codec. 400 | * The default if this flag is not specified is implementation-specific. For 401 | * example, the implementation of TurboJPEG for libjpeg[-turbo] uses the fast 402 | * algorithm by default when compressing, because this has been shown to have 403 | * only a very slight effect on accuracy, but it uses the accurate algorithm 404 | * when decompressing, because this has been shown to have a larger effect. 
405 | */ 406 | #define TJFLAG_ACCURATEDCT 4096 407 | /** 408 | * Immediately discontinue the current compression/decompression/transform 409 | * operation if the underlying codec throws a warning (non-fatal error). The 410 | * default behavior is to allow the operation to complete unless a fatal error 411 | * is encountered. 412 | */ 413 | #define TJFLAG_STOPONWARNING 8192 414 | /** 415 | * Use progressive entropy coding in JPEG images generated by the compression 416 | * and transform functions. Progressive entropy coding will generally improve 417 | * compression relative to baseline entropy coding (the default), but it will 418 | * reduce compression and decompression performance considerably. 419 | */ 420 | #define TJFLAG_PROGRESSIVE 16384 421 | 422 | 423 | /** 424 | * The number of error codes 425 | */ 426 | #define TJ_NUMERR 2 427 | 428 | /** 429 | * Error codes 430 | */ 431 | enum TJERR { 432 | /** 433 | * The error was non-fatal and recoverable, but the image may still be 434 | * corrupt. 435 | */ 436 | TJERR_WARNING = 0, 437 | /** 438 | * The error was fatal and non-recoverable. 439 | */ 440 | TJERR_FATAL 441 | }; 442 | 443 | 444 | /** 445 | * The number of transform operations 446 | */ 447 | #define TJ_NUMXOP 8 448 | 449 | /** 450 | * Transform operations for #tjTransform() 451 | */ 452 | enum TJXOP { 453 | /** 454 | * Do not transform the position of the image pixels 455 | */ 456 | TJXOP_NONE = 0, 457 | /** 458 | * Flip (mirror) image horizontally. This transform is imperfect if there 459 | * are any partial MCU blocks on the right edge (see #TJXOPT_PERFECT.) 460 | */ 461 | TJXOP_HFLIP, 462 | /** 463 | * Flip (mirror) image vertically. This transform is imperfect if there are 464 | * any partial MCU blocks on the bottom edge (see #TJXOPT_PERFECT.) 465 | */ 466 | TJXOP_VFLIP, 467 | /** 468 | * Transpose image (flip/mirror along upper left to lower right axis.) This 469 | * transform is always perfect. 
470 | */ 471 | TJXOP_TRANSPOSE, 472 | /** 473 | * Transverse transpose image (flip/mirror along upper right to lower left 474 | * axis.) This transform is imperfect if there are any partial MCU blocks in 475 | * the image (see #TJXOPT_PERFECT.) 476 | */ 477 | TJXOP_TRANSVERSE, 478 | /** 479 | * Rotate image clockwise by 90 degrees. This transform is imperfect if 480 | * there are any partial MCU blocks on the bottom edge (see 481 | * #TJXOPT_PERFECT.) 482 | */ 483 | TJXOP_ROT90, 484 | /** 485 | * Rotate image 180 degrees. This transform is imperfect if there are any 486 | * partial MCU blocks in the image (see #TJXOPT_PERFECT.) 487 | */ 488 | TJXOP_ROT180, 489 | /** 490 | * Rotate image counter-clockwise by 90 degrees. This transform is imperfect 491 | * if there are any partial MCU blocks on the right edge (see 492 | * #TJXOPT_PERFECT.) 493 | */ 494 | TJXOP_ROT270 495 | }; 496 | 497 | 498 | /** 499 | * This option will cause #tjTransform() to return an error if the transform is 500 | * not perfect. Lossless transforms operate on MCU blocks, whose size depends 501 | * on the level of chrominance subsampling used (see #tjMCUWidth 502 | * and #tjMCUHeight.) If the image's width or height is not evenly divisible 503 | * by the MCU block size, then there will be partial MCU blocks on the right 504 | * and/or bottom edges. It is not possible to move these partial MCU blocks to 505 | * the top or left of the image, so any transform that would require that is 506 | * "imperfect." If this option is not specified, then any partial MCU blocks 507 | * that cannot be transformed will be left in place, which will create 508 | * odd-looking strips on the right or bottom edge of the image. 509 | */ 510 | #define TJXOPT_PERFECT 1 511 | /** 512 | * This option will cause #tjTransform() to discard any partial MCU blocks that 513 | * cannot be transformed. 514 | */ 515 | #define TJXOPT_TRIM 2 516 | /** 517 | * This option will enable lossless cropping. 
See #tjTransform() for more 518 | * information. 519 | */ 520 | #define TJXOPT_CROP 4 521 | /** 522 | * This option will discard the color data in the input image and produce 523 | * a grayscale output image. 524 | */ 525 | #define TJXOPT_GRAY 8 526 | /** 527 | * This option will prevent #tjTransform() from outputting a JPEG image for 528 | * this particular transform (this can be used in conjunction with a custom 529 | * filter to capture the transformed DCT coefficients without transcoding 530 | * them.) 531 | */ 532 | #define TJXOPT_NOOUTPUT 16 533 | /** 534 | * This option will enable progressive entropy coding in the output image 535 | * generated by this particular transform. Progressive entropy coding will 536 | * generally improve compression relative to baseline entropy coding (the 537 | * default), but it will reduce compression and decompression performance 538 | * considerably. 539 | */ 540 | #define TJXOPT_PROGRESSIVE 32 541 | /** 542 | * This option will prevent #tjTransform() from copying any extra markers 543 | * (including EXIF and ICC profile data) from the source image to the output 544 | * image. 545 | */ 546 | #define TJXOPT_COPYNONE 64 547 | 548 | 549 | /** 550 | * Scaling factor 551 | */ 552 | typedef struct { 553 | /** 554 | * Numerator 555 | */ 556 | int num; 557 | /** 558 | * Denominator 559 | */ 560 | int denom; 561 | } tjscalingfactor; 562 | 563 | /** 564 | * Cropping region 565 | */ 566 | typedef struct { 567 | /** 568 | * The left boundary of the cropping region. This must be evenly divisible 569 | * by the MCU block width (see #tjMCUWidth.) 570 | */ 571 | int x; 572 | /** 573 | * The upper boundary of the cropping region. This must be evenly divisible 574 | * by the MCU block height (see #tjMCUHeight.) 575 | */ 576 | int y; 577 | /** 578 | * The width of the cropping region. Setting this to 0 is the equivalent of 579 | * setting it to the width of the source JPEG image - x. 
580 | */ 581 | int w; 582 | /** 583 | * The height of the cropping region. Setting this to 0 is the equivalent of 584 | * setting it to the height of the source JPEG image - y. 585 | */ 586 | int h; 587 | } tjregion; 588 | 589 | /** 590 | * Lossless transform 591 | */ 592 | typedef struct tjtransform { 593 | /** 594 | * Cropping region 595 | */ 596 | tjregion r; 597 | /** 598 | * One of the @ref TJXOP "transform operations" 599 | */ 600 | int op; 601 | /** 602 | * The bitwise OR of one of more of the @ref TJXOPT_CROP "transform options" 603 | */ 604 | int options; 605 | /** 606 | * Arbitrary data that can be accessed within the body of the callback 607 | * function 608 | */ 609 | void *data; 610 | /** 611 | * A callback function that can be used to modify the DCT coefficients 612 | * after they are losslessly transformed but before they are transcoded to a 613 | * new JPEG image. This allows for custom filters or other transformations 614 | * to be applied in the frequency domain. 615 | * 616 | * @param coeffs pointer to an array of transformed DCT coefficients. (NOTE: 617 | * this pointer is not guaranteed to be valid once the callback returns, so 618 | * applications wishing to hand off the DCT coefficients to another function 619 | * or library should make a copy of them within the body of the callback.) 620 | * 621 | * @param arrayRegion #tjregion structure containing the width and height of 622 | * the array pointed to by coeffs as well as its offset relative to 623 | * the component plane. TurboJPEG implementations may choose to split each 624 | * component plane into multiple DCT coefficient arrays and call the callback 625 | * function once for each array. 
626 | * 627 | * @param planeRegion #tjregion structure containing the width and height of 628 | * the component plane to which coeffs belongs 629 | * 630 | * @param componentID ID number of the component plane to which 631 | * coeffs belongs (Y, Cb, and Cr have, respectively, ID's of 0, 1, 632 | * and 2 in typical JPEG images.) 633 | * 634 | * @param transformID ID number of the transformed image to which 635 | * coeffs belongs. This is the same as the index of the transform 636 | * in the transforms array that was passed to #tjTransform(). 637 | * 638 | * @param transform a pointer to a #tjtransform structure that specifies the 639 | * parameters and/or cropping region for this transform 640 | * 641 | * @return 0 if the callback was successful, or -1 if an error occurred. 642 | */ 643 | int (*customFilter) (short *coeffs, tjregion arrayRegion, 644 | tjregion planeRegion, int componentIndex, 645 | int transformIndex, struct tjtransform *transform); 646 | } tjtransform; 647 | 648 | /** 649 | * TurboJPEG instance handle 650 | */ 651 | typedef void *tjhandle; 652 | 653 | 654 | /** 655 | * Pad the given width to the nearest 32-bit boundary 656 | */ 657 | #define TJPAD(width) (((width) + 3) & (~3)) 658 | 659 | /** 660 | * Compute the scaled value of dimension using the given scaling 661 | * factor. This macro performs the integer equivalent of ceil(dimension * 662 | * scalingFactor). 663 | */ 664 | #define TJSCALED(dimension, scalingFactor) \ 665 | ((dimension * scalingFactor.num + scalingFactor.denom - 1) / \ 666 | scalingFactor.denom) 667 | 668 | 669 | #ifdef __cplusplus 670 | extern "C" { 671 | #endif 672 | 673 | 674 | /** 675 | * Create a TurboJPEG compressor instance. 676 | * 677 | * @return a handle to the newly-created instance, or NULL if an error 678 | * occurred (see #tjGetErrorStr2().) 679 | */ 680 | DLLEXPORT tjhandle tjInitCompress(void); 681 | 682 | 683 | /** 684 | * Compress an RGB, grayscale, or CMYK image into a JPEG image. 
685 | * 686 | * @param handle a handle to a TurboJPEG compressor or transformer instance 687 | * 688 | * @param srcBuf pointer to an image buffer containing RGB, grayscale, or 689 | * CMYK pixels to be compressed 690 | * 691 | * @param width width (in pixels) of the source image 692 | * 693 | * @param pitch bytes per line in the source image. Normally, this should be 694 | * width * #tjPixelSize[pixelFormat] if the image is unpadded, or 695 | * #TJPAD(width * #tjPixelSize[pixelFormat]) if each line of the image 696 | * is padded to the nearest 32-bit boundary, as is the case for Windows 697 | * bitmaps. You can also be clever and use this parameter to skip lines, etc. 698 | * Setting this parameter to 0 is the equivalent of setting it to 699 | * width * #tjPixelSize[pixelFormat]. 700 | * 701 | * @param height height (in pixels) of the source image 702 | * 703 | * @param pixelFormat pixel format of the source image (see @ref TJPF 704 | * "Pixel formats".) 705 | * 706 | * @param jpegBuf address of a pointer to an image buffer that will receive the 707 | * JPEG image. TurboJPEG has the ability to reallocate the JPEG buffer 708 | * to accommodate the size of the JPEG image. Thus, you can choose to: 709 | * -# pre-allocate the JPEG buffer with an arbitrary size using #tjAlloc() and 710 | * let TurboJPEG grow the buffer as needed, 711 | * -# set *jpegBuf to NULL to tell TurboJPEG to allocate the buffer 712 | * for you, or 713 | * -# pre-allocate the buffer to a "worst case" size determined by calling 714 | * #tjBufSize(). This should ensure that the buffer never has to be 715 | * re-allocated (setting #TJFLAG_NOREALLOC guarantees that it won't be.) 716 | * . 717 | * If you choose option 1, *jpegSize should be set to the size of your 718 | * pre-allocated buffer. In any case, unless you have set #TJFLAG_NOREALLOC, 719 | * you should always check *jpegBuf upon return from this function, as 720 | * it may have changed. 
721 | * 722 | * @param jpegSize pointer to an unsigned long variable that holds the size of 723 | * the JPEG image buffer. If *jpegBuf points to a pre-allocated 724 | * buffer, then *jpegSize should be set to the size of the buffer. 725 | * Upon return, *jpegSize will contain the size of the JPEG image (in 726 | * bytes.) If *jpegBuf points to a JPEG image buffer that is being 727 | * reused from a previous call to one of the JPEG compression functions, then 728 | * *jpegSize is ignored. 729 | * 730 | * @param jpegSubsamp the level of chrominance subsampling to be used when 731 | * generating the JPEG image (see @ref TJSAMP 732 | * "Chrominance subsampling options".) 733 | * 734 | * @param jpegQual the image quality of the generated JPEG image (1 = worst, 735 | * 100 = best) 736 | * 737 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 738 | * "flags" 739 | * 740 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 741 | * and #tjGetErrorCode().) 742 | */ 743 | DLLEXPORT int tjCompress2(tjhandle handle, const unsigned char *srcBuf, 744 | int width, int pitch, int height, int pixelFormat, 745 | unsigned char **jpegBuf, unsigned long *jpegSize, 746 | int jpegSubsamp, int jpegQual, int flags); 747 | 748 | 749 | /** 750 | * Compress a YUV planar image into a JPEG image. 751 | * 752 | * @param handle a handle to a TurboJPEG compressor or transformer instance 753 | * 754 | * @param srcBuf pointer to an image buffer containing a YUV planar image to be 755 | * compressed. The size of this buffer should match the value returned by 756 | * #tjBufSizeYUV2() for the given image width, height, padding, and level of 757 | * chrominance subsampling. The Y, U (Cb), and V (Cr) image planes should be 758 | * stored sequentially in the source buffer (refer to @ref YUVnotes 759 | * "YUV Image Format Notes".) 760 | * 761 | * @param width width (in pixels) of the source image. 
If the width is not an 762 | * even multiple of the MCU block width (see #tjMCUWidth), then an intermediate 763 | * buffer copy will be performed within TurboJPEG. 764 | * 765 | * @param pad the line padding used in the source image. For instance, if each 766 | * line in each plane of the YUV image is padded to the nearest multiple of 4 767 | * bytes, then pad should be set to 4. 768 | * 769 | * @param height height (in pixels) of the source image. If the height is not 770 | * an even multiple of the MCU block height (see #tjMCUHeight), then an 771 | * intermediate buffer copy will be performed within TurboJPEG. 772 | * 773 | * @param subsamp the level of chrominance subsampling used in the source 774 | * image (see @ref TJSAMP "Chrominance subsampling options".) 775 | * 776 | * @param jpegBuf address of a pointer to an image buffer that will receive the 777 | * JPEG image. TurboJPEG has the ability to reallocate the JPEG buffer to 778 | * accommodate the size of the JPEG image. Thus, you can choose to: 779 | * -# pre-allocate the JPEG buffer with an arbitrary size using #tjAlloc() and 780 | * let TurboJPEG grow the buffer as needed, 781 | * -# set *jpegBuf to NULL to tell TurboJPEG to allocate the buffer 782 | * for you, or 783 | * -# pre-allocate the buffer to a "worst case" size determined by calling 784 | * #tjBufSize(). This should ensure that the buffer never has to be 785 | * re-allocated (setting #TJFLAG_NOREALLOC guarantees that it won't be.) 786 | * . 787 | * If you choose option 1, *jpegSize should be set to the size of your 788 | * pre-allocated buffer. In any case, unless you have set #TJFLAG_NOREALLOC, 789 | * you should always check *jpegBuf upon return from this function, as 790 | * it may have changed. 791 | * 792 | * @param jpegSize pointer to an unsigned long variable that holds the size of 793 | * the JPEG image buffer. If *jpegBuf points to a pre-allocated 794 | * buffer, then *jpegSize should be set to the size of the buffer. 
795 | * Upon return, *jpegSize will contain the size of the JPEG image (in 796 | * bytes.) If *jpegBuf points to a JPEG image buffer that is being 797 | * reused from a previous call to one of the JPEG compression functions, then 798 | * *jpegSize is ignored. 799 | * 800 | * @param jpegQual the image quality of the generated JPEG image (1 = worst, 801 | * 100 = best) 802 | * 803 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 804 | * "flags" 805 | * 806 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 807 | * and #tjGetErrorCode().) 808 | */ 809 | DLLEXPORT int tjCompressFromYUV(tjhandle handle, const unsigned char *srcBuf, 810 | int width, int pad, int height, int subsamp, 811 | unsigned char **jpegBuf, 812 | unsigned long *jpegSize, int jpegQual, 813 | int flags); 814 | 815 | 816 | /** 817 | * Compress a set of Y, U (Cb), and V (Cr) image planes into a JPEG image. 818 | * 819 | * @param handle a handle to a TurboJPEG compressor or transformer instance 820 | * 821 | * @param srcPlanes an array of pointers to Y, U (Cb), and V (Cr) image planes 822 | * (or just a Y plane, if compressing a grayscale image) that contain a YUV 823 | * image to be compressed. These planes can be contiguous or non-contiguous in 824 | * memory. The size of each plane should match the value returned by 825 | * #tjPlaneSizeYUV() for the given image width, height, strides, and level of 826 | * chrominance subsampling. Refer to @ref YUVnotes "YUV Image Format Notes" 827 | * for more details. 828 | * 829 | * @param width width (in pixels) of the source image. If the width is not an 830 | * even multiple of the MCU block width (see #tjMCUWidth), then an intermediate 831 | * buffer copy will be performed within TurboJPEG. 832 | * 833 | * @param strides an array of integers, each specifying the number of bytes per 834 | * line in the corresponding plane of the YUV source image. 
Setting the stride 835 | * for any plane to 0 is the same as setting it to the plane width (see 836 | * @ref YUVnotes "YUV Image Format Notes".) If strides is NULL, then 837 | * the strides for all planes will be set to their respective plane widths. 838 | * You can adjust the strides in order to specify an arbitrary amount of line 839 | * padding in each plane or to create a JPEG image from a subregion of a larger 840 | * YUV planar image. 841 | * 842 | * @param height height (in pixels) of the source image. If the height is not 843 | * an even multiple of the MCU block height (see #tjMCUHeight), then an 844 | * intermediate buffer copy will be performed within TurboJPEG. 845 | * 846 | * @param subsamp the level of chrominance subsampling used in the source 847 | * image (see @ref TJSAMP "Chrominance subsampling options".) 848 | * 849 | * @param jpegBuf address of a pointer to an image buffer that will receive the 850 | * JPEG image. TurboJPEG has the ability to reallocate the JPEG buffer to 851 | * accommodate the size of the JPEG image. Thus, you can choose to: 852 | * -# pre-allocate the JPEG buffer with an arbitrary size using #tjAlloc() and 853 | * let TurboJPEG grow the buffer as needed, 854 | * -# set *jpegBuf to NULL to tell TurboJPEG to allocate the buffer 855 | * for you, or 856 | * -# pre-allocate the buffer to a "worst case" size determined by calling 857 | * #tjBufSize(). This should ensure that the buffer never has to be 858 | * re-allocated (setting #TJFLAG_NOREALLOC guarantees that it won't be.) 859 | * . 860 | * If you choose option 1, *jpegSize should be set to the size of your 861 | * pre-allocated buffer. In any case, unless you have set #TJFLAG_NOREALLOC, 862 | * you should always check *jpegBuf upon return from this function, as 863 | * it may have changed. 864 | * 865 | * @param jpegSize pointer to an unsigned long variable that holds the size of 866 | * the JPEG image buffer. 
If *jpegBuf points to a pre-allocated 867 | * buffer, then *jpegSize should be set to the size of the buffer. 868 | * Upon return, *jpegSize will contain the size of the JPEG image (in 869 | * bytes.) If *jpegBuf points to a JPEG image buffer that is being 870 | * reused from a previous call to one of the JPEG compression functions, then 871 | * *jpegSize is ignored. 872 | * 873 | * @param jpegQual the image quality of the generated JPEG image (1 = worst, 874 | * 100 = best) 875 | * 876 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 877 | * "flags" 878 | * 879 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 880 | * and #tjGetErrorCode().) 881 | */ 882 | DLLEXPORT int tjCompressFromYUVPlanes(tjhandle handle, 883 | const unsigned char **srcPlanes, 884 | int width, const int *strides, 885 | int height, int subsamp, 886 | unsigned char **jpegBuf, 887 | unsigned long *jpegSize, int jpegQual, 888 | int flags); 889 | 890 | 891 | /** 892 | * The maximum size of the buffer (in bytes) required to hold a JPEG image with 893 | * the given parameters. The number of bytes returned by this function is 894 | * larger than the size of the uncompressed source image. The reason for this 895 | * is that the JPEG format uses 16-bit coefficients, and it is thus possible 896 | * for a very high-quality JPEG image with very high-frequency content to 897 | * expand rather than compress when converted to the JPEG format. Such images 898 | * represent a very rare corner case, but since there is no way to predict the 899 | * size of a JPEG image prior to compression, the corner case has to be 900 | * handled. 901 | * 902 | * @param width width (in pixels) of the image 903 | * 904 | * @param height height (in pixels) of the image 905 | * 906 | * @param jpegSubsamp the level of chrominance subsampling to be used when 907 | * generating the JPEG image (see @ref TJSAMP 908 | * "Chrominance subsampling options".) 
909 | * 910 | * @return the maximum size of the buffer (in bytes) required to hold the 911 | * image, or -1 if the arguments are out of bounds. 912 | */ 913 | DLLEXPORT unsigned long tjBufSize(int width, int height, int jpegSubsamp); 914 | 915 | 916 | /** 917 | * The size of the buffer (in bytes) required to hold a YUV planar image with 918 | * the given parameters. 919 | * 920 | * @param width width (in pixels) of the image 921 | * 922 | * @param pad the width of each line in each plane of the image is padded to 923 | * the nearest multiple of this number of bytes (must be a power of 2.) 924 | * 925 | * @param height height (in pixels) of the image 926 | * 927 | * @param subsamp level of chrominance subsampling in the image (see 928 | * @ref TJSAMP "Chrominance subsampling options".) 929 | * 930 | * @return the size of the buffer (in bytes) required to hold the image, or 931 | * -1 if the arguments are out of bounds. 932 | */ 933 | DLLEXPORT unsigned long tjBufSizeYUV2(int width, int pad, int height, 934 | int subsamp); 935 | 936 | 937 | /** 938 | * The size of the buffer (in bytes) required to hold a YUV image plane with 939 | * the given parameters. 940 | * 941 | * @param componentID ID number of the image plane (0 = Y, 1 = U/Cb, 2 = V/Cr) 942 | * 943 | * @param width width (in pixels) of the YUV image. NOTE: this is the width of 944 | * the whole image, not the plane width. 945 | * 946 | * @param stride bytes per line in the image plane. Setting this to 0 is the 947 | * equivalent of setting it to the plane width. 948 | * 949 | * @param height height (in pixels) of the YUV image. NOTE: this is the height 950 | * of the whole image, not the plane height. 951 | * 952 | * @param subsamp level of chrominance subsampling in the image (see 953 | * @ref TJSAMP "Chrominance subsampling options".) 954 | * 955 | * @return the size of the buffer (in bytes) required to hold the YUV image 956 | * plane, or -1 if the arguments are out of bounds. 
957 | */ 958 | DLLEXPORT unsigned long tjPlaneSizeYUV(int componentID, int width, int stride, 959 | int height, int subsamp); 960 | 961 | 962 | /** 963 | * The plane width of a YUV image plane with the given parameters. Refer to 964 | * @ref YUVnotes "YUV Image Format Notes" for a description of plane width. 965 | * 966 | * @param componentID ID number of the image plane (0 = Y, 1 = U/Cb, 2 = V/Cr) 967 | * 968 | * @param width width (in pixels) of the YUV image 969 | * 970 | * @param subsamp level of chrominance subsampling in the image (see 971 | * @ref TJSAMP "Chrominance subsampling options".) 972 | * 973 | * @return the plane width of a YUV image plane with the given parameters, or 974 | * -1 if the arguments are out of bounds. 975 | */ 976 | DLLEXPORT int tjPlaneWidth(int componentID, int width, int subsamp); 977 | 978 | 979 | /** 980 | * The plane height of a YUV image plane with the given parameters. Refer to 981 | * @ref YUVnotes "YUV Image Format Notes" for a description of plane height. 982 | * 983 | * @param componentID ID number of the image plane (0 = Y, 1 = U/Cb, 2 = V/Cr) 984 | * 985 | * @param height height (in pixels) of the YUV image 986 | * 987 | * @param subsamp level of chrominance subsampling in the image (see 988 | * @ref TJSAMP "Chrominance subsampling options".) 989 | * 990 | * @return the plane height of a YUV image plane with the given parameters, or 991 | * -1 if the arguments are out of bounds. 992 | */ 993 | DLLEXPORT int tjPlaneHeight(int componentID, int height, int subsamp); 994 | 995 | 996 | /** 997 | * Encode an RGB or grayscale image into a YUV planar image. This function 998 | * uses the accelerated color conversion routines in the underlying 999 | * codec but does not execute any of the other steps in the JPEG compression 1000 | * process. 
1001 | * 1002 | * @param handle a handle to a TurboJPEG compressor or transformer instance 1003 | * 1004 | * @param srcBuf pointer to an image buffer containing RGB or grayscale pixels 1005 | * to be encoded 1006 | * 1007 | * @param width width (in pixels) of the source image 1008 | * 1009 | * @param pitch bytes per line in the source image. Normally, this should be 1010 | * width * #tjPixelSize[pixelFormat] if the image is unpadded, or 1011 | * #TJPAD(width * #tjPixelSize[pixelFormat]) if each line of the image 1012 | * is padded to the nearest 32-bit boundary, as is the case for Windows 1013 | * bitmaps. You can also be clever and use this parameter to skip lines, etc. 1014 | * Setting this parameter to 0 is the equivalent of setting it to 1015 | * width * #tjPixelSize[pixelFormat]. 1016 | * 1017 | * @param height height (in pixels) of the source image 1018 | * 1019 | * @param pixelFormat pixel format of the source image (see @ref TJPF 1020 | * "Pixel formats".) 1021 | * 1022 | * @param dstBuf pointer to an image buffer that will receive the YUV image. 1023 | * Use #tjBufSizeYUV2() to determine the appropriate size for this buffer based 1024 | * on the image width, height, padding, and level of chrominance subsampling. 1025 | * The Y, U (Cb), and V (Cr) image planes will be stored sequentially in the 1026 | * buffer (refer to @ref YUVnotes "YUV Image Format Notes".) 1027 | * 1028 | * @param pad the width of each line in each plane of the YUV image will be 1029 | * padded to the nearest multiple of this number of bytes (must be a power of 1030 | * 2.) To generate images suitable for X Video, pad should be set to 1031 | * 4. 1032 | * 1033 | * @param subsamp the level of chrominance subsampling to be used when 1034 | * generating the YUV image (see @ref TJSAMP 1035 | * "Chrominance subsampling options".) To generate images suitable for X 1036 | * Video, subsamp should be set to @ref TJSAMP_420. 
This produces an 1037 | * image compatible with the I420 (AKA "YUV420P") format. 1038 | * 1039 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 1040 | * "flags" 1041 | * 1042 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 1043 | * and #tjGetErrorCode().) 1044 | */ 1045 | DLLEXPORT int tjEncodeYUV3(tjhandle handle, const unsigned char *srcBuf, 1046 | int width, int pitch, int height, int pixelFormat, 1047 | unsigned char *dstBuf, int pad, int subsamp, 1048 | int flags); 1049 | 1050 | 1051 | /** 1052 | * Encode an RGB or grayscale image into separate Y, U (Cb), and V (Cr) image 1053 | * planes. This function uses the accelerated color conversion routines in the 1054 | * underlying codec but does not execute any of the other steps in the JPEG 1055 | * compression process. 1056 | * 1057 | * @param handle a handle to a TurboJPEG compressor or transformer instance 1058 | * 1059 | * @param srcBuf pointer to an image buffer containing RGB or grayscale pixels 1060 | * to be encoded 1061 | * 1062 | * @param width width (in pixels) of the source image 1063 | * 1064 | * @param pitch bytes per line in the source image. Normally, this should be 1065 | * width * #tjPixelSize[pixelFormat] if the image is unpadded, or 1066 | * #TJPAD(width * #tjPixelSize[pixelFormat]) if each line of the image 1067 | * is padded to the nearest 32-bit boundary, as is the case for Windows 1068 | * bitmaps. You can also be clever and use this parameter to skip lines, etc. 1069 | * Setting this parameter to 0 is the equivalent of setting it to 1070 | * width * #tjPixelSize[pixelFormat]. 1071 | * 1072 | * @param height height (in pixels) of the source image 1073 | * 1074 | * @param pixelFormat pixel format of the source image (see @ref TJPF 1075 | * "Pixel formats".) 
1076 | * 1077 | * @param dstPlanes an array of pointers to Y, U (Cb), and V (Cr) image planes 1078 | * (or just a Y plane, if generating a grayscale image) that will receive the 1079 | * encoded image. These planes can be contiguous or non-contiguous in memory. 1080 | * Use #tjPlaneSizeYUV() to determine the appropriate size for each plane based 1081 | * on the image width, height, strides, and level of chrominance subsampling. 1082 | * Refer to @ref YUVnotes "YUV Image Format Notes" for more details. 1083 | * 1084 | * @param strides an array of integers, each specifying the number of bytes per 1085 | * line in the corresponding plane of the output image. Setting the stride for 1086 | * any plane to 0 is the same as setting it to the plane width (see 1087 | * @ref YUVnotes "YUV Image Format Notes".) If strides is NULL, then 1088 | * the strides for all planes will be set to their respective plane widths. 1089 | * You can adjust the strides in order to add an arbitrary amount of line 1090 | * padding to each plane or to encode an RGB or grayscale image into a 1091 | * subregion of a larger YUV planar image. 1092 | * 1093 | * @param subsamp the level of chrominance subsampling to be used when 1094 | * generating the YUV image (see @ref TJSAMP 1095 | * "Chrominance subsampling options".) To generate images suitable for X 1096 | * Video, subsamp should be set to @ref TJSAMP_420. This produces an 1097 | * image compatible with the I420 (AKA "YUV420P") format. 1098 | * 1099 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 1100 | * "flags" 1101 | * 1102 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 1103 | * and #tjGetErrorCode().) 
1104 | */ 1105 | DLLEXPORT int tjEncodeYUVPlanes(tjhandle handle, const unsigned char *srcBuf, 1106 | int width, int pitch, int height, 1107 | int pixelFormat, unsigned char **dstPlanes, 1108 | int *strides, int subsamp, int flags); 1109 | 1110 | 1111 | /** 1112 | * Create a TurboJPEG decompressor instance. 1113 | * 1114 | * @return a handle to the newly-created instance, or NULL if an error 1115 | * occurred (see #tjGetErrorStr2().) 1116 | */ 1117 | DLLEXPORT tjhandle tjInitDecompress(void); 1118 | 1119 | 1120 | /** 1121 | * Retrieve information about a JPEG image without decompressing it. 1122 | * 1123 | * @param handle a handle to a TurboJPEG decompressor or transformer instance 1124 | * 1125 | * @param jpegBuf pointer to a buffer containing a JPEG image 1126 | * 1127 | * @param jpegSize size of the JPEG image (in bytes) 1128 | * 1129 | * @param width pointer to an integer variable that will receive the width (in 1130 | * pixels) of the JPEG image 1131 | * 1132 | * @param height pointer to an integer variable that will receive the height 1133 | * (in pixels) of the JPEG image 1134 | * 1135 | * @param jpegSubsamp pointer to an integer variable that will receive the 1136 | * level of chrominance subsampling used when the JPEG image was compressed 1137 | * (see @ref TJSAMP "Chrominance subsampling options".) 1138 | * 1139 | * @param jpegColorspace pointer to an integer variable that will receive one 1140 | * of the JPEG colorspace constants, indicating the colorspace of the JPEG 1141 | * image (see @ref TJCS "JPEG colorspaces".) 1142 | * 1143 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 1144 | * and #tjGetErrorCode().) 
1145 | */ 1146 | DLLEXPORT int tjDecompressHeader3(tjhandle handle, 1147 | const unsigned char *jpegBuf, 1148 | unsigned long jpegSize, int *width, 1149 | int *height, int *jpegSubsamp, 1150 | int *jpegColorspace); 1151 | 1152 | 1153 | /** 1154 | * Returns a list of fractional scaling factors that the JPEG decompressor in 1155 | * this implementation of TurboJPEG supports. 1156 | * 1157 | * @param numscalingfactors pointer to an integer variable that will receive 1158 | * the number of elements in the list 1159 | * 1160 | * @return a pointer to a list of fractional scaling factors, or NULL if an 1161 | * error is encountered (see #tjGetErrorStr2().) 1162 | */ 1163 | DLLEXPORT tjscalingfactor *tjGetScalingFactors(int *numscalingfactors); 1164 | 1165 | 1166 | /** 1167 | * Decompress a JPEG image to an RGB, grayscale, or CMYK image. 1168 | * 1169 | * @param handle a handle to a TurboJPEG decompressor or transformer instance 1170 | * 1171 | * @param jpegBuf pointer to a buffer containing the JPEG image to decompress 1172 | * 1173 | * @param jpegSize size of the JPEG image (in bytes) 1174 | * 1175 | * @param dstBuf pointer to an image buffer that will receive the decompressed 1176 | * image. This buffer should normally be pitch * scaledHeight bytes 1177 | * in size, where scaledHeight can be determined by calling 1178 | * #TJSCALED() with the JPEG image height and one of the scaling factors 1179 | * returned by #tjGetScalingFactors(). The dstBuf pointer may also be 1180 | * used to decompress into a specific region of a larger buffer. 1181 | * 1182 | * @param width desired width (in pixels) of the destination image. If this is 1183 | * different than the width of the JPEG image being decompressed, then 1184 | * TurboJPEG will use scaling in the JPEG decompressor to generate the largest 1185 | * possible image that will fit within the desired width. If width is 1186 | * set to 0, then only the height will be considered when determining the 1187 | * scaled image size. 
1188 | * 1189 | * @param pitch bytes per line in the destination image. Normally, this is 1190 | * scaledWidth * #tjPixelSize[pixelFormat] if the decompressed image 1191 | * is unpadded, else #TJPAD(scaledWidth * #tjPixelSize[pixelFormat]) 1192 | * if each line of the decompressed image is padded to the nearest 32-bit 1193 | * boundary, as is the case for Windows bitmaps. (NOTE: scaledWidth 1194 | * can be determined by calling #TJSCALED() with the JPEG image width and one 1195 | * of the scaling factors returned by #tjGetScalingFactors().) You can also be 1196 | * clever and use the pitch parameter to skip lines, etc. Setting this 1197 | * parameter to 0 is the equivalent of setting it to 1198 | * scaledWidth * #tjPixelSize[pixelFormat]. 1199 | * 1200 | * @param height desired height (in pixels) of the destination image. If this 1201 | * is different than the height of the JPEG image being decompressed, then 1202 | * TurboJPEG will use scaling in the JPEG decompressor to generate the largest 1203 | * possible image that will fit within the desired height. If height 1204 | * is set to 0, then only the width will be considered when determining the 1205 | * scaled image size. 1206 | * 1207 | * @param pixelFormat pixel format of the destination image (see @ref 1208 | * TJPF "Pixel formats".) 1209 | * 1210 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 1211 | * "flags" 1212 | * 1213 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 1214 | * and #tjGetErrorCode().) 1215 | */ 1216 | DLLEXPORT int tjDecompress2(tjhandle handle, const unsigned char *jpegBuf, 1217 | unsigned long jpegSize, unsigned char *dstBuf, 1218 | int width, int pitch, int height, int pixelFormat, 1219 | int flags); 1220 | 1221 | 1222 | /** 1223 | * Decompress a JPEG image to a YUV planar image. 
This function performs JPEG 1224 | * decompression but leaves out the color conversion step, so a planar YUV 1225 | * image is generated instead of an RGB image. 1226 | * 1227 | * @param handle a handle to a TurboJPEG decompressor or transformer instance 1228 | * 1229 | * @param jpegBuf pointer to a buffer containing the JPEG image to decompress 1230 | * 1231 | * @param jpegSize size of the JPEG image (in bytes) 1232 | * 1233 | * @param dstBuf pointer to an image buffer that will receive the YUV image. 1234 | * Use #tjBufSizeYUV2() to determine the appropriate size for this buffer based 1235 | * on the image width, height, padding, and level of subsampling. The Y, 1236 | * U (Cb), and V (Cr) image planes will be stored sequentially in the buffer 1237 | * (refer to @ref YUVnotes "YUV Image Format Notes".) 1238 | * 1239 | * @param width desired width (in pixels) of the YUV image. If this is 1240 | * different than the width of the JPEG image being decompressed, then 1241 | * TurboJPEG will use scaling in the JPEG decompressor to generate the largest 1242 | * possible image that will fit within the desired width. If width is 1243 | * set to 0, then only the height will be considered when determining the 1244 | * scaled image size. If the scaled width is not an even multiple of the MCU 1245 | * block width (see #tjMCUWidth), then an intermediate buffer copy will be 1246 | * performed within TurboJPEG. 1247 | * 1248 | * @param pad the width of each line in each plane of the YUV image will be 1249 | * padded to the nearest multiple of this number of bytes (must be a power of 1250 | * 2.) To generate images suitable for X Video, pad should be set to 1251 | * 4. 1252 | * 1253 | * @param height desired height (in pixels) of the YUV image. If this is 1254 | * different than the height of the JPEG image being decompressed, then 1255 | * TurboJPEG will use scaling in the JPEG decompressor to generate the largest 1256 | * possible image that will fit within the desired height. 
If height 1257 | * is set to 0, then only the width will be considered when determining the 1258 | * scaled image size. If the scaled height is not an even multiple of the MCU 1259 | * block height (see #tjMCUHeight), then an intermediate buffer copy will be 1260 | * performed within TurboJPEG. 1261 | * 1262 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 1263 | * "flags" 1264 | * 1265 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 1266 | * and #tjGetErrorCode().) 1267 | */ 1268 | DLLEXPORT int tjDecompressToYUV2(tjhandle handle, const unsigned char *jpegBuf, 1269 | unsigned long jpegSize, unsigned char *dstBuf, 1270 | int width, int pad, int height, int flags); 1271 | 1272 | 1273 | /** 1274 | * Decompress a JPEG image into separate Y, U (Cb), and V (Cr) image 1275 | * planes. This function performs JPEG decompression but leaves out the color 1276 | * conversion step, so a planar YUV image is generated instead of an RGB image. 1277 | * 1278 | * @param handle a handle to a TurboJPEG decompressor or transformer instance 1279 | * 1280 | * @param jpegBuf pointer to a buffer containing the JPEG image to decompress 1281 | * 1282 | * @param jpegSize size of the JPEG image (in bytes) 1283 | * 1284 | * @param dstPlanes an array of pointers to Y, U (Cb), and V (Cr) image planes 1285 | * (or just a Y plane, if decompressing a grayscale image) that will receive 1286 | * the YUV image. These planes can be contiguous or non-contiguous in memory. 1287 | * Use #tjPlaneSizeYUV() to determine the appropriate size for each plane based 1288 | * on the scaled image width, scaled image height, strides, and level of 1289 | * chrominance subsampling. Refer to @ref YUVnotes "YUV Image Format Notes" 1290 | * for more details. 1291 | * 1292 | * @param width desired width (in pixels) of the YUV image. 
If this is 1293 | * different than the width of the JPEG image being decompressed, then 1294 | * TurboJPEG will use scaling in the JPEG decompressor to generate the largest 1295 | * possible image that will fit within the desired width. If width is 1296 | * set to 0, then only the height will be considered when determining the 1297 | * scaled image size. If the scaled width is not an even multiple of the MCU 1298 | * block width (see #tjMCUWidth), then an intermediate buffer copy will be 1299 | * performed within TurboJPEG. 1300 | * 1301 | * @param strides an array of integers, each specifying the number of bytes per 1302 | * line in the corresponding plane of the output image. Setting the stride for 1303 | * any plane to 0 is the same as setting it to the scaled plane width (see 1304 | * @ref YUVnotes "YUV Image Format Notes".) If strides is NULL, then 1305 | * the strides for all planes will be set to their respective scaled plane 1306 | * widths. You can adjust the strides in order to add an arbitrary amount of 1307 | * line padding to each plane or to decompress the JPEG image into a subregion 1308 | * of a larger YUV planar image. 1309 | * 1310 | * @param height desired height (in pixels) of the YUV image. If this is 1311 | * different than the height of the JPEG image being decompressed, then 1312 | * TurboJPEG will use scaling in the JPEG decompressor to generate the largest 1313 | * possible image that will fit within the desired height. If height 1314 | * is set to 0, then only the width will be considered when determining the 1315 | * scaled image size. If the scaled height is not an even multiple of the MCU 1316 | * block height (see #tjMCUHeight), then an intermediate buffer copy will be 1317 | * performed within TurboJPEG. 
1318 | * 1319 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 1320 | * "flags" 1321 | * 1322 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 1323 | * and #tjGetErrorCode().) 1324 | */ 1325 | DLLEXPORT int tjDecompressToYUVPlanes(tjhandle handle, 1326 | const unsigned char *jpegBuf, 1327 | unsigned long jpegSize, 1328 | unsigned char **dstPlanes, int width, 1329 | int *strides, int height, int flags); 1330 | 1331 | 1332 | /** 1333 | * Decode a YUV planar image into an RGB or grayscale image. This function 1334 | * uses the accelerated color conversion routines in the underlying 1335 | * codec but does not execute any of the other steps in the JPEG decompression 1336 | * process. 1337 | * 1338 | * @param handle a handle to a TurboJPEG decompressor or transformer instance 1339 | * 1340 | * @param srcBuf pointer to an image buffer containing a YUV planar image to be 1341 | * decoded. The size of this buffer should match the value returned by 1342 | * #tjBufSizeYUV2() for the given image width, height, padding, and level of 1343 | * chrominance subsampling. The Y, U (Cb), and V (Cr) image planes should be 1344 | * stored sequentially in the source buffer (refer to @ref YUVnotes 1345 | * "YUV Image Format Notes".) 1346 | * 1347 | * @param pad Use this parameter to specify that the width of each line in each 1348 | * plane of the YUV source image is padded to the nearest multiple of this 1349 | * number of bytes (must be a power of 2.) 1350 | * 1351 | * @param subsamp the level of chrominance subsampling used in the YUV source 1352 | * image (see @ref TJSAMP "Chrominance subsampling options".) 1353 | * 1354 | * @param dstBuf pointer to an image buffer that will receive the decoded 1355 | * image. This buffer should normally be pitch * height bytes in 1356 | * size, but the dstBuf pointer can also be used to decode into a 1357 | * specific region of a larger buffer. 
1358 | * 1359 | * @param width width (in pixels) of the source and destination images 1360 | * 1361 | * @param pitch bytes per line in the destination image. Normally, this should 1362 | * be width * #tjPixelSize[pixelFormat] if the destination image is 1363 | * unpadded, or #TJPAD(width * #tjPixelSize[pixelFormat]) if each line 1364 | * of the destination image should be padded to the nearest 32-bit boundary, as 1365 | * is the case for Windows bitmaps. You can also be clever and use the pitch 1366 | * parameter to skip lines, etc. Setting this parameter to 0 is the equivalent 1367 | * of setting it to width * #tjPixelSize[pixelFormat]. 1368 | * 1369 | * @param height height (in pixels) of the source and destination images 1370 | * 1371 | * @param pixelFormat pixel format of the destination image (see @ref TJPF 1372 | * "Pixel formats".) 1373 | * 1374 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 1375 | * "flags" 1376 | * 1377 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 1378 | * and #tjGetErrorCode().) 1379 | */ 1380 | DLLEXPORT int tjDecodeYUV(tjhandle handle, const unsigned char *srcBuf, 1381 | int pad, int subsamp, unsigned char *dstBuf, 1382 | int width, int pitch, int height, int pixelFormat, 1383 | int flags); 1384 | 1385 | 1386 | /** 1387 | * Decode a set of Y, U (Cb), and V (Cr) image planes into an RGB or grayscale 1388 | * image. This function uses the accelerated color conversion routines in the 1389 | * underlying codec but does not execute any of the other steps in the JPEG 1390 | * decompression process. 1391 | * 1392 | * @param handle a handle to a TurboJPEG decompressor or transformer instance 1393 | * 1394 | * @param srcPlanes an array of pointers to Y, U (Cb), and V (Cr) image planes 1395 | * (or just a Y plane, if decoding a grayscale image) that contain a YUV image 1396 | * to be decoded. These planes can be contiguous or non-contiguous in memory. 
1397 | * The size of each plane should match the value returned by #tjPlaneSizeYUV() 1398 | * for the given image width, height, strides, and level of chrominance 1399 | * subsampling. Refer to @ref YUVnotes "YUV Image Format Notes" for more 1400 | * details. 1401 | * 1402 | * @param strides an array of integers, each specifying the number of bytes per 1403 | * line in the corresponding plane of the YUV source image. Setting the stride 1404 | * for any plane to 0 is the same as setting it to the plane width (see 1405 | * @ref YUVnotes "YUV Image Format Notes".) If strides is NULL, then 1406 | * the strides for all planes will be set to their respective plane widths. 1407 | * You can adjust the strides in order to specify an arbitrary amount of line 1408 | * padding in each plane or to decode a subregion of a larger YUV planar image. 1409 | * 1410 | * @param subsamp the level of chrominance subsampling used in the YUV source 1411 | * image (see @ref TJSAMP "Chrominance subsampling options".) 1412 | * 1413 | * @param dstBuf pointer to an image buffer that will receive the decoded 1414 | * image. This buffer should normally be pitch * height bytes in 1415 | * size, but the dstBuf pointer can also be used to decode into a 1416 | * specific region of a larger buffer. 1417 | * 1418 | * @param width width (in pixels) of the source and destination images 1419 | * 1420 | * @param pitch bytes per line in the destination image. Normally, this should 1421 | * be width * #tjPixelSize[pixelFormat] if the destination image is 1422 | * unpadded, or #TJPAD(width * #tjPixelSize[pixelFormat]) if each line 1423 | * of the destination image should be padded to the nearest 32-bit boundary, as 1424 | * is the case for Windows bitmaps. You can also be clever and use the pitch 1425 | * parameter to skip lines, etc. Setting this parameter to 0 is the equivalent 1426 | * of setting it to width * #tjPixelSize[pixelFormat]. 
1427 | * 1428 | * @param height height (in pixels) of the source and destination images 1429 | * 1430 | * @param pixelFormat pixel format of the destination image (see @ref TJPF 1431 | * "Pixel formats".) 1432 | * 1433 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 1434 | * "flags" 1435 | * 1436 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 1437 | * and #tjGetErrorCode().) 1438 | */ 1439 | DLLEXPORT int tjDecodeYUVPlanes(tjhandle handle, 1440 | const unsigned char **srcPlanes, 1441 | const int *strides, int subsamp, 1442 | unsigned char *dstBuf, int width, int pitch, 1443 | int height, int pixelFormat, int flags); 1444 | 1445 | 1446 | /** 1447 | * Create a new TurboJPEG transformer instance. 1448 | * 1449 | * @return a handle to the newly-created instance, or NULL if an error 1450 | * occurred (see #tjGetErrorStr2().) 1451 | */ 1452 | DLLEXPORT tjhandle tjInitTransform(void); 1453 | 1454 | 1455 | /** 1456 | * Losslessly transform a JPEG image into another JPEG image. Lossless 1457 | * transforms work by moving the raw DCT coefficients from one JPEG image 1458 | * structure to another without altering the values of the coefficients. While 1459 | * this is typically faster than decompressing the image, transforming it, and 1460 | * re-compressing it, lossless transforms are not free. Each lossless 1461 | * transform requires reading and performing Huffman decoding on all of the 1462 | * coefficients in the source image, regardless of the size of the destination 1463 | * image. Thus, this function provides a means of generating multiple 1464 | * transformed images from the same source or applying multiple 1465 | * transformations simultaneously, in order to eliminate the need to read the 1466 | * source coefficients multiple times. 
1467 | * 1468 | * @param handle a handle to a TurboJPEG transformer instance 1469 | * 1470 | * @param jpegBuf pointer to a buffer containing the JPEG source image to 1471 | * transform 1472 | * 1473 | * @param jpegSize size of the JPEG source image (in bytes) 1474 | * 1475 | * @param n the number of transformed JPEG images to generate 1476 | * 1477 | * @param dstBufs pointer to an array of n image buffers. dstBufs[i] 1478 | * will receive a JPEG image that has been transformed using the parameters in 1479 | * transforms[i]. TurboJPEG has the ability to reallocate the JPEG 1480 | * buffer to accommodate the size of the JPEG image. Thus, you can choose to: 1481 | * -# pre-allocate the JPEG buffer with an arbitrary size using #tjAlloc() and 1482 | * let TurboJPEG grow the buffer as needed, 1483 | * -# set dstBufs[i] to NULL to tell TurboJPEG to allocate the buffer 1484 | * for you, or 1485 | * -# pre-allocate the buffer to a "worst case" size determined by calling 1486 | * #tjBufSize() with the transformed or cropped width and height. Under normal 1487 | * circumstances, this should ensure that the buffer never has to be 1488 | * re-allocated (setting #TJFLAG_NOREALLOC guarantees that it won't be.) Note, 1489 | * however, that there are some rare cases (such as transforming images with a 1490 | * large amount of embedded EXIF or ICC profile data) in which the output image 1491 | * will be larger than the worst-case size, and #TJFLAG_NOREALLOC cannot be 1492 | * used in those cases. 1493 | * . 1494 | * If you choose option 1, dstSizes[i] should be set to the size of 1495 | * your pre-allocated buffer. In any case, unless you have set 1496 | * #TJFLAG_NOREALLOC, you should always check dstBufs[i] upon return 1497 | * from this function, as it may have changed. 1498 | * 1499 | * @param dstSizes pointer to an array of n unsigned long variables that will 1500 | * receive the actual sizes (in bytes) of each transformed JPEG image. 
If 1501 | * dstBufs[i] points to a pre-allocated buffer, then 1502 | * dstSizes[i] should be set to the size of the buffer. Upon return, 1503 | * dstSizes[i] will contain the size of the JPEG image (in bytes.) 1504 | * 1505 | * @param transforms pointer to an array of n #tjtransform structures, each of 1506 | * which specifies the transform parameters and/or cropping region for the 1507 | * corresponding transformed output image. 1508 | * 1509 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_ACCURATEDCT 1510 | * "flags" 1511 | * 1512 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2() 1513 | * and #tjGetErrorCode().) 1514 | */ 1515 | DLLEXPORT int tjTransform(tjhandle handle, const unsigned char *jpegBuf, 1516 | unsigned long jpegSize, int n, 1517 | unsigned char **dstBufs, unsigned long *dstSizes, 1518 | tjtransform *transforms, int flags); 1519 | 1520 | 1521 | /** 1522 | * Destroy a TurboJPEG compressor, decompressor, or transformer instance. 1523 | * 1524 | * @param handle a handle to a TurboJPEG compressor, decompressor or 1525 | * transformer instance 1526 | * 1527 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2().) 1528 | */ 1529 | DLLEXPORT int tjDestroy(tjhandle handle); 1530 | 1531 | 1532 | /** 1533 | * Allocate an image buffer for use with TurboJPEG. You should always use 1534 | * this function to allocate the JPEG destination buffer(s) for the compression 1535 | * and transform functions unless you are disabling automatic buffer 1536 | * (re)allocation (by setting #TJFLAG_NOREALLOC.) 1537 | * 1538 | * @param bytes the number of bytes to allocate 1539 | * 1540 | * @return a pointer to a newly-allocated buffer with the specified number of 1541 | * bytes. 1542 | * 1543 | * @sa tjFree() 1544 | */ 1545 | DLLEXPORT unsigned char *tjAlloc(int bytes); 1546 | 1547 | 1548 | /** 1549 | * Load an uncompressed image from disk into memory. 
1550 | * 1551 | * @param filename name of a file containing an uncompressed image in Windows 1552 | * BMP or PBMPLUS (PPM/PGM) format 1553 | * 1554 | * @param width pointer to an integer variable that will receive the width (in 1555 | * pixels) of the uncompressed image 1556 | * 1557 | * @param align row alignment of the image buffer to be returned (must be a 1558 | * power of 2.) For instance, setting this parameter to 4 will cause all rows 1559 | * in the image buffer to be padded to the nearest 32-bit boundary, and setting 1560 | * this parameter to 1 will cause all rows in the image buffer to be unpadded. 1561 | * 1562 | * @param height pointer to an integer variable that will receive the height 1563 | * (in pixels) of the uncompressed image 1564 | * 1565 | * @param pixelFormat pointer to an integer variable that specifies or will 1566 | * receive the pixel format of the uncompressed image buffer. The behavior of 1567 | * #tjLoadImage() will vary depending on the value of *pixelFormat 1568 | * passed to the function: 1569 | * - @ref TJPF_UNKNOWN : The uncompressed image buffer returned by the function 1570 | * will use the most optimal pixel format for the file type, and 1571 | * *pixelFormat will contain the ID of this pixel format upon 1572 | * successful return from the function. 1573 | * - @ref TJPF_GRAY : Only PGM files and 8-bit BMP files with a grayscale 1574 | * colormap can be loaded. 1575 | * - @ref TJPF_CMYK : The RGB or grayscale pixels stored in the file will be 1576 | * converted using a quick & dirty algorithm that is suitable only for testing 1577 | * purposes (proper conversion between CMYK and other formats requires a color 1578 | * management system.) 1579 | * - Other @ref TJPF "pixel formats" : The uncompressed image buffer will use 1580 | * the specified pixel format, and pixel format conversion will be performed if 1581 | * necessary. 1582 | * 1583 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_BOTTOMUP 1584 | * "flags". 
1585 | * 1586 | * @return a pointer to a newly-allocated buffer containing the uncompressed 1587 | * image, converted to the chosen pixel format and with the chosen row 1588 | * alignment, or NULL if an error occurred (see #tjGetErrorStr2().) This 1589 | * buffer should be freed using #tjFree(). 1590 | */ 1591 | DLLEXPORT unsigned char *tjLoadImage(const char *filename, int *width, 1592 | int align, int *height, int *pixelFormat, 1593 | int flags); 1594 | 1595 | 1596 | /** 1597 | * Save an uncompressed image from memory to disk. 1598 | * 1599 | * @param filename name of a file to which to save the uncompressed image. 1600 | * The image will be stored in Windows BMP or PBMPLUS (PPM/PGM) format, 1601 | * depending on the file extension. 1602 | * 1603 | * @param buffer pointer to an image buffer containing RGB, grayscale, or 1604 | * CMYK pixels to be saved 1605 | * 1606 | * @param width width (in pixels) of the uncompressed image 1607 | * 1608 | * @param pitch bytes per line in the image buffer. Setting this parameter to 1609 | * 0 is the equivalent of setting it to 1610 | * width * #tjPixelSize[pixelFormat]. 1611 | * 1612 | * @param height height (in pixels) of the uncompressed image 1613 | * 1614 | * @param pixelFormat pixel format of the image buffer (see @ref TJPF 1615 | * "Pixel formats".) If this parameter is set to @ref TJPF_GRAY, then the 1616 | * image will be stored in PGM or 8-bit (indexed color) BMP format. Otherwise, 1617 | * the image will be stored in PPM or 24-bit BMP format. If this parameter 1618 | * is set to @ref TJPF_CMYK, then the CMYK pixels will be converted to RGB 1619 | * using a quick & dirty algorithm that is suitable only for testing (proper 1620 | * conversion between CMYK and other formats requires a color management 1621 | * system.) 1622 | * 1623 | * @param flags the bitwise OR of one or more of the @ref TJFLAG_BOTTOMUP 1624 | * "flags". 1625 | * 1626 | * @return 0 if successful, or -1 if an error occurred (see #tjGetErrorStr2().) 
1627 | */ 1628 | DLLEXPORT int tjSaveImage(const char *filename, unsigned char *buffer, 1629 | int width, int pitch, int height, int pixelFormat, 1630 | int flags); 1631 | 1632 | 1633 | /** 1634 | * Free an image buffer previously allocated by TurboJPEG. You should always 1635 | * use this function to free JPEG destination buffer(s) that were automatically 1636 | * (re)allocated by the compression and transform functions or that were 1637 | * manually allocated using #tjAlloc(). 1638 | * 1639 | * @param buffer address of the buffer to free 1640 | * 1641 | * @sa tjAlloc() 1642 | */ 1643 | DLLEXPORT void tjFree(unsigned char *buffer); 1644 | 1645 | 1646 | /** 1647 | * Returns a descriptive error message explaining why the last command failed. 1648 | * 1649 | * @param handle a handle to a TurboJPEG compressor, decompressor, or 1650 | * transformer instance, or NULL if the error was generated by a global 1651 | * function (but note that retrieving the error message for a global function 1652 | * is not thread-safe.) 1653 | * 1654 | * @return a descriptive error message explaining why the last command failed. 1655 | */ 1656 | DLLEXPORT char *tjGetErrorStr2(tjhandle handle); 1657 | 1658 | 1659 | /** 1660 | * Returns a code indicating the severity of the last error. See 1661 | * @ref TJERR "Error codes". 1662 | * 1663 | * @param handle a handle to a TurboJPEG compressor, decompressor or 1664 | * transformer instance 1665 | * 1666 | * @return a code indicating the severity of the last error. See 1667 | * @ref TJERR "Error codes". 
1668 | */ 1669 | DLLEXPORT int tjGetErrorCode(tjhandle handle); 1670 | 1671 | 1672 | /* Deprecated functions and macros */ 1673 | #define TJFLAG_FORCEMMX 8 1674 | #define TJFLAG_FORCESSE 16 1675 | #define TJFLAG_FORCESSE2 32 1676 | #define TJFLAG_FORCESSE3 128 1677 | 1678 | 1679 | /* Backward compatibility functions and macros (nothing to see here) */ 1680 | #define NUMSUBOPT TJ_NUMSAMP 1681 | #define TJ_444 TJSAMP_444 1682 | #define TJ_422 TJSAMP_422 1683 | #define TJ_420 TJSAMP_420 1684 | #define TJ_411 TJSAMP_420 1685 | #define TJ_GRAYSCALE TJSAMP_GRAY 1686 | 1687 | #define TJ_BGR 1 1688 | #define TJ_BOTTOMUP TJFLAG_BOTTOMUP 1689 | #define TJ_FORCEMMX TJFLAG_FORCEMMX 1690 | #define TJ_FORCESSE TJFLAG_FORCESSE 1691 | #define TJ_FORCESSE2 TJFLAG_FORCESSE2 1692 | #define TJ_ALPHAFIRST 64 1693 | #define TJ_FORCESSE3 TJFLAG_FORCESSE3 1694 | #define TJ_FASTUPSAMPLE TJFLAG_FASTUPSAMPLE 1695 | #define TJ_YUV 512 1696 | 1697 | DLLEXPORT unsigned long TJBUFSIZE(int width, int height); 1698 | 1699 | DLLEXPORT unsigned long TJBUFSIZEYUV(int width, int height, int jpegSubsamp); 1700 | 1701 | DLLEXPORT unsigned long tjBufSizeYUV(int width, int height, int subsamp); 1702 | 1703 | DLLEXPORT int tjCompress(tjhandle handle, unsigned char *srcBuf, int width, 1704 | int pitch, int height, int pixelSize, 1705 | unsigned char *dstBuf, unsigned long *compressedSize, 1706 | int jpegSubsamp, int jpegQual, int flags); 1707 | 1708 | DLLEXPORT int tjEncodeYUV(tjhandle handle, unsigned char *srcBuf, int width, 1709 | int pitch, int height, int pixelSize, 1710 | unsigned char *dstBuf, int subsamp, int flags); 1711 | 1712 | DLLEXPORT int tjEncodeYUV2(tjhandle handle, unsigned char *srcBuf, int width, 1713 | int pitch, int height, int pixelFormat, 1714 | unsigned char *dstBuf, int subsamp, int flags); 1715 | 1716 | DLLEXPORT int tjDecompressHeader(tjhandle handle, unsigned char *jpegBuf, 1717 | unsigned long jpegSize, int *width, 1718 | int *height); 1719 | 1720 | DLLEXPORT int 
tjDecompressHeader2(tjhandle handle, unsigned char *jpegBuf, 1721 | unsigned long jpegSize, int *width, 1722 | int *height, int *jpegSubsamp); 1723 | 1724 | DLLEXPORT int tjDecompress(tjhandle handle, unsigned char *jpegBuf, 1725 | unsigned long jpegSize, unsigned char *dstBuf, 1726 | int width, int pitch, int height, int pixelSize, 1727 | int flags); 1728 | 1729 | DLLEXPORT int tjDecompressToYUV(tjhandle handle, unsigned char *jpegBuf, 1730 | unsigned long jpegSize, unsigned char *dstBuf, 1731 | int flags); 1732 | 1733 | DLLEXPORT char *tjGetErrorStr(void); 1734 | 1735 | 1736 | /** 1737 | * @} 1738 | */ 1739 | 1740 | #ifdef __cplusplus 1741 | } 1742 | #endif 1743 | 1744 | #endif 1745 | -------------------------------------------------------------------------------- /libs/turbojpeg/lib/vs/x64/Debug/turbojpeg-static.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prisonerjohn/ofxAzureKinect/0fd9db1fc6ef616482f285e42c933da3471b9d17/libs/turbojpeg/lib/vs/x64/Debug/turbojpeg-static.lib -------------------------------------------------------------------------------- /libs/turbojpeg/lib/vs/x64/Release/turbojpeg-static.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prisonerjohn/ofxAzureKinect/0fd9db1fc6ef616482f285e42c933da3471b9d17/libs/turbojpeg/lib/vs/x64/Release/turbojpeg-static.lib -------------------------------------------------------------------------------- /ofxaddons_thumbnail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prisonerjohn/ofxAzureKinect/0fd9db1fc6ef616482f285e42c933da3471b9d17/ofxaddons_thumbnail.png -------------------------------------------------------------------------------- /src/ofxAzureKinect.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 
"ofxAzureKinect/BodyTracker.h" 4 | #include "ofxAzureKinect/Device.h" 5 | #include "ofxAzureKinect/Playback.h" 6 | #include "ofxAzureKinect/Recorder.h" 7 | #include "ofxAzureKinect/Types.h" 8 | -------------------------------------------------------------------------------- /src/ofxAzureKinect/BodyTracker.cpp: -------------------------------------------------------------------------------- 1 | #include "BodyTracker.h" 2 | 3 | namespace ofxAzureKinect 4 | { 5 | BodyTrackerSettings::BodyTrackerSettings() 6 | : sensorOrientation(K4ABT_SENSOR_ORIENTATION_DEFAULT) 7 | , processingMode(K4ABT_TRACKER_PROCESSING_MODE_GPU) 8 | , gpuDeviceID(0) 9 | , imageType(K4A_CALIBRATION_TYPE_DEPTH) 10 | , updateBodyIndex(true) 11 | , updateBodiesWorld(true) 12 | , updateBodiesImage(false) 13 | {} 14 | 15 | BodyTracker::BodyTracker() 16 | : bTracking(false) 17 | , imageType(K4A_CALIBRATION_TYPE_DEPTH) 18 | , bUpdateBodyIndex(false) 19 | , bUpdateBodiesWorld(false) 20 | , bUpdateBodiesImage(false) 21 | {} 22 | 23 | BodyTracker::~BodyTracker() 24 | { 25 | this->stopTracking(); 26 | } 27 | 28 | bool BodyTracker::startTracking(const k4a::calibration& calibration, BodyTrackerSettings settings) 29 | { 30 | if (this->bTracking) return false; 31 | 32 | // Generate tracker config. 33 | this->trackerConfig = K4ABT_TRACKER_CONFIG_DEFAULT; 34 | this->trackerConfig.processing_mode = K4ABT_TRACKER_PROCESSING_MODE_GPU_CUDA; 35 | this->trackerConfig.sensor_orientation = settings.sensorOrientation; 36 | this->trackerConfig.gpu_device_id = settings.gpuDeviceID; 37 | 38 | try 39 | { 40 | // Create tracker. 41 | this->bodyTracker = k4abt::tracker::create(calibration, this->trackerConfig); 42 | } 43 | catch (const k4a::error& e) 44 | { 45 | ofLogError(__FUNCTION__) << e.what(); 46 | return false; 47 | } 48 | 49 | // Add joint smoothing parameter listener. 
50 | this->eventListeners.push(this->jointSmoothing.newListener([this](float&) 51 | { 52 | this->bodyTracker.set_temporal_smoothing(this->jointSmoothing); 53 | })); 54 | 55 | // Save update flags. 56 | this->imageType = settings.imageType; 57 | this->bUpdateBodyIndex = settings.updateBodyIndex; 58 | this->bUpdateBodiesWorld = settings.updateBodiesWorld || settings.updateBodiesImage; 59 | this->bUpdateBodiesImage = settings.updateBodiesImage; 60 | 61 | this->bTracking = true; 62 | 63 | return true; 64 | } 65 | 66 | bool BodyTracker::stopTracking() 67 | { 68 | if (!this->bTracking) return false; 69 | 70 | this->eventListeners.unsubscribeAll(); 71 | 72 | this->bodyIndexPix.clear(); 73 | this->bodyIndexTex.clear(); 74 | 75 | this->bodyTracker.shutdown(); 76 | this->bodyTracker.destroy(); 77 | 78 | this->bTracking = false; 79 | 80 | return true; 81 | } 82 | 83 | void BodyTracker::processCapture(const k4a::capture& capture, const k4a::calibration& calibration, const k4a::transformation& transformation, const k4a::image& depthImg) 84 | { 85 | if (!this->bodyTracker.enqueue_capture(capture, std::chrono::milliseconds(0))) 86 | { 87 | ofLogError(__FUNCTION__) << "Failed adding capture to tracker process queue!"; 88 | return; 89 | } 90 | 91 | k4abt::frame bodyFrame = this->bodyTracker.pop_result(std::chrono::milliseconds(0)); 92 | if (bodyFrame == nullptr) 93 | { 94 | ofLogError(__FUNCTION__) << "Failed processing capture!"; 95 | return; 96 | } 97 | 98 | if (this->bUpdateBodyIndex) 99 | { 100 | // Probe for a body index map image. 101 | k4a::image bodyIndexImg = bodyFrame.get_body_index_map(); 102 | 103 | if (this->imageType == K4A_CALIBRATION_TYPE_COLOR) 104 | { 105 | try 106 | { 107 | k4a::image transformedBodyIndexImg = transformation.depth_image_to_color_camera_custom(depthImg, bodyIndexImg, 108 | K4A_TRANSFORMATION_INTERPOLATION_TYPE_NEAREST, K4ABT_BODY_INDEX_MAP_BACKGROUND).second; 109 | 110 | // Swap body index image with transformed version. 
	/// \brief Feeds one capture through the body tracker and caches its results.
	/// Uses non-blocking (0 ms) enqueue/pop, so a capture may be dropped or a
	/// result skipped when the tracker is busy; an error is logged either way.
	void BodyTracker::processCapture(const k4a::capture& capture, const k4a::calibration& calibration, const k4a::transformation& transformation, const k4a::image& depthImg)
	{
		if (!this->bodyTracker.enqueue_capture(capture, std::chrono::milliseconds(0)))
		{
			ofLogError(__FUNCTION__) << "Failed adding capture to tracker process queue!";
			return;
		}

		// A null frame means no result was ready within the (zero) timeout.
		k4abt::frame bodyFrame = this->bodyTracker.pop_result(std::chrono::milliseconds(0));
		if (bodyFrame == nullptr)
		{
			ofLogError(__FUNCTION__) << "Failed processing capture!";
			return;
		}

		if (this->bUpdateBodyIndex)
		{
			// Probe for a body index map image (one byte per depth pixel).
			k4a::image bodyIndexImg = bodyFrame.get_body_index_map();

			if (this->imageType == K4A_CALIBRATION_TYPE_COLOR)
			{
				// The body index map is in depth space; reproject it into
				// color space when the client asked for color-aligned output.
				try
				{
					// depth_image_to_color_camera_custom returns a pair; .second
					// is the transformed custom (body index) image. Nearest
					// interpolation preserves discrete body IDs.
					k4a::image transformedBodyIndexImg = transformation.depth_image_to_color_camera_custom(depthImg, bodyIndexImg,
						K4A_TRANSFORMATION_INTERPOLATION_TYPE_NEAREST, K4ABT_BODY_INDEX_MAP_BACKGROUND).second;

					// Swap body index image with transformed version.
					bodyIndexImg.reset();
					bodyIndexImg = std::move(transformedBodyIndexImg);
				}
				catch (const k4a::error& e)
				{
					ofLogError(__FUNCTION__) << e.what();
				}
			}

			const auto bodyIndexDims = glm::ivec2(bodyIndexImg.get_width_pixels(), bodyIndexImg.get_height_pixels());
			if (!this->bodyIndexPix.isAllocated())
			{
				// Single-channel pixels: each value is a body ID or background.
				this->bodyIndexPix.allocate(bodyIndexDims.x, bodyIndexDims.y, 1);
			}

			// NOTE(review): the cast's template argument was stripped in this copy
			// of the file; the buffer holds one byte per pixel — confirm uint8_t*.
			const auto bodyIndexData = reinterpret_cast<uint8_t*>(bodyIndexImg.get_buffer());
			this->bodyIndexPix.setFromPixels(bodyIndexData, bodyIndexDims.x, bodyIndexDims.y, 1);
			ofLogVerbose(__FUNCTION__) << "Capture BodyIndex " << bodyIndexDims.x << "x" << bodyIndexDims.y << " stride: " << bodyIndexImg.get_stride_bytes() << ".";

			bodyIndexImg.reset();
		}

		size_t numBodies = bodyFrame.get_num_bodies();
		ofLogVerbose(__FUNCTION__) << numBodies << " bodies found!";

		if (this->bUpdateBodiesWorld)
		{
			this->bodySkeletons.resize(numBodies);

			for (size_t i = 0; i < numBodies; i++)
			{
				k4abt_skeleton_t skeleton = bodyFrame.get_body_skeleton(i);
				uint32_t id = bodyFrame.get_body_id(i);

				this->bodySkeletons[i].id = id;

				for (size_t j = 0; j < K4ABT_JOINT_COUNT; ++j)
				{
					this->bodySkeletons[i].joints[j].position = toGlm(skeleton.joints[j].position);
					this->bodySkeletons[i].joints[j].orientation = toGlm(skeleton.joints[j].orientation);
					this->bodySkeletons[i].joints[j].confidenceLevel = skeleton.joints[j].confidence_level;

					if (this->bUpdateBodiesImage)
					{
						try
						{
							// Joints are tracked in depth-camera 3D space; project
							// them into the requested (depth or color) image plane.
							k4a_float2_t projPos;
							calibration.convert_3d_to_2d(skeleton.joints[j].position, K4A_CALIBRATION_TYPE_DEPTH, this->imageType, &projPos);
							this->bodySkeletons[i].joints[j].projPos = toGlm(projPos);
						}
						catch (const k4a::error& e)
						{
							ofLogError(__FUNCTION__) << e.what();
						}
					}
				}
			}
		}

		// Release body frame once we're finished.
		bodyFrame.reset();
	}

	/// \brief Uploads the latest body index pixels to the GPU texture.
	/// Call from the GL thread; allocates the texture lazily on first use.
	void BodyTracker::updateTextures()
	{
		if (this->bUpdateBodyIndex && this->bodyIndexPix.isAllocated())
		{
			if (!this->bodyIndexTex.isAllocated())
			{
				this->bodyIndexTex.allocate(this->bodyIndexPix);
				// Nearest filtering: body IDs are discrete and must not be blended.
				this->bodyIndexTex.setTextureMinMagFilter(GL_NEAREST, GL_NEAREST);
			}

			this->bodyIndexTex.loadData(this->bodyIndexPix);
		}
	}

	bool BodyTracker::isTracking() const
	{
		return this->bTracking;
	}

	const ofPixels& BodyTracker::getBodyIndexPix() const
	{
		return this->bodyIndexPix;
	}

	const ofTexture& BodyTracker::getBodyIndexTex() const
	{
		return this->bodyIndexTex;
	}

	size_t BodyTracker::getNumBodies() const
	{
		return this->bodySkeletons.size();
	}

	const std::vector<BodySkeleton>& BodyTracker::getBodySkeletons() const
	{
		return this->bodySkeletons;
	}
BodyJoint joints[K4ABT_JOINT_COUNT]; 40 | }; 41 | 42 | class BodyTracker 43 | { 44 | public: 45 | BodyTracker(); 46 | ~BodyTracker(); 47 | 48 | bool startTracking(const k4a::calibration& calibration, BodyTrackerSettings settings = BodyTrackerSettings()); 49 | bool stopTracking(); 50 | 51 | void processCapture(const k4a::capture& capture, const k4a::calibration& calibration, const k4a::transformation& transformation, const k4a::image& depthImg); 52 | void updateTextures(); 53 | 54 | bool isTracking() const; 55 | 56 | const ofPixels& getBodyIndexPix() const; 57 | const ofTexture& getBodyIndexTex() const; 58 | 59 | size_t getNumBodies() const; 60 | const std::vector& getBodySkeletons() const; 61 | 62 | public: 63 | ofParameter jointSmoothing{ "Joint Smoothing", 0.0f, 0.0f, 1.0f }; 64 | 65 | private: 66 | bool bTracking; 67 | 68 | bool bUpdateBodyIndex; 69 | bool bUpdateBodiesWorld; 70 | bool bUpdateBodiesImage; 71 | 72 | k4abt_tracker_configuration_t trackerConfig; 73 | k4abt::tracker bodyTracker; 74 | 75 | k4a_calibration_type_t imageType; 76 | 77 | ofPixels bodyIndexPix; 78 | ofTexture bodyIndexTex; 79 | 80 | std::vector bodySkeletons; 81 | 82 | ofEventListeners eventListeners; 83 | }; 84 | } -------------------------------------------------------------------------------- /src/ofxAzureKinect/Device.cpp: -------------------------------------------------------------------------------- 1 | #include "Device.h" 2 | 3 | #include "ofLog.h" 4 | 5 | const int32_t TIMEOUT_IN_MS = 1000; 6 | 7 | namespace ofxAzureKinect 8 | { 9 | DeviceSettings::DeviceSettings() 10 | : depthMode(K4A_DEPTH_MODE_WFOV_2X2BINNED) 11 | , colorResolution(K4A_COLOR_RESOLUTION_1080P) 12 | , colorFormat(K4A_IMAGE_FORMAT_COLOR_BGRA32) 13 | , cameraFps(K4A_FRAMES_PER_SECOND_30) 14 | , wiredSyncMode(K4A_WIRED_SYNC_MODE_STANDALONE) 15 | , depthDelayUsec(0) 16 | , subordinateDelayUsec(0) 17 | , updateColor(true) 18 | , updateIr(true) 19 | , updateWorld(true) 20 | , updateVbo(true) 21 | , 
forceVboToDepthSize(false) 22 | , syncImages(true) 23 | {} 24 | 25 | int Device::getInstalledCount() 26 | { 27 | return k4a_device_get_installed_count(); 28 | } 29 | 30 | Device::Device() 31 | : Stream() 32 | , index(-1) 33 | , bRecording(false) 34 | {} 35 | 36 | Device::~Device() 37 | { 38 | this->close(); 39 | } 40 | 41 | bool Device::open(uint32_t idx) 42 | { 43 | if (this->bOpen) 44 | { 45 | ofLogWarning(__FUNCTION__) << "Device " << this->index << " / " << this->serialNumber << " already open!"; 46 | return false; 47 | } 48 | 49 | // Load the device at the requested index. 50 | try 51 | { 52 | // Open connection to the device. 53 | this->device = k4a::device::open(idx); 54 | 55 | // Get the device index and serial number. 56 | this->index = idx; 57 | this->serialNumber = this->device.get_serialnum(); 58 | } 59 | catch (const k4a::error& e) 60 | { 61 | ofLogError(__FUNCTION__) << e.what(); 62 | 63 | this->device.close(); 64 | 65 | return false; 66 | } 67 | 68 | ofLogNotice(__FUNCTION__) << "Successfully opened device " << this->index << " / " << this->serialNumber << "."; 69 | this->bOpen = true; 70 | 71 | return true; 72 | } 73 | 74 | bool Device::open(const std::string& serialNumber) 75 | { 76 | if (this->bOpen) 77 | { 78 | ofLogWarning(__FUNCTION__) << "Device " << this->index << " / " << this->serialNumber << " already open!"; 79 | return false; 80 | } 81 | 82 | // Loop through devices and find the one with the requested serial. 83 | bool deviceFound = false; 84 | int numConnected = Device::getInstalledCount(); 85 | for (int i = 0; i < numConnected; ++i) 86 | { 87 | try 88 | { 89 | // Open connection to the device. 90 | this->device = k4a::device::open(static_cast(i)); 91 | 92 | // Get the device serial number and check it. 
93 | this->serialNumber = this->device.get_serialnum(); 94 | if (this->serialNumber == serialNumber) 95 | { 96 | deviceFound = true; 97 | this->index = i; 98 | break; 99 | } 100 | else 101 | { 102 | this->device.close(); 103 | } 104 | } 105 | catch (const k4a::error& e) 106 | { 107 | // Don't worry about it; we just might be trying to access an already open device. 108 | continue; 109 | } 110 | } 111 | 112 | if (!deviceFound) 113 | { 114 | ofLogError(__FUNCTION__) << "No device found with serial number " << serialNumber; 115 | return false; 116 | } 117 | 118 | ofLogNotice(__FUNCTION__) << "Successfully opened device " << this->index << " / " << this->serialNumber << "."; 119 | this->bOpen = true; 120 | 121 | return true; 122 | } 123 | 124 | bool Device::close() 125 | { 126 | if (!this->bOpen) return false; 127 | 128 | this->stopRecording(); 129 | this->stopCameras(); 130 | 131 | this->device.close(); 132 | 133 | this->index = -1; 134 | this->serialNumber = ""; 135 | this->bOpen = false; 136 | 137 | return true; 138 | } 139 | 140 | bool Device::startCameras(DeviceSettings deviceSettings) 141 | { 142 | if (!this->bOpen) 143 | { 144 | ofLogError(__FUNCTION__) << "Open device before starting cameras!"; 145 | return false; 146 | } 147 | 148 | // Generate device config. 149 | this->config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL; 150 | this->config.depth_mode = deviceSettings.depthMode; 151 | this->config.color_format = deviceSettings.colorFormat; 152 | this->config.color_resolution = deviceSettings.colorResolution; 153 | this->config.camera_fps = deviceSettings.cameraFps; 154 | this->config.synchronized_images_only = deviceSettings.syncImages; 155 | 156 | this->config.wired_sync_mode = deviceSettings.wiredSyncMode; 157 | this->config.depth_delay_off_color_usec = deviceSettings.depthDelayUsec; 158 | this->config.subordinate_delay_off_master_usec = deviceSettings.subordinateDelayUsec; 159 | 160 | // Set update flags. 
161 | this->bUpdateColor = deviceSettings.updateColor; 162 | this->bUpdateIr = deviceSettings.updateIr; 163 | this->bUpdateWorld = deviceSettings.updateWorld; 164 | this->bUpdateVbo = deviceSettings.updateWorld && deviceSettings.updateVbo; 165 | this->bForceVboToDepthSize = deviceSettings.forceVboToDepthSize; 166 | 167 | // Get calibration. 168 | try 169 | { 170 | this->calibration = this->device.get_calibration(this->config.depth_mode, this->config.color_resolution); 171 | } 172 | catch (const k4a::error& e) 173 | { 174 | ofLogError(__FUNCTION__) << e.what(); 175 | return false; 176 | } 177 | 178 | if (this->bUpdateColor) 179 | { 180 | // Create transformation and images. 181 | this->transformation = k4a::transformation(this->calibration); 182 | 183 | this->setupTransformationImages(); 184 | } 185 | 186 | if (this->bUpdateWorld) 187 | { 188 | // Load depth to world LUT. 189 | this->setupDepthToWorldTable(); 190 | 191 | if (this->bUpdateColor) 192 | { 193 | // Load color to world LUT. 194 | this->setupColorToWorldTable(); 195 | } 196 | } 197 | 198 | // Check compatible sync mode and connection. 199 | if (this->config.wired_sync_mode == K4A_WIRED_SYNC_MODE_MASTER && !this->isSyncOutConnected()) 200 | { 201 | ofLogWarning(__FUNCTION__) << "Wired sync mode set to Master but Sync Out not connected! Reverting to Standalone."; 202 | this->config.wired_sync_mode = K4A_WIRED_SYNC_MODE_STANDALONE; 203 | } 204 | else if (this->config.wired_sync_mode == K4A_WIRED_SYNC_MODE_SUBORDINATE && !this->isSyncInConnected()) 205 | { 206 | ofLogWarning(__FUNCTION__) << "Wired sync mode set to Subordinate but Sync In not connected! Reverting to Standalone."; 207 | this->config.wired_sync_mode = K4A_WIRED_SYNC_MODE_STANDALONE; 208 | } 209 | 210 | if (this->config.wired_sync_mode != K4A_WIRED_SYNC_MODE_SUBORDINATE) 211 | { 212 | this->config.subordinate_delay_off_master_usec = 0; 213 | } 214 | 215 | // Start cameras. 
216 | try 217 | { 218 | this->device.start_cameras(&this->config); 219 | } 220 | catch (const k4a::error& e) 221 | { 222 | ofLogError(__FUNCTION__) << e.what(); 223 | return false; 224 | } 225 | 226 | return this->startStreaming(); 227 | } 228 | 229 | bool Device::stopCameras() 230 | { 231 | if (!this->bStreaming) return false; 232 | 233 | this->stopStreaming(); 234 | 235 | this->depthToWorldImg.reset(); 236 | this->colorToWorldImg.reset(); 237 | 238 | this->transformation.destroy(); 239 | this->depthInColorImg.reset(); 240 | this->colorInDepthImg.reset(); 241 | 242 | this->device.stop_cameras(); 243 | 244 | return true; 245 | } 246 | 247 | bool Device::startRecording(std::string filepath) 248 | { 249 | if (!this->bOpen) return false; 250 | 251 | if (this->isRecording()) 252 | { 253 | this->stopRecording(); 254 | } 255 | 256 | if (filepath.empty()) 257 | { 258 | filepath = "k4a_" + ofGetTimestampString("%Y%m%d_%H%M%S") + ".mkv"; 259 | } 260 | 261 | if (this->recorder.open(this->device, this->config, filepath)) 262 | { 263 | this->bRecording = true; 264 | } 265 | 266 | return this->bRecording; 267 | } 268 | 269 | bool Device::stopRecording() 270 | { 271 | if (!this->isRecording()) return false; 272 | 273 | this->recorder.close(); 274 | this->bRecording = false; 275 | 276 | return this->bRecording; 277 | } 278 | 279 | bool Device::isSyncInConnected() const 280 | { 281 | return this->device.is_sync_in_connected(); 282 | } 283 | 284 | bool Device::isSyncOutConnected() const 285 | { 286 | return this->device.is_sync_out_connected(); 287 | } 288 | 289 | bool Device::updateCapture() 290 | { 291 | try 292 | { 293 | if (this->device.get_capture(&this->capture, std::chrono::milliseconds(TIMEOUT_IN_MS))) 294 | { 295 | return true; 296 | } 297 | else 298 | { 299 | ofLogWarning(__FUNCTION__) << "Timed out waiting for a capture for device " << this->index << "::" << this->serialNumber << "."; 300 | return false; 301 | } 302 | } 303 | catch (const k4a::error& e) 304 | { 305 | 
ofLogError(__FUNCTION__) << e.what(); 306 | return false; 307 | } 308 | } 309 | 310 | void Device::updatePixels() 311 | { 312 | Stream::updatePixels(); 313 | 314 | if (this->bRecording) 315 | { 316 | this->recorder.writeCapture(this->capture); 317 | } 318 | } 319 | 320 | bool Device::isRecording() const 321 | { 322 | return this->recorder.isOpen() && this->bRecording; 323 | } 324 | 325 | DepthMode Device::getDepthMode() const 326 | { 327 | return this->config.depth_mode; 328 | } 329 | 330 | ImageFormat Device::getColorFormat() const 331 | { 332 | return this->config.color_format; 333 | } 334 | 335 | ColorResolution Device::getColorResolution() const 336 | { 337 | return this->config.color_resolution; 338 | } 339 | 340 | FramesPerSecond Device::getCameraFps() const 341 | { 342 | return this->config.camera_fps; 343 | } 344 | 345 | WiredSyncMode Device::getWiredSyncMode() const 346 | { 347 | return this->config.wired_sync_mode; 348 | } 349 | 350 | uint32_t Device::getDepthDelayUsec() const 351 | { 352 | return this->config.depth_delay_off_color_usec; 353 | } 354 | 355 | uint32_t Device::getSubordinateDelayUsec() const 356 | { 357 | return this->config.subordinate_delay_off_master_usec; 358 | } 359 | 360 | bool Device::getSyncImages() const 361 | { 362 | return this->config.synchronized_images_only; 363 | } 364 | 365 | const Recorder& Device::getRecorder() const 366 | { 367 | return this->recorder; 368 | } 369 | 370 | Recorder& Device::getRecorder() 371 | { 372 | return this->recorder; 373 | } 374 | } 375 | -------------------------------------------------------------------------------- /src/ofxAzureKinect/Device.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | #include "ofPixels.h" 9 | #include "ofTexture.h" 10 | 11 | #include "Recorder.h" 12 | #include "Stream.h" 13 | #include "Types.h" 14 | 15 | namespace ofxAzureKinect 16 | { 17 | struct DeviceSettings 18 | { 
19 | DepthMode depthMode; 20 | ColorResolution colorResolution; 21 | ImageFormat colorFormat; 22 | FramesPerSecond cameraFps; 23 | 24 | WiredSyncMode wiredSyncMode; 25 | uint32_t depthDelayUsec; 26 | uint32_t subordinateDelayUsec; 27 | 28 | bool updateColor; 29 | bool updateIr; 30 | bool updateWorld; 31 | bool updateVbo; 32 | bool forceVboToDepthSize; 33 | 34 | bool syncImages; 35 | 36 | DeviceSettings(); 37 | }; 38 | 39 | class Device 40 | : public Stream 41 | { 42 | public: 43 | static int getInstalledCount(); 44 | 45 | public: 46 | Device(); 47 | ~Device(); 48 | 49 | bool open(uint32_t idx = 0); 50 | bool open(const std::string& serialNumber); 51 | bool close(); 52 | 53 | bool startCameras(DeviceSettings deviceSettings = DeviceSettings()); 54 | bool stopCameras(); 55 | 56 | bool startRecording(std::string filepath = ""); 57 | bool stopRecording(); 58 | 59 | bool isSyncInConnected() const; 60 | bool isSyncOutConnected() const; 61 | 62 | bool isRecording() const; 63 | 64 | DepthMode getDepthMode() const override; 65 | ImageFormat getColorFormat() const override; 66 | ColorResolution getColorResolution() const override; 67 | FramesPerSecond getCameraFps() const override; 68 | 69 | WiredSyncMode getWiredSyncMode() const override; 70 | uint32_t getDepthDelayUsec() const override; 71 | uint32_t getSubordinateDelayUsec() const override; 72 | 73 | bool getSyncImages() const; 74 | 75 | const Recorder& getRecorder() const; 76 | Recorder& getRecorder(); 77 | 78 | protected: 79 | bool updateCapture() override; 80 | 81 | void updatePixels() override; 82 | 83 | private: 84 | int index; 85 | 86 | bool bRecording; 87 | 88 | k4a_device_configuration_t config; 89 | k4a::device device; 90 | 91 | Recorder recorder; 92 | }; 93 | } 94 | -------------------------------------------------------------------------------- /src/ofxAzureKinect/Playback.cpp: -------------------------------------------------------------------------------- 1 | #include "Playback.h" 2 | 3 | namespace 
ofxAzureKinect 4 | { 5 | PlaybackSettings::PlaybackSettings() 6 | : updateColor(true) 7 | , updateIr(true) 8 | , updateWorld(true) 9 | , updateVbo(true) 10 | , forceVboToDepthSize(false) 11 | , autoloop(true) 12 | {} 13 | 14 | Playback::Playback() 15 | : Stream() 16 | , bUpdateDepth(true) 17 | , bLoops(true) 18 | , bPaused(false) 19 | , lastFrameSecs(0) 20 | , duration(0) 21 | { 22 | 23 | } 24 | 25 | Playback::~Playback() 26 | { 27 | this->close(); 28 | } 29 | 30 | bool Playback::open(std::string filepath) 31 | { 32 | if (this->bOpen) return false; 33 | 34 | if (filepath.empty()) 35 | { 36 | ofLogError(__FUNCTION__) << "File path cannot be empty!"; 37 | } 38 | 39 | filepath = ofToDataPath(filepath, true); 40 | 41 | try 42 | { 43 | // Open playback file. 44 | this->playback = k4a::playback::open(filepath.c_str()); 45 | 46 | // Read playback config. 47 | this->config = this->playback.get_record_configuration(); 48 | 49 | // Get the serial number. 50 | this->playback.get_tag("K4A_DEVICE_SERIAL_NUMBER", &this->serialNumber); 51 | 52 | // Get the calibration. 53 | this->calibration = this->playback.get_calibration(); 54 | 55 | // Get the duration. 
56 | this->duration = this->playback.get_recording_length(); 57 | } 58 | catch (const k4a::error& e) 59 | { 60 | ofLogError(__FUNCTION__) << e.what(); 61 | 62 | this->playback.close(); 63 | 64 | return false; 65 | } 66 | 67 | ofLogNotice(__FUNCTION__) << "Open success, reading from file " << filepath; 68 | 69 | this->bOpen = true; 70 | return true; 71 | } 72 | 73 | bool Playback::close() 74 | { 75 | if (!this->bOpen) return false; 76 | 77 | this->stopPlayback(); 78 | 79 | this->playback.close(); 80 | 81 | ofLogNotice(__FUNCTION__) << "Close success"; 82 | 83 | this->serialNumber = ""; 84 | this->bOpen = false; 85 | 86 | return true; 87 | } 88 | 89 | bool Playback::startPlayback(PlaybackSettings playbackSettings) 90 | { 91 | if (!this->bOpen) 92 | { 93 | ofLogError(__FUNCTION__) << "Open file before starting playback!"; 94 | return false; 95 | } 96 | 97 | // Set update flags. 98 | this->bUpdateDepth = this->config.depth_track_enabled; 99 | this->bUpdateColor = this->config.color_track_enabled && playbackSettings.updateColor; 100 | this->bUpdateIr = this->config.ir_track_enabled && playbackSettings.updateIr; 101 | this->bUpdateWorld = this->config.depth_track_enabled && playbackSettings.updateWorld; 102 | this->bUpdateVbo = this->config.depth_track_enabled && playbackSettings.updateWorld && playbackSettings.updateVbo; 103 | this->bForceVboToDepthSize = playbackSettings.forceVboToDepthSize; 104 | 105 | this->bLoops = playbackSettings.autoloop; 106 | 107 | this->lastFrameSecs = 0; 108 | 109 | if (this->bUpdateDepth && this->bUpdateColor) 110 | { 111 | // Create transformation and images. 112 | this->transformation = k4a::transformation(this->calibration); 113 | 114 | this->setupTransformationImages(); 115 | } 116 | 117 | if (this->bUpdateWorld) 118 | { 119 | // Load depth to world LUT. 120 | this->setupDepthToWorldTable(); 121 | 122 | if (this->bUpdateColor) 123 | { 124 | // Load color to world LUT. 
	/// \brief Stops streaming and destroys the transformation.
	/// \returns false if streaming was not running.
	bool Playback::stopPlayback()
	{
		if (!this->bStreaming) return false;

		this->stopStreaming();

		this->transformation.destroy();

		return true;
	}

	/// Pauses/resumes frame delivery; the file stays open and seekable.
	void Playback::setPaused(bool paused)
	{
		this->bPaused = paused;
	}

	bool Playback::isPaused() const
	{
		return this->bPaused;
	}

	/// Seek to a normalized position in [0, 1] of the recording.
	bool Playback::seekPct(float pct)
	{
		return this->seekUsecs(ofMap(pct, 0, 1, 0, this->getDurationUsecs(), true));
	}

	/// Seek to an absolute time in seconds from the start of the recording.
	bool Playback::seekSecs(float seconds)
	{
		return this->seekUsecs(seconds * 1000000ll);
	}

	/// \brief Seek to an absolute time in microseconds from the recording start.
	/// Resets the frame pacing clock so the next frame is delivered immediately.
	bool Playback::seekUsecs(long long usecs)
	{
		if (!this->bOpen) return false;

		try
		{
			this->playback.seek_timestamp(std::chrono::microseconds(usecs), K4A_PLAYBACK_SEEK_BEGIN);
			this->lastFrameSecs = 0;
		}
		catch (const k4a::error& e)
		{
			ofLogError(__FUNCTION__) << e.what();
			return false;
		}

		return true;
	}

	/// \brief Pulls the next capture, paced to the recording's frame rate.
	/// Rewinds on end-of-file when looping, otherwise stops playback.
	/// \returns true when a new capture was read into this->capture.
	bool Playback::updateCapture()
	{
		// Throttle to the recorded frame rate using wall-clock time.
		float nextFrameSecs = lastFrameSecs + 1 / static_cast<float>(this->getFramerate());
		if (this->bPaused || ofGetElapsedTimef() < nextFrameSecs)
		{
			// Not ready for another frame yet.
			return false;
		}

		try
		{
			if (this->playback.get_next_capture(&this->capture))
			{
				lastFrameSecs = ofGetElapsedTimef();
				return true;
			}
			else if (this->bLoops)
			{
				// Rewind and try again.
				// NOTE(review): recursion after the rewind — if a looping file
				// never yields a capture (e.g. empty recording) this recurses
				// without bound; confirm against expected file contents.
				this->seekUsecs(0);
				return this->updateCapture();
			}
			else
			{
				// Stop.
				this->stopPlayback();
				return false;
			}
		}
		catch (const k4a::error& e)
		{
			ofLogError(__FUNCTION__) << e.what();
			return false;
		}
	}

	/// \brief Reads a metadata tag from the recording (empty string if absent).
	std::string Playback::readTag(const std::string& name)
	{
		if (!this->isOpen())
		{
			ofLogError(__FUNCTION__) << "Open playback before reading!";
			return "";
		}

		std::string value = "";
		this->playback.get_tag(name.c_str(), &value);
		return value;
	}

	// The following accessors surface the recording's configuration as read
	// from the file header in open().

	DepthMode Playback::getDepthMode() const
	{
		return this->config.depth_mode;
	}

	ImageFormat Playback::getColorFormat() const
	{
		return this->config.color_format;
	}

	ColorResolution Playback::getColorResolution() const
	{
		return this->config.color_resolution;
	}

	FramesPerSecond Playback::getCameraFps() const
	{
		return this->config.camera_fps;
	}

	WiredSyncMode Playback::getWiredSyncMode() const
	{
		return this->config.wired_sync_mode;
	}

	uint32_t Playback::getDepthDelayUsec() const
	{
		return this->config.depth_delay_off_color_usec;
	}

	uint32_t Playback::getSubordinateDelayUsec() const
	{
		return this->config.subordinate_delay_off_master_usec;
	}

	float Playback::getDurationSecs() const
	{
		return getDurationUsecs() / 1000000.0f;
	}

	long long Playback::getDurationUsecs() const
	{
		return this->duration.count();
	}
| bool updateIr; 17 | bool updateWorld; 18 | bool updateVbo; 19 | bool forceVboToDepthSize; 20 | 21 | bool autoloop; 22 | 23 | PlaybackSettings(); 24 | }; 25 | 26 | class Playback 27 | : public Stream 28 | { 29 | public: 30 | Playback(); 31 | ~Playback(); 32 | 33 | bool open(std::string filepath); 34 | bool close(); 35 | 36 | bool startPlayback(PlaybackSettings playbackSettings = PlaybackSettings()); 37 | bool stopPlayback(); 38 | 39 | void setPaused(bool paused); 40 | bool isPaused() const; 41 | 42 | bool seekPct(float pct); 43 | bool seekSecs(float seconds); 44 | bool seekUsecs(long long usecs); 45 | 46 | std::string readTag(const std::string& name); 47 | 48 | DepthMode getDepthMode() const override; 49 | ImageFormat getColorFormat() const override; 50 | ColorResolution getColorResolution() const override; 51 | FramesPerSecond getCameraFps() const override; 52 | 53 | WiredSyncMode getWiredSyncMode() const override; 54 | uint32_t getDepthDelayUsec() const override; 55 | uint32_t getSubordinateDelayUsec() const override; 56 | 57 | float getDurationSecs() const; 58 | long long getDurationUsecs() const; 59 | 60 | protected: 61 | bool updateCapture() override; 62 | 63 | private: 64 | bool bUpdateDepth; 65 | bool bLoops; 66 | bool bPaused; 67 | 68 | float lastFrameSecs; 69 | std::chrono::microseconds duration; 70 | 71 | k4a_record_configuration_t config; 72 | k4a::playback playback; 73 | }; 74 | } 75 | -------------------------------------------------------------------------------- /src/ofxAzureKinect/Recorder.cpp: -------------------------------------------------------------------------------- 1 | #include "Recorder.h" 2 | 3 | namespace ofxAzureKinect 4 | { 5 | Recorder::Recorder() 6 | : bOpen(false) 7 | { 8 | 9 | } 10 | 11 | Recorder::~Recorder() 12 | { 13 | this->close(); 14 | } 15 | 16 | bool Recorder::open(const k4a::device& device, k4a_device_configuration_t config, std::string filepath) 17 | { 18 | if (this->bOpen) return false; 19 | 20 | if (filepath.empty()) 
21 | { 22 | ofLogError(__FUNCTION__) << "File path cannot be empty!"; 23 | } 24 | 25 | filepath = ofToDataPath(filepath, true); 26 | 27 | try 28 | { 29 | this->record = k4a::record::create(filepath.c_str(), device, config); 30 | 31 | // TODO: Add IMU and other custom tracks here. 32 | 33 | // Write header after all track metadata is setup. 34 | this->record.write_header(); 35 | } 36 | catch (const k4a::error& e) 37 | { 38 | ofLogError(__FUNCTION__) << e.what(); 39 | return false; 40 | } 41 | 42 | ofLogNotice(__FUNCTION__) << "Open success, writing to file " << filepath; 43 | 44 | this->bOpen = true; 45 | return true; 46 | } 47 | 48 | bool Recorder::close() 49 | { 50 | if (!this->bOpen) return false; 51 | 52 | this->record.flush(); 53 | this->record.close(); 54 | 55 | ofLogNotice(__FUNCTION__) << "Close success"; 56 | 57 | this->bOpen = false; 58 | return true; 59 | } 60 | 61 | bool Recorder::writeCapture(const k4a::capture& capture) 62 | { 63 | if (!this->isOpen()) 64 | { 65 | ofLogError(__FUNCTION__) << "Open recorder before writing!"; 66 | return false; 67 | } 68 | 69 | this->record.write_capture(capture); 70 | return true; 71 | } 72 | 73 | bool Recorder::addTag(const std::string& name, const std::string& value) 74 | { 75 | if (!this->isOpen()) 76 | { 77 | ofLogError(__FUNCTION__) << "Open recorder before writing!"; 78 | return false; 79 | } 80 | 81 | this->record.add_tag(name.c_str(), value.c_str()); 82 | return true; 83 | } 84 | 85 | bool Recorder::isOpen() const 86 | { 87 | return this->bOpen; 88 | } 89 | } -------------------------------------------------------------------------------- /src/ofxAzureKinect/Recorder.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include "ofParameter.h" 6 | 7 | namespace ofxAzureKinect 8 | { 9 | class Recorder 10 | { 11 | public: 12 | Recorder(); 13 | ~Recorder(); 14 | 15 | bool open(const k4a::device& device, k4a_device_configuration_t config, 
std::string filepath); 16 | bool close(); 17 | 18 | bool writeCapture(const k4a::capture& capture); 19 | 20 | bool addTag(const std::string& name, const std::string& value); 21 | 22 | bool isOpen() const; 23 | 24 | private: 25 | bool bOpen; 26 | 27 | k4a::record record; 28 | }; 29 | } -------------------------------------------------------------------------------- /src/ofxAzureKinect/Stream.cpp: -------------------------------------------------------------------------------- 1 | #include "Stream.h" 2 | 3 | namespace ofxAzureKinect 4 | { 5 | Stream::Stream() 6 | : bOpen(false) 7 | , bStreaming(false) 8 | , bNewFrame(false) 9 | , serialNumber("") 10 | , pixFrameNum(0) 11 | , texFrameNum(0) 12 | , bUpdateColor(false) 13 | , bUpdateIr(false) 14 | , bUpdateWorld(false) 15 | , bUpdateVbo(false) 16 | , bForceVboToDepthSize(false) 17 | , jpegDecompressor(tjInitDecompress()) 18 | , numPoints(0) 19 | , numSuccessiveFails(0) 20 | {} 21 | 22 | Stream::~Stream() 23 | { 24 | tjDestroy(jpegDecompressor); 25 | } 26 | 27 | bool Stream::setupDepthToWorldTable() 28 | { 29 | if (this->setupImageToWorldTable(K4A_CALIBRATION_TYPE_DEPTH, this->depthToWorldImg)) 30 | { 31 | const int width = this->depthToWorldImg.get_width_pixels(); 32 | const int height = this->depthToWorldImg.get_height_pixels(); 33 | 34 | const auto data = reinterpret_cast(this->depthToWorldImg.get_buffer()); 35 | 36 | if (!this->depthToWorldPix.isAllocated()) 37 | { 38 | this->depthToWorldPix.allocate(width, height, 2); 39 | this->depthToWorldTex.allocate(width, height, GL_RG32F); 40 | this->depthToWorldTex.setTextureMinMagFilter(GL_NEAREST, GL_NEAREST); 41 | } 42 | 43 | this->depthToWorldPix.setFromPixels(data, width, height, 2); 44 | this->depthToWorldTex.loadData(this->depthToWorldPix); 45 | 46 | return true; 47 | } 48 | 49 | return false; 50 | } 51 | 52 | bool Stream::setupColorToWorldTable() 53 | { 54 | if (this->setupImageToWorldTable(K4A_CALIBRATION_TYPE_COLOR, this->colorToWorldImg)) 55 | { 56 | const int 
width = this->colorToWorldImg.get_width_pixels(); 57 | const int height = this->colorToWorldImg.get_height_pixels(); 58 | 59 | const auto data = reinterpret_cast(this->colorToWorldImg.get_buffer()); 60 | 61 | if (!this->colorToWorldPix.isAllocated()) 62 | { 63 | this->colorToWorldPix.allocate(width, height, 2); 64 | this->colorToWorldTex.allocate(width, height, GL_RG32F); 65 | this->colorToWorldTex.setTextureMinMagFilter(GL_NEAREST, GL_NEAREST); 66 | } 67 | 68 | this->colorToWorldPix.setFromPixels(data, width, height, 2); 69 | this->colorToWorldTex.loadData(this->colorToWorldPix); 70 | 71 | return true; 72 | } 73 | 74 | return false; 75 | } 76 | 77 | bool Stream::setupImageToWorldTable(k4a_calibration_type_t type, k4a::image& img) 78 | { 79 | const k4a_calibration_camera_t& calibrationCamera = (type == K4A_CALIBRATION_TYPE_DEPTH) ? this->calibration.depth_camera_calibration : this->calibration.color_camera_calibration; 80 | 81 | const auto dims = glm::ivec2( 82 | calibrationCamera.resolution_width, 83 | calibrationCamera.resolution_height); 84 | 85 | try 86 | { 87 | img = k4a::image::create(K4A_IMAGE_FORMAT_CUSTOM, 88 | dims.x, dims.y, 89 | dims.x * static_cast(sizeof(k4a_float2_t))); 90 | } 91 | catch (const k4a::error& e) 92 | { 93 | ofLogError(__FUNCTION__) << e.what(); 94 | return false; 95 | } 96 | 97 | auto imgData = reinterpret_cast(img.get_buffer()); 98 | 99 | k4a_float2_t p; 100 | k4a_float3_t ray; 101 | int idx = 0; 102 | for (int y = 0; y < dims.y; ++y) 103 | { 104 | p.xy.y = static_cast(y); 105 | 106 | for (int x = 0; x < dims.x; ++x) 107 | { 108 | p.xy.x = static_cast(x); 109 | 110 | if (this->calibration.convert_2d_to_3d(p, 1.f, type, type, &ray)) 111 | { 112 | imgData[idx].xy.x = ray.xyz.x; 113 | imgData[idx].xy.y = ray.xyz.y; 114 | } 115 | else 116 | { 117 | // The pixel is invalid. 
118 | //ofLogNotice(__FUNCTION__) << "Pixel " << depthToWorldData[idx].xy.x << ", " << depthToWorldData[idx].xy.y << " is invalid"; 119 | imgData[idx].xy.x = 0; 120 | imgData[idx].xy.y = 0; 121 | } 122 | 123 | ++idx; 124 | } 125 | } 126 | 127 | return true; 128 | } 129 | 130 | bool Stream::setupTransformationImages() 131 | { 132 | const auto depthDims = glm::ivec2( 133 | this->calibration.depth_camera_calibration.resolution_width, 134 | this->calibration.depth_camera_calibration.resolution_height); 135 | 136 | try 137 | { 138 | this->colorInDepthImg = k4a::image::create(K4A_IMAGE_FORMAT_COLOR_BGRA32, 139 | depthDims.x, depthDims.y, 140 | depthDims.x * 4 * static_cast(sizeof(uint8_t))); 141 | } 142 | catch (const k4a::error& e) 143 | { 144 | ofLogError(__FUNCTION__) << e.what(); 145 | return false; 146 | } 147 | 148 | const auto colorDims = glm::ivec2( 149 | this->calibration.color_camera_calibration.resolution_width, 150 | this->calibration.color_camera_calibration.resolution_height); 151 | 152 | try 153 | { 154 | this->depthInColorImg = k4a::image::create(K4A_IMAGE_FORMAT_DEPTH16, 155 | colorDims.x, colorDims.y, 156 | colorDims.x * static_cast(sizeof(uint16_t))); 157 | } 158 | catch (const k4a::error& e) 159 | { 160 | ofLogError(__FUNCTION__) << e.what(); 161 | return false; 162 | } 163 | 164 | return true; 165 | } 166 | 167 | bool Stream::startStreaming() 168 | { 169 | if (this->bStreaming) return false; 170 | 171 | this->startThread(); 172 | ofAddListener(ofEvents().update, this, &Stream::update); 173 | 174 | this->numSuccessiveFails = 0; 175 | this->bStreaming = true; 176 | 177 | return true; 178 | } 179 | 180 | bool Stream::stopStreaming() 181 | { 182 | if (!this->bStreaming) return false; 183 | 184 | this->stopBodyTracker(); 185 | 186 | std::unique_lock lock(this->mutex); 187 | this->stopThread(); 188 | this->condition.notify_all(); 189 | 190 | ofRemoveListener(ofEvents().update, this, &Stream::update); 191 | 192 | this->numSuccessiveFails = 0; 193 | 
this->bStreaming = false; 194 | 195 | return true; 196 | } 197 | 198 | bool Stream::startBodyTracker(BodyTrackerSettings trackerSettings) 199 | { 200 | if (trackerSettings.imageType == K4A_CALIBRATION_TYPE_COLOR && !this->bUpdateColor) 201 | { 202 | ofLogWarning(__FUNCTION__) << "Cannot map tracker to color because color stream is disabled! Overriding to depth image."; 203 | trackerSettings.imageType = K4A_CALIBRATION_TYPE_DEPTH; 204 | } 205 | return this->bodyTracker.startTracking(this->calibration, trackerSettings); 206 | } 207 | 208 | bool Stream::stopBodyTracker() 209 | { 210 | return this->bodyTracker.stopTracking(); 211 | } 212 | 213 | void Stream::threadedFunction() 214 | { 215 | while (this->isThreadRunning()) 216 | { 217 | std::unique_lock lock(this->mutex); 218 | 219 | while (this->isThreadRunning() && this->texFrameNum != this->pixFrameNum) 220 | { 221 | this->condition.wait(lock); 222 | } 223 | 224 | if (this->isThreadRunning() && this->updateCapture()) 225 | { 226 | this->updatePixels(); 227 | 228 | this->releaseCapture(); 229 | 230 | this->numSuccessiveFails = 0; 231 | } 232 | else 233 | { 234 | ++this->numSuccessiveFails; 235 | } 236 | } 237 | } 238 | 239 | void Stream::update(ofEventArgs& args) 240 | { 241 | this->bNewFrame = false; 242 | 243 | if (this->texFrameNum != this->pixFrameNum) 244 | { 245 | std::unique_lock lock(this->mutex); 246 | 247 | this->updateTextures(); 248 | 249 | this->condition.notify_all(); 250 | } 251 | } 252 | 253 | void Stream::releaseCapture() 254 | { 255 | this->capture.reset(); 256 | } 257 | 258 | void Stream::updatePixels() 259 | { 260 | // Probe for a depth16 image. 
261 | auto depthImg = this->capture.get_depth_image(); 262 | if (depthImg) 263 | { 264 | const auto depthDims = glm::ivec2(depthImg.get_width_pixels(), depthImg.get_height_pixels()); 265 | if (!depthPix.isAllocated()) 266 | { 267 | this->depthPix.allocate(depthDims.x, depthDims.y, 1); 268 | } 269 | 270 | const auto depthData = reinterpret_cast(depthImg.get_buffer()); 271 | this->depthPix.setFromPixels(depthData, depthDims.x, depthDims.y, 1); 272 | 273 | ofLogVerbose(__FUNCTION__) << "Capture Depth16 " << depthDims.x << "x" << depthDims.y << " stride: " << depthImg.get_stride_bytes() << "."; 274 | } 275 | else 276 | { 277 | ofLogWarning(__FUNCTION__) << "No Depth16 capture found (" << ofGetFrameNum() << ")!"; 278 | } 279 | 280 | k4a::image colorImg; 281 | if (this->bUpdateColor) 282 | { 283 | // Probe for a color image. 284 | colorImg = this->capture.get_color_image(); 285 | if (colorImg) 286 | { 287 | const auto colorDims = glm::ivec2(colorImg.get_width_pixels(), colorImg.get_height_pixels()); 288 | if (!colorPix.isAllocated()) 289 | { 290 | this->colorPix.allocate(colorDims.x, colorDims.y, OF_PIXELS_BGRA); 291 | } 292 | 293 | if (this->getColorFormat() == K4A_IMAGE_FORMAT_COLOR_MJPG) 294 | { 295 | const int decompressStatus = tjDecompress2(this->jpegDecompressor, 296 | colorImg.get_buffer(), 297 | static_cast(colorImg.get_size()), 298 | this->colorPix.getData(), 299 | colorDims.x, 300 | 0, // pitch 301 | colorDims.y, 302 | TJPF_BGRA, 303 | TJFLAG_FASTDCT | TJFLAG_FASTUPSAMPLE); 304 | } 305 | else 306 | { 307 | const auto colorData = reinterpret_cast(colorImg.get_buffer()); 308 | this->colorPix.setFromPixels(colorData, colorDims.x, colorDims.y, 4); 309 | } 310 | 311 | ofLogVerbose(__FUNCTION__) << "Capture Color " << colorDims.x << "x" << colorDims.y << " stride: " << colorImg.get_stride_bytes() << "."; 312 | } 313 | else 314 | { 315 | ofLogWarning(__FUNCTION__) << "No Color capture found (" << ofGetFrameNum() << ")!"; 316 | } 317 | } 318 | 319 | k4a::image irImg; 
320 | if (this->bUpdateIr) 321 | { 322 | // Probe for a IR16 image. 323 | irImg = this->capture.get_ir_image(); 324 | if (irImg) 325 | { 326 | const auto irDims = glm::ivec2(irImg.get_width_pixels(), irImg.get_height_pixels()); 327 | if (!this->irPix.isAllocated()) 328 | { 329 | this->irPix.allocate(irDims.x, irDims.y, 1); 330 | } 331 | 332 | const auto irData = reinterpret_cast(irImg.get_buffer()); 333 | this->irPix.setFromPixels(irData, irDims.x, irDims.y, 1); 334 | 335 | ofLogVerbose(__FUNCTION__) << "Capture Ir16 " << irDims.x << "x" << irDims.y << " stride: " << irImg.get_stride_bytes() << "."; 336 | } 337 | else 338 | { 339 | ofLogWarning(__FUNCTION__) << "No Ir16 capture found (" << ofGetFrameNum() << ")!"; 340 | } 341 | } 342 | 343 | if (colorImg && this->bUpdateColor && this->getColorFormat() == K4A_IMAGE_FORMAT_COLOR_BGRA32) 344 | { 345 | // TODO: Fix this for non-BGRA formats, maybe always keep a BGRA k4a::image around. 346 | this->updateDepthInColorFrame(depthImg, colorImg); 347 | this->updateColorInDepthFrame(depthImg, colorImg); 348 | } 349 | 350 | if (this->bUpdateVbo) 351 | { 352 | if (this->bUpdateColor && !this->bForceVboToDepthSize) 353 | { 354 | this->updatePointsCache(this->depthInColorImg, this->colorToWorldImg); 355 | } 356 | else 357 | { 358 | this->updatePointsCache(depthImg, this->depthToWorldImg); 359 | } 360 | } 361 | 362 | if (this->bodyTracker.isTracking()) 363 | { 364 | this->bodyTracker.processCapture(this->capture, this->calibration, this->transformation, depthImg); 365 | } 366 | 367 | // Release images. 368 | depthImg.reset(); 369 | colorImg.reset(); 370 | irImg.reset(); 371 | 372 | // Update frame number. 373 | this->pixFrameNum = ofGetFrameNum(); 374 | } 375 | 376 | void Stream::updateTextures() 377 | { 378 | if (this->depthPix.isAllocated()) 379 | { 380 | // Update the depth texture. 
381 | if (!this->depthTex.isAllocated()) 382 | { 383 | this->depthTex.allocate(this->depthPix); 384 | this->depthTex.setTextureMinMagFilter(GL_NEAREST, GL_NEAREST); 385 | } 386 | 387 | this->depthTex.loadData(this->depthPix); 388 | ofLogVerbose(__FUNCTION__) << "Update Depth16 " << this->depthTex.getWidth() << "x" << this->depthTex.getHeight() << "."; 389 | } 390 | 391 | if (this->bUpdateColor && this->colorPix.isAllocated()) 392 | { 393 | // Update the color texture. 394 | if (!this->colorTex.isAllocated()) 395 | { 396 | this->colorTex.allocate(this->colorPix); 397 | this->colorTex.setTextureMinMagFilter(GL_NEAREST, GL_NEAREST); 398 | 399 | if (this->getColorFormat() == K4A_IMAGE_FORMAT_COLOR_BGRA32) 400 | { 401 | this->colorTex.bind(); 402 | { 403 | glTexParameteri(this->colorTex.texData.textureTarget, GL_TEXTURE_SWIZZLE_R, GL_BLUE); 404 | glTexParameteri(this->colorTex.texData.textureTarget, GL_TEXTURE_SWIZZLE_B, GL_RED); 405 | } 406 | this->colorTex.unbind(); 407 | } 408 | } 409 | 410 | this->colorTex.loadData(this->colorPix); 411 | ofLogVerbose(__FUNCTION__) << "Update Color " << this->colorTex.getWidth() << "x" << this->colorTex.getHeight() << "."; 412 | } 413 | 414 | if (this->bUpdateIr && this->irPix.isAllocated()) 415 | { 416 | // Update the IR16 image. 
417 | if (!this->irTex.isAllocated()) 418 | { 419 | this->irTex.allocate(this->irPix); 420 | this->irTex.setTextureMinMagFilter(GL_NEAREST, GL_NEAREST); 421 | this->irTex.setRGToRGBASwizzles(true); 422 | } 423 | 424 | this->irTex.loadData(this->irPix); 425 | ofLogVerbose(__FUNCTION__) << "Update Ir16 " << this->irTex.getWidth() << "x" << this->irTex.getHeight() << "."; 426 | } 427 | 428 | if (this->bUpdateVbo) 429 | { 430 | this->pointCloudVbo.setVertexData(this->positionCache.data(), this->numPoints, GL_STREAM_DRAW); 431 | this->pointCloudVbo.setTexCoordData(this->uvCache.data(), this->numPoints, GL_STREAM_DRAW); 432 | } 433 | 434 | if (this->bUpdateColor && this->getColorFormat() == K4A_IMAGE_FORMAT_COLOR_BGRA32) 435 | { 436 | if (this->depthInColorPix.isAllocated()) 437 | { 438 | if (!this->depthInColorTex.isAllocated()) 439 | { 440 | this->depthInColorTex.allocate(this->depthInColorPix); 441 | this->depthInColorTex.setTextureMinMagFilter(GL_NEAREST, GL_NEAREST); 442 | } 443 | 444 | this->depthInColorTex.loadData(this->depthInColorPix); 445 | } 446 | 447 | if (this->colorInDepthPix.isAllocated()) 448 | { 449 | if (!this->colorInDepthTex.isAllocated()) 450 | { 451 | this->colorInDepthTex.allocate(this->colorInDepthPix); 452 | this->colorInDepthTex.setTextureMinMagFilter(GL_NEAREST, GL_NEAREST); 453 | this->colorInDepthTex.bind(); 454 | { 455 | glTexParameteri(this->colorInDepthTex.texData.textureTarget, GL_TEXTURE_SWIZZLE_R, GL_BLUE); 456 | glTexParameteri(this->colorInDepthTex.texData.textureTarget, GL_TEXTURE_SWIZZLE_B, GL_RED); 457 | } 458 | this->colorInDepthTex.unbind(); 459 | } 460 | 461 | this->colorInDepthTex.loadData(this->colorInDepthPix); 462 | } 463 | } 464 | 465 | if (this->bodyTracker.isTracking()) 466 | { 467 | this->bodyTracker.updateTextures(); 468 | } 469 | 470 | // Update frame number. 
471 | this->texFrameNum = this->pixFrameNum; 472 | this->bNewFrame = true; 473 | } 474 | 475 | bool Stream::updatePointsCache(k4a::image& frameImg, k4a::image& tableImg) 476 | { 477 | const auto frameDims = glm::ivec2(frameImg.get_width_pixels(), frameImg.get_height_pixels()); 478 | const auto tableDims = glm::ivec2(tableImg.get_width_pixels(), tableImg.get_height_pixels()); 479 | if (frameDims != tableDims) 480 | { 481 | ofLogError(__FUNCTION__) << "Image dims mismatch! " << frameDims << " vs " << tableDims; 482 | return false; 483 | } 484 | 485 | const auto frameData = reinterpret_cast(frameImg.get_buffer()); 486 | const auto tableData = reinterpret_cast(tableImg.get_buffer()); 487 | 488 | this->positionCache.resize(frameDims.x * frameDims.y); 489 | this->uvCache.resize(frameDims.x * frameDims.y); 490 | 491 | int count = 0; 492 | for (int y = 0; y < frameDims.y; ++y) 493 | { 494 | for (int x = 0; x < frameDims.x; ++x) 495 | { 496 | int idx = y * frameDims.x + x; 497 | if (frameData[idx] != 0 && 498 | tableData[idx].xy.x != 0 && tableData[idx].xy.y != 0) 499 | { 500 | float depthVal = static_cast(frameData[idx]); 501 | this->positionCache[count] = glm::vec3( 502 | tableData[idx].xy.x * depthVal, 503 | tableData[idx].xy.y * depthVal, 504 | depthVal 505 | ); 506 | 507 | this->uvCache[count] = glm::vec2(x, y); 508 | 509 | ++count; 510 | } 511 | } 512 | } 513 | 514 | this->numPoints = count; 515 | 516 | return true; 517 | } 518 | 519 | bool Stream::updateDepthInColorFrame(const k4a::image& depthImg, const k4a::image& colorImg) 520 | { 521 | try 522 | { 523 | this->transformation.depth_image_to_color_camera(depthImg, &this->depthInColorImg); 524 | } 525 | catch (const k4a::error& e) 526 | { 527 | ofLogError(__FUNCTION__) << e.what(); 528 | return false; 529 | } 530 | 531 | const auto depthInColorData = reinterpret_cast(this->depthInColorImg.get_buffer()); 532 | 533 | if (!this->depthInColorPix.isAllocated()) 534 | { 535 | 
this->depthInColorPix.allocate(this->depthInColorImg.get_width_pixels(), this->depthInColorImg.get_height_pixels(), 1); 536 | } 537 | 538 | this->depthInColorPix.setFromPixels(depthInColorData, this->depthInColorImg.get_width_pixels(), this->depthInColorImg.get_height_pixels(), 1); 539 | 540 | ofLogVerbose(__FUNCTION__) << "Depth in Color " << this->depthInColorImg.get_width_pixels() << "x" << this->depthInColorImg.get_height_pixels() << " stride: " << this->depthInColorImg.get_stride_bytes() << "."; 541 | 542 | return true; 543 | } 544 | 545 | bool Stream::updateColorInDepthFrame(const k4a::image& depthImg, const k4a::image& colorImg) 546 | { 547 | try 548 | { 549 | this->transformation.color_image_to_depth_camera(depthImg, colorImg, &this->colorInDepthImg); 550 | } 551 | catch (const k4a::error& e) 552 | { 553 | ofLogError(__FUNCTION__) << e.what(); 554 | return false; 555 | } 556 | 557 | const auto colorInDepthData = reinterpret_cast(this->colorInDepthImg.get_buffer()); 558 | 559 | if (!this->colorInDepthPix.isAllocated()) 560 | { 561 | this->colorInDepthPix.allocate(this->colorInDepthImg.get_width_pixels(), this->colorInDepthImg.get_height_pixels(), OF_PIXELS_BGRA); 562 | } 563 | 564 | this->colorInDepthPix.setFromPixels(colorInDepthData, this->colorInDepthImg.get_width_pixels(), this->colorInDepthImg.get_height_pixels(), 4); 565 | 566 | ofLogVerbose(__FUNCTION__) << "Color in Depth " << this->colorInDepthImg.get_width_pixels() << "x" << this->colorInDepthImg.get_height_pixels() << " stride: " << this->colorInDepthImg.get_stride_bytes() << "."; 567 | 568 | return true; 569 | } 570 | 571 | bool Stream::isOpen() const 572 | { 573 | return this->bOpen; 574 | } 575 | 576 | bool Stream::isStreaming() const 577 | { 578 | return this->bStreaming; 579 | } 580 | 581 | bool Stream::isFrameNew() const 582 | { 583 | return this->bNewFrame; 584 | } 585 | 586 | const std::string& Stream::getSerialNumber() const 587 | { 588 | return this->serialNumber; 589 | } 590 | 591 | 
uint32_t Stream::getFramerate() const 592 | { 593 | switch (this->getCameraFps()) 594 | { 595 | case FramesPerSecond::K4A_FRAMES_PER_SECOND_5: 596 | return 5; 597 | 598 | case FramesPerSecond::K4A_FRAMES_PER_SECOND_15: 599 | return 15; 600 | 601 | case FramesPerSecond::K4A_FRAMES_PER_SECOND_30: 602 | default: 603 | return 30; 604 | } 605 | } 606 | 607 | const k4a::calibration& Stream::getCalibration() const 608 | { 609 | return this->calibration; 610 | } 611 | 612 | const ofShortPixels& Stream::getDepthPix() const 613 | { 614 | return this->depthPix; 615 | } 616 | 617 | const ofTexture& Stream::getDepthTex() const 618 | { 619 | return this->depthTex; 620 | } 621 | 622 | const ofPixels& Stream::getColorPix() const 623 | { 624 | return this->colorPix; 625 | } 626 | 627 | const ofTexture& Stream::getColorTex() const 628 | { 629 | return this->colorTex; 630 | } 631 | 632 | const ofShortPixels& Stream::getIrPix() const 633 | { 634 | return this->irPix; 635 | } 636 | 637 | const ofTexture& Stream::getIrTex() const 638 | { 639 | return this->irTex; 640 | } 641 | 642 | const ofFloatPixels& Stream::getDepthToWorldPix() const 643 | { 644 | return this->depthToWorldPix; 645 | } 646 | 647 | const ofTexture& Stream::getDepthToWorldTex() const 648 | { 649 | return this->depthToWorldTex; 650 | } 651 | 652 | const ofFloatPixels& Stream::getColorToWorldPix() const 653 | { 654 | return this->colorToWorldPix; 655 | } 656 | 657 | const ofTexture& Stream::getColorToWorldTex() const 658 | { 659 | return this->colorToWorldTex; 660 | } 661 | 662 | const ofShortPixels& Stream::getDepthInColorPix() const 663 | { 664 | return this->depthInColorPix; 665 | } 666 | 667 | const ofTexture& Stream::getDepthInColorTex() const 668 | { 669 | return this->depthInColorTex; 670 | } 671 | 672 | const ofPixels& Stream::getColorInDepthPix() const 673 | { 674 | return this->colorInDepthPix; 675 | } 676 | 677 | const ofTexture& Stream::getColorInDepthTex() const 678 | { 679 | return this->colorInDepthTex; 
680 | } 681 | 682 | const ofVbo& Stream::getPointCloudVbo() const 683 | { 684 | return this->pointCloudVbo; 685 | } 686 | 687 | const BodyTracker& Stream::getBodyTracker() const 688 | { 689 | return this->bodyTracker; 690 | } 691 | 692 | BodyTracker& Stream::getBodyTracker() 693 | { 694 | return this->bodyTracker; 695 | } 696 | 697 | const ofPixels& Stream::getBodyIndexPix() const 698 | { 699 | return this->bodyTracker.getBodyIndexPix(); 700 | } 701 | 702 | const ofTexture& Stream::getBodyIndexTex() const 703 | { 704 | return this->bodyTracker.getBodyIndexTex(); 705 | } 706 | 707 | size_t Stream::getNumBodies() const 708 | { 709 | return this->bodyTracker.getNumBodies(); 710 | } 711 | 712 | const std::vector& Stream::getBodySkeletons() const 713 | { 714 | return this->bodyTracker.getBodySkeletons(); 715 | } 716 | 717 | size_t Stream::getNumSuccessiveFails() const 718 | { 719 | return this->numSuccessiveFails; 720 | } 721 | } 722 | -------------------------------------------------------------------------------- /src/ofxAzureKinect/Stream.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | 9 | #include "ofEvents.h" 10 | #include "ofPixels.h" 11 | #include "ofTexture.h" 12 | #include "ofThread.h" 13 | #include "ofVbo.h" 14 | #include "ofVectorMath.h" 15 | 16 | #include "BodyTracker.h" 17 | #include "Types.h" 18 | 19 | namespace ofxAzureKinect 20 | { 21 | class Stream 22 | : public ofThread 23 | { 24 | public: 25 | Stream(); 26 | virtual ~Stream(); 27 | 28 | virtual bool startBodyTracker(BodyTrackerSettings trackerSettings = BodyTrackerSettings()); 29 | virtual bool stopBodyTracker(); 30 | 31 | bool isOpen() const; 32 | bool isStreaming() const; 33 | bool isFrameNew() const; 34 | 35 | const std::string& getSerialNumber() const; 36 | 37 | virtual DepthMode getDepthMode() const = 0; 38 | virtual ImageFormat getColorFormat() const = 0; 39 | virtual 
ColorResolution getColorResolution() const = 0; 40 | virtual FramesPerSecond getCameraFps() const = 0; 41 | virtual uint32_t getFramerate() const; 42 | 43 | virtual WiredSyncMode getWiredSyncMode() const = 0; 44 | virtual uint32_t getDepthDelayUsec() const = 0; 45 | virtual uint32_t getSubordinateDelayUsec() const = 0; 46 | 47 | const k4a::calibration& getCalibration() const; 48 | 49 | const ofShortPixels& getDepthPix() const; 50 | const ofTexture& getDepthTex() const; 51 | 52 | const ofPixels& getColorPix() const; 53 | const ofTexture& getColorTex() const; 54 | 55 | const ofShortPixels& getIrPix() const; 56 | const ofTexture& getIrTex() const; 57 | 58 | const ofFloatPixels& getDepthToWorldPix() const; 59 | const ofTexture& getDepthToWorldTex() const; 60 | 61 | const ofFloatPixels& getColorToWorldPix() const; 62 | const ofTexture& getColorToWorldTex() const; 63 | 64 | const ofShortPixels& getDepthInColorPix() const; 65 | const ofTexture& getDepthInColorTex() const; 66 | 67 | const ofPixels& getColorInDepthPix() const; 68 | const ofTexture& getColorInDepthTex() const; 69 | 70 | const ofVbo& getPointCloudVbo() const; 71 | 72 | const BodyTracker& getBodyTracker() const; 73 | BodyTracker& getBodyTracker(); 74 | 75 | const ofPixels& getBodyIndexPix() const; 76 | const ofTexture& getBodyIndexTex() const; 77 | 78 | size_t getNumBodies() const; 79 | const std::vector& getBodySkeletons() const; 80 | 81 | size_t getNumSuccessiveFails() const; 82 | 83 | protected: 84 | virtual bool setupDepthToWorldTable(); 85 | virtual bool setupColorToWorldTable(); 86 | virtual bool setupImageToWorldTable(k4a_calibration_type_t type, k4a::image& img); 87 | 88 | virtual bool setupTransformationImages(); 89 | 90 | virtual bool startStreaming(); 91 | virtual bool stopStreaming(); 92 | 93 | virtual void threadedFunction() override; 94 | 95 | virtual void update(ofEventArgs& args); 96 | 97 | virtual bool updateCapture() = 0; 98 | virtual void releaseCapture(); 99 | 100 | virtual void 
updatePixels(); 101 | virtual void updateTextures(); 102 | 103 | virtual bool updatePointsCache(k4a::image& frameImg, k4a::image& tableImg); 104 | 105 | virtual bool updateDepthInColorFrame(const k4a::image& depthImg, const k4a::image& colorImg); 106 | virtual bool updateColorInDepthFrame(const k4a::image& depthImg, const k4a::image& colorImg); 107 | 108 | protected: 109 | bool bOpen; 110 | bool bStreaming; 111 | bool bNewFrame; 112 | 113 | bool bUpdateColor; 114 | bool bUpdateIr; 115 | bool bUpdateWorld; 116 | bool bUpdateVbo; 117 | bool bForceVboToDepthSize; 118 | 119 | std::condition_variable condition; 120 | uint64_t pixFrameNum; 121 | uint64_t texFrameNum; 122 | 123 | std::string serialNumber; 124 | 125 | k4a::calibration calibration; 126 | k4a::transformation transformation; 127 | k4a::capture capture; 128 | 129 | tjhandle jpegDecompressor; 130 | 131 | BodyTracker bodyTracker; 132 | 133 | size_t numSuccessiveFails; 134 | 135 | ofShortPixels depthPix; 136 | ofTexture depthTex; 137 | 138 | ofPixels colorPix; 139 | ofTexture colorTex; 140 | 141 | ofShortPixels irPix; 142 | ofTexture irTex; 143 | 144 | k4a::image depthToWorldImg; 145 | ofFloatPixels depthToWorldPix; 146 | ofTexture depthToWorldTex; 147 | 148 | k4a::image colorToWorldImg; 149 | ofFloatPixels colorToWorldPix; 150 | ofTexture colorToWorldTex; 151 | 152 | k4a::image depthInColorImg; 153 | ofShortPixels depthInColorPix; 154 | ofTexture depthInColorTex; 155 | 156 | k4a::image colorInDepthImg; 157 | ofPixels colorInDepthPix; 158 | ofTexture colorInDepthTex; 159 | 160 | std::vector positionCache; 161 | std::vector uvCache; 162 | size_t numPoints; 163 | ofVbo pointCloudVbo; 164 | }; 165 | } 166 | -------------------------------------------------------------------------------- /src/ofxAzureKinect/Types.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include "ofVectorMath.h" 6 | 7 | inline const glm::vec2 & toGlm(const 
k4a_float2_t & v) 8 | { 9 | return *reinterpret_cast(&v); 10 | } 11 | 12 | inline const glm::vec3 & toGlm(const k4a_float3_t & v) 13 | { 14 | return *reinterpret_cast(&v); 15 | } 16 | 17 | inline const glm::quat toGlm(const k4a_quaternion_t & q) 18 | { 19 | return glm::quat(q.v[0], q.v[1], q.v[2], q.v[3]); 20 | } 21 | 22 | inline const k4a_float2_t& toK4A(const glm::vec2& v) 23 | { 24 | return *reinterpret_cast(&v); 25 | } 26 | 27 | inline const k4a_float3_t& toK4A(const glm::vec3& v) 28 | { 29 | return *reinterpret_cast(&v); 30 | } 31 | 32 | inline const k4a_quaternion_t toK4A(const glm::quat& q) 33 | { 34 | k4a_quaternion_t quat; 35 | quat.v[0] = q[3]; 36 | quat.v[1] = q[0]; 37 | quat.v[2] = q[1]; 38 | quat.v[3] = q[2]; 39 | return quat; 40 | } 41 | 42 | namespace ofxAzureKinect 43 | { 44 | typedef k4a_depth_mode_t DepthMode; 45 | typedef k4a_color_resolution_t ColorResolution; 46 | typedef k4a_image_format_t ImageFormat; 47 | typedef k4a_fps_t FramesPerSecond; 48 | typedef k4a_wired_sync_mode_t WiredSyncMode; 49 | 50 | typedef k4abt_sensor_orientation_t SensorOrientation; 51 | typedef k4abt_tracker_processing_mode_t ProcessingMode; 52 | typedef k4abt_joint_confidence_level_t ConfidenceLevel; 53 | } 54 | --------------------------------------------------------------------------------