├── CostFunPreCalib.m
├── EstimateIntrins.m
├── FinalCalibration.m
├── Kin2
│   ├── DemoAll.m
│   ├── Mex
│   │   ├── Kin2.h
│   │   ├── Kin2.m
│   │   ├── Kin2_base.cpp
│   │   ├── Kin2_face.cpp
│   │   ├── Kin2_fusion.cpp
│   │   ├── Kin2_mapping.cpp
│   │   ├── Kin2_mex.cpp
│   │   ├── Kin2_mex.mexw64
│   │   ├── calibCostFun.m
│   │   └── class_handle.hpp
│   ├── README.txt
│   ├── bodyDemo.m
│   ├── bodyIndexDemo.m
│   ├── calibrationDemo.m
│   ├── compile_cpp_files.m
│   ├── faceDemo.m
│   ├── faceHDDemo.m
│   ├── faceHDDemo2.m
│   ├── image.png
│   ├── kinectFusionDemo.m
│   ├── mapping2CamDemo.m
│   ├── mappingDemo.m
│   ├── pointCloudDemo.m
│   ├── pointCloudDemo2.m
│   └── videoDemo.m
├── PreCalib.m
├── README.md
├── S05_calcReprojectionError.m
├── S05_costFunVec.m
├── Step02_PreCalibration.m
├── Step03_Find3DMatches.m
├── Step03_Matching.m
├── Step04_IntrinsicParametersEstimation.m
├── Step05_FinalCalibration.m
├── TCPIPCommands.mat
├── TCPIPbroadcastCommand.m
├── TCPIPgetResponses.m
├── TCPIPsendCommand.m
├── dataAcq.fig
├── dataAcq.m
├── findPointAfromInfrared.m
├── getCalibDataFromClient.m
├── initialization.fig
├── initialization.m
├── knnsearch.m
├── main.fig
├── main.m
├── matching3DNN.m
├── serverGetData.m
├── setup1.mat
└── trackCalibPoints.m

/CostFunPreCalib.m:
--------------------------------------------------------------------------------
1 | % Function:
2 | %   CostFunPreCalib
3 | %
4 | % Description:
5 | %   Function that we wish to minimize using proj02_PreCalib
6 | %
7 | % Dependencies:
8 | %   File: calibParameters.mat, from which we load the variables dataDir and pointsToConsider
9 | %   File: variablesForCostFunPreCalib.mat with the variables camNum, Xw1, Xw2
10 | %
11 | % Inputs:
12 | %   1) x0: parameters that we wish to find by minimizing the function f
13 | %
14 | % Usage:
15 | %   This function is called by the proj02_PreCalib script inside the optimization
16 | %   function fsolve
17 | %
18 | % Results:
19 | %   Finds the values of x0 that minimize f
20 | %
21 | % Authors:
22 | %   Diana M. Cordova
23 | %   Juan R. Terven
24 | % Date: 16-Jan-2016
25 | %
26 | function f = CostFunPreCalib(x0)
27 | 
28 | persistent Xw1s;
29 | persistent Xw2s;
30 | 
31 | % Euler angles to rotation matrix
32 | R = eul2r(x0(1),x0(2),x0(3));
33 | t = [x0(4);x0(5);x0(6)];
34 | 
35 | 
36 | % Load the data
37 | if isempty(Xw1s)
38 | 
39 | % Load dataDir and pointsToConsider
40 | load('calibParameters.mat');
41 | 
42 | % Load camNum, Xw1, Xw2
43 | load([dataDir '/variablesForCostFunPreCalib.mat']);
44 | 
45 | Xw1s = Xw1';
46 | Xw2s = Xw2';
47 | 
48 | if pointsToConsider ~= -1
49 | Xw1s = Xw1s(:,1:pointsToConsider);
50 | Xw2s = Xw2s(:,1:pointsToConsider);
51 | end
52 | end
53 | 
54 | pos_puntos_ref=[Xw1s(1,:)' Xw1s(2,:)' Xw1s(3,:)'];
55 | pos_puntos_cam=[Xw2s(1,:)' Xw2s(2,:)' Xw2s(3,:)'];
56 | 
57 | f = [];
58 | for j=1:size(pos_puntos_ref,1)
59 | 
60 | vec=pos_puntos_ref(j,:);
61 | comp=pos_puntos_cam(j,:);
62 | 
63 | p_esp=(R*vec')+t; % reference point transformed into the camera frame
64 | 
65 | fun = comp - p_esp'; % residual between measured and predicted point
66 | 
67 | f=[fun'; f];
68 | end
69 | 
70 | e = mean(abs(f)); % mean absolute residual, kept for inspection
--------------------------------------------------------------------------------
/EstimateIntrins.m:
--------------------------------------------------------------------------------
1 | function intrinsics = EstimateIntrins(dataAcqFile,matchingResultsFile,preCalibResultsFile,camNum,cameraType)
2 | 
3 | if camNum == 1
4 | load(dataAcqFile)
5 | else
6 | % Load the point clouds: cam2_1Matches, cam3_1Matches, etc
7 | load(matchingResultsFile);
8 | load(preCalibResultsFile);
9 | end
10 | 
11 | if camNum == 1
12 | p3d = cam1.pointcloud';
13 | R = eye(3);
14 | t = [0 0 0];
15 | 
16 | % 2D points as an n x 2 matrix
17 | if strcmp(cameraType, 'depth')
18 | p2d = cam1.depthProj';
19 | elseif strcmp(cameraType, 'color')
20 | p2d = cam1.colorProj';
21 | end
22 | elseif camNum == 2
23 | p3d = cam2_1Matches;
24 | R = R1_2;
25 | t = t1_2;
26 | 
27 | if strcmp(cameraType, 'depth')
28 | p2d = cam2_1depthProj;
29 | elseif strcmp(cameraType, 'color')
30 | p2d = cam2_1colorProj;
31 | end
32 | elseif camNum == 3
33 | p3d = cam3_1Matches;
34 | R = R1_3;
35 | t = t1_3;
36 | 
37 | if strcmp(cameraType, 'depth')
38 | p2d = cam3_1depthProj;
39 | elseif strcmp(cameraType, 'color')
40 | p2d = cam3_1colorProj;
41 | end
42 | elseif camNum == 4
43 | p3d = cam4_1Matches;
44 | R = R1_4;
45 | t = t1_4;
46 | 
47 | if strcmp(cameraType, 'depth')
48 | p2d = cam4_1depthProj;
49 | elseif strcmp(cameraType, 'color')
50 | p2d = cam4_1colorProj;
51 | end
52 | end
53 | 
54 | 
55 | rows = size(p2d,1);
56 | A = zeros(2,5); % per-point coefficient block, filled inside the loop
57 | w11=R(1,1);
58 | w12=R(1,2);
59 | w13=R(1,3);
60 | w21=R(2,1);
61 | w22=R(2,2);
62 | w23=R(2,3);
63 | w31=R(3,1);
64 | w32=R(3,2);
65 | w33=R(3,3);
66 | Tx=t(1);
67 | Ty=t(2);
68 | Tz=t(3);
69 | 
70 | 
71 | At=[];
72 | for i=1:rows
73 | ui = p3d(i,1); % 3D point (X,Y,Z) in the reference frame
74 | vi = p3d(i,2);
75 | wi = p3d(i,3);
76 | 
77 | Xi = p2d(i,1);
78 | Yi = p2d(i,2);
79 | % Row 1: Xi = fx*xn + skew*yn + cx;  Row 2: Yi = fy*yn + cy (xn,yn = normalized coords)
80 | A=[(w11*ui+w12*vi+w13*wi+Tx)/(w31*ui+w32*vi+w33*wi+Tz) (w21*ui+w22*vi+w23*wi+Ty)/(w31*ui+w32*vi+w33*wi+Tz) 1 0 0;...
81 | 0 0 0 (w21*ui+w22*vi+w23*wi+Ty)/(w31*ui+w32*vi+w33*wi+Tz) 1];
82 | 
83 | At=[At; A];
84 | 
85 | 
86 | end
87 | 
88 | x = reshape(double(p2d)',[],1); % RHS ordered [X1;Y1;X2;Y2;...] to match the interleaved rows of At
89 | 
90 | h = At\x;
91 | 
92 | intrinsics = [h(1) h(2) h(3); ...
93 | 0 h(4) h(5);
94 | 0 0 1];
95 | 
--------------------------------------------------------------------------------
/FinalCalibration.m:
--------------------------------------------------------------------------------
1 | % Function:
2 | %   FinalCalibration
3 | %
4 | % Description:
5 | %   Performs calibration of a Kinect camera (depth or color) given pairs of
6 | %   3D points and 2D projections.
7 | %
8 | % Dependencies:
9 | %   function proj05_costFunVec: the function we wish to minimize
10 | %   function tr2eul: converts from a rotation matrix to Euler angles.
11 | %   calibParameters.mat: file with variables defined in proj0_Multi_Kinect_Calibration.m
12 | %   such as distortRad, distortTan, withSkew.
13 | %
14 | % Inputs:
15 | %   - preCalibResults: file with the initial estimation of rotation and
16 | %   translation for each camera. This file is generated in proj0_Multi_Kinect_Calibration.m
17 | %   - initIntrinsics: file with the initial estimation of intrinsic parameters for each
18 | %   camera. This file is generated in proj0_Multi_Kinect_Calibration.m
19 | %   - camNum: Index of the camera to calibrate (1 to 4).
20 | %   - camType: Type of camera to calibrate: 'depth' or 'color'
21 | %
22 | %
23 | % Usage:
24 | %   This function is called in Step 5 (Final Joint Calibration) of the
25 | %   calibration process.
26 | %
27 | % Results:
28 | %   Intrinsic and extrinsic parameters, radDist (radial distortion) and
29 | %   tanDist (tangential distortion).
30 | %
31 | % Authors:
32 | %   Diana M. Cordova
33 | %   Juan R. Terven
34 | % Date: 16-Jan-2016
35 | 
36 | function [outData1, outData2] = FinalCalibration( ...
37 | preCalibResults, initIntrinsics, camNum, camType)
38 | 
39 | clear proj05_costFunVec3; % clear persistent variables of the cost function
40 | load('calibParameters.mat'); % Load the variables: dataDir, distortRad, distortTan, withSkew
41 | load(preCalibResults); % Load the estimated extrinsics
42 | load(initIntrinsics); % Load the initial intrinsics
43 | 
44 | % Generate a file with variables for the cost function
45 | save([dataDir '/variablesForCostFun.mat'],'camNum','camType');
46 | 
47 | cd(fileparts(mfilename('fullpath')));
48 | 
49 | 
50 | % Get intrinsic initial values from the initIntrinsics file
51 | if strcmp(camType, 'depth')
52 | if camNum == 1
53 | f = preIntrinsicsD1(1,1);
54 | cx = preIntrinsicsD1(1,3);
55 | cy = preIntrinsicsD1(2,3);
56 | skew = preIntrinsicsD1(1,2);
57 | elseif camNum == 2
58 | f = preIntrinsicsD2(1,1);
59 | cx = preIntrinsicsD2(1,3);
60 | cy = preIntrinsicsD2(2,3);
61 | skew = preIntrinsicsD2(1,2);
62 | elseif camNum == 3
63 | f = preIntrinsicsD3(1,1);
64 | cx = preIntrinsicsD3(1,3);
65 | cy = preIntrinsicsD3(2,3);
66 | skew = preIntrinsicsD3(1,2);
67 | elseif camNum == 4
68 | f = preIntrinsicsD4(1,1);
69 | cx = preIntrinsicsD4(1,3);
70 | cy = preIntrinsicsD4(2,3);
71 | skew = preIntrinsicsD4(1,2);
72 | end
73 | elseif strcmp(camType, 'color')
74 | if camNum == 1
75 | f = preIntrinsicsC1(1,1);
76 | cx = preIntrinsicsC1(1,3);
77 | cy = preIntrinsicsC1(2,3);
78 | skew = preIntrinsicsC1(1,2);
79 | elseif camNum == 2
80 | f = preIntrinsicsC2(1,1);
81 | cx = preIntrinsicsC2(1,3);
82 | cy = preIntrinsicsC2(2,3);
83 | skew = preIntrinsicsC2(1,2);
84 | elseif camNum == 3
85 | f = preIntrinsicsC3(1,1);
86 | cx = preIntrinsicsC3(1,3);
87 | cy = preIntrinsicsC3(2,3);
88 | skew = preIntrinsicsC3(1,2);
89 | elseif camNum == 4
90 | f = preIntrinsicsC4(1,1);
91 | cx = preIntrinsicsC4(1,3);
92 | cy = preIntrinsicsC4(2,3);
93 | skew = preIntrinsicsC4(1,2);
94 | end
95 | end
96 | 
97 | % Get extrinsics from pre-calibration
98 | if camNum == 1
99 | R = eye(3);
100 | %Rq = qGetQ( R );
101 | %Rq = qNormalize(Rq);
102 | Rq = rotm2quat(R);
103 | t = [0 0 0];
104 | elseif camNum == 2
105 | %Rq = qGetQ( R1_2 );
106 | %Rq = qNormalize(Rq);
107 | Rq = rotm2quat(R1_2);
108 | t = t1_2;
109 | elseif camNum == 3
110 | %Rq = qGetQ( R1_3 );
111 | %Rq = qNormalize(Rq);
112 | Rq = rotm2quat(R1_3);
113 | t = t1_3;
114 | elseif camNum == 4
115 | %Rq = qGetQ( R1_4 );
116 | %Rq = qNormalize(Rq);
117 | Rq = rotm2quat(R1_4);
118 | t = t1_4;
119 | end
120 | 
121 | % Build the vector of variables to be solved
122 | x0 = [f, cx, cy, ...
123 | Rq(1), Rq(2), Rq(3), Rq(4), ...
124 | t(1), t(2), t(3)];
125 | 
126 | 
127 | if distortRad > 0
128 | if distortRad == 2
129 | x0 = [x0 0 0];
130 | elseif distortRad == 3
131 | x0 = [x0 0 0 0];
132 | end
133 | 
134 | if distortTan
135 | x0 = [x0 0 0];
136 | end
137 | end
138 | if withSkew
139 | x0 = [x0 skew];
140 | end
141 | 
142 | % Non-linear optimization
143 | options = optimset('Algorithm','levenberg-marquardt','MaxFunEvals',100000, ...
144 | 'TolFun',1e-100,'TolX',1e-100,'MaxIter', 10000);
145 | x = fsolve(@proj05_costFunVec3,x0,options);
146 | 
147 | % Extract results from the solution vector
148 | f = x(1); % focal length
149 | cx = x(2); % principal point x
150 | cy = x(3); % principal point y
151 | 
152 | if withSkew
153 | s = x(end); % skew
154 | else
155 | s = 0;
156 | end
157 | 
158 | % Get rotation from vector
159 | Rq = [x(4) x(5) x(6) x(7)];
160 | 
161 | % Convert to rotation matrix
162 | % Rx=[1 0 0;0 cos(Reul(1)) sin(Reul(1));0 -sin(Reul(1)) cos(Reul(1))];
163 | % Ry=[cos(Reul(2)) 0 -sin(Reul(2));0 1 0;sin(Reul(2)) 0 cos(Reul(2))];
164 | % Rz=[cos(Reul(3)) sin(Reul(3)) 0;-sin(Reul(3)) cos(Reul(3)) 0;0 0 1];
165 | % R = Rx*Ry*Rz;
166 | %R = eul2r(Reul(1),Reul(2),Reul(3));
167 | %Rq = qNormalize(Rq);
168 | % Get rotation matrix from quaternion
169 | %R = qGetR(Rq);
170 | R = quat2rotm(Rq);
171 | 
172 | % Extract translation
173 | t = [x(8); x(9); x(10)];
174 | 
175 | % Extract radial and tangential distortion coefficients
176 | if distortRad == 2
177 | k1 = x(11);
178 | k2 = x(12);
179 | if distortTan
180 | p1 = x(13);
181 | p2 = x(14);
182 | end
183 | elseif distortRad == 3
184 | k1 = x(11);
185 | k2 = x(12);
186 | k3 = x(13);
187 | if distortTan
188 | p1 = x(14);
189 | p2 = x(15);
190 | end
191 | end
192 | 
193 | % Display results
194 | disp(['***** ' camType ' Camera ' num2str(camNum) ' Calibration Results *****']);
195 | 
196 | intrinsics = [f s cx;
197 | 0 f cy;
198 | 0 0 1];
199 | disp('Intrinsic parameters matrix:');
200 | disp(intrinsics);
201 | 
202 | % Rotation
203 | disp('R=');
204 | disp(R);
205 | 
206 | % Test if the rotation is valid
207 | detR = det(R);
208 | if abs(detR - 1) > 1e-6 % allow for floating-point error
209 | disp('Invalid rotation matrix!')
210 | disp(['Determinant = ' num2str(detR)]);
211 | end
212 | 
213 | % Translation
214 | disp('t=');
215 | disp(t);
216 | 
217 | % Distortion
218 | radDist = [];
219 | tanDist = [];
220 | if distortRad > 0
221 | if distortRad == 2
222 | disp('Radial Distortion: k1, k2')
223 | radDist = [k1 k2];
224 | disp(radDist);
225 | 
226 | elseif distortRad == 3
227 | disp('Radial Distortion: k1, k2, k3')
228 | radDist = [k1 k2 k3];
229 | disp(radDist);
230 | end
231 | 
232 | if distortTan
233 | disp('Tangential Distortion: p1, p2')
234 | tanDist = [p1 p2];
235 | disp(tanDist);
236 | end
237 | end
238 | 
239 | tanStr = '0';
240 | if distortTan
241 | tanStr = '2';
242 | end
243 | 
244 | 
245 | 
246 | % Calculate reprojection error with the point cloud and point-cloud projections
247 | result = struct('CamType',camType,'CamNum',camNum,'MatchingDist',minDist3D, ...
248 | 'Intrinsics',intrinsics,'Rot',R,'t',t,'RadDist',radDist, ...
249 | 'TanDist',tanDist);
250 | 
251 | repError = proj05_calcReprojectionError2(result);
252 | disp(['Error: ' num2str(repError)]);
253 | 
254 | 
255 | % Build the name of the struct
256 | name = ['cam' num2str(camNum) '_' camType '_' num2str(minDist3D) 'mm_' ...
257 | 'rad' num2str(distortRad) '_tan' tanStr];
258 | 
259 | % Build the struct with the data
260 | results = struct('CamType',camType,'MatchingDist',minDist3D, ...
261 | 'Intrinsics',intrinsics,'Rot',Rq,'t',t,'RadDist',radDist, ...
262 | 'TanDist',tanDist,'Error',repError);
263 | 
264 | outData1.(name) = results;
265 | outData2 = results;
266 | 
--------------------------------------------------------------------------------
/Kin2/DemoAll.m:
--------------------------------------------------------------------------------
1 | % DEMOALL Illustrates how to use most of the Kin2 capabilities
2 | %
3 | % Note: You must add to the Windows PATH the bin directory containing
4 | % Kinect20.Face.dll.
5 | % For example: C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\bin
6 | %
7 | % Juan R. Terven, jrterven@hotmail.com
8 | % Diana M. Cordova, diana_mce@hotmail.com
9 | %
10 | % Citation:
11 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of
12 | % Computer Programming.
13 | % https://github.com/jrterven/Kin2, 2016.
14 | 
15 | % Specify the data sources
16 | k2 = Kin2('color','depth','infrared','body_index','body', ...
17 | 'face','HDface');
18 | 
19 | depth = zeros(424,512,'uint16');
20 | color = zeros(1080,1920,3,'uint8');
21 | % MATLAB figures: depth, color, infrared, body index, volume
22 | d.h=figure; d.ax=axes; d.im=imshow(depth,[0 4000]);
23 | c.h=figure; c.ax=axes; c.im=imshow(color,[]);
24 | infra.h = figure; infra.ax = axes; infra.im = imshow(depth);
25 | bi.h = figure; bi.ax = axes; bi.im = imshow(depth);
26 | pc.h = figure; pc.ax = axes;
27 | v.h = figure; v.ax = axes; v.im = imshow(depth);
28 | 
29 | % Initialize the Kinect Fusion engine with a reconstruction volume
30 | % 1.5 m wide (384 voxels / 256 voxels-per-meter) at ~3.9 mm/voxel
31 | k2.KF_init(256,384,384,384,true);
32 | 
33 | for i=1:200
34 | % Fetch data from the Kinect and buffer it in the k2 object
35 | validData = k2.updateData;
36 | 
37 | if validData % valid frame?
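% updateData polls all of the selected sources at once; the get* calls
% below only copy the frames buffered by that call, so they all refer to
% the same capture instant. The same update/get pattern works with any
% subset of sources (a minimal sketch):
%   k2 = Kin2('color','depth');
%   if k2.updateData, depth = k2.getDepth; end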
38 | % Get frames
39 | depth = k2.getDepth;
40 | color = k2.getColor;
41 | infrared = k2.getInfrared;
42 | bodyIndex = k2.getBodyIndex;
43 | [pcd, pcCol] = k2.getPointCloud('color','true');
44 | pcCol = double(pcCol)/255.0;
45 | 
46 | % Display frames
47 | imshow(depth,'Parent', d.ax)
48 | imshow(color,'Parent', c.ax)
49 | imshow(infrared,'Parent', infra.ax)
50 | imshow(bodyIndex,'Parent', bi.ax)
51 | scatter3(pc.ax,pcd(:,1),pcd(:,2),pcd(:,3),6,pcCol,'Marker','.');
52 | 
53 | % Get bodies data and display them
54 | bodies = k2.getBodies('Quat');
55 | k2.drawBodies(d.ax,bodies,'depth',5,3,15);
56 | k2.drawBodies(c.ax,bodies,'color',10,6,30);
57 | 
58 | % Get faces data and draw them on the color frame
59 | faces = k2.getFaces;
60 | k2.drawFaces(c.ax,faces,5,true,20);
61 | 
62 | % Get HD faces data and draw them
63 | facesHD = k2.getHDFaces;
64 | k2.drawHDFaces(c.ax,facesHD,true,true,20);
65 | 
66 | % Update the 3D reconstruction and display it
67 | k2.KF_update;
68 | volume = k2.KF_getVolumeImage;
69 | imshow(volume,'Parent', v.ax)
70 | 
71 | end
72 | pause(0.01)
73 | end
74 | 
75 | k2.delete; % close Kinect connection
--------------------------------------------------------------------------------
/Kin2/Mex/Kin2.h:
--------------------------------------------------------------------------------
1 | ///////////////////////////////////////////////////////////////////////////
2 | /// Kin2.h
3 | ///
4 | /// Description:
5 | /// Kin2 class encapsulates the functionality of the Kinect2 sensor.
6 | /// It uses the Kinect2 SDK from Microsoft.
7 | /// Copyright (c) Microsoft Corporation. All rights reserved.
8 | ///
9 | /// Defines methods to:
10 | /// * Initialize, and get images from the depth, color, and infrared cameras.
11 | /// * Coordinate mapping between cameras.
12 | /// * Body tracking
13 | /// * Face and HD face processing
14 | /// * 3D reconstruction
15 | ///
16 | /// Authors:
17 | /// Juan R. Terven
18 | /// Diana M. Cordova
19 | ///
20 | /// Citation:
21 | /// Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of Computer Programming.
22 | /// https://github.com/jrterven/Kin2, 2016.
23 | ///
24 | /// Creation Date: Oct/07/2015
25 | /// Modifications:
26 | /// Oct/08/2015: Reserve heap space only if required (using flags)
27 | /// Oct/09/2015: Add coordinate mapping between depth and color
28 | /// Oct/24/2015: Add skeleton tracking
29 | /// Oct/28/2015: Add coordinate mapping between depth, color and camera
30 | /// Oct/29/2015: Add coordinate mapping between camera and depth and color
31 | /// Jan/05/2016: Add getPointCloud function
32 | /// Jan/23/2016: Kinect Fusion
33 | /// Jan/25/2016: Face processing
34 | /// Jan/25/2016: Body index
35 | /// Jan/31/2016: HD Face processing
36 | /// Mar/01/2016: Depth camera intrinsics
37 | /// Mar/03/2016: Add Joints orientations
38 | /// Mar/09/2016: Add pointclouds with color and pointCloud MATLAB object
39 | /// Mar/15/2016: Add radial distortion to the color camera calibration
40 | /// Mar/18/2016: Fix HD face shape units
41 | /// Mar/25/2016: Update documentation
42 | /// Mar/31/2016: Add floor clip plane to the body data
43 | ///////////////////////////////////////////////////////////////////////////
44 | #include <Kinect.h>
45 | #include <Kinect.Face.h>
46 | #include <NuiKinectFusionApi.h>
47 | #include <vector>
48 | 
49 | #define SAFE_FUSION_RELEASE_IMAGE_FRAME(p) { if (p) { static_cast<HRESULT>(NuiFusionReleaseImageFrame(p)); (p)=NULL; } }
50 | #define SAFE_DELETE_ARRAY(p) { if (p) { delete[] (p); (p)=NULL; } }
51 | 
52 | namespace k2
53 | {
54 | // Sources of Kinect data.
These are selected when creating the Kin2 object
55 | enum{
56 | COLOR = FrameSourceTypes::FrameSourceTypes_Color,
57 | DEPTH = FrameSourceTypes::FrameSourceTypes_Depth,
58 | INFRARED = FrameSourceTypes::FrameSourceTypes_Infrared,
59 | BODY_INDEX = FrameSourceTypes::FrameSourceTypes_BodyIndex,
60 | BODY = FrameSourceTypes::FrameSourceTypes_Body,
61 | FACE = 0x80,
62 | HD_FACE = 0x100
63 | };
64 | typedef unsigned short int Flags;
65 | 
66 | // FaceData structure returned by the getFaces function
67 | typedef struct _FaceData
68 | {
69 | RectI faceBox;
70 | PointF facePoints[FacePointType::FacePointType_Count];
71 | Vector4 faceRotation;
72 | DetectionResult faceProperties[FaceProperty::FaceProperty_Count];
73 | }FaceData;
74 | 
75 | // HDFaceData structure returned by the getHDFaces function
76 | typedef struct _HDFaceData
77 | {
78 | RectI faceBox;
79 | Vector4 faceRotation;
80 | CameraSpacePoint headPivot;
81 | float animationUnits[FaceShapeAnimations_Count];
82 | std::vector<CameraSpacePoint> faceModel;
83 | 
84 | }HDFaceData;
85 | }
86 | 
87 | /*************************************************************************/
88 | /************************** Kin2 Class ***********************************/
89 | /*************************************************************************/
90 | class Kin2
91 | {
92 | static const int cDepthWidth = 512; // depth image width
93 | static const int cDepthHeight = 424; // depth image height
94 | static const int cInfraredWidth = 512; // infrared image width
95 | static const int cInfraredHeight = 424; // infrared image height
96 | static const int cColorWidth = 1920; // color image width
97 | static const int cColorHeight = 1080; // color image height
98 | static const int cNumColorPix = cColorWidth*cColorHeight; // number of color pixels
99 | 
100 | public:
101 | Kin2(UINT16 sources); // Constructor
102 | ~Kin2(); // Destructor
103 | 
104 | // Initialize Kinect2
105 | void init();
106 | 
107 | /************ Data Sources *************/
108 | void updateData(INT8 valid[]);
109 | void getDepth(UINT16 depth[],INT64& time, bool& validDepth);
110 | void getColor(unsigned char rgbImage[], INT64& time, bool& validColor);
111 | void getInfrared(UINT16 infrared[], INT64& time, bool& validInfrared);
112 | void getBodyIndex(BYTE bodyIndex[],bool& validBodyIndex);
113 | void getPointCloud(double pointCloud[], unsigned char colors[], bool color, bool& validData);
114 | void getDepthIntrinsics(CameraIntrinsics &intrinsics);
115 | 
116 | /************ Mappings **************/
117 | void mapDepthPoints2Color(double depthCoords[], int size, UINT16 colorCoords[]);
118 | void mapDepthPoints2Camera(double depthCoords[], int size, double cameraCoords[]);
119 | //bool mapDepthFrame2Color(ColorSpacePoint* depth2ColorMapping);
120 | 
121 | void mapColorPoints2Depth(double colorCoords[], int size, UINT16 depthCoords[]);
122 | void mapColorPoints2Camera(double colorCoords[], int size, double cameraCoords[]);
123 | 
124 | void mapCameraPoints2Depth(double cameraCoords[], int size, UINT16 depthCoords[]);
125 | void mapCameraPoints2Color(double cameraCoords[], int size, UINT16 colorCoords[]);
126 | 
127 | void alignColor2Depth(unsigned char alignedImage[], bool& validData);
128 | 
129 | /************ Body Tracking *****************/
130 | void getBodies(std::vector<std::vector<Joint> >& bodiesJoints,
131 | std::vector<std::vector<JointOrientation> >& bodiesJointsOrientations,
132 | std::vector<HandState>& lhs, std::vector<HandState>& rhs, Vector4 &fcp, INT64& time);
133 | 
134 | /************ Face Processing public functions *****************/
135 | void getFaces(std::vector<k2::FaceData>& facesData);
136 | void getHDFaces(bool withVertices, std::vector<k2::HDFaceData>& facesData);
137 | void buildHDFaceModels(int &collectionStatus, int &captureStatus);
138 | 
139 | /*************** Kinect Fusion public functions ***************/
140 | void KF_init(int voxelsPerMeter = 64, int voxelsX = 256, int voxelsY = 256, int voxelsZ = 256, bool processorType = true, bool autoReset = true);
141 | void KF_update();
142 | void KF_getVolumeImage(BYTE volumeImg[]);
143 | void KF_reset();
144 | HRESULT KF_getMesh(INuiFusionMesh **ppMesh);
145 | 
146 | /*************** Other methods ***************/
147 | void extractRotationInDegrees(Vector4& pQuaternion, double& dPitch, double& dYaw, double& dRoll);
148 | private:
149 | // Current Kinect
150 | IKinectSensor* m_pKinectSensor; // The Kinect sensor
151 | ICoordinateMapper* m_pCoordinateMapper; // The coordinate mapper
152 | 
153 | // Frame reader
154 | IMultiSourceFrameReader* m_pMultiSourceFrameReader; // Kinect data reader
155 | 
156 | // Heap storage for images
157 | UINT16* m_pDepthArray16U; // 16-bit depth image
158 | UINT16* m_pInfraredArray16U; // 16-bit infrared image
159 | BYTE* m_pColor;
160 | BYTE* m_pBodyIndex;
161 | 
162 | // Heap storage for bodies
163 | IBody* m_ppBodies[BODY_COUNT];
164 | bool m_bHaveBodyData;
165 | 
166 | // Bodies floor clip plane
167 | Vector4 m_floorClipPlane;
168 | 
169 | // Timestamps
170 | INT64 m_depthTimeStamp;
171 | INT64 m_colorTimeStamp;
172 | INT64 m_infraredTimeStamp;
173 | INT64 m_bodiesTimeStamp;
174 | 
175 | // Face sources
176 | IFaceFrameSource* m_pFaceFrameSources[BODY_COUNT];
177 | 
178 | // Face readers
179 | IFaceFrameReader* m_pFaceFrameReaders[BODY_COUNT];
180 | 
181 | // HD Face sources
182 | IHighDefinitionFaceFrameSource* m_pHDFaceFrameSources[BODY_COUNT];
183 | 
184 | // HDFace readers
185 | IHighDefinitionFaceFrameReader* m_pHDFaceFrameReaders[BODY_COUNT];
186 | 
187 | // HD Face models
188 | IFaceModelBuilder* m_pFaceModelBuilder[BODY_COUNT];
189 | bool m_faceModelReady[BODY_COUNT];
190 | bool m_faceModelWarning[BODY_COUNT]; // keep track of the face model warning message
191 | IFaceAlignment* m_pFaceAlignment[BODY_COUNT];
192 | IFaceModel* m_pFaceModel[BODY_COUNT];
193 | 
194 | // Flags of available data
195 | bool m_newDepthData;
196 | bool m_newColorData;
197 | bool m_newInfraredData;
198 | bool m_newBodyIndex;
199 | bool m_newPointCloudData;
200 | 
201 | // Initialization flags
202 | k2::Flags m_flags;
203 | 
204 | /************ Face Processing private functions *****************/
205 | 
206 | // define the face frame features required to be computed by this application
207 | static const DWORD c_FaceFrameFeatures =
208 | FaceFrameFeatures::FaceFrameFeatures_BoundingBoxInColorSpace
209 | | FaceFrameFeatures::FaceFrameFeatures_PointsInColorSpace
210 | | FaceFrameFeatures::FaceFrameFeatures_RotationOrientation
211 | | FaceFrameFeatures::FaceFrameFeatures_Happy
212 | | FaceFrameFeatures::FaceFrameFeatures_RightEyeClosed
213 | | FaceFrameFeatures::FaceFrameFeatures_LeftEyeClosed
214 | | FaceFrameFeatures::FaceFrameFeatures_MouthOpen
215 | | FaceFrameFeatures::FaceFrameFeatures_MouthMoved
216 | | FaceFrameFeatures::FaceFrameFeatures_LookingAway
217 | | FaceFrameFeatures::FaceFrameFeatures_Glasses
218 | | FaceFrameFeatures::FaceFrameFeatures_FaceEngagement;
219 | 
220 | /************************************************************/
221 | /****** Kinect Fusion variables and functions ******/
222 | /************************************************************/
223 | static const int
cResetOnTimeStampSkippedMilliseconds = 1000; // ms 224 | static const int cResetOnNumberOfLostFrames = 100; 225 | 226 | /// Setup or update the Undistortion calculation for the connected camera 227 | HRESULT KF_SetupUndistortion(); 228 | HRESULT OnCoordinateMappingChanged(); 229 | 230 | /// Initialize Kinect Fusion volume and images for processing 231 | HRESULT KF_InitializeKinectFusion(); 232 | 233 | /// Handle new depth data 234 | void KF_ProcessDepth(); 235 | UINT m_cDepthImagePixels; 236 | 237 | /// The Kinect Fusion Reconstruction Volume 238 | INuiFusionReconstruction* m_pVolume; 239 | 240 | /// The Kinect Fusion Volume Parameters 241 | NUI_FUSION_RECONSTRUCTION_PARAMETERS m_reconstructionParams; 242 | 243 | // The Kinect Fusion Camera Transform 244 | Matrix4 m_worldToCameraTransform; 245 | 246 | // The default Kinect Fusion World to Volume Transform 247 | Matrix4 m_defaultWorldToVolumeTransform; 248 | 249 | /// Frames from the depth input 250 | UINT16* m_pDepthImagePixelBuffer; 251 | NUI_FUSION_IMAGE_FRAME* m_pDepthFloatImage; 252 | 253 | /// For depth distortion correction 254 | DepthSpacePoint* m_pDepthDistortionMap; 255 | UINT* m_pDepthDistortionLT; 256 | WAITABLE_HANDLE m_coordinateMappingChangedEvent; 257 | 258 | /// Kinect camera parameters. 259 | NUI_FUSION_CAMERA_PARAMETERS m_cameraParameters; 260 | bool m_bHaveValidCameraParameters; 261 | 262 | /// Frames generated from ray-casting the Reconstruction Volume 263 | NUI_FUSION_IMAGE_FRAME* m_pPointCloud; 264 | 265 | /// Images for display 266 | NUI_FUSION_IMAGE_FRAME* m_pShadedSurface; 267 | 268 | /// Camera Tracking parameters 269 | int m_cLostFrameCounter; 270 | bool m_bTrackingFailed; 271 | 272 | bool m_bResetReconstruction; 273 | /// Parameter to turn automatic reset of the reconstruction when camera tracking is lost on or off. 274 | /// Set to true in the KF_init to enable auto reset on cResetOnNumberOfLostFrames lost frames, 275 | /// or set false to never automatically reset. 276 | bool m_bAutoResetReconstructionWhenLost; 277 | 278 | /// Parameter to enable automatic reset of the reconstruction when there is a large 279 | /// difference in timestamp between subsequent frames. This should usually be set true as 280 | /// default to enable recorded .xef files to generate a reconstruction reset on looping of 281 | /// the playback or scrubbing, however, for debug purposes, it can be set false to prevent 282 | /// automatic reset on timeouts. 283 | bool m_bAutoResetReconstructionOnTimeout; 284 | 285 | /// Processing parameters 286 | int m_deviceIndex; 287 | NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE m_processorType; 288 | bool m_bInitializeError; 289 | float m_fMinDepthThreshold; 290 | float m_fMaxDepthThreshold; 291 | bool m_bMirrorDepthFrame; 292 | unsigned short m_cMaxIntegrationWeight; 293 | int m_cFrameCounter; 294 | 295 | /// Parameter to translate the reconstruction based on the minimum depth setting. When set to 296 | /// false, the reconstruction volume +Z axis starts at the camera lens and extends into the scene. 297 | /// Setting this true in the constructor will move the volume forward along +Z away from the 298 | /// camera by the minimum depth threshold to enable capture of very small reconstruction volumes 299 | /// by setting a non-identity camera transformation in the ResetReconstruction call. 
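/// (The shift itself happens in KF_reset, which translates the
/// world-to-volume transform along +Z by the minimum depth threshold.)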
300 | /// Small volumes should be shifted, as the Kinect hardware has a minimum sensing limit of ~0.35m,
301 | /// inside which no valid depth is returned, hence it is difficult to initialize and track robustly
302 | /// when the majority of a small volume is inside this distance.
303 | bool m_bTranslateResetPoseByMinDepthThreshold;
304 | }; // Kin2 class definition
--------------------------------------------------------------------------------
/Kin2/Mex/Kin2.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jrterven/MultiKinCalib/6b797c9c5926ff4e45fde11d015bb7e95a6bf570/Kin2/Mex/Kin2.m
--------------------------------------------------------------------------------
/Kin2/Mex/Kin2_face.cpp:
--------------------------------------------------------------------------------
1 | #include "Kin2.h"
2 | #include "mex.h"
3 | #include "class_handle.hpp"
4 | #include <Kinect.h>
5 | #include <Kinect.Face.h>
6 | #include <vector>
7 | #include <iostream>
8 | 
9 | /***********************************************************************/
10 | /******************** Face Processing functions *************************/
11 | /***********************************************************************/
12 | void Kin2::getFaces(std::vector<k2::FaceData>& facesData)
13 | {
14 | if (!(m_flags & k2::FACE))
15 | {
16 | mexPrintf("ERROR: NO FACE FUNCTIONALITY SELECTED!\n");
17 | return;
18 | }
19 | 
20 | HRESULT hr;
21 | facesData.clear();
22 | 
23 | // iterate through each face reader
24 | for (int iFace = 0; iFace < BODY_COUNT; ++iFace)
25 | {
26 | // retrieve the latest face frame from this reader
27 | IFaceFrame* pFaceFrame = nullptr;
28 | hr = m_pFaceFrameReaders[iFace]->AcquireLatestFrame(&pFaceFrame);
29 | 
30 | BOOLEAN bFaceTracked = false;
31 | if (SUCCEEDED(hr) && nullptr != pFaceFrame)
32 | {
33 | // check if a valid face is tracked in this face frame
34 | hr = pFaceFrame->get_IsTrackingIdValid(&bFaceTracked);
35 | }
36 | 
37 | if (SUCCEEDED(hr))
38 | {
39 | // If face tracked, save its data on the facesData structure array
40 | if (bFaceTracked)
41 | {
42 | IFaceFrameResult* pFaceFrameResult = nullptr;
43 | hr = pFaceFrame->get_FaceFrameResult(&pFaceFrameResult);
44 | 
45 | k2::FaceData faceData;
46 | 
47 | // need to verify if pFaceFrameResult contains data before trying to access it
48 | if (SUCCEEDED(hr) && pFaceFrameResult != nullptr)
49 | {
50 | hr = pFaceFrameResult->get_FaceBoundingBoxInColorSpace(&faceData.faceBox);
51 | 
52 | if (SUCCEEDED(hr))
53 | {
54 | hr = pFaceFrameResult->GetFacePointsInColorSpace(FacePointType::FacePointType_Count, faceData.facePoints);
55 | }
56 | 
57 | if (SUCCEEDED(hr))
58 | {
59 | hr = pFaceFrameResult->get_FaceRotationQuaternion(&faceData.faceRotation);
60 | }
61 | 
62 | if (SUCCEEDED(hr))
63 | {
64 | hr = pFaceFrameResult->GetFaceProperties(FaceProperty::FaceProperty_Count, faceData.faceProperties);
65 | }
66 | 
67 | facesData.push_back(faceData);
68 | }
69 | 
70 | SafeRelease(pFaceFrameResult);
71 | }
72 | else
73 | {
74 | // face tracking is not valid - attempt to fix the issue
75 | // a valid body is required to perform this step
76 | if (m_bHaveBodyData)
77 | {
78 | // check if the corresponding body is tracked
79 | // if this is true then update the face frame source to track this body
80 | IBody* pBody = m_ppBodies[iFace];
81 | if (pBody != nullptr)
82 | {
83 | BOOLEAN bTracked = false;
84 | hr = pBody->get_IsTracked(&bTracked);
85 | 
86 | UINT64 bodyTId;
87 | if (SUCCEEDED(hr) && bTracked)
88 | {
89 | // get the tracking ID of this body
90 | hr = pBody->get_TrackingId(&bodyTId);
91 | if (SUCCEEDED(hr))
92 | {
93 | // update the face frame source with the tracking ID
94 | m_pFaceFrameSources[iFace]->put_TrackingId(bodyTId);
95 | }
96 | }
97 | }
98 | }
99 | }
100 | }
101 | 
102 | SafeRelease(pFaceFrame);
103 | }
104 | }
105 | 
106 | 
107 | void Kin2::getHDFaces(bool withVertices, std::vector<k2::HDFaceData>& facesData)
108 | {
109 | if (!(m_flags & k2::HD_FACE))
110 | {
111 | mexPrintf("ERROR: NO HD-FACE FUNCTIONALITY SELECTED!\n");
112 | return;
113 | }
114 | 
115 | HRESULT hr;
116 | facesData.clear();
117 | 
118 | // iterate through each HD face reader
119 | for (int iFace = 0; iFace < BODY_COUNT; ++iFace)
120 | {
121 | // retrieve the latest face frame from this reader
122 | IHighDefinitionFaceFrame *pHDFaceFrame = nullptr;
123 | 
124 | hr = m_pHDFaceFrameReaders[iFace]->AcquireLatestFrame(&pHDFaceFrame);
125 | 
126 | BOOLEAN bFaceTracked = false;
127 | if (SUCCEEDED(hr) && nullptr != pHDFaceFrame)
128 | {
129 | // check if a valid face is tracked in this face frame
130 | hr = pHDFaceFrame->get_IsTrackingIdValid(&bFaceTracked);
131 | }
132 | 
133 | // If face tracked, save its data on the facesData structure array
134 | if (bFaceTracked)
135 | {
136 | float animationUnits[FaceShapeAnimations_Count]={0};
137 | UINT32 vertexCount;
138 | 
139 | // Here we save the HD face data
140 | k2::HDFaceData faceData;
141 | 
142 | hr = pHDFaceFrame->GetAndRefreshFaceAlignmentResult(m_pFaceAlignment[iFace]);
143 | 
144 | if (SUCCEEDED(hr) && m_pFaceAlignment[iFace] != nullptr)
145 | {
146 | // Get the Animation units
147 | hr = m_pFaceAlignment[iFace]->GetAnimationUnits(FaceShapeAnimations_Count, animationUnits);
148 | 
149 | if (SUCCEEDED(hr))
150 | {
151 | for (int vi = 0; vi < FaceShapeAnimations_Count; vi++)
152 | faceData.animationUnits[vi] = animationUnits[vi];
153 | }
154 | 
155 | // If HD face model vertices are requested
156 | if (withVertices)
157 | {
158 | hr = GetFaceModelVertexCount(&vertexCount);
159 | //mexPrintf("Number of Vertices: %d", vertexCount);
160 | 
161 | // If there is no model ready, issue a warning message (just once)
162 | if (!m_faceModelReady[iFace] && !m_faceModelWarning[iFace])
163 | {
164 | mexPrintf("WARNING: No personal model has been created.
An average face model will be used\n"); 165 | m_faceModelWarning[iFace] = true; 166 | } 167 | 168 | CameraSpacePoint *vertices = new CameraSpacePoint[vertexCount]; 169 | 170 | // Get the vertices (HD points) 171 | if (SUCCEEDED(hr)) 172 | hr = m_pFaceModel[iFace]->CalculateVerticesForAlignment(m_pFaceAlignment[iFace], vertexCount, vertices); 173 | 174 | if (SUCCEEDED(hr)) 175 | { 176 | faceData.faceModel.resize(vertexCount); 177 | 178 | for (int vi = 0; vi < vertexCount; vi++) 179 | faceData.faceModel[vi] = vertices[vi]; 180 | } 181 | 182 | if (vertices) 183 | { 184 | delete[] vertices; 185 | vertices = NULL; 186 | } 187 | } // if withVertices 188 | 189 | // Get the facebox 190 | if (SUCCEEDED(hr)) 191 | hr = m_pFaceAlignment[iFace]->get_FaceBoundingBox(&faceData.faceBox); 192 | 193 | // Get the face rotation 194 | if (SUCCEEDED(hr)) 195 | hr = m_pFaceAlignment[iFace]->get_FaceOrientation(&faceData.faceRotation); 196 | 197 | // Get the head pivot 198 | if (SUCCEEDED(hr)) 199 | { 200 | hr = m_pFaceAlignment[iFace]->get_HeadPivotPoint(&faceData.headPivot); 201 | } 202 | 203 | // Save the HD face data in the member variable m_HDfacesData 204 | facesData.push_back(faceData); 205 | } // if face alignment 206 | } // If face tracked 207 | else 208 | { 209 | // face tracking is not valid - attempt to fix the issue 210 | // a valid body is required to perform this step 211 | if (m_bHaveBodyData) 212 | { 213 | // check if the corresponding body is tracked 214 | // if this is true then update the face frame source to track this body 215 | IBody* pBody = m_ppBodies[iFace]; 216 | if (pBody != nullptr) 217 | { 218 | BOOLEAN bTracked = false; 219 | hr = pBody->get_IsTracked(&bTracked); 220 | 221 | UINT64 bodyTId; 222 | if (SUCCEEDED(hr) && bTracked) 223 | { 224 | // get the tracking ID of this body 225 | hr = pBody->get_TrackingId(&bodyTId); 226 | if (SUCCEEDED(hr)) 227 | { 228 | // update the face frame source with the tracking ID 229 | m_pHDFaceFrameSources[iFace]->put_TrackingId(bodyTId); 230 | } 231 | } 232 | } // if (pBody != nullptr) 233 | } // if (m_bHaveBodyData) 234 | } // if face tracked 235 | 236 | SafeRelease(pHDFaceFrame); 237 | } // for each face reader 238 | } // end getHDFaces function 239 | 240 | void Kin2::buildHDFaceModels(int &collectionStatus, int &captureStatus) 241 | { 242 | collectionStatus = -1; 243 | captureStatus = -1; 244 | 245 | if (!(m_flags & k2::HD_FACE)) 246 | { 247 | mexPrintf("ERROR: NO HD-FACE FUNCTIONALITY SELECTED!\n"); 248 | return; 249 | } 250 | 251 | HRESULT hr; 252 | 253 | // iterate through each HD face reader 254 | for (int iFace = 0; iFace < BODY_COUNT; ++iFace) 255 | { 256 | // retrieve the latest face frame from this reader 257 | IHighDefinitionFaceFrame *pHDFaceFrame = nullptr; 258 | 259 | hr = m_pHDFaceFrameReaders[iFace]->AcquireLatestFrame(&pHDFaceFrame); 260 | 261 | BOOLEAN bFaceTracked = false; 262 | if (SUCCEEDED(hr) && nullptr != pHDFaceFrame) 263 | { 264 | // check if a valid face is tracked in this face frame 265 | hr = pHDFaceFrame->get_IsTrackingIdValid(&bFaceTracked); 266 | } 267 | 268 | // If face tracked, try to align it 269 | if (SUCCEEDED(hr) && bFaceTracked) 270 | { 271 | IFaceModel *pFaceModel = nullptr; 272 | 273 | hr = pHDFaceFrame->GetAndRefreshFaceAlignmentResult(m_pFaceAlignment[iFace]); 274 | 275 | // If face aligned, continue building the model 276 | if (SUCCEEDED(hr) && m_pFaceAlignment[iFace] != nullptr) 277 | { 278 | // If face model not ready 279 | if (!m_faceModelReady[iFace]) 280 | { 281 | FaceModelBuilderCollectionStatus 
collection; 282 | hr = m_pFaceModelBuilder[iFace]->get_CollectionStatus(&collection); 283 | collectionStatus = (int)collection; 284 | 285 | // If model completed 286 | if (collection == FaceModelBuilderCollectionStatus::FaceModelBuilderCollectionStatus_Complete) 287 | { 288 | mexPrintf("Face Model Completed!\n"); 289 | 290 | IFaceModelData* pFaceModelData = nullptr; 291 | hr = m_pFaceModelBuilder[iFace]->GetFaceData(&pFaceModelData); 292 | 293 | // Produce the model 294 | if (SUCCEEDED(hr) && pFaceModelData != nullptr) 295 | { 296 | mexPrintf("Producing model...\n"); 297 | hr = pFaceModelData->ProduceFaceModel(&m_pFaceModel[iFace]); 298 | mexPrintf("Model Ready!\n"); 299 | 300 | // Set the model ready flag 301 | if (SUCCEEDED(hr) && m_pFaceModel[iFace] != nullptr) 302 | { 303 | m_faceModelReady[iFace] = true; 304 | } 305 | } 306 | SafeRelease(pFaceModelData); 307 | 308 | // Get the shape units (SU) i.e. the deformations wrt the base face model 309 | /* 310 | if (SUCCEEDED(hr)) 311 | { 312 | float deformations[FaceShapeDeformations_Count]; 313 | hr = m_pFaceModel[iFace]->GetFaceShapeDeformations(FaceShapeDeformations_Count, deformations); 314 | } 315 | */ 316 | } 317 | // if model not completed yet 318 | else 319 | { 320 | // Display Collection Status 321 | /* 322 | if (collection >= FaceModelBuilderCollectionStatus::FaceModelBuilderCollectionStatus_TiltedUpViewsNeeded) 323 | { 324 | mexPrintf("Need : Tilted Up Views\n"); 325 | } 326 | 327 | 328 | else if (collection >= FaceModelBuilderCollectionStatus::FaceModelBuilderCollectionStatus_RightViewsNeeded) 329 | { 330 | mexPrintf("Need : Right Views\n"); 331 | } 332 | 333 | else if (collection >= FaceModelBuilderCollectionStatus::FaceModelBuilderCollectionStatus_LeftViewsNeeded) 334 | { 335 | mexPrintf("Need : Left Views\n"); 336 | } 337 | 338 | else if (collection >= FaceModelBuilderCollectionStatus::FaceModelBuilderCollectionStatus_FrontViewFramesNeeded) 339 | { 340 | mexPrintf("Need : Front ViewFrames\n"); 341 | } 342 | */ 343 | 344 | // Display Capture Status 345 | FaceModelBuilderCaptureStatus capture; 346 | hr = m_pFaceModelBuilder[iFace]->get_CaptureStatus(&capture); 347 | 348 | captureStatus = (int)capture; 349 | 350 | /* 351 | switch (capture) 352 | { 353 | case FaceModelBuilderCaptureStatus::FaceModelBuilderCaptureStatus_OtherViewsNeeded: 354 | std::cout << "Other views needed" << std::endl; 355 | break; 356 | case FaceModelBuilderCaptureStatus::FaceModelBuilderCaptureStatus_FaceTooFar: 357 | std::cout << "Face Too Far from Camera" << std::endl; 358 | break; 359 | case FaceModelBuilderCaptureStatus::FaceModelBuilderCaptureStatus_FaceTooNear: 360 | std::cout << "Face Too Near to Camera" << std::endl; 361 | break; 362 | case FaceModelBuilderCaptureStatus_MovingTooFast: 363 | std::cout << "Moving Too Fast" << std::endl; 364 | break; 365 | case FaceModelBuilderCaptureStatus::FaceModelBuilderCaptureStatus_LostFaceTrack: 366 | std::cout << "Lost Face Track" << std::endl; 367 | break; 368 | case FaceModelBuilderCaptureStatus::FaceModelBuilderCaptureStatus_SystemError: 369 | std::cout << "ERROR: System Error" << std::endl; 370 | break; 371 | 372 | default: 373 | break; 374 | } 375 | */ 376 | } // collection not complete 377 | } // If face model not ready 378 | } // If face aligned 379 | } // If face tracked 380 | else 381 | { 382 | // face tracking is not valid - attempt to fix the issue 383 | // a valid body is required to perform this step 384 | if (m_bHaveBodyData) 385 | { 386 | // check if the corresponding body is tracked 387 | // if 
this is true then update the face frame source to track this body
388 | IBody* pBody = m_ppBodies[iFace];
389 | if (pBody != nullptr)
390 | {
391 | BOOLEAN bTracked = false;
392 | hr = pBody->get_IsTracked(&bTracked);
393 | 
394 | UINT64 bodyTId;
395 | if (SUCCEEDED(hr) && bTracked)
396 | {
397 | // get the tracking ID of this body
398 | hr = pBody->get_TrackingId(&bodyTId);
399 | if (SUCCEEDED(hr))
400 | {
401 | // update the face frame source with the tracking ID
402 | m_pHDFaceFrameSources[iFace]->put_TrackingId(bodyTId);
403 | }
404 | }
405 | } // if (pBody != nullptr)
406 | } // if (m_bHaveBodyData)
407 | } // if face tracked
408 | }// for each face reader
409 | 
410 | } // end buildHDFaceModels
411 | 
--------------------------------------------------------------------------------
/Kin2/Mex/Kin2_fusion.cpp:
--------------------------------------------------------------------------------
1 | #include "Kin2.h"
2 | #include "mex.h"
3 | #include "class_handle.hpp"
4 | #include <Kinect.h>
5 | #include <NuiKinectFusionApi.h>
6 | #include <vector>
7 | #include <new>
8 | 
9 | /***********************************************************************/
10 | /******************** Kinect Fusion functions *************************/
11 | /***********************************************************************/
12 | void SetIdentityMatrix(Matrix4 &mat)
13 | {
14 | mat.M11 = 1; mat.M12 = 0; mat.M13 = 0; mat.M14 = 0;
15 | mat.M21 = 0; mat.M22 = 1; mat.M23 = 0; mat.M24 = 0;
16 | mat.M31 = 0; mat.M32 = 0; mat.M33 = 1; mat.M34 = 0;
17 | mat.M41 = 0; mat.M42 = 0; mat.M43 = 0; mat.M44 = 1;
18 | }
19 | 
20 | void Kin2::KF_init(int voxelsPerMeter, int voxelsX, int voxelsY, int voxelsZ, bool processorType, bool autoReset)
21 | {
22 | // Check if the depth source was activated
23 | if (!(m_flags & k2::DEPTH))
24 | {
25 | mexPrintf("ERROR Initializing Kinect Fusion. No depth source activated.\n");
26 | mexPrintf("Select depth when creating the Kin2 object.\n");
27 | return;
28 | }
29 | 
30 | /*
31 | mexPrintf("voxelsPerMeter: %d\n",voxelsPerMeter);
32 | mexPrintf("voxelsX: %d\n",voxelsX);
33 | mexPrintf("voxelsY: %d\n",voxelsY);
34 | mexPrintf("voxelsZ: %d\n",voxelsZ);
35 | mexPrintf("processorType: %d\n",processorType);
36 | */
37 | 
38 | m_bInitializeError = false;
39 | m_pVolume = nullptr;
40 | m_bMirrorDepthFrame = false;
41 | m_bTranslateResetPoseByMinDepthThreshold = true;
42 | m_bAutoResetReconstructionWhenLost = autoReset;
43 | m_bResetReconstruction = false;
44 | m_cLostFrameCounter = 0;
45 | m_bTrackingFailed = false;
46 | m_cFrameCounter = 0;
47 | m_pDepthImagePixelBuffer = nullptr;
48 | m_pDepthDistortionMap = nullptr;
49 | m_pDepthDistortionLT = nullptr;
50 | m_pDepthFloatImage = nullptr;
51 | m_pPointCloud = nullptr;
52 | m_pShadedSurface = nullptr;
53 | m_bInitializeError = false;
54 | m_bHaveValidCameraParameters = false;
55 | m_cDepthImagePixels = cDepthWidth * cDepthHeight;
56 | 
57 | // Define a cubic Kinect Fusion reconstruction volume,
58 | // with the Kinect at the center of the front face and the volume directly in front of Kinect.
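// Physical edge length of each axis = voxelCount / voxelsPerMeter, so the
// DemoAll.m call KF_init(256,384,384,384,true) yields a cube
// 384 / 256 = 1.5 m per side at a resolution of 1/256 m (~3.9 mm) per voxel.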
59 | m_reconstructionParams.voxelsPerMeter = voxelsPerMeter;// 1000mm / 256vpm = ~3.9mm/voxel
60 | m_reconstructionParams.voxelCountX = voxelsX; // 384 / 256vpm = 1.5m wide reconstruction
61 | m_reconstructionParams.voxelCountY = voxelsY; // Memory = 384*384*384 * 4bytes per voxel
62 | m_reconstructionParams.voxelCountZ = voxelsZ; // This will require a GPU with at least 256MB
63 | 
64 | // These parameters are for optionally clipping the input depth image
65 | m_fMinDepthThreshold = NUI_FUSION_DEFAULT_MINIMUM_DEPTH; // min depth in meters
66 | m_fMaxDepthThreshold = NUI_FUSION_DEFAULT_MAXIMUM_DEPTH; // max depth in meters
67 | 
68 | // This parameter is the temporal averaging parameter for depth integration into the reconstruction
69 | m_cMaxIntegrationWeight = NUI_FUSION_DEFAULT_INTEGRATION_WEIGHT; // Reasonable for static scenes
70 | 
71 | // This parameter sets whether GPU or CPU processing is used. Note that the CPU will likely be
72 | // too slow for real-time processing.
73 | if (processorType)
74 | m_processorType = NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_AMP;
75 | else
76 | m_processorType = NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_CPU;
77 | 
78 | // If GPU processing is selected, we can choose the index of the device we would like to
79 | // use for processing by setting this zero-based index parameter. Note that setting -1 will cause
80 | // automatic selection of the most suitable device (specifically the DirectX11 compatible device
81 | // with largest memory), which is useful in systems with multiple GPUs when only one reconstruction
82 | // volume is required. Note that the automatic choice will not load balance across multiple
83 | // GPUs, hence users should manually select GPU indices when multiple reconstruction volumes
84 | // are required, each on a separate device.
85 | m_deviceIndex = -1; // automatically choose device index for processing
86 | 
87 | SetIdentityMatrix(m_worldToCameraTransform);
88 | SetIdentityMatrix(m_defaultWorldToVolumeTransform);
89 | 
90 | // We don't know these at object creation time, so we use nominal values.
91 | // These will later be updated using the coordinate mapping.
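// Note that NUI_FUSION_CAMERA_PARAMETERS are dimensionless: focal lengths
// and principal point are stored as a fraction of the image width/height,
// which is why OnCoordinateMappingChanged below divides the raw depth
// intrinsics by NUI_DEPTH_RAW_WIDTH / NUI_DEPTH_RAW_HEIGHT before storing them.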
92 | m_cameraParameters.focalLengthX = NUI_KINECT_DEPTH_NORM_FOCAL_LENGTH_X;
93 | m_cameraParameters.focalLengthY = NUI_KINECT_DEPTH_NORM_FOCAL_LENGTH_Y;
94 | m_cameraParameters.principalPointX = NUI_KINECT_DEPTH_NORM_PRINCIPAL_POINT_X;
95 | m_cameraParameters.principalPointY = NUI_KINECT_DEPTH_NORM_PRINCIPAL_POINT_Y;
96 | 
97 | // Subscribe to coordinate mapping changes; the real depth intrinsics arrive through this event
98 | HRESULT hr;
99 | hr = m_pCoordinateMapper->SubscribeCoordinateMappingChanged(&m_coordinateMappingChangedEvent);
100 | 
101 | if (SUCCEEDED(hr))
102 | {
103 | hr = KF_InitializeKinectFusion();
104 | 
105 | if (FAILED(hr))
106 | {
107 | m_bInitializeError = true;
108 | }
109 | }
110 | else
111 | m_bInitializeError = true;
112 | }// end KF_init
113 | 
114 | void Kin2::KF_update()
115 | {
116 | if (nullptr == m_pKinectSensor)
117 | {
118 | mexPrintf("KF: No Kinect sensor detected\n");
119 | return;
120 | }
121 | 
122 | if (m_coordinateMappingChangedEvent != NULL &&
123 | WAIT_OBJECT_0 == WaitForSingleObject((HANDLE)m_coordinateMappingChangedEvent, 0))
124 | {
125 | OnCoordinateMappingChanged();
126 | ResetEvent((HANDLE)m_coordinateMappingChangedEvent);
127 | }
128 | 
129 | if (!m_bHaveValidCameraParameters)
130 | {
131 | mexPrintf("KF: No valid camera parameters\n");
132 | return;
133 | }
134 | 
135 | m_bResetReconstruction = false;
136 | 
137 | if (!m_pMultiSourceFrameReader)
138 | {
139 | mexPrintf("KF: No depth frame reader\n");
140 | return;
141 | }
142 | 
143 | if (m_pDepthArray16U)
144 | {
145 | //copy and remap depth
146 | 
147 | const UINT bufferLength = m_cDepthImagePixels;
148 | UINT16 * pDepth = m_pDepthImagePixelBuffer;
149 | for (UINT i = 0; i < bufferLength; i++, pDepth++)
150 | {
151 | const UINT id = m_pDepthDistortionLT[i];
152 | *pDepth = id < bufferLength ? m_pDepthArray16U[id] : 0;
153 | }
154 | 
155 | KF_ProcessDepth();
156 | }
157 | } // end KF_update
158 | 
159 | 
160 | void UpdateIntrinsics(NUI_FUSION_IMAGE_FRAME * pImageFrame, NUI_FUSION_CAMERA_PARAMETERS * params)
161 | {
162 | if (pImageFrame != nullptr && pImageFrame->pCameraParameters != nullptr && params != nullptr)
163 | {
164 | pImageFrame->pCameraParameters->focalLengthX = params->focalLengthX;
165 | pImageFrame->pCameraParameters->focalLengthY = params->focalLengthY;
166 | pImageFrame->pCameraParameters->principalPointX = params->principalPointX;
167 | pImageFrame->pCameraParameters->principalPointY = params->principalPointY;
168 | }
169 | 
170 | // Confirm we are called correctly
171 | _ASSERT(pImageFrame != nullptr && pImageFrame->pCameraParameters != nullptr && params != nullptr);
172 | } // end UpdateIntrinsics
173 | 
174 | HRESULT Kin2::KF_SetupUndistortion()
175 | {
176 | HRESULT hr = E_UNEXPECTED;
177 | 
178 | if (m_cameraParameters.principalPointX != 0)
179 | {
180 | 
181 | CameraSpacePoint cameraFrameCorners[4] = //at 1 meter distance.
Take into account that depth frame is mirrored 182 | { 183 | /*LT*/{ -m_cameraParameters.principalPointX / m_cameraParameters.focalLengthX, m_cameraParameters.principalPointY / m_cameraParameters.focalLengthY, 1.f }, 184 | /*RT*/{ (1.f - m_cameraParameters.principalPointX) / m_cameraParameters.focalLengthX, m_cameraParameters.principalPointY / m_cameraParameters.focalLengthY, 1.f }, 185 | /*LB*/{ -m_cameraParameters.principalPointX / m_cameraParameters.focalLengthX, (m_cameraParameters.principalPointY - 1.f) / m_cameraParameters.focalLengthY, 1.f }, 186 | /*RB*/{ (1.f - m_cameraParameters.principalPointX) / m_cameraParameters.focalLengthX, (m_cameraParameters.principalPointY - 1.f) / m_cameraParameters.focalLengthY, 1.f } 187 | }; 188 | 189 | for (UINT rowID = 0; rowID < cDepthHeight; rowID++) 190 | { 191 | const float rowFactor = float(rowID) / float(cDepthHeight - 1); 192 | const CameraSpacePoint rowStart = 193 | { 194 | cameraFrameCorners[0].X + (cameraFrameCorners[2].X - cameraFrameCorners[0].X) * rowFactor, 195 | cameraFrameCorners[0].Y + (cameraFrameCorners[2].Y - cameraFrameCorners[0].Y) * rowFactor, 196 | 1.f 197 | }; 198 | 199 | const CameraSpacePoint rowEnd = 200 | { 201 | cameraFrameCorners[1].X + (cameraFrameCorners[3].X - cameraFrameCorners[1].X) * rowFactor, 202 | cameraFrameCorners[1].Y + (cameraFrameCorners[3].Y - cameraFrameCorners[1].Y) * rowFactor, 203 | 1.f 204 | }; 205 | 206 | const float stepFactor = 1.f / float(cDepthWidth - 1); 207 | const CameraSpacePoint rowDelta = 208 | { 209 | (rowEnd.X - rowStart.X) * stepFactor, 210 | (rowEnd.Y - rowStart.Y) * stepFactor, 211 | 0 212 | }; 213 | 214 | _ASSERT(cDepthWidth == NUI_DEPTH_RAW_WIDTH); 215 | CameraSpacePoint cameraCoordsRow[NUI_DEPTH_RAW_WIDTH]; 216 | 217 | CameraSpacePoint currentPoint = rowStart; 218 | for (UINT i = 0; i < cDepthWidth; i++) 219 | { 220 | cameraCoordsRow[i] = currentPoint; 221 | currentPoint.X += rowDelta.X; 222 | currentPoint.Y += rowDelta.Y; 223 | } 224 | 225 | hr = m_pCoordinateMapper->MapCameraPointsToDepthSpace(cDepthWidth, cameraCoordsRow, cDepthWidth, &m_pDepthDistortionMap[rowID * cDepthWidth]); 226 | if (FAILED(hr)) 227 | { 228 | mexPrintf("KF: Failed to initialize Kinect Coordinate Mapper.\n"); 229 | return hr; 230 | } 231 | } 232 | 233 | if (nullptr == m_pDepthDistortionLT) 234 | { 235 | mexPrintf("KF:Failed to initialize Kinect Fusion depth image distortion Lookup Table.\n"); 236 | return E_OUTOFMEMORY; 237 | } 238 | 239 | UINT* pLT = m_pDepthDistortionLT; 240 | for (UINT i = 0; i < m_cDepthImagePixels; i++, pLT++) 241 | { 242 | //nearest neighbor depth lookup table 243 | UINT x = UINT(m_pDepthDistortionMap[i].X + 0.5f); 244 | UINT y = UINT(m_pDepthDistortionMap[i].Y + 0.5f); 245 | 246 | *pLT = (x < cDepthWidth && y < cDepthHeight) ? 
x + y * cDepthWidth : UINT_MAX; 247 | } 248 | m_bHaveValidCameraParameters = true; 249 | } 250 | else 251 | { 252 | m_bHaveValidCameraParameters = false; 253 | } 254 | return S_OK; 255 | } // end KF_SetupUndistortion 256 | 257 | 258 | HRESULT Kin2::OnCoordinateMappingChanged() 259 | { 260 | HRESULT hr = E_UNEXPECTED; 261 | 262 | // Calculate the down sampled image sizes, which are used for the AlignPointClouds calculation frames 263 | CameraIntrinsics intrinsics = {}; 264 | 265 | m_pCoordinateMapper->GetDepthCameraIntrinsics(&intrinsics); 266 | 267 | float focalLengthX = intrinsics.FocalLengthX / NUI_DEPTH_RAW_WIDTH; 268 | float focalLengthY = intrinsics.FocalLengthY / NUI_DEPTH_RAW_HEIGHT; 269 | float principalPointX = intrinsics.PrincipalPointX / NUI_DEPTH_RAW_WIDTH; 270 | float principalPointY = intrinsics.PrincipalPointY / NUI_DEPTH_RAW_HEIGHT; 271 | 272 | if (m_cameraParameters.focalLengthX == focalLengthX && m_cameraParameters.focalLengthY == focalLengthY && 273 | m_cameraParameters.principalPointX == principalPointX && m_cameraParameters.principalPointY == principalPointY) 274 | return S_OK; 275 | 276 | m_cameraParameters.focalLengthX = focalLengthX; 277 | m_cameraParameters.focalLengthY = focalLengthY; 278 | m_cameraParameters.principalPointX = principalPointX; 279 | m_cameraParameters.principalPointY = principalPointY; 280 | 281 | _ASSERT(m_cameraParameters.focalLengthX != 0); 282 | 283 | UpdateIntrinsics(m_pDepthFloatImage, &m_cameraParameters); 284 | UpdateIntrinsics(m_pPointCloud, &m_cameraParameters); 285 | UpdateIntrinsics(m_pShadedSurface, &m_cameraParameters); 286 | 287 | if (nullptr == m_pDepthDistortionMap) 288 | { 289 | mexPrintf("Failed to initialize Kinect Fusion depth image distortion buffer.\n"); 290 | return E_OUTOFMEMORY; 291 | } 292 | 293 | hr = KF_SetupUndistortion(); 294 | return hr; 295 | } // end OnCoordinateMappingChanged 296 | 297 | // Initialize Kinect Fusion volume and images for processing 298 | // S_OK on success, otherwise failure code 299 | HRESULT Kin2::KF_InitializeKinectFusion() 300 | { 301 | HRESULT hr = S_OK; 302 | 303 | // Check to ensure suitable DirectX11 compatible hardware exists before initializing Kinect Fusion 304 | WCHAR description[MAX_PATH]; 305 | WCHAR instancePath[MAX_PATH]; 306 | UINT memorySize = 0; 307 | 308 | if (FAILED(hr = NuiFusionGetDeviceInfo( 309 | m_processorType, 310 | m_deviceIndex, 311 | &description[0], 312 | ARRAYSIZE(description), 313 | &instancePath[0], 314 | ARRAYSIZE(instancePath), 315 | &memorySize))) 316 | { 317 | if (hr == E_NUI_BADINDEX) 318 | { 319 | // This error code is returned either when the device index is out of range for the processor 320 | // type or there is no DirectX11 capable device installed. As we set -1 (auto-select default) 321 | // for the device index in the parameters, this indicates that there is no DirectX11 capable 322 | // device. The options for users in this case are to either install a DirectX11 capable device 323 | // (see documentation for recommended GPUs) or to switch to non-real-time CPU based 324 | // reconstruction by changing the processor type to NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_CPU. 
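// (In this toolbox that corresponds to passing processorType = false to
// KF_init, e.g. k2.KF_init(256,384,384,384,false) from MATLAB.)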
325 | mexPrintf("No DirectX11 device detected, or invalid device index - Kinect Fusion requires a DirectX11 device for GPU-based reconstruction.\n"); 326 | } 327 | else 328 | { 329 | mexPrintf("Failed in call to NuiFusionGetDeviceInfo.\n"); 330 | } 331 | return hr; 332 | } 333 | 334 | // Create the Kinect Fusion Reconstruction Volume 335 | hr = NuiFusionCreateReconstruction( 336 | &m_reconstructionParams, 337 | m_processorType, m_deviceIndex, 338 | &m_worldToCameraTransform, 339 | &m_pVolume); 340 | 341 | if (FAILED(hr)) 342 | { 343 | if (E_NUI_GPU_FAIL == hr) 344 | { 345 | mexPrintf("Device %d not able to run Kinect Fusion, or error initializing.\n",m_deviceIndex); 346 | } 347 | else if (E_NUI_GPU_OUTOFMEMORY == hr) 348 | { 349 | mexPrintf("Device %d out of memory error initializing reconstruction - try a smaller reconstruction volume.\n",m_deviceIndex); 350 | } 351 | else if (NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_CPU != m_processorType) 352 | { 353 | mexPrintf("Failed to initialize Kinect Fusion reconstruction volume on device %d\n",m_deviceIndex); 354 | } 355 | else 356 | { 357 | mexPrintf("Failed to initialize Kinect Fusion reconstruction volume on CPU.\n");; 358 | } 359 | 360 | return hr; 361 | } 362 | 363 | // Save the default world to volume transformation to be optionally used in ResetReconstruction 364 | hr = m_pVolume->GetCurrentWorldToVolumeTransform(&m_defaultWorldToVolumeTransform); 365 | if (FAILED(hr)) 366 | { 367 | mexPrintf("Failed in call to GetCurrentWorldToVolumeTransform.\n"); 368 | return hr; 369 | } 370 | 371 | if (m_bTranslateResetPoseByMinDepthThreshold) 372 | { 373 | // This call will set the world-volume transformation 374 | KF_reset(); 375 | } 376 | 377 | // Frames generated from the depth input 378 | hr = NuiFusionCreateImageFrame(NUI_FUSION_IMAGE_TYPE_FLOAT, cDepthWidth, cDepthHeight, &m_cameraParameters, &m_pDepthFloatImage); 379 | if (FAILED(hr)) 380 | { 381 | mexPrintf("Failed to initialize Kinect Fusion image.\n"); 382 | return hr; 383 | } 384 | 385 | // Create images to raycast the Reconstruction Volume 386 | hr = NuiFusionCreateImageFrame(NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, cDepthWidth, cDepthHeight, &m_cameraParameters, &m_pPointCloud); 387 | if (FAILED(hr)) 388 | { 389 | mexPrintf("Failed to initialize Kinect Fusion image.\n"); 390 | return hr; 391 | } 392 | 393 | // Create images to raycast the Reconstruction Volume 394 | hr = NuiFusionCreateImageFrame(NUI_FUSION_IMAGE_TYPE_COLOR, cDepthWidth, cDepthHeight, &m_cameraParameters, &m_pShadedSurface); 395 | if (FAILED(hr)) 396 | { 397 | mexPrintf("Failed to initialize Kinect Fusion image.\n"); 398 | return hr; 399 | } 400 | 401 | _ASSERT(m_pDepthImagePixelBuffer == nullptr); 402 | m_pDepthImagePixelBuffer = new(std::nothrow) UINT16[m_cDepthImagePixels]; 403 | if (nullptr == m_pDepthImagePixelBuffer) 404 | { 405 | mexPrintf("Failed to initialize Kinect Fusion depth image pixel buffer.\n"); 406 | return hr; 407 | } 408 | 409 | _ASSERT(m_pDepthDistortionMap == nullptr); 410 | m_pDepthDistortionMap = new(std::nothrow) DepthSpacePoint[m_cDepthImagePixels]; 411 | if (nullptr == m_pDepthDistortionMap) 412 | { 413 | mexPrintf("Failed to initialize Kinect Fusion depth image distortion buffer.\n"); 414 | return E_OUTOFMEMORY; 415 | } 416 | 417 | SAFE_DELETE_ARRAY(m_pDepthDistortionLT); 418 | m_pDepthDistortionLT = new(std::nothrow) UINT[m_cDepthImagePixels]; 419 | 420 | if (nullptr == m_pDepthDistortionLT) 421 | { 422 | mexPrintf("Failed to initialize Kinect Fusion depth image distortion Lookup Table.\n"); 423 | 
return E_OUTOFMEMORY; 424 | } 425 | 426 | // If we have valid parameters, let's go ahead and use them. 427 | if (m_cameraParameters.focalLengthX != 0) 428 | { 429 | KF_SetupUndistortion(); 430 | } 431 | 432 | return hr; 433 | } // end KF_InitializeKinectFusion 434 | 435 | // Handle new depth data and perform Kinect Fusion processing 436 | void Kin2::KF_ProcessDepth() 437 | { 438 | if (m_bInitializeError) 439 | { 440 | return; 441 | } 442 | 443 | HRESULT hr = S_OK; 444 | 445 | // To enable playback of a .xef file through Kinect Studio and reset of the reconstruction 446 | // if the .xef loops, we test for when the frame timestamp has skipped a large number. 447 | // Note: this will potentially continually reset live reconstructions on slow machines which 448 | // cannot process a live frame in less time than the reset threshold. Increase the number of 449 | // milliseconds in cResetOnTimeStampSkippedMilliseconds if this is a problem. 450 | if (m_bAutoResetReconstructionOnTimeout && m_cFrameCounter != 0 && m_bResetReconstruction) 451 | { 452 | KF_reset(); 453 | 454 | if (FAILED(hr)) 455 | { 456 | return; 457 | } 458 | } 459 | 460 | // Return if the volume is not initialized 461 | if (nullptr == m_pVolume) 462 | { 463 | mexPrintf("Kinect Fusion reconstruction volume not initialized. Please try reducing volume size or restarting.\n"); 464 | return; 465 | } 466 | 467 | //////////////////////////////////////////////////////// 468 | // Depth to DepthFloat 469 | 470 | // Convert the pixels describing extended depth as unsigned short type in millimeters to depth 471 | // as floating point type in meters. 472 | hr = m_pVolume->DepthToDepthFloatFrame(m_pDepthImagePixelBuffer, m_cDepthImagePixels * sizeof(UINT16), m_pDepthFloatImage, m_fMinDepthThreshold, m_fMaxDepthThreshold, m_bMirrorDepthFrame); 473 | 474 | if (FAILED(hr)) 475 | { 476 | mexPrintf("Kinect Fusion NuiFusionDepthToDepthFloatFrame call failed.\n"); 477 | return; 478 | } 479 | 480 | //////////////////////////////////////////////////////// 481 | // ProcessFrame 482 | 483 | // Perform the camera tracking and update the Kinect Fusion Volume 484 | // This will create memory on the GPU, upload the image, run camera tracking and integrate the 485 | // data into the Reconstruction Volume if successful. Note that passing nullptr as the final 486 | // parameter will use and update the internal camera pose. 487 | hr = m_pVolume->ProcessFrame(m_pDepthFloatImage, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, m_cMaxIntegrationWeight, nullptr, &m_worldToCameraTransform); 488 | 489 | // Test to see if camera tracking failed. 490 | // If it did fail, no data integration or raycast for reference points and normals will have taken 491 | // place, and the internal camera pose will be unchanged. 492 | if (FAILED(hr)) 493 | { 494 | if (hr == E_NUI_FUSION_TRACKING_ERROR) 495 | { 496 | m_cLostFrameCounter++; 497 | m_bTrackingFailed = true; 498 | mexPrintf("Kinect Fusion camera tracking failed! 
Align the camera to the last tracked position.\n"); 499 | } 500 | else 501 | { 502 | mexPrintf("Kinect Fusion ProcessFrame call failed!\n"); 503 | return; 504 | } 505 | } 506 | else 507 | { 508 | Matrix4 calculatedCameraPose; 509 | hr = m_pVolume->GetCurrentWorldToCameraTransform(&calculatedCameraPose); 510 | 511 | if (SUCCEEDED(hr)) 512 | { 513 | // Set the pose 514 | m_worldToCameraTransform = calculatedCameraPose; 515 | m_cLostFrameCounter = 0; 516 | m_bTrackingFailed = false; 517 | } 518 | } 519 | 520 | if (m_bAutoResetReconstructionWhenLost && m_bTrackingFailed && m_cLostFrameCounter >= cResetOnNumberOfLostFrames) 521 | { 522 | // Automatically clear volume and reset tracking if tracking fails 523 | KF_reset(); 524 | 525 | // Set bad tracking message 526 | mexPrintf("Kinect Fusion camera tracking failed, automatically reset volume.\n"); 527 | } 528 | 529 | //////////////////////////////////////////////////////// 530 | // CalculatePointCloud 531 | 532 | // Raycast all the time, even if camera tracking failed, to enable us to visualize what is happening with the system 533 | hr = m_pVolume->CalculatePointCloud(m_pPointCloud, &m_worldToCameraTransform); 534 | 535 | if (FAILED(hr)) 536 | { 537 | mexPrintf("Kinect Fusion CalculatePointCloud call failed.\n"); 538 | return; 539 | } 540 | 541 | //////////////////////////////////////////////////////// 542 | // ShadePointCloud and render 543 | 544 | hr = NuiFusionShadePointCloud(m_pPointCloud, &m_worldToCameraTransform, nullptr, m_pShadedSurface, nullptr); 545 | 546 | if (FAILED(hr)) 547 | { 548 | mexPrintf("Kinect Fusion NuiFusionShadePointCloud call failed.\n"); 549 | return; 550 | } 551 | } // end KF_ProcessDepth 552 | 553 | // cDepthWidth * cDepthHeight * cBytesPerPixel 554 | void Kin2::KF_getVolumeImage(BYTE volumeImg[]) 555 | { 556 | BYTE * pBuffer = m_pShadedSurface->pFrameBuffer->pBits; 557 | 558 | int pixCount = cDepthWidth * cDepthHeight; 559 | 560 | // copy data (424x512) BYTE buffer to Matlab output 561 | // sweep the entire matrix copying data to output matrix 562 | int size = 4*cDepthWidth; 563 | for (int x=0, k=0; x < cDepthWidth*4; x+=4) 564 | for (int y=0; y<pixCount*4; y+=size,k++) 565 | { 566 | volumeImg[k] = pBuffer[y+x]; 567 | volumeImg[k + pixCount] = pBuffer[y+x+1]; 568 | volumeImg[k + pixCount*2] = pBuffer[y+x+2]; 569 | } 570 | } // end KF_getVolumeImage 571 | 572 | // Calculate a mesh of the current reconstruction volume. 573 | // A voxel step of 1 returns the mesh at full resolution; 574 | // the resulting mesh must be released by the caller. 575 | HRESULT Kin2::KF_getMesh(INuiFusionMesh **ppMesh) 576 | { 577 | 578 | return m_pVolume->CalculateMesh(1, ppMesh); 579 | } // end KF_getMesh 580 | 581 | // Reset the reconstruction camera pose and clear the volume. 582 | void Kin2::KF_reset() 583 | { 584 | HRESULT hr = S_OK; 585 | 586 | SetIdentityMatrix(m_worldToCameraTransform); 587 | 588 | // Translate the reconstruction volume location away from the world origin by an amount equal 589 | // to the minimum depth threshold. This ensures that some depth signal falls inside the volume. 590 | // If set false, the default world origin is set to the center of the front face of the 591 | // volume, which has the effect of locating the volume directly in front of the initial camera 592 | // position with the +Z axis into the volume along the initial camera direction of view. 593 | if (m_bTranslateResetPoseByMinDepthThreshold) 594 | { 595 | Matrix4 worldToVolumeTransform = m_defaultWorldToVolumeTransform; 596 | 597 | // Translate the volume in the Z axis by the minDepthThreshold distance 598 | float minDist = (m_fMinDepthThreshold < m_fMaxDepthThreshold) ? m_fMinDepthThreshold : m_fMaxDepthThreshold; 599 | worldToVolumeTransform.M43 -= (minDist * m_reconstructionParams.voxelsPerMeter);
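// Worked example of the translation above: with the SDK default of 256 voxels
// per meter and a minimum depth threshold of 0.5 m, the volume origin shifts by
// 0.5 * 256 = 128 voxels along +Z, so the closest valid depth measurements fall
// just inside the front face of the reconstruction volume.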
600 | 601 | hr = m_pVolume->ResetReconstruction(&m_worldToCameraTransform, &worldToVolumeTransform); 602 | } 603 | else 604 | { 605 | hr = m_pVolume->ResetReconstruction(&m_worldToCameraTransform, nullptr); 606 | } 607 | 608 | m_cLostFrameCounter = 0; 609 | m_cFrameCounter = 0; 610 | 611 | if (SUCCEEDED(hr)) 612 | { 613 | m_bTrackingFailed = false; 614 | 615 | mexPrintf("Reconstruction has been reset.\n"); 616 | } 617 | else 618 | { 619 | mexPrintf("Failed to reset reconstruction.\n"); 620 | } 621 | } // end KF_reset 622 | -------------------------------------------------------------------------------- /Kin2/Mex/Kin2_mapping.cpp: -------------------------------------------------------------------------------- 1 | #include "Kin2.h" 2 | #include "mex.h" 3 | #include "class_handle.hpp" 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | ///////// Function: getPointCloud /////////////////////////////////////////// 10 | // Get camera points from depth frame and copy them to Matlab matrix 11 | // You must call updateData first and have depth activated 12 | /////////////////////////////////////////////////////////////////////////// 13 | 14 | void Kin2::getPointCloud(double pointCloud[], unsigned char colors[], bool color, bool& validData) 15 | { 16 | // Create coordinate mapping from depth to camera 17 | HRESULT hr; 18 | const int numDepthPoints = cDepthWidth * cDepthHeight; 19 | 20 | if(m_newPointCloudData) 21 | { 22 | CameraSpacePoint cameraPoints[numDepthPoints]; 23 | hr = m_pCoordinateMapper->MapDepthFrameToCameraSpace(numDepthPoints, 24 | (UINT16*)m_pDepthArray16U, numDepthPoints, cameraPoints); 25 | 26 | // If successful depth to camera space mapping 27 | if (SUCCEEDED(hr)) 28 | { 29 | // if the user wants color 30 | if(color) 31 | { 32 | // map camera points to color space 33 | ColorSpacePoint colorPoints[numDepthPoints]; 34 | hr = m_pCoordinateMapper->MapCameraPointsToColorSpace(numDepthPoints, 35 | cameraPoints, numDepthPoints, colorPoints); 36 | 37 | // fill up the colors matrix with R,G,B values from the current color image 38 | if (SUCCEEDED(hr)) 39 | { 40 | int colorCoordX, colorCoordY; 41 | for (int i = 0; i < numDepthPoints; i++) 42 | { 43 | colorCoordX = (UINT16)colorPoints[i].X; 44 | colorCoordY = (UINT16)colorPoints[i].Y; 45 | 46 | // Sample the RGB components from the color image 47 | unsigned char R = 0, G = 0, B = 0; 48 | 49 | // first make sure the coordinates map to a valid point in color space 50 | int colorX = (int)(floor(colorCoordX + 0.5)); 51 | int colorY = (int)(floor(colorCoordY + 0.5)); 52 | if ((colorX >= 0) && (colorX < cColorWidth) && (colorY >= 0) && (colorY < cColorHeight)) 53 | { 54 | // calculate index into color array 55 | int colorIndex = (colorX + (colorY * cColorWidth)) * 4; 56 | 57 | R = m_pColor[colorIndex]; 58 | G = m_pColor[colorIndex + 1]; 59 | B = m_pColor[colorIndex + 2]; 60 | } 61 | 62 | colors[i] = R; 63 | colors[i + numDepthPoints] = G; 64 | colors[i + numDepthPoints + numDepthPoints] = B; 65 | } 66 | } 67 | } // if color 68 | 69 | // Fill up the point cloud with x,y,z values from camera space 70 | for (int i = 0; i < numDepthPoints; i++) 71 | { 72 | float X, Y, Z; 73 | X = cameraPoints[i].X; 74 | Y = cameraPoints[i].Y; 75 | Z = cameraPoints[i].Z; 76 | 77 | pointCloud[i] = X; 78 | pointCloud[i + numDepthPoints] = Y; 79 | pointCloud[i + numDepthPoints + numDepthPoints] = Z; 80 | } 81 | } // If successful depth to camera space mapping 82 | else 83 | { 84 | pointCloud[0] = 0; 85 | pointCloud[1] = 0; 86 | pointCloud[2] = 0; 87 | mexPrintf("Error getting depth-to-camera mapping.\n"); 88 | } 89 | } 90 | validData = m_newPointCloudData; 91 | m_newPointCloudData = false; 92 | }
93 | 94 | ///////// Function: mapDepthPoints2Color ////////////////////////////// 95 | // Map the input points in depth coordinates to points in color coordinates 96 | /////////////////////////////////////////////////////////////////////////// 97 | void Kin2::mapDepthPoints2Color(double depthCoords[], int size, UINT16 colorCoords[]) 98 | { 99 | int numDepthPoints = size; 100 | 101 | // Depth coordinates 102 | DepthSpacePoint* depthPoints = new DepthSpacePoint[numDepthPoints]; 103 | 104 | // Depth values 105 | UINT16* depthValues = new UINT16[numDepthPoints]; 106 | 107 | DepthSpacePoint d; 108 | 109 | for(int i=0; i<numDepthPoints; i++) 110 | { 111 | d.X = (float)depthCoords[i]; 112 | d.Y = (float)depthCoords[i+size]; 113 | depthPoints[i] = d; 114 | 115 | // depth value at the input coordinates 116 | depthValues[i] = m_pDepthArray16U[(int)d.Y*cDepthWidth + (int)d.X]; 117 | } 118 | 119 | // Color coordinates 120 | ColorSpacePoint* colorPoints = new ColorSpacePoint[numDepthPoints]; 121 | 122 | HRESULT hr; 123 | 124 | // Map the depth points to color space 125 | hr = m_pCoordinateMapper->MapDepthPointsToColorSpace(numDepthPoints, depthPoints, 126 | numDepthPoints, depthValues, numDepthPoints, colorPoints); 127 | 128 | if (SUCCEEDED(hr)) 129 | { 130 | for (int i = 0; i < numDepthPoints; i++) 131 | { 132 | colorCoords[i] = (int)colorPoints[i].X; 133 | colorCoords[i+size] = (int)colorPoints[i].Y; 134 | } 135 | } 136 | else 137 | { 138 | colorCoords[0] = 0; 139 | colorCoords[1] = 0; 140 | mexPrintf("Mapping error.\n"); 141 | } 142 | 143 | delete[] depthPoints; depthPoints = NULL; 144 | delete[] depthValues; depthValues = NULL; 145 | delete[] colorPoints; colorPoints = NULL; 146 | } // end mapDepthPoints2Color 147 | 148 | ///////// Function: mapDepthPoints2Camera ////////////////////////////// 149 | // Map the input points in depth coordinates to points in camera coordinates 150 | /////////////////////////////////////////////////////////////////////////// 151 | void Kin2::mapDepthPoints2Camera(double depthCoords[], int size, double cameraCoords[]) 152 | { 153 | int numDepthPoints = size; 154 | 155 | // Depth coordinates 156 | DepthSpacePoint* depthPoints = new DepthSpacePoint[numDepthPoints]; 157 | 158 | // Depth values 159 | UINT16* depthValues = new UINT16[numDepthPoints]; 160 | 161 | DepthSpacePoint d; 162 | 163 | for(int i=0; i<numDepthPoints; i++) 164 | { 165 | d.X = (float)depthCoords[i]; 166 | d.Y = (float)depthCoords[i+size]; 167 | depthPoints[i] = d; 168 | 169 | // depth value at the input coordinates 170 | depthValues[i] = m_pDepthArray16U[(int)d.Y*cDepthWidth + (int)d.X]; 171 | } 172 | 173 | // Camera coordinates 174 | CameraSpacePoint* cameraPoints = new CameraSpacePoint[numDepthPoints]; 175 | 176 | HRESULT hr; 177 | 178 | // Map the depth points to camera space 179 | hr = m_pCoordinateMapper->MapDepthPointsToCameraSpace(numDepthPoints, depthPoints, 180 | numDepthPoints, depthValues, numDepthPoints, cameraPoints); 181 | 182 | if (SUCCEEDED(hr)) 183 | { 184 | for (int i = 0; i < numDepthPoints; i++) 185 | { 186 | cameraCoords[i] = cameraPoints[i].X; 187 | cameraCoords[i+size] = cameraPoints[i].Y; 188 | cameraCoords[i+size+size] = cameraPoints[i].Z; 189 | } 190 | } 191 | else 192 | { 193 | cameraCoords[0] = 0; 194 | cameraCoords[1] = 0; 195 | cameraCoords[2] = 0; 196 | mexPrintf("Mapping error.\n"); 197 | } 198 | 199 | delete[] depthPoints; depthPoints = NULL; 200 | delete[] depthValues; depthValues = NULL; 201 | delete[] cameraPoints; cameraPoints = NULL; 202 | } // end mapDepthPoints2Camera
203 | 204 | /* 205 | bool Kin2::mapDepthFrame2Color(UINT16* depth2colorCoordinates) 206 | { 207 | HRESULT hr; 208 | 209 | int numDepthPoints = cDepthWidth * cDepthHeight; 210 | 211 | // Create coordinate mapping from depth to color 212 | ColorSpacePoint* depth2ColorMapping = new ColorSpacePoint[numDepthPoints]; 213 | 214 | hr = m_pCoordinateMapper->MapDepthFrameToColorSpace(numDepthPoints, 215 | (UINT16*)m_pDepthArray16U, numDepthPoints, depth2ColorMapping); 216 | 217 | if (SUCCEEDED(hr)) 218 | { 219 | for (int i = 0; i < numDepthPoints; i++) 220 | { 221 | depth2colorCoordinates[i] = (int)depth2ColorMapping[i].X; 222 | depth2colorCoordinates[i + numDepthPoints] = (int)depth2ColorMapping[i].Y; 223 | } 224 | } 225 | else 226 | { 227 | depth2colorCoordinates[0] = 0; 228 | depth2colorCoordinates[1] = 0; 229 | mexPrintf("Mapping error.\n"); 230 | } 231 | 232 | if (SUCCEEDED(hr)) return true; 233 | else return false; 234 | } // end mapDepthFrame2Color 235 | */ 236 | 237 | ///////// Function: mapColorPoints2Depth ////////////////////////////// 238 | // Map points in color coordinates to points in depth coordinates 239 | /////////////////////////////////////////////////////////////////////////// 240 | void Kin2::mapColorPoints2Depth(double colorCoords[], int size, UINT16 depthCoords[]) 241 | { 242 | // create heap storage for the coordinate mapping from color to depth 243 | DepthSpacePoint* pColor2Depth = new DepthSpacePoint[cColorWidth * cColorHeight]; 244 | 245 | // Get the mapping from color to depth 246 | HRESULT hr; 247 | hr = m_pCoordinateMapper->MapColorFrameToDepthSpace(cDepthWidth * cDepthHeight, 248 | (UINT16*)m_pDepthArray16U, cColorWidth * cColorHeight, pColor2Depth); 249 | 250 | if (SUCCEEDED(hr)) 251 | { 252 | int x, y, idx, depthX, depthY; 253 | 254 | for(int i=0; i<size; i++) 255 | { 256 | x = (int)colorCoords[i]; 257 | y = (int)colorCoords[i+size]; 258 | 259 | // linear index of the color pixel 260 | idx = y * cColorWidth + x; 261 | 262 | depthX = (int)pColor2Depth[idx].X; 263 | depthY = (int)pColor2Depth[idx].Y; 264 | 265 | depthCoords[i] = depthX; 266 | depthCoords[i+size] = depthY; 267 | } 268 | } 269 | else 270 | { 271 | depthCoords[0] = 0; 272 | depthCoords[1] = 0; 273 | mexPrintf("Mapping error.\n"); 274 | } 275 | 276 | delete[] pColor2Depth; pColor2Depth = NULL; 277 | } // end mapColorPoints2Depth 278 | 279 | ///////// Function: mapColorPoints2Camera ////////////////////////////// 280 | // Map points in color coordinates to points in camera coordinates 281 | /////////////////////////////////////////////////////////////////////////// 282 | void Kin2::mapColorPoints2Camera(double colorCoords[], int size, double cameraCoords[]) 283 | { 284 | // create heap storage for the coordinate mapping from color to camera space 285 | CameraSpacePoint* pColor2Camera = new CameraSpacePoint[cColorWidth * cColorHeight]; 286 | 287 | // Get the mapping from color to camera space 288 | HRESULT hr; 289 | hr = m_pCoordinateMapper->MapColorFrameToCameraSpace(cDepthWidth * cDepthHeight, 290 | (UINT16*)m_pDepthArray16U, cColorWidth * cColorHeight, pColor2Camera); 291 | 292 | if (SUCCEEDED(hr)) 293 | { 294 | int x, y, idx; 295 | double camX, camY, camZ; 296 | 297 | for(int i=0; i<size; i++) 298 | { 299 | x = (int)colorCoords[i]; 300 | y = (int)colorCoords[i+size]; 301 | 302 | // linear index of the color pixel 303 | idx = y * cColorWidth + x; 304 | 305 | camX = pColor2Camera[idx].X; 306 | camY = pColor2Camera[idx].Y; 307 | camZ = pColor2Camera[idx].Z; 308 | 309 | cameraCoords[i] = camX; 310 | cameraCoords[i+size] = camY; 311 | cameraCoords[i+size+size] = camZ; 312 | } 313 | } 314 | else 315 | { 316 | cameraCoords[0] = 0; 317 | cameraCoords[1] = 0; 318 | cameraCoords[2] = 0; 319 | mexPrintf("Mapping error.\n"); 320 | } 321 | 322 | delete[] pColor2Camera; pColor2Camera = NULL; 323 | } // end mapColorPoints2Camera 324 | 325 | ///////// Function: mapCameraPoints2Depth ////////////////////////////// 326 | // Map points in camera coordinates to points in depth coordinates 327 | /////////////////////////////////////////////////////////////////////////// 328 | void Kin2::mapCameraPoints2Depth(double cameraCoords[], int size, UINT16 depthCoords[]) 329 | { 330 | int numPoints = size; 331 | 332 | CameraSpacePoint* camPoints = new CameraSpacePoint[numPoints]; 333 | 334 | CameraSpacePoint c; 335 | 336 | for(int i=0; i<numPoints; i++) 337 | { 338 | c.X = (float)cameraCoords[i]; 339 | c.Y = (float)cameraCoords[i+size]; 340 | c.Z = (float)cameraCoords[i+size+size]; 341 | camPoints[i] = c; 342 | } 343 | 344 | DepthSpacePoint* depthPoints = new DepthSpacePoint[numPoints]; 345 | HRESULT hr; hr = m_pCoordinateMapper->MapCameraPointsToDepthSpace(numPoints, 346 | camPoints, numPoints, depthPoints); 347 | 348 | if (SUCCEEDED(hr)) 349 | { 350 | for (int i = 0; i < numPoints; i++) 351 | { 352 | depthCoords[i] = (UINT16)depthPoints[i].X; 353 | depthCoords[i+size] = (UINT16)depthPoints[i].Y; 354 | } 355 | } 356 | else 357 | { 358 | depthCoords[0] = 0; 359 | depthCoords[1] = 0; 360 | mexPrintf("Camera to Depth Mapping error.\n"); 361 | } 362 | 363 | delete[] depthPoints; depthPoints = NULL; 364 | delete[] camPoints; camPoints = NULL; 365 | } // end mapCameraPoints2Depth 366 | 367 | void Kin2::mapCameraPoints2Color(double cameraCoords[], int size, UINT16 colorCoords[]) 368 | { 369 | int numPoints = size; 370 | 371 | CameraSpacePoint* camPoints = new CameraSpacePoint[numPoints]; 372 | 373 | CameraSpacePoint c; 374 | 375 | for(int i=0; i<numPoints; i++) 376 | { 377 | c.X = (float)cameraCoords[i]; 378 | c.Y = (float)cameraCoords[i+size]; 379 | c.Z = (float)cameraCoords[i+size+size]; 380 | camPoints[i] = c; 381 | } 382 | 383 | ColorSpacePoint* colorPoints = new ColorSpacePoint[numPoints]; 384 | 385 | HRESULT hr; 386 | 387 | hr = m_pCoordinateMapper->MapCameraPointsToColorSpace(numPoints, 388 | camPoints, numPoints, colorPoints); 389 | 390 | if (SUCCEEDED(hr)) 391 | { 392 | for (int i = 0; i < numPoints; i++) 393 | { 394 | colorCoords[i] = (UINT16)colorPoints[i].X; 395 | colorCoords[i+size] = (UINT16)colorPoints[i].Y; 396 | } 397 | } 398 | else 399 | { 400 | colorCoords[0] = 0; 401 | colorCoords[1] = 0; 402 | mexPrintf("Camera to Color Mapping error.\n"); 403 | } 404 | 405 | delete[] colorPoints; colorPoints = NULL; 406 | delete[] camPoints; camPoints = NULL; 407 | } // end mapCameraPoints2Color 408 | 409 | void Kin2::alignColor2Depth(unsigned char alignedImage[], bool& validData) 410 | { 411 | HRESULT hr; 412 | const int numDepthPoints = cDepthWidth * cDepthHeight; 413 | 414 | // Map from depth to color 415 | ColorSpacePoint depth2ColorMapping[numDepthPoints]; 416 | 417 | hr = m_pCoordinateMapper->MapDepthFrameToColorSpace(numDepthPoints, 418 | (UINT16*)m_pDepthArray16U, numDepthPoints, depth2ColorMapping); 419 | 420 | // fill up the output matrix with R,G,B values from the current color image 421 | if (SUCCEEDED(hr)) 422 | { 423 | int colorCoordX, colorCoordY; 424 | for (int x=0, k=0; x < cDepthWidth; x++) 425 | { 426 | for (int y=0; y<cDepthHeight; y++, k++) 427 | { 428 | // depth pixel (row y, column x) 429 | int idx = y * cDepthWidth + x; 430 | colorCoordX = (UINT16)depth2ColorMapping[idx].X; 431 | colorCoordY = (UINT16)depth2ColorMapping[idx].Y; 432 | 433 | // Sample the RGB components from the color image 434 | unsigned char R = 0, G = 0, B = 0; 435 | 436 | int colorX = (int)(floor(colorCoordX + 0.5)); 437 | int colorY = (int)(floor(colorCoordY + 0.5)); 438 | if ((colorX >= 0) && (colorX < cColorWidth) && (colorY >= 0) && (colorY < cColorHeight)) 439 | { 440 | // calculate index into color array 441 | int colorIndex = (colorX + (colorY * cColorWidth)) * 4; 442 | 443 | R = m_pColor[colorIndex]; 444 | G = m_pColor[colorIndex + 1]; 445 | B = m_pColor[colorIndex + 2]; 446 | } 447 | 448 | alignedImage[k] = R; 449 | alignedImage[k + numDepthPoints] = G; 450 | alignedImage[k + numDepthPoints + numDepthPoints] = B; 451 | } 452 | } 453 | validData = true; 454 | } 455 | else 456 | { 457 | validData = false; 458 | mexPrintf("Depth to Color Mapping error.\n"); 459 | } 460 | } // end alignColor2Depth 461 |
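For orientation, a minimal MATLAB sketch of how these mappings are driven through the Kin2 wrapper (the method names are the ones used by the demo scripts later in this repository; k2 is assumed to be an initialized Kin2 object with a valid frame acquired):

% Map a depth pixel to color and camera space, then back to depth
depthPoint  = [200 150];                            % [x y] in the 512x424 depth image
colorPoint  = k2.mapDepthPoints2Color(depthPoint);  % -> [x y] in the 1920x1080 color image
camPoint    = k2.mapDepthPoints2Camera(depthPoint); % -> [x y z] in meters, camera space
backToDepth = k2.mapCameraPoints2Depth(camPoint);   % round trip; should be close to depthPoint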
-------------------------------------------------------------------------------- /Kin2/Mex/Kin2_mex.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jrterven/MultiKinCalib/6b797c9c5926ff4e45fde11d015bb7e95a6bf570/Kin2/Mex/Kin2_mex.mexw64 -------------------------------------------------------------------------------- /Kin2/Mex/calibCostFun.m: -------------------------------------------------------------------------------- 1 | 2 | function fun = calibCostFun(x0) 3 | 4 | persistent X3d x2d quatRot 5 | 6 | height = 1080; 7 | width = 1920; 8 | 9 | % The first iteration loads the data 10 | if isempty(X3d) 11 | X3d = []; 12 | x2d = []; 13 | 14 | % Get the 3D points from the matching results 15 | % 3D points as a 3 x n matrix 16 | load('calibData.mat'); 17 | quatRot = rot; 18 | X3d = pointcloud'; 19 | 20 | % 2D points as a 2 x n matrix 21 | x2d = double(proj2d)'; 22 | 23 | % Remove outliers (NaN, Inf, out-of-frame points) from both matrices 24 | x2dValidCols = ~any( isnan( x2d ) | isinf( x2d ) | x2d > width | x2d < 0, 1 ); 25 | x3dValidCols = ~any( isnan( X3d ) | isinf( X3d ) | X3d > 8, 1 ); 26 | validCols = x2dValidCols & x3dValidCols; 27 | 28 | x2d = x2d(:,validCols); 29 | X3d = X3d(:,validCols); 30 | end 31 | 32 | f = x0(1); % focal length 33 | cx = x0(2); % principal point x 34 | cy = x0(3); % principal point y 35 | 36 | % Rotation 37 | Rq = [x0(4) x0(5) x0(6) x0(7)]; 38 | 39 | if quatRot 40 | R = quat2rotm(Rq/norm(Rq)); % normalize to a unit quaternion 41 | else 42 | R = eye(3); 43 | end 44 | 45 | % Translation 46 | t = [x0(8);x0(9);x0(10)]; 47 | 48 | % Build camera matrix 49 | intrinsic = [f 0 cx; 50 | 0 f cy; 51 | 0 0 1]; 52 | 53 | k1 = x0(11); 54 | k2 = x0(12); 55 | k3 = x0(13); 56 | 57 | N = size(X3d,2); 58 | Xw = X3d; 59 | xc = x2d; 60 | xc(2,:) = height - xc(2,:); 61 | 62 | % Apply extrinsic parameters 63 | proj = R * Xw + repmat(t,1,size(Xw,2)); 64 | 65 | % Apply Intrinsic parameters to get the projection 66 | proj = intrinsic * proj; 67 | % Dehomogenization 68 | proj = proj ./ repmat(proj(3,:),3,1); 69 | 70 | u = proj(1,:); 71 | v = proj(2,:); 72 | ud=xc(1,:); 73 | vd=xc(2,:); 74 | 75 | % Normalized coordinates in the image plane 76 | un = (u - cx)/f; 77 | vn = (v - cy)/f; 78 | 79 | % Calculate the Radial Distortion 80 | r = sqrt(un.^2 + vn.^2); 81 | 82 | compRad(1,:) = 1 + k1*r.^2 + k2*r.^4 + k3*r.^6; 83 | compRad(2,:) = 1 + k1*r.^2 + k2*r.^4 + k3*r.^6; 84 | 85 | % Apply the radial distortion to the normalized coordinates in the image plane 86 | un_undist = un.*compRad(1,:); 87 | vn_undist = vn.*compRad(2,:); 88 | 89 | % Un-normalize the points back to pixel coordinates 90 | u_undist = (un_undist * f) + cx; 91 | v_undist = (vn_undist * f) + cy; 92 | 93 | % Reprojection error 94 | fun(1,:)= u_undist - ud; 95 | fun(2,:)= v_undist - vd; 96 | 97 | err = fun .* fun; 98 | err = sum(err(:)); 99 | %disp(sqrt(err/N)); 100 | end
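A sketch of how this cost function can be minimized (assuming calibData.mat is on the path and the Optimization Toolbox is available; the initial guess below is illustrative only, not from the original project):

% x0 layout: [f, cx, cy, qw qx qy qz, tx ty tz, k1 k2 k3]
clear calibCostFun                                 % reset the persistent data cache
x0   = [1000, 960, 540, 1 0 0 0, 0 0 0, 0 0 0];   % identity rotation, zero translation/distortion
opts = optimoptions('lsqnonlin','Display','iter');
xOpt = lsqnonlin(@calibCostFun, x0, [], [], opts); % fun returns the 2 x N residual matrix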
"mex.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #define CLASS_HANDLE_SIGNATURE 0xFF00F0A5 10 | template class class_handle 11 | { 12 | public: 13 | class_handle(base *ptr) : ptr_m(ptr), name_m(typeid(base).name()) { signature_m = CLASS_HANDLE_SIGNATURE; } 14 | ~class_handle() { signature_m = 0; delete ptr_m; } 15 | bool isValid() { return ((signature_m == CLASS_HANDLE_SIGNATURE) && !strcmp(name_m.c_str(), typeid(base).name())); } 16 | base *ptr() { return ptr_m; } 17 | 18 | private: 19 | uint32_t signature_m; 20 | std::string name_m; 21 | base *ptr_m; 22 | }; 23 | 24 | template inline mxArray *convertPtr2Mat(base *ptr) 25 | { 26 | mexLock(); 27 | mxArray *out = mxCreateNumericMatrix(1, 1, mxUINT64_CLASS, mxREAL); 28 | *((uint64_t *)mxGetData(out)) = reinterpret_cast(new class_handle(ptr)); 29 | return out; 30 | } 31 | 32 | template inline class_handle *convertMat2HandlePtr(const mxArray *in) 33 | { 34 | if (mxGetNumberOfElements(in) != 1 || mxGetClassID(in) != mxUINT64_CLASS || mxIsComplex(in)) 35 | mexErrMsgTxt("Input must be a real uint64 scalar."); 36 | class_handle *ptr = reinterpret_cast *>(*((uint64_t *)mxGetData(in))); 37 | if (!ptr->isValid()) 38 | mexErrMsgTxt("Handle not valid."); 39 | return ptr; 40 | } 41 | 42 | template inline base *convertMat2Ptr(const mxArray *in) 43 | { 44 | return convertMat2HandlePtr(in)->ptr(); 45 | } 46 | 47 | template inline void destroyObject(const mxArray *in) 48 | { 49 | delete convertMat2HandlePtr(in); 50 | mexUnlock(); 51 | } 52 | 53 | // Safe release for interfaces 54 | template 55 | inline void SafeRelease(Interface *& pInterfaceToRelease) 56 | { 57 | if (pInterfaceToRelease != NULL) 58 | { 59 | pInterfaceToRelease->Release(); 60 | pInterfaceToRelease = NULL; 61 | } 62 | } 63 | 64 | #endif // __CLASS_HANDLE_HPP__ 65 | -------------------------------------------------------------------------------- /Kin2/README.txt: -------------------------------------------------------------------------------- 1 | C++ wrapper functions for the Microsoft Kinect 2, using Microsoft SDK. 2 | 3 | Requirements: 4 | - Kinect2 SDK. http://www.microsoft.com/en-us/download/details.aspx?id=44561 5 | - Visual Studio 2012 or newer compiler 6 | - MATLAB 2013a or newer (for Visual Studio 2012 support) 7 | - MATLAB 2015b or newer for pointCloudDemo2, which uses MATLAB's built-in pointCloud object 8 | 9 | Usage: 10 | 1) Set the compiler using mex -setup C++ 11 | 2) Open compile_cpp_files and set the include and lib paths of Kinect2 SDK (see the provided paths) 12 | 3) Add to the windows path the bin directory containing the Kinect20.Fusion.dll and Kinect20.Face.dll 13 | For example: C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\bin 14 | 4) If you modify Windows path, close Matlab and open it again in order to detect the changes. 15 | 3) Run compile_cpp_files.m 16 | 17 | Demos: 18 | 1) videoDemo.m: displays depth, color, and infrared video. 19 | 2) mappingDemo.m: displays depth and color video, and allows to map points from one image to the other (See usage comments at the beginning of the script). 20 | 3) mapping2CamDemo.m: displays depth and color and allows to map points from depth and color to camera space and viceversa. 21 | 4) bodyDemo.m: displays depth and color and the skeleton on both images 22 | 5) pointCloudDemo.m: displays depth and a colored point cloud on a scatter3 23 | 6) pointCloudDemo2.m displays depth and a colored point cloud using MATLAB's built-in pointCloud object and pcshow. 
-------------------------------------------------------------------------------- /Kin2/README.txt: -------------------------------------------------------------------------------- 1 | C++ wrapper functions for the Microsoft Kinect 2, using the Microsoft SDK. 2 | 3 | Requirements: 4 | - Kinect2 SDK. http://www.microsoft.com/en-us/download/details.aspx?id=44561 5 | - Visual Studio 2012 or newer compiler 6 | - MATLAB 2013a or newer (for Visual Studio 2012 support) 7 | - MATLAB 2015b or newer for pointCloudDemo2, which uses MATLAB's built-in pointCloud object 8 | 9 | Usage: 10 | 1) Set the compiler using mex -setup C++ 11 | 2) Open compile_cpp_files and set the include and lib paths of the Kinect2 SDK (see the provided paths) 12 | 3) Add to the Windows path the bin directory containing Kinect20.Fusion.dll and Kinect20.Face.dll 13 | For example: C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\bin 14 | 4) If you modified the Windows path, close MATLAB and open it again so the changes are detected. 15 | 5) Run compile_cpp_files.m 16 | 17 | Demos: 18 | 1) videoDemo.m: displays depth, color, and infrared video. 19 | 2) mappingDemo.m: displays depth and color video, and lets you map points from one image to the other (see usage comments at the beginning of the script). 20 | 3) mapping2CamDemo.m: displays depth and color, and lets you map points from depth and color to camera space and vice versa. 21 | 4) bodyDemo.m: displays depth and color and the skeleton on both images 22 | 5) pointCloudDemo.m: displays depth and a colored point cloud on a scatter3 23 | 6) pointCloudDemo2.m: displays depth and a colored point cloud using MATLAB's built-in pointCloud object and pcshow. 24 | 7) bodyIndexDemo.m: displays body index frames 25 | 8) faceDemo.m: detects and tracks faces, showing the facial landmarks and face properties 26 | 9) faceHDDemo.m: detects and tracks faces, showing the 17 animation units and the high definition model 27 | 10) faceHDDemo2.m: builds a face model for the user and tracks the face using this model. 28 | 11) kinectFusionDemo.m: demonstrates the use of Kinect Fusion. This is still in beta: a memory leak in the C++ code causes MATLAB to crash on a second run and needs fixing. 29 | 12) calibrationDemo.m: obtains the depth camera intrinsic parameters and the color camera parameters.
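All the demos below follow the same acquisition skeleton; a minimal sketch (sources and method names as used throughout the demos):

% Minimal Kin2 session
addpath('Mex');
k2 = Kin2('color','depth');     % open the sensor with the desired sources
while true
    if k2.updateData            % true when a new, valid frame set is ready
        color = k2.getColor;    % 1080x1920x3 uint8
        depth = k2.getDepth;    % 424x512 uint16, depth in millimeters
    end
    pause(0.02);
    % ... display/processing and an exit condition go here ...
end
k2.delete;                      % release the sensor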
-------------------------------------------------------------------------------- /Kin2/bodyDemo.m: -------------------------------------------------------------------------------- 1 | % BODYDEMO Illustrates how to use the Kin2 object to get and draw the 2 | % Skeleton data 3 | % 4 | % Juan R. Terven, jrterven@hotmail.com 5 | % Diana M. Cordova, diana_mce@hotmail.com 6 | % 7 | % Citation: 8 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 9 | % Computer Programming. 10 | % https://github.com/jrterven/Kin2, 2016. 11 | 12 | addpath('Mex'); 13 | clear all 14 | close all 15 | 16 | % Create Kinect 2 object and initialize it 17 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 18 | % 'face' and 'HDface' 19 | k2 = Kin2('color','depth','body'); 20 | 21 | % images sizes 22 | d_width = 512; d_height = 424; outOfRange = 4000; 23 | c_width = 1920; c_height = 1080; 24 | 25 | % Color image is too big, let's scale it down 26 | COL_SCALE = 1.0; 27 | 28 | % Create matrices for the images 29 | depth = zeros(d_height,d_width,'uint16'); 30 | color = zeros(c_height*COL_SCALE,c_width*COL_SCALE,3,'uint8'); 31 | 32 | % depth stream figure 33 | d.h = figure; 34 | d.ax = axes; 35 | d.im = imshow(zeros(d_height,d_width,'uint8')); 36 | %hold on; 37 | 38 | title('Depth Source (press q to exit)') 39 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 40 | 41 | % color stream figure 42 | c.h = figure; 43 | c.ax = axes; 44 | c.im = imshow(color,[]); 45 | title('Color Source (press q to exit)'); 46 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 47 | %hold on 48 | 49 | % Loop until pressing 'q' on any figure 50 | k=[]; 51 | 52 | disp('Press q on any figure to exit') 53 | while true 54 | % Get frames from Kinect and save them on underlying buffer 55 | validData = k2.updateData; 56 | 57 | % Before processing the data, we need to make sure that a valid 58 | % frame was acquired. 59 | if validData 60 | % Copy data to Matlab matrices 61 | depth = k2.getDepth; 62 | color = k2.getColor; 63 | 64 | % update depth figure 65 | depth8u = uint8(depth*(255/outOfRange)); 66 | depth8uc3 = repmat(depth8u,[1 1 3]); 67 | d.im = imshow(depth8uc3, 'Parent', d.ax); 68 | 69 | %set(d.im,'CData',depth8uc3); 70 | 71 | % update color figure 72 | color = imresize(color,COL_SCALE); 73 | c.im = imshow(color, 'Parent', c.ax); 74 | 75 | %set(c.im,'CData',color); 76 | 77 | % Get 3D bodies joints 78 | % Input parameter can be 'Quat' or 'Euler' for the joints 79 | % orientations. 80 | % getBodies returns a structure array. 81 | % The structure array (bodies) contains 6 bodies at most 82 | % Each body has: 83 | % -Position: 3x25 matrix containing the x,y,z of the 25 joints in 84 | % camera space coordinates 85 | % - Orientation: 86 | % If input parameter is 'Quat': 4x25 matrix containing the 87 | % orientation of each joint in [x; y; z; w] 88 | % If input parameter is 'Euler': 3x25 matrix containing the 89 | % orientation of each joint in [Pitch; Yaw; Roll] 90 | % -TrackingState: state of each joint. These can be: 91 | % NotTracked=0, Inferred=1, or Tracked=2 92 | % -LeftHandState: state of the left hand 93 | % -RightHandState: state of the right hand 94 | [bodies, fcp, timeStamp] = k2.getBodies('Quat'); 95 | 96 | % Number of bodies detected 97 | numBodies = size(bodies,2); 98 | %disp(['Bodies Detected: ' num2str(numBodies)]) 99 | 100 | % Example of how to extract information from getBodies output. 101 | if numBodies > 0 102 | % first body info: 103 | %disp(bodies(1).TrackingState) 104 | %disp(bodies(1).RightHandState) 105 | %disp(bodies(1).LeftHandState) 106 | 107 | %disp('Right Hand Orientation') % see Kin2.m constants 108 | %disp(bodies(1).Orientation(:,k2.JointType_HandRight)); 109 | 110 | disp('Floor Clip Plane') 111 | disp(fcp); 112 | 113 | disp('Body Timestamp') 114 | disp(timeStamp); 115 | 116 | % To get the joints on depth image space, you can use: 117 | %pos2D = k2.mapCameraPoints2Depth(bodies(1).Position'); 118 | end 119 | 120 | %To get the joints on color image space, you can use: 121 | %pos2D = k2.mapCameraPoints2Color(bodies(1).Position'); 122 | 123 | % Draw bodies on depth image 124 | % Parameters: 125 | % 1) image axes 126 | % 2) bodies structure 127 | % 3) Destination image (depth or color) 128 | % 4) Joints' size (circle radii) 129 | % 5) Bones' Thickness 130 | % 6) Hands' Size 131 | k2.drawBodies(d.ax,bodies,'depth',5,3,15); 132 | 133 | % Draw bodies on color image 134 | k2.drawBodies(c.ax,bodies,'color',10,6,30); 135 | 136 | end 137 | 138 | % If user presses 'q', exit loop 139 | if ~isempty(k) 140 | if strcmp(k,'q'); break; end; 141 | end 142 | 143 | pause(0.02) 144 | end 145 | 146 | % Close kinect object 147 | k2.delete; 148 | 149 | close all; 150 |
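As a concrete example of reading the getBodies output, individual joints can be pulled out of the 3 x 25 Position matrix using the JointType constants defined in Kin2.m (JointType_HandLeft is assumed to exist alongside the JointType_HandRight constant used in the demo above):

% Distance between the hands of the first detected body, in meters
if numBodies > 0
    rightHand = bodies(1).Position(:, k2.JointType_HandRight);
    leftHand  = bodies(1).Position(:, k2.JointType_HandLeft);
    handDist  = norm(rightHand - leftHand);
    disp(['Hand separation: ' num2str(handDist) ' m']);
end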
-------------------------------------------------------------------------------- /Kin2/bodyIndexDemo.m: -------------------------------------------------------------------------------- 1 | % BODYINDEXDEMO Illustrates how to use the Kin2 object to get the body index data 2 | % 3 | % Juan R. Terven, jrterven@hotmail.com 4 | % Diana M. Cordova, diana_mce@hotmail.com 5 | % 6 | % Citation: 7 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 8 | % Computer Programming. 9 | % https://github.com/jrterven/Kin2, 2016. 10 | 11 | addpath('Mex'); 12 | clear all 13 | close all 14 | 15 | % Create Kinect 2 object and initialize it 16 | % Select sources as input parameters. 17 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 18 | % 'face' and 'HDface' 19 | k2 = Kin2('color','depth','body_index'); 20 | 21 | % images sizes 22 | depth_width = 512; depth_height = 424; outOfRange = 4000; 23 | color_width = 1920; color_height = 1080; 24 | 25 | % Color image is too big, let's scale it down 26 | colorScale = 0.4; 27 | 28 | % Create matrices for the images 29 | depth = zeros(depth_height,depth_width,'uint16'); 30 | bodyIndex = zeros(depth_height,depth_width,'uint8'); 31 | color = zeros(color_height*colorScale,color_width*colorScale,3,'uint8'); 32 | 33 | % depth stream figure 34 | figure, h1 = imshow(depth,[0 outOfRange]); 35 | title('Depth Source (press q to exit)') 36 | colormap('Jet') 37 | colorbar 38 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 39 | 40 | % color stream figure 41 | figure, h2 = imshow(color,[]); 42 | title('Color Source (press q to exit)'); 43 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 44 | 45 | % body index stream figure 46 | figure, h3 = imshow(bodyIndex,[]); 47 | title('Body Index Source (press q to exit)'); 48 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 49 | 50 | % Loop until pressing 'q' on any figure 51 | k=[]; 52 | 53 | disp('Press q on any figure to exit') 54 | while true 55 | % Get frames from Kinect and save them on underlying buffer 56 | validData = k2.updateData; 57 | 58 | % Before processing the data, we need to make sure that a valid 59 | % frame was acquired. 60 | if validData 61 | % Copy data to Matlab matrices 62 | depth = k2.getDepth; 63 | color = k2.getColor; 64 | bodyIndex = k2.getBodyIndex; 65 | 66 | % update depth figure 67 | depth(depth>outOfRange) = outOfRange; % truncate depth 68 | set(h1,'CData',depth); 69 | 70 | % update color figure 71 | color = imresize(color,colorScale); 72 | set(h2,'CData',color); 73 | 74 | % update body index figure 75 | set(h3,'CData',bodyIndex); 76 | end 77 | 78 | % If user presses 'q', exit loop 79 | if ~isempty(k) 80 | if strcmp(k,'q'); break; end; 81 | end 82 | 83 | pause(0.02) 84 | end 85 | 86 | % Close kinect object 87 | k2.delete; 88 | 89 | close all; 90 |
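The body index frame labels each depth pixel with the id (0-5) of the body covering it; pixels with no body are 255. A small sketch of turning it into a segmentation mask:

% Segment the players out of the depth image
playerMask  = bodyIndex ~= 255;    % true wherever any body was detected
playersOnly = depth;
playersOnly(~playerMask) = 0;      % zero-out the background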
-------------------------------------------------------------------------------- /Kin2/calibrationDemo.m: -------------------------------------------------------------------------------- 1 | % CALIBRATIONDEMO Illustrates how to use the Kin2 class to obtain the 2 | % cameras' intrinsic parameters. 3 | % 4 | % Usage: 5 | % press 'd' to obtain the depth camera intrinsics. 6 | % press 'c' to obtain the color camera intrinsics. 7 | % 8 | % Juan R. Terven, jrterven@hotmail.com 9 | % Diana M. Cordova, diana_mce@hotmail.com 10 | % 11 | % Citation: 12 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 13 | % Computer Programming. 14 | % https://github.com/jrterven/Kin2, 2016. 15 | % 16 | 17 | addpath('Mex'); 18 | clear all 19 | close all 20 | 21 | % Create Kinect 2 object and initialize it 22 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 23 | % 'face' and 'HDface' 24 | k2 = Kin2('color','depth'); 25 | 26 | % images sizes 27 | depth_width = 512; depth_height = 424; outOfRange = 4000; 28 | color_width = 1920; color_height = 1080; 29 | 30 | % Color image is too big, let's scale it down 31 | colorScale = 0.4; 32 | 33 | % Create matrices for the images 34 | depth = zeros(depth_height,depth_width,'uint16'); 35 | color = zeros(color_height*colorScale,color_width*colorScale,3,'uint8'); 36 | 37 | % depth stream figure 38 | figure, h1 = imshow(depth,[0 outOfRange]); 39 | title('Depth Source (press q to exit)') 40 | colormap('Jet') 41 | colorbar 42 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 43 | 44 | % color stream figure 45 | figure, h2 = imshow(color,[]); 46 | title('Color Source (press q to exit)'); 47 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 48 | 49 | % Loop until pressing 'q' on any figure 50 | k=[]; 51 | 52 | disp(' '); disp('Usage:'); 53 | disp('Press d to obtain depth camera intrinsic parameters') 54 | disp('Press c to calibrate color camera') 55 | disp('Press q on any figure to exit') 56 | 57 | while true 58 | % Get frames from Kinect and save them on underlying buffer 59 | validData = k2.updateData; 60 | 61 | % Before processing the data, we need to make sure that a valid 62 | % frame was acquired. 63 | if validData 64 | % Copy data to Matlab matrices 65 | depth = k2.getDepth; 66 | color = k2.getColor; 67 | 68 | % update depth figure 69 | depth(depth>outOfRange) = outOfRange; % truncate depth 70 | set(h1,'CData',depth); 71 | 72 | % update color figure 73 | color = imresize(color,colorScale); 74 | set(h2,'CData',color); 75 | 76 | end 77 | 78 | % If user presses 'q', exit loop 79 | if ~isempty(k) 80 | if strcmp(k,'q') 81 | break; 82 | elseif strcmp(k,'d') 83 | calib = k2.getDepthIntrinsics; 84 | disp(' '); 85 | disp('------------ Depth Intrinsics ------------') 86 | disp(['Focal Length X: ' num2str(calib.FocalLengthX)]); 87 | disp(['Focal Length Y: ' num2str(calib.FocalLengthY)]); 88 | disp(['Principal Point X: ' num2str(calib.PrincipalPointX)]); 89 | disp(['Principal Point Y: ' num2str(calib.PrincipalPointY)]); 90 | disp(['Radial Distortion 2nd order: ' num2str(calib.RadialDistortionSecondOrder)]); 91 | disp(['Radial Distortion 4th order: ' num2str(calib.RadialDistortionFourthOrder)]); 92 | disp(['Radial Distortion 6th order: ' num2str(calib.RadialDistortionSixthOrder)]); 93 | disp('--------------------------------------------'); 94 | k = []; 95 | elseif strcmp(k,'c') 96 | calib = k2.getColorCalib; 97 | disp(' '); 98 | disp('------------ Color Camera Parameters ------------') 99 | disp(['Focal Length X: ' num2str(calib.FocalLengthX)]); 100 | disp(['Focal Length Y: ' num2str(calib.FocalLengthY)]); 101 | disp(['Principal Point X: ' num2str(calib.PrincipalPointX)]); 102 | disp(['Principal Point Y: ' num2str(calib.PrincipalPointY)]); 103 | disp('Rotation Wrt Depth camera:'); 104 | disp(num2str(calib.Rotation)); 105 | disp(['Translation x,y,z wrt depth camera (meters): ' num2str(calib.Translation)]); 106 | disp(['Radial Distortion 2nd order: ' num2str(calib.RadialDistortionSecondOrder)]); 107 | disp(['Radial Distortion 4th order: ' num2str(calib.RadialDistortionFourthOrder)]); 108 | disp(['Radial Distortion 6th order: ' num2str(calib.RadialDistortionSixthOrder)]); 109 | disp('--------------------------------------------'); 110 | k = []; 111 | end; 112 | 113 | end 114 | 115 | pause(0.02) 116 | end 117 | 118 | % Close kinect object 119 | k2.delete; 120 | 121 | close all; 122 |
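The struct returned by getDepthIntrinsics (and getColorCalib) maps directly onto the usual pinhole camera matrix; a sketch of assembling it from the fields printed above:

% Build the 3x3 camera matrix K from the Kin2 calibration struct
calib = k2.getDepthIntrinsics;
K = [calib.FocalLengthX  0                   calib.PrincipalPointX;
     0                   calib.FocalLengthY  calib.PrincipalPointY;
     0                   0                   1];
radialDist = [calib.RadialDistortionSecondOrder, ...
              calib.RadialDistortionFourthOrder, ...
              calib.RadialDistortionSixthOrder];   % k1, k2, k3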
-------------------------------------------------------------------------------- /Kin2/compile_cpp_files.m: -------------------------------------------------------------------------------- 1 | function compile_cpp_files 2 | % compile_cpp_files compiles the Kin2 toolbox. 3 | % The C++ code is located in 6 files: 4 | % Kin2.h: Kin2 class definition. 5 | % Kin2_base.cpp: Kin2 class implementation of the base functionality including body data. 6 | % Kin2_mapping.cpp: Kin2 class implementation of the mapping functionality 7 | % Kin2_face.cpp: Kin2 class implementation of the Face and HD face processing. 8 | % Kin2_fusion.cpp: Kin2 class implementation of the 3D reconstruction. 9 | % Kin2_mex.cpp: MexFunction implementation. 10 | % 11 | % Requirements: 12 | % - Kinect2 SDK. http://www.microsoft.com/en-us/download/details.aspx?id=44561 13 | % - Visual Studio 2012 or newer compiler 14 | % - Matlab 2013a or newer (in order to support Visual Studio 2012) 15 | % 16 | % Usage: 17 | % 1) Set the compiler using mex -setup C++ (note it doesn't work with 18 | % compilers older than VS2012). 19 | % 2) Set the IncludePath and LibPath variables in this file to the correct locations 20 | % (see example below) 21 | % 3) Add to the Windows path the bin directory containing the 22 | % Kinect20.Fusion.dll and Kinect20.Face.dll 23 | % For example: C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\bin 24 | % 4) Close Matlab and open it again. 25 | % 5) Run this function. 26 | % 27 | % Author: Juan R. Terven, jrterven@hotmail.com 28 | IncludePath = 'C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\inc'; 29 | LibPath = 'C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\Lib\x64'; 30 | 31 | cd Mex 32 | mex ('-compatibleArrayDims', '-v', 'Kin2_mex.cpp', 'Kin2_base.cpp', ... 33 | 'Kin2_mapping.cpp','Kin2_face.cpp','Kin2_fusion.cpp', ... 34 | ['-L' LibPath],'-lKinect20', '-lKinect20.Fusion', '-lKinect20.Face',['-I' IncludePath]); 35 |
-------------------------------------------------------------------------------- /Kin2/faceDemo.m: -------------------------------------------------------------------------------- 1 | % FACEDEMO Illustrates how to use the Kin2 object to get and draw the 2 | % face data 3 | % 4 | % Note: You must add to the windows path the bin directory containing the 5 | % Kinect20.Face.dll. 6 | % For example: C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\bin 7 | % 8 | % Juan R. Terven, jrterven@hotmail.com 9 | % Diana M. Cordova, diana_mce@hotmail.com 10 | % 11 | % Citation: 12 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 13 | % Computer Programming. 14 | % https://github.com/jrterven/Kin2, 2016. 15 | 16 | addpath('Mex'); 17 | clear all 18 | close all 19 | 20 | % Create Kinect 2 object and initialize it 21 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 22 | % 'face' and 'HDface' 23 | k2 = Kin2('color','face'); 24 | 25 | % images sizes 26 | c_width = 1920; c_height = 1080; 27 | 28 | % Color image is too big, let's scale it down 29 | COL_SCALE = 1.0; 30 | 31 | % Create matrices for the images 32 | color = zeros(c_height*COL_SCALE,c_width*COL_SCALE,3,'uint8'); 33 | 34 | % color stream figure 35 | c.h = figure; 36 | c.ax = axes; 37 | c.im = imshow(color,[]); 38 | title('Color Source (press q to exit)'); 39 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 40 | 41 | % Loop until pressing 'q' on any figure 42 | k=[]; 43 | 44 | disp('Press q on any figure to exit') 45 | while true 46 | % Get frames from Kinect and save them on underlying buffer 47 | validData = k2.updateData; 48 | 49 | % Before processing the data, we need to make sure that a valid 50 | % frame was acquired. 51 | if validData 52 | % Get color frame 53 | color = k2.getColor; 54 | 55 | % Get the faces data 56 | % faces is a structure array with at most 6 faces. Each face has 57 | % the following fields: 58 | % - FaceBox: rectangle coordinates representing the face position in 59 | % color space. [left, top, right, bottom]. 60 | % - FacePoints: 2 x 5 matrix representing 5 face landmarks: 61 | % left eye, right eye, nose, right and left mouth corners. 62 | % - FaceRotation: 1 x 3 vector containing: pitch, yaw, roll angles 63 | % - FaceProperties: 1 x 8 vector containing the detection result of 64 | % each of the face properties. 65 | % The face properties are: 66 | % Happy, Engaged, WearingGlasses, LeftEyeClosed, RightEyeClosed, 67 | % MouthOpen, MouthMoved, LookingAway 68 | % The detection results are: 69 | % Unknown = 0, No = 1, Maybe = 2, Yes = 3; 70 | faces = k2.getFaces; 71 | 72 | % update color figure 73 | color = imresize(color,COL_SCALE); 74 | c.im = imshow(color, 'Parent', c.ax); 75 | 76 | % Display the faces data: 77 | % Parameters: 78 | % 1) image axes 79 | % 2) faces structure obtained with getFaces 80 | % 3) face landmarks size (radius) 81 | % 4) display text information? 82 | % 5) information font size in pixels 83 | k2.drawFaces(c.ax,faces,5,true,20); 84 | 85 | end 86 | 87 | % If user presses 'q', exit loop 88 | if ~isempty(k) 89 | if strcmp(k,'q'); break; end; 90 | end 91 | 92 | pause(0.02) 93 | end 94 | 95 | % Close kinect object 96 | k2.delete; 97 | 98 | close all; 99 |
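Using the property order and detection codes documented in the comments above (Happy is the first property; Yes = 3), a quick check on the getFaces output looks like:

% Report whether the first detected face looks happy
if size(faces,2) > 0
    isHappy = faces(1).FaceProperties(1) == 3;   % 1st property: Happy; 3 = Yes
    disp(['Happy: ' num2str(isHappy)]);
end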
-------------------------------------------------------------------------------- /Kin2/faceHDDemo.m: -------------------------------------------------------------------------------- 1 | % FACEHDDEMO Illustrates how to use the Kin2 object to get and display the 2 | % HD face data 3 | % 4 | % Note: You must add to the windows path the bin directory containing the 5 | % Kinect20.Face.dll. 6 | % For example: C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\bin 7 | % 8 | % Juan R. Terven, jrterven@hotmail.com 9 | % Diana M. Cordova, diana_mce@hotmail.com 10 | % 11 | % Citation: 12 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 13 | % Computer Programming. 14 | % https://github.com/jrterven/Kin2, 2016. 15 | 16 | addpath('Mex'); 17 | clear all 18 | close all 19 | 20 | % Create Kinect 2 object and initialize it 21 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 22 | % 'face' and 'HDface' 23 | k2 = Kin2('color','HDface'); 24 | 25 | % images sizes 26 | c_width = 1920; c_height = 1080; 27 | 28 | % Color image is too big, let's scale it down 29 | COL_SCALE = 1.0; 30 | 31 | % Create matrices for the images 32 | color = zeros(c_height*COL_SCALE,c_width*COL_SCALE,3,'uint8'); 33 | 34 | % color stream figure 35 | c.h = figure; 36 | c.ax = axes; 37 | c.im = imshow(color,[]); 38 | title('Color Source (press q to exit)'); 39 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 40 | 41 | model = zeros(3,1347); 42 | figure, hmodel = plot3(model(1,:),model(2,:),model(3,:),'.'); 43 | %axis([-1 1 -1 1 -1 1]) 44 | title('HD Face Model (press q to exit)') 45 | xlabel('X'), ylabel('Y'), zlabel('Z'); 46 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 47 | 48 | % Loop until pressing 'q' on any figure 49 | k=[]; 50 | 51 | disp('Press q on any figure to exit') 52 | while true 53 | % Get frames from Kinect and save them on underlying buffer 54 | validData = k2.updateData; 55 | 56 | % Before processing the data, we need to make sure that a valid 57 | % frame was acquired. 58 | if validData 59 | % Get color frame 60 | color = k2.getColor; 61 | 62 | % update color figure 63 | color = imresize(color,COL_SCALE); 64 | c.im = imshow(color, 'Parent', c.ax); 65 | 66 | % Get the HDfaces data 67 | % the output faces is a structure array with at most 6 faces. Each face has 68 | % the following fields: 69 | % - FaceBox: rectangle coordinates representing the face position in 70 | % color space. [left, top, right, bottom]. 71 | % - FaceRotation: 1 x 3 vector containing: pitch, yaw, roll angles 72 | % - HeadPivot: 1 x 3 vector, computed center of the head, 73 | % which the face may be rotated around. 74 | % This point is defined in the Kinect body coordinate system. 75 | % - AnimationUnits: 17 animation units (AUs). Most of the AUs are 76 | % expressed as a numeric weight varying between 0 and 1. 77 | % For details see https://msdn.microsoft.com/en-us/library/microsoft.kinect.face.faceshapeanimations.aspx 78 | % - ShapeUnits: 94 shape units (SUs). Each SU is expressed as a 79 | % numeric weight that typically varies between -2 and +2. 80 | % For details see https://msdn.microsoft.com/en-us/library/microsoft.kinect.face.faceshapedeformations.aspx 81 | % - FaceModel: 3 x 1347 points of a 3D face model computed by face capture 82 | faces = k2.getHDFaces('WithVertices','true'); 83 | 84 | % Display the HD faces data and face model(1347 points): 85 | % Parameters: 86 | % 1) image axes 87 | % 2) faces structure obtained with getFaces 88 | % 3) display HD face model vertices(1347 points)? 89 | % 4) display text information (animation units)? 90 | % 5) text font size in pixels 91 | k2.drawHDFaces(c.ax,faces,true,true,20); 92 | 93 | % Plot face model points 94 | if size(faces,2) > 0 95 | model = faces(1).FaceModel; 96 | set(hmodel,'XData',model(1,:),'YData',model(2,:),'ZData',model(3,:)); 97 | view(0,90) 98 | end 99 | end 100 | 101 | % If user presses 'q', exit loop 102 | if ~isempty(k) 103 | if strcmp(k,'q'); break; end; 104 | end 105 | 106 | pause(0.02) 107 | end 108 | 109 | % Close kinect object 110 | k2.delete; 111 | 112 | %close all; 113 |
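The 17 animation units come back as weights in the order of the SDK's FaceShapeAnimations enumeration (JawOpen is assumed here to be its first entry, following the Microsoft documentation linked above):

% Read one animation unit from the first tracked HD face
if size(faces,2) > 0
    jawOpen = faces(1).AnimationUnits(1);   % weight in [0,1]
    disp(['JawOpen: ' num2str(jawOpen)]);
end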
-------------------------------------------------------------------------------- /Kin2/faceHDDemo2.m: -------------------------------------------------------------------------------- 1 | % FACEHDDEMO2 Illustrates how to build a personal face model and use it to 2 | % track a face. 3 | % Quoting the High definition face tracking documentation: 4 | % https://msdn.microsoft.com/en-us/library/dn785525.aspx 5 | % "The tracking quality increases if the face has been captured, 6 | % and the output of the capture used to initialize the face tracking. 7 | % This enables the face tracker to use the precise geometry of the face 8 | % instead of an average geometry, and results in more accurate 9 | % characterization of face motions." 10 | % 11 | % Note: You must add to the windows path the bin directory containing the 12 | % Kinect20.Face.dll. 13 | % For example: C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\bin 14 | % 15 | % Juan R. Terven, jrterven@hotmail.com 16 | % Diana M. Cordova, diana_mce@hotmail.com 17 | % 18 | % Citation: 19 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 20 | % Computer Programming. 21 | % https://github.com/jrterven/Kin2, 2016. 22 | 23 | addpath('Mex'); 24 | clear all 25 | close all 26 | 27 | % Create Kinect 2 object and initialize it 28 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 29 | % 'face' and 'HDface' 30 | k2 = Kin2('color','HDface'); 31 | 32 | % images sizes 33 | c_width = 1920; c_height = 1080; 34 | 35 | % Color image is too big, let's scale it down 36 | COL_SCALE = 1.0; 37 | 38 | % Create matrices for the images 39 | color = zeros(c_height*COL_SCALE,c_width*COL_SCALE,3,'uint8'); 40 | 41 | % color stream figure 42 | c.h = figure; 43 | c.ax = axes; 44 | c.im = imshow(color,[]); 45 | title('Color Source (press q to exit)'); 46 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 47 | 48 | % Loop until pressing 'q' on any figure 49 | k=[]; 50 | 51 | faceModelReady = false; 52 | 53 | disp('Press q on any figure to exit') 54 | while true 55 | % Get frames from Kinect and save them on underlying buffer 56 | validData = k2.updateData; 57 | 58 | % Before processing the data, we need to make sure that a valid 59 | % frame was acquired. 60 | if validData 61 | % Get color frame 62 | color = k2.getColor; 63 | 64 | % update color figure 65 | color = imresize(color,COL_SCALE); 66 | c.im = imshow(color, 'Parent', c.ax); 67 | 68 | % Build a face model 69 | % Follow the instructions displayed on Command Window. 70 | % The user should be in front of the Kinect V2 at an adequate 71 | % distance (> 1 m) in order to track the body and face. 72 | % Move the face to the sides and up and down slowly to capture the 73 | % whole face and produce the model.
74 | % Input parameters: CollectionStatus and CaptureStatus display 75 | % status information on the Command Window 76 | if ~faceModelReady 77 | faceModelReady = k2.buildHDFaceModels('CollectionStatus','true','CaptureStatus','true'); 78 | else 79 | % Once the model is built, track the face using the model. 80 | % The model is internally attached to the face. 81 | faces = k2.getHDFaces('WithVertices','true'); 82 | k2.drawHDFaces(c.ax,faces,true,true,20); 83 | end 84 | 85 | end 86 | 87 | % If user presses 'q', exit loop 88 | if ~isempty(k) 89 | if strcmp(k,'q'); break; end; 90 | end 91 | 92 | pause(0.02) 93 | end 94 | 95 | % Close kinect object 96 | k2.delete; 97 | 98 | %close all; 99 | -------------------------------------------------------------------------------- /Kin2/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jrterven/MultiKinCalib/6b797c9c5926ff4e45fde11d015bb7e95a6bf570/Kin2/image.png -------------------------------------------------------------------------------- /Kin2/kinectFusionDemo.m: -------------------------------------------------------------------------------- 1 | % KINECTFUSIONDEMO Illustrates how to use the Kin2 to perform 3D 2 | % reconstruction 3 | % 4 | % Note: You must add to the windows path the bin directory containing the 5 | % Kinect20.Fusion.dll 6 | % For example: C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\bin 7 | % 8 | % WARNING: KINECT FUSION FUNCTIONALITY IS STILL IN BETA. 9 | % A MEMORY LEAK IN THE C++ CODE STILL NEEDS FIXING; IT CAUSES MATLAB 10 | % TO CRASH ON A SECOND RUN. 11 | % 12 | % Juan R. Terven, jrterven@hotmail.com 13 | % Diana M. Cordova, diana_mce@hotmail.com 14 | % 15 | % Citation: 16 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 17 | % Computer Programming. 18 | % https://github.com/jrterven/Kin2, 2016. 19 | 20 | addpath('Mex'); 21 | clear all 22 | close all 23 | 24 | % Create Kinect 2 object and initialize it 25 | % Select sources as input parameters. 
26 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 27 | % 'face' and 'HDface' 28 | k2 = Kin2('color','depth'); 29 | 30 | k2.KF_init; 31 | 32 | % images sizes 33 | depth_width = 512; depth_height = 424; outOfRange = 4000; 34 | color_width = 1920; color_height = 1080; 35 | 36 | % Color image is too big, let's scale it down 37 | colorScale = 0.4; 38 | 39 | % Create matrices for the images 40 | depth = zeros(depth_height,depth_width,'uint16'); 41 | volume = zeros(depth_height,depth_width,3,'uint8'); 42 | color = zeros(color_height*colorScale,color_width*colorScale,3,'uint8'); 43 | 44 | % depth stream figure 45 | figure, h1 = imshow(depth,[0 outOfRange]); 46 | title('Depth Source (press q to exit)') 47 | colormap('Jet') 48 | colorbar 49 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 50 | 51 | % color stream figure 52 | figure, h2 = imshow(color,[]); 53 | title('Color Source (press q to exit)'); 54 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 55 | 56 | % volume stream figure 57 | figure, h3 = imshow(volume,[]); 58 | title('Volume Source (press q to exit)'); 59 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 60 | 61 | % Loop until pressing 'q' on any figure 62 | k=[]; 63 | disp('Press q on any figure to exit') 64 | while true 65 | tic 66 | % Get frames from Kinect and save them on underlying buffer 67 | validData = k2.updateData; 68 | 69 | % Before processing the data, we need to make sure that a valid 70 | % frame was acquired. 71 | if validData 72 | % Copy data to Matlab matrices 73 | depth = k2.getDepth; 74 | color = k2.getColor; 75 | 76 | k2.KF_update; 77 | volume = k2.KF_getVolumeImage; 78 | 79 | % update depth figure 80 | depth(depth>outOfRange) = outOfRange; % truncate depth 81 | set(h1,'CData',depth); 82 | 83 | % update color figure 84 | color = imresize(color,colorScale); 85 | set(h2,'CData',color); 86 | 87 | % update volume figure 88 | set(h3,'CData',volume); 89 | end 90 | 91 | % If user presses 'q', exit loop 92 | if ~isempty(k) 93 | if strcmp(k,'q'); break; end; 94 | if strcmp(k,'m'); 95 | mesh = k2.KF_getMesh; 96 | k=[]; 97 | end; 98 | end 99 | 100 | pause(0.02) 101 | end 102 | 103 | % Close kinect object 104 | k2.delete; 105 | 106 | close all; 107 | -------------------------------------------------------------------------------- /Kin2/mapping2CamDemo.m: -------------------------------------------------------------------------------- 1 | % MAPPING2CAMDEMO Illustrates how to map points between depth and color images 2 | % 3 | % Usage: 4 | % - Press 'd' to select a point on the depth image. The selected point 5 | % will be mapped from depth to camera and the resulting coordinates are 6 | % printed on command window. Then the camera coordinates are mapped 7 | % back to depth space and printed to command window. 8 | % - Press 'c' to select a point on the color image. The selected point 9 | % will be mapped from color to camera and the resulting coordinates are 10 | % printed on command window. Then the camera coordinates are mapped 11 | % back to color space and printed to command window. 12 | % - Press 'q' to exit. 13 | % 14 | % Juan R. Terven, jrterven@hotmail.com 15 | % Diana M. Cordova, diana_mce@hotmail.com 16 | % 17 | % Citation: 18 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 19 | % Computer Programming. 20 | % https://github.com/jrterven/Kin2, 2016. 
21 | % 22 | addpath('Mex'); 23 | clear all 24 | close all 25 | 26 | % Create Kinect 2 object and initialize it 27 | % Select sources as input parameters. 28 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 29 | % 'face' and 'HDface' 30 | k2 = Kin2('color','depth'); 31 | 32 | % images sizes 33 | depth_width = 512; depth_height = 424; outOfRange = 4000; 34 | color_width = 1920; color_height = 1080; 35 | 36 | % Color image is too big, let's scale it down 37 | COL_SCALE = 0.5; 38 | 39 | % Create matrices for the images 40 | depth = zeros(depth_height,depth_width,'uint16'); 41 | color = zeros(color_height*COL_SCALE,color_width*COL_SCALE,3,'uint8'); 42 | 43 | % Images used to draw the markers 44 | depthAdditions = zeros(depth_height,depth_width,3,'uint8'); 45 | colorAdditions = zeros(color_height*COL_SCALE,color_width*COL_SCALE,3,'uint8'); 46 | 47 | % depth stream figure 48 | h1 = figure; 49 | hdepth = imshow(depth,[0 255]); 50 | title('Depth Source (press q to exit)') 51 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 52 | 53 | % color stream figure 54 | h2 = figure; 55 | hcolor = imshow(color,[]); 56 | title('Color Source (press q to exit)'); 57 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 58 | 59 | 60 | % Loop until pressing 'q' on any figure 61 | k=[]; 62 | 63 | disp('Instructions:') 64 | disp('Press d to select a point on the depth image') 65 | disp('Press c to select a point on the color image') 66 | disp('Press q on any figure to exit') 67 | 68 | while true 69 | % Get frames from Kinect and save them on underlying buffer 70 | validData = k2.updateData; 71 | 72 | % Before processing the data, we need to make sure that a valid 73 | % frame was acquired. 74 | if validData 75 | % Copy data to Matlab matrices 76 | depth = k2.getDepth; 77 | color = k2.getColor; 78 | 79 | % update depth figure 80 | depth8u = uint8(depth*(255/outOfRange)); 81 | depth8uc3 = repmat(depth8u,[1 1 3]); 82 | set(hdepth,'CData',depth8uc3 + depthAdditions); 83 | 84 | % update color figure 85 | color = imresize(color,COL_SCALE); 86 | set(hcolor,'CData',color + colorAdditions); 87 | end 88 | 89 | % If user presses 'd', enter points selection mode on the depth image 90 | % If user presses 'c', enter points selection mode on the color image 91 | % If user presses 'q', exit loop 92 | if ~isempty(k) 93 | if strcmp(k,'d') 94 | figure(h1); 95 | title('Click the image to sample a point'); 96 | 97 | % Grab 1 point 98 | [x,y] = ginput(1); 99 | disp('Input depth coordinates'); 100 | disp([x y]) 101 | % Draw the selected points in the depth image 102 | depthAdditions = insertMarker(depthAdditions,[x y],'Color','red'); 103 | 104 | % Map the point from depth coordinates to camera coordinates 105 | % Input: 1 x 2 matrix (1 point, x,y) 106 | % Output: 1 x 3 matrix (1 point, x,y,z) 107 | camCoords = k2.mapDepthPoints2Camera([x y]); 108 | 109 | disp('Mapped camera coordinates'); 110 | disp(camCoords); 111 | 112 | % Map the resulting camera point back to depth space 113 | depthCoords = k2.mapCameraPoints2Depth(camCoords); 114 | disp('Mapped depth coordinates'); 115 | disp(depthCoords); 116 | 117 | k = []; 118 | elseif strcmp(k,'c') 119 | figure(h2); 120 | title('Click the image to sample a point'); 121 | 122 | % Grab 1 point 123 | [x,y] = ginput(1); 124 | disp('Input color coordinates'); 125 | disp([x/COL_SCALE y/COL_SCALE]); 126 | 127 | % Draw the selected point in the color image 128 | colorAdditions = insertMarker(colorAdditions,[x 
y],'Color','green','Size',5); 129 | 130 | % Map the point from color coordinates to camera coordinates 131 | % Input: 1 x 2 matrix (1 point, x,y) 132 | % Output: 1 x 3 matrix (1 point, x,y,z) 133 | camCoords = k2.mapColorPoints2Camera([x/COL_SCALE y/COL_SCALE]); 134 | 135 | disp('Mapped camera coordinates') 136 | disp(camCoords); 137 | 138 | % Map the resulting camera point back to color space 139 | colorCoords = k2.mapCameraPoints2Color(camCoords); 140 | disp('Mapped color coordinates'); 141 | disp(colorCoords); 142 | 143 | k = []; 144 | end 145 | 146 | if strcmp(k,'q'); break; end; 147 | end 148 | 149 | pause(0.02) 150 | end 151 | 152 | % Close kinect object 153 | k2.delete; 154 | 155 | close all 156 | -------------------------------------------------------------------------------- /Kin2/mappingDemo.m: -------------------------------------------------------------------------------- 1 | % MAPPINGDEMO Illustrates how to map points between depth and color images 2 | % 3 | % Usage: 4 | % - Press 'd' to select 5 points on the depth image. The selected points 5 | % will be mapped from depth to color and will be displayed on both 6 | % images in red. 7 | % - Press 'c' to select 5 points on the color image. The selected points 8 | % will be mapped from color to depth and will be displayed on both 9 | % images in green. 10 | % - Press 'q' to exit. 11 | % 12 | % Juan R. Terven, jrterven@hotmail.com 13 | % Diana M. Cordova, diana_mce@hotmail.com 14 | % 15 | % Citation: 16 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 17 | % Computer Programming. 18 | % https://github.com/jrterven/Kin2, 2016. 19 | 20 | addpath('Mex'); 21 | clear all 22 | close all 23 | 24 | % Create a Kin2 object and initialize it 25 | % Select sources as input parameters. 26 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 27 | % 'face' and 'HDface' 28 | k2 = Kin2('color','depth'); 29 | 30 | % image sizes 31 | d_width = 512; d_height = 424; outOfRange = 4000; 32 | c_width = 1920; c_height = 1080; 33 | 34 | % Color image is too big, let's scale it down 35 | COL_SCALE = 0.5; 36 | 37 | % Create matrices for the images 38 | depth = zeros(d_height,d_width,'uint16'); 39 | color = zeros(c_height*COL_SCALE,c_width*COL_SCALE,3,'uint8'); 40 | 41 | % Images used to draw the markers 42 | depthAdditions = zeros(d_height,d_width,3,'uint8'); 43 | colorAdditions = zeros(c_height*COL_SCALE,c_width*COL_SCALE,3,'uint8'); 44 | 45 | % depth stream figure 46 | d.h = figure; 47 | d.ax = axes('units','pixels'); 48 | d.im = imshow(depth,[0 255]); 49 | title('Depth Source (press q to exit)') 50 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 51 | 52 | % color stream figure 53 | c.h = figure; 54 | c.im = imshow(color,[]); 55 | title('Color Source (press q to exit)'); 56 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 57 | 58 | 59 | % Loop until pressing 'q' on any figure 60 | k=[]; 61 | 62 | disp('Instructions:') 63 | disp('Press d to select 5 points on the depth image') 64 | disp('Press c to select 5 points on the color image') 65 | disp('Press q on any figure to exit') 66 | 67 | while true 68 | % Get frames from Kinect and save them on underlying buffer 69 | validData = k2.updateData; 70 | 71 | % Before processing the data, we need to make sure that a valid 72 | % frame was acquired.
73 | if validData 74 | % Copy data to Matlab matrices 75 | depth = k2.getDepth; 76 | color = k2.getColor; 77 | 78 | % update depth figure 79 | depth8u = uint8(depth*(255/outOfRange)); 80 | depth8uc3 = repmat(depth8u,[1 1 3]); 81 | set(d.im,'CData',depth8uc3 + depthAdditions); 82 | 83 | % update color figure 84 | color = imresize(color,COL_SCALE); 85 | set(c.im,'CData',color + colorAdditions); 86 | end 87 | 88 | % If user presses 'd', enter point-selection mode on the depth image 89 | % If user presses 'c', enter point-selection mode on the color image 90 | % If user presses 'q', exit loop 91 | if ~isempty(k) 92 | if strcmp(k,'d') 93 | figure(d.h); 94 | title('Click the image to sample 5 points'); 95 | 96 | % Grab 5 points 97 | [x,y] = ginput(5); 98 | disp('Input depth coordinates'); 99 | disp([x y]) 100 | % Draw the selected points in the depth image 101 | depthAdditions = insertMarker(depthAdditions,[x y],'Color','red'); 102 | 103 | % Using the mapping, map the points from depth coordinates to color coordinates 104 | % Input and output: n x 2 matrix (n points) 105 | colorCoords = k2.mapDepthPoints2Color([x y]); 106 | colorCoords = colorCoords * COL_SCALE; % scale the color coordinates 107 | 108 | disp('Output color coordinates'); 109 | disp(colorCoords); 110 | 111 | % Draw the output coordinates on the color image 112 | colorAdditions = insertMarker(colorAdditions, colorCoords,'Color','red','Size',10); 113 | 114 | k = []; 115 | elseif strcmp(k,'c') 116 | figure(c.h); 117 | title('Click the image to sample 5 points'); 118 | 119 | % Grab 5 points 120 | [x,y] = ginput(5); 121 | disp('Input color coordinates'); 122 | disp([x y]); 123 | 124 | % Draw the selected points in the color image 125 | colorAdditions = insertMarker(colorAdditions,[x y],'Color','green','Size',5); 126 | 127 | % Using the mapping, map the points from color coordinates to depth coordinates 128 | % Input and output: n x 2 matrix (n points) 129 | depthCoords = k2.mapColorPoints2Depth([x/COL_SCALE y/COL_SCALE]); 130 | 131 | disp('Output depth coordinates') 132 | disp(depthCoords); 133 | 134 | % Draw the output coordinates on the depth image 135 | depthAdditions = insertMarker(depthAdditions,depthCoords,'Color','green'); 136 | 137 | 138 | k = []; 139 | end 140 | 141 | if strcmp(k,'q'); break; end; 142 | end 143 | 144 | pause(0.02) 145 | end 146 | 147 | % Close kinect object 148 | k2.delete; 149 | 150 | close all 151 | -------------------------------------------------------------------------------- /Kin2/pointCloudDemo.m: -------------------------------------------------------------------------------- 1 | % POINTCLOUDDEMO Illustrates how to use the Kin2 class to get the 2 | % pointcloud in camera space 3 | % 4 | % Juan R. Terven, jrterven@hotmail.com 5 | % Diana M. Cordova, diana_mce@hotmail.com 6 | % 7 | % Citation: 8 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 9 | % Computer Programming. 10 | % https://github.com/jrterven/Kin2, 2016. 11 | 12 | addpath('Mex'); 13 | clear all 14 | close all 15 | 16 | % Create Kinect 2 object and initialize it 17 | % Select sources as input parameters.
18 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 19 | % 'face' and 'HDface' 20 | k2 = Kin2('color', 'depth'); 21 | 22 | % image sizes 23 | depth_width = 512; depth_height = 424; outOfRange = 4000; 24 | 25 | % Create matrices for the images 26 | depth = zeros(depth_height,depth_width,'uint16'); 27 | pc = zeros(depth_height*depth_width,3); 28 | 29 | % depth stream figure 30 | figure, h1 = imshow(depth,[0 outOfRange]); 31 | title('Depth Source (press q to exit)') 32 | colormap('Jet') 33 | colorbar 34 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 35 | 36 | % point cloud figure 37 | figure 38 | pcax = axes; 39 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 40 | 41 | % Loop until pressing 'q' on any figure 42 | k=[]; 43 | 44 | disp('Press q on any figure to exit') 45 | downsample = 2; % subsample pointcloud 46 | while true 47 | % Get frames from Kinect and save them on underlying buffer 48 | validData = k2.updateData; 49 | 50 | % Before processing the data, we need to make sure that a valid 51 | % frame was acquired. 52 | if validData 53 | % Copy data to Matlab matrices 54 | depth = k2.getDepth; 55 | 56 | % update depth figure 57 | depth(depth>outOfRange) = outOfRange; % truncate depth 58 | set(h1,'CData',depth); 59 | 60 | % Obtain the point cloud with color 61 | [pc, pcColors] = k2.getPointCloud('output','raw','color','true'); 62 | pcColors = double(pcColors)/255.0; 63 | scatter3(pcax,pc(:,1),pc(:,2),pc(:,3),6,pcColors,'Marker','.'); 64 | axis(pcax,[-3 3 -3 3 0 4]) 65 | xlabel(pcax,'X'), ylabel(pcax,'Y'), zlabel(pcax,'Z'); 66 | view(pcax,180,-90) 67 | 68 | end 69 | 70 | % If user presses 'q', exit loop 71 | if ~isempty(k) 72 | if strcmp(k,'q'); 73 | break; 74 | elseif strcmp(k,'p'); 75 | pause; 76 | end; 77 | end 78 | 79 | pause(0.02) 80 | end 81 | 82 | % Close kinect object 83 | k2.delete; 84 | 85 | %close all; 86 | -------------------------------------------------------------------------------- /Kin2/pointCloudDemo2.m: -------------------------------------------------------------------------------- 1 | % POINTCLOUDDEMO2 Illustrates how to use the Kin2 class to obtain and display the 2 | % pointcloud with color. 3 | % 4 | % Note: This demo uses the pointCloud object introduced in MATLAB R2015b. 5 | % Older versions of MATLAB will not recognize this object. 6 | % 7 | % Juan R. Terven, jrterven@hotmail.com 8 | % Diana M. Cordova, diana_mce@hotmail.com 9 | % 10 | % Citation: 11 | % Terven J. Cordova D.M., "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 12 | % Computer Programming. 13 | % https://github.com/jrterven/Kin2, 2016. 14 | 15 | addpath('Mex'); 16 | clear all 17 | close all 18 | 19 | % Create Kinect 2 object and initialize it 20 | % Select sources as input parameters.
21 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 22 | % 'face' and 'HDface' 23 | k2 = Kin2('color','depth'); 24 | 25 | % image sizes 26 | depth_width = 512; depth_height = 424; outOfRange = 4000; 27 | 28 | % Create matrices for the images 29 | depth = zeros(depth_height,depth_width,'uint16'); 30 | pc = pointCloud(zeros(depth_height*depth_width,3)); 31 | 32 | 33 | % depth stream figure 34 | figure, h1 = imshow(depth,[0 outOfRange]); 35 | title('Depth Source (close figure to exit)') 36 | colormap('Jet') 37 | colorbar 38 | 39 | % point cloud figure 40 | pcFig.h = figure; 41 | pcFig.ax = pcshow(pc); 42 | 43 | disp('Close any figure to exit') 44 | downsample = 2; % subsample pointcloud 45 | 46 | % Main Loop 47 | while true 48 | % Get frames from Kinect and save them on underlying buffer 49 | validData = k2.updateData; 50 | 51 | % Before processing the data, we need to make sure that a valid 52 | % frame was acquired. 53 | if validData 54 | % Copy data to Matlab matrices 55 | depth = k2.getDepth; 56 | 57 | % update depth figure 58 | depth(depth>outOfRange) = outOfRange; % truncate depth 59 | 60 | % Display the depth image, 61 | % if the user closes the window, the program ends 62 | try 63 | set(h1,'CData',depth); 64 | catch 65 | break; % break the main loop 66 | end 67 | 68 | % Get the pointcloud with color from the Kinect 69 | % Select the output 'pointCloud' to use the MATLAB built-in 70 | % pointCloud object. 71 | % For MATLAB versions older than R2015b, use 'output','raw' and use 72 | % scatter3 to plot the point cloud. See pointCloudDemo.m 73 | pc = k2.getPointCloud('output','pointCloud','color','true'); 74 | 75 | % Display the point cloud, 76 | % if the user closes the window, the program ends 77 | try 78 | pcshow(pc,'Parent',pcFig.ax,'VerticalAxis','Y'); 79 | title(pcFig.ax,'Point Cloud'); 80 | xlabel(pcFig.ax,'X'); ylabel(pcFig.ax,'Y'); zlabel(pcFig.ax,'Z'); 81 | axis(pcFig.ax,[-4 4 -4 4 -4 4]); 82 | catch 83 | break; % break the main loop 84 | end 85 | end 86 | 87 | pause(0.02); 88 | end 89 | 90 | % Close kinect object 91 | k2.delete; 92 | -------------------------------------------------------------------------------- /Kin2/videoDemo.m: -------------------------------------------------------------------------------- 1 | % VIDEODEMO Illustrates how to use the Kin2 class which is an interface for 2 | % Kinect2 SDK functionality 3 | % 4 | % Juan R. Terven, jrterven@hotmail.com 5 | % Diana M. Cordova, diana_mce@hotmail.com 6 | % 7 | % Citation: 8 | % Terven Juan. Cordova-Esparza Diana, "Kin2. A Kinect 2 Toolbox for MATLAB", Science of 9 | % Computer Programming, 2016. DOI: http://dx.doi.org/10.1016/j.scico.2016.05.009 10 | % 11 | % https://github.com/jrterven/Kin2, 2016.
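% Example (an added minimal sketch showing the image sizes and types these
% demos assume; it requires at least one valid frame from the sensor):
%
%   k2 = Kin2('color','depth','infrared');
%   if k2.updateData
%       depth    = k2.getDepth;    % 424 x 512 uint16 depth map
%       color    = k2.getColor;    % 1080 x 1920 x 3 uint8 image
%       infrared = k2.getInfrared; % 424 x 512 uint16 image
%   end
%   k2.delete;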
12 | % 13 | 14 | addpath('Mex'); 15 | clear all 16 | close all 17 | 18 | % Create Kinect 2 object and initialize it 19 | % Available sources: 'color', 'depth', 'infrared', 'body_index', 'body', 20 | % 'face' and 'HDface' 21 | k2 = Kin2('color','depth','infrared'); 22 | 23 | % image sizes 24 | depth_width = 512; depth_height = 424; outOfRange = 4000; 25 | color_width = 1920; color_height = 1080; 26 | 27 | % Color image scale factor (reduce below 1 to shrink the display) 28 | colorScale = 1; 29 | 30 | % Create matrices for the images 31 | depth = zeros(depth_height,depth_width,'uint16'); 32 | infrared = zeros(depth_height,depth_width,'uint16'); 33 | color = zeros(color_height*colorScale,color_width*colorScale,3,'uint8'); 34 | depthColor = zeros(depth_height,depth_width,3,'uint8'); 35 | 36 | % depth stream figure 37 | figure, h1 = imshow(depth,[0 outOfRange]); 38 | title('Depth Source (press q to exit)') 39 | colormap('Jet') 40 | colorbar 41 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 42 | 43 | % color stream figure 44 | figure, h2 = imshow(color,[]); 45 | title('Color Source (press q to exit)'); 46 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 47 | 48 | % infrared stream figure 49 | figure, h3 = imshow(infrared); 50 | title('Infrared Source (press q to exit)'); 51 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 52 | 53 | % depthColor stream figure 54 | figure, h4 = imshow(depthColor,[]); 55 | title('Color2DepthAligned (press q to exit)'); 56 | set(gcf,'keypress','k=get(gcf,''currentchar'');'); % listen keypress 57 | 58 | % Loop until pressing 'q' on any figure 59 | k=[]; 60 | 61 | disp('Press q on any figure to exit') 62 | while true 63 | % Get frames from Kinect and save them on underlying buffer 64 | validData = k2.updateData; 65 | 66 | % Before processing the data, we need to make sure that a valid 67 | % frame was acquired. 68 | if validData 69 | % Copy data to Matlab matrices 70 | depth = k2.getDepth; 71 | color = k2.getColor; 72 | infrared = k2.getInfrared; 73 | depthColor = k2.getAlignColor2Depth; 74 | 75 | % update depth figure 76 | depth(depth>outOfRange) = outOfRange; % truncate depth 77 | set(h1,'CData',depth); 78 | 79 | % update color figure 80 | color = imresize(color,colorScale); 81 | set(h2,'CData',color); 82 | 83 | % update infrared figure 84 | %infrared = imadjust(infrared,[0 0.2],[0.5 1]); 85 | infrared = imadjust(infrared,[],[],0.5); 86 | set(h3,'CData',infrared); 87 | 88 | set(h4,'CData',depthColor); 89 | end 90 | 91 | % If user presses 'q', exit loop 92 | if ~isempty(k) 93 | if strcmp(k,'q'); break; end; 94 | end 95 | 96 | pause(0.02) 97 | end 98 | 99 | % Close kinect object 100 | k2.delete; 101 | 102 | close all; 103 | -------------------------------------------------------------------------------- /PreCalib.m: -------------------------------------------------------------------------------- 1 | % function: 2 | % [Rs, ts, T] = PreCalib(camA,camB,dataAcqFile) 3 | % 4 | % Description: 5 | % Perform a pre-calibration of the extrinsic parameters between a pair of 6 | % Kinect cameras. 7 | % 8 | % Dependencies: 9 | % - function CostFunPreCalib: this function is the one we wish to 10 | % minimize. 11 | % - file 'calibParameters.mat' created with proj0_ServerMultiKinectCalib.
12 | % From this file we use the variables: 'dataDir', 'minFunType', 'pointsToConsider' 13 | % - file dataAcqFile created with proj01_ServerCapturePointsFromCalibObject.m 14 | % From this file we get the calibration camera points of the camera 15 | % we wish to calibrate i.e. cam2.camPoints, cam3.camPoints, etc. 16 | % 17 | % Inputs: 18 | % - camA, camB: indices of the pair of cameras to calibrate (camB wrt camA) 19 | % - file dataAcqFile 20 | % 21 | % Usage: 22 | % 23 | % Return: 24 | % - Rs, ts: Estimation of the extrinsic parameters of camera camB wrt 25 | % camera camA. 26 | % - T: 4x4 transformation matrix composed of Rs and ts 27 | % 28 | % Authors: 29 | % Diana M. Cordova 30 | % Juan R. Terven 31 | % Date: 16-Jan-2016 32 | 33 | function [Rs, ts, T] = PreCalib(camA,camB,dataAcqFile) 34 | 35 | % Load calibration parameters: 36 | %'dataDir', 'minFunType', 'pointsToConsider' 37 | load('calibParameters.mat'); 38 | load(dataAcqFile); 39 | 40 | %% Calibrate Depth Camera 41 | R = eye(3); 42 | t = [0,0,0]; 43 | 44 | % Extract the data from the structures located in dataAcqFile 45 | if camA == 1 46 | XwA = cam1.camPoints; 47 | elseif camA == 2 48 | XwA = cam2.camPoints; 49 | elseif camA == 3 50 | XwA = cam3.camPoints; 51 | elseif camA == 4 52 | XwA = cam4.camPoints; 53 | end 54 | 55 | if camB == 1 56 | XwB = cam1.camPoints; 57 | elseif camB == 2 58 | XwB = cam2.camPoints; 59 | elseif camB == 3 60 | XwB = cam3.camPoints; 61 | elseif camB == 4 62 | XwB = cam4.camPoints; 63 | end 64 | 65 | if strcmp(minFunType,'pointReg') 66 | 67 | 68 | if pointsToConsider ~= -1 69 | XwA = XwA(1:pointsToConsider,:); 70 | XwB = XwB(1:pointsToConsider,:); 71 | end 72 | 73 | [Rs,ts] = rigid_transform_3D(XwA, XwB); 74 | rmse = calculateRegistrationError(XwA',XwB',Rs,ts); 75 | disp(['RMSE of Precalibration:' num2str(rmse)]); 76 | 77 | elseif strcmp(minFunType,'fsolve') 78 | 79 | % Generates a file with the variables needed by the cost function 80 | camNum = camB; Xw1 = XwA; Xw2 = XwB; save([dataDir '/variablesForCostFunPreCalib.mat'],'camNum','Xw1','Xw2'); 81 | 82 | x0 = [0, 0, 0, t(1), t(2), t(3)]; 83 | 84 | options = optimset('MaxFunEvals',100000,'TolFun',1e-100,'TolX',1e-100, 'MaxIter', 10000); 85 | x = fsolve(@CostFunPreCalib,x0,options); 86 | 87 | Reul = [x(1) x(2) x(3)]; 88 | 89 | Rx=[1 0 0;0 cos(Reul(1)) sin(Reul(1));0 -sin(Reul(1)) cos(Reul(1))]; 90 | Ry=[cos(Reul(2)) 0 -sin(Reul(2));0 1 0;sin(Reul(2)) 0 cos(Reul(2))]; 91 | Rz=[cos(Reul(3)) sin(Reul(3)) 0;-sin(Reul(3)) cos(Reul(3)) 0;0 0 1]; 92 | 93 | Rs = Rx*Ry*Rz; 94 | ts = [x(4); x(5); x(6)]; 95 | else 96 | disp('Invalid minimization function: only supports pointReg or fsolve'); 97 | end 98 | 99 | % Test if it is a valid rotation matrix 100 | disp('R:'), disp(Rs) 101 | disp('Rotation determinant:') 102 | det(Rs) 103 | 104 | disp('t='), disp(ts) 105 | 106 | T = [Rs ts; 0 0 0 1]; 107 | 108 | 109 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MultiKinCalib 2 | Multiple Kinect V2 Calibration 3 | 4 | Run the main script to display the main GUI. 5 | Connect a Kinect V2 to the local computer and one or more Kinect V2 to remote computers on the same LAN. 6 | Run each step of the main GUI. 7 | 8 | ## Setup 9 | Setup calibration parameters such as: 10 | Computer role: Server (local machine) or client. 11 | Cameras: Number of cameras. 12 | Images to save: Number of acquisition images (minimum 2).
13 | Output directory: Where to save the data acquisition and the intermediate results. 14 | Matching distance: Point cloud matching distance in millimeters. 15 | Skew: Calibrate with skew parameter (yes or no). 16 | Radial dist Coeff: Number of radial distortion parameters (2 or 3). 17 | Tangential dist coeff: Calibrate with tangential distortion (yes or no). 18 | Points on calib object: Number of collinear points on the calibration object. 19 | Calib object length: Length of the calibration object. 20 | 21 | ## Data Acquisition 22 | In this step, the server communicates with the remote clients to begin synchronized data acquisition. 23 | 24 | ## Precalibration 25 | With the data acquired in the previous step, the system estimates the pose of each camera. 26 | 27 | ## Matching 28 | Using the estimates of the pose of each camera, the system gathers matching points between adjacent views. 29 | 30 | ## Intrinsic Initialization 31 | With the multiple matching points from the previous step, the system estimates an initialization of the intrinsic parameters. 32 | 33 | ## Non-linear optimization 34 | Perform a non-linear optimization of the intrinsic and extrinsic parameters for each camera. 35 | 36 | ## Point Cloud 37 | Merge point cloud data using all the cameras. 38 | -------------------------------------------------------------------------------- /S05_calcReprojectionError.m: -------------------------------------------------------------------------------- 1 | function repError = S05_calcReprojectionError(result) 2 | 3 | load('calibParameters.mat'); 4 | 5 | X3d = []; 6 | x2d = []; 7 | height = 0; 8 | width = 0; 9 | 10 | camNum = result.CamNum; 11 | camType = result.CamType; 12 | 13 | if camNum == 1 14 | load(dataAcqFile) 15 | else 16 | % Load the point clouds: cam2_1Matches, cam3_1Matches, etc 17 | load(matchingResultsFile); 18 | end 19 | 20 | if strcmp(camType, 'depth') 21 | height = 424; 22 | width = 512; 23 | elseif strcmp(camType, 'color') 24 | height = 1080; 25 | width = 1920; 26 | end 27 | 28 | % Get the 3D points from the matching results 29 | % 3D points as a 3 x n matrix 30 | if camNum == 1 31 | X3d = (cam1.pointcloud(1:100:end,:))'; 32 | 33 | % 2D points as a 2 x n matrix 34 | if strcmp(camType, 'depth') 35 | x2d = double(cam1.depthProj(1:100:end,:))'; 36 | elseif strcmp(camType, 'color') 37 | x2d = double(cam1.colorProj(1:100:end,:))'; 38 | end 39 | elseif camNum == 2 40 | X3d = cam2_1Matches'; 41 | 42 | % 2D points as a 2 x n matrix 43 | if strcmp(camType, 'depth') 44 | x2d = double(cam2_1depthProj)'; 45 | elseif strcmp(camType, 'color') 46 | x2d = double(cam2_1colorProj)'; 47 | end 48 | elseif camNum == 3 49 | X3d = cam3_1Matches'; 50 | 51 | if strcmp(camType, 'depth') 52 | x2d = double(cam3_1depthProj)'; 53 | elseif strcmp(camType, 'color') 54 | x2d = double(cam3_1colorProj)'; 55 | end 56 | elseif camNum == 4 57 | X3d = cam4_1Matches'; 58 | 59 | if strcmp(camType, 'depth') 60 | x2d = double(cam4_1depthProj)'; 61 | elseif strcmp(camType, 'color') 62 | x2d = double(cam4_1colorProj)'; 63 | end 64 | end 65 | 66 | % Remove outliers from X3d in both matrices 67 | % Find columns with invalid values 68 | x2dValidCols = ~any( isnan( x2d ) | isinf( x2d ) | x2d > width | x2d < 0, 1 ); 69 | x3dValidCols = ~any( isnan( X3d ) | isinf( X3d ) | X3d > 8, 1 ); 70 | validCols = x2dValidCols & x3dValidCols; 71 | 72 | x2d = x2d(:,validCols); 73 | X3d = X3d(:,validCols); 74 | 75 | intrinsic = result.Intrinsics; 76 | f = intrinsic(1,1); 77 | cx = intrinsic(1,3); 78 | cy = intrinsic(2,3); 79 | 80 | % Rotation
81 | R = result.Rot; 82 | 83 | % Translation 84 | t = result.t; 85 | 86 | radDistNumCoeff = size(result.RadDist,2); 87 | if isempty(result.TanDist) 88 | distTan = false; 89 | else 90 | distTan = true; 91 | end 92 | 93 | if radDistNumCoeff == 2 94 | k1 = result.RadDist(1); 95 | k2 = result.RadDist(2); 96 | elseif radDistNumCoeff == 3 97 | k1 = result.RadDist(1); 98 | k2 = result.RadDist(2); 99 | k3 = result.RadDist(3); 100 | end 101 | 102 | if distTan 103 | p1 = result.TanDist(1); 104 | p2 = result.TanDist(2); 105 | end 106 | 107 | N = size(X3d,2); 108 | 109 | Xw = X3d; 110 | xc = x2d; 111 | xc(2,:) = height - xc(2,:); 112 | 113 | % Apply extrinsic parameters 114 | proj = R * Xw + repmat(t,1,size(Xw,2)); 115 | 116 | % Apply Intrinsic parameters to get the projection 117 | proj = intrinsic * proj; 118 | proj = proj ./ repmat(proj(3,:),3,1); 119 | 120 | % Distortion correction 121 | if radDistNumCoeff > 0 122 | u = proj(1,:); 123 | v = proj(2,:); 124 | ud=xc(1,:); 125 | vd=xc(2,:); 126 | 127 | r = sqrt((u-cx).^2 + (v-cy).^2); 128 | 129 | if radDistNumCoeff == 2 130 | comp(1,:) = (1 + k1*r.^2 + k2*r.^4); 131 | comp(2,:) = (1 + k1*r.^2 + k2*r.^4); 132 | elseif radDistNumCoeff == 3 133 | comp(1,:) = (1 + k1*r.^2 + k2*r.^4 + k3*r.^6); 134 | comp(2,:) = (1 + k1*r.^2 + k2*r.^4 + k3*r.^6); 135 | end 136 | 137 | if distTan 138 | comp(1,:) = comp(1,:) + 2*p1*(u-cx).*(v-cy) + p2*(r.^2+2*(u-cx).^2); 139 | comp(2,:) = comp(2,:) + p1*(r.^2+2*(v-cy).^2) + 2*p2*(u-cx).*(v-cy); 140 | end 141 | 142 | % Reprojection error with distortion 143 | errors(1,:)= (u-cx).*comp(1,:) - (ud-cx); 144 | errors(2,:)= (v-cy).*comp(2,:) - (vd-cy); 145 | else 146 | % Reprojection error without distortion 147 | errors = proj(1:2,:) - xc; 148 | end 149 | 150 | % Compute the RMS reprojection error 151 | err = errors .* errors; 152 | err = sum(err(:)); 153 | repError = sqrt(err/N); 154 | 155 | end 156 | -------------------------------------------------------------------------------- /S05_costFunVec.m: -------------------------------------------------------------------------------- 1 | % Function: 2 | % S05_costFunVec 3 | % 4 | % Description: 5 | % Function that we wish to minimize. 6 | % 7 | % Dependencies: 8 | % - calibParameters.mat: file with variables defined in proj0_Multi_Kinect_Calibration.m 9 | % such as: dataDir, distortRad, distortTan, withSkew 10 | % - matchingResults.mat: file containing the 3D matching points 11 | % (cam2_1Matches, cam3_1Matches, etc) and 2D projections 12 | % (cam2_1depthProj, cam2_1colorProj, etc) 13 | % - variablesForCostFun.mat: file with variables 'camNum','camType' 14 | % created in FinalCalibration. 15 | % 16 | % Inputs: 17 | % x0: parameters that we wish to find by minimizing the function f 18 | % 19 | % Usage: 20 | % This function is called by an optimization function such as fsolve, 21 | % lsqnonlin, fmincon 22 | % 23 | % Results: 24 | % find the values of x0 that minimize f 25 | % 26 | % Authors: 27 | % Diana M. Cordova 28 | % Juan R.
Terven 29 | % Date: 16-Jan-2016 30 | function fun = S05_costFunVec(x0) 31 | 32 | persistent X3d x2d distRad distTan height width with_skew; 33 | 34 | % The first iteration loads the data 35 | if isempty(X3d) 36 | X3d = []; 37 | x2d = []; 38 | 39 | % Load dataDir, distortRad, distortTan, withSkew 40 | load('calibParameters.mat'); 41 | distRad = distortRad; 42 | distTan = distortTan; 43 | with_skew = withSkew; 44 | 45 | % Load variables camNum and camType 46 | load([dataDir '/variablesForCostFun.mat']); 47 | 48 | if camNum == 1 49 | load(dataAcqFile) 50 | else 51 | % Load the point clouds: cam2_1Matches, cam3_1Matches, etc 52 | load(matchingResultsFile); 53 | end 54 | 55 | if strcmp(camType, 'depth') 56 | height = 424; 57 | width = 512; 58 | elseif strcmp(camType, 'color') 59 | height = 1080; 60 | width = 1920; 61 | end 62 | 63 | 64 | % Get the 3D points from the matching results 65 | % 3D points as a 3 x n matrix 66 | if camNum == 1 67 | X3d = (cam1.pointcloud(1:100:end,:))'; 68 | 69 | % 2D points as a 2 x n matrix 70 | if strcmp(camType, 'depth') 71 | x2d = double(cam1.depthProj(1:100:end,:))'; 72 | elseif strcmp(camType, 'color') 73 | x2d = double(cam1.colorProj(1:100:end,:))'; 74 | end 75 | elseif camNum == 2 76 | X3d = cam2_1Matches'; 77 | 78 | % 2D points as a 2 x n matrix 79 | if strcmp(camType, 'depth') 80 | x2d = double(cam2_1depthProj)'; 81 | elseif strcmp(camType, 'color') 82 | x2d = double(cam2_1colorProj)'; 83 | end 84 | elseif camNum == 3 85 | X3d = cam3_1Matches'; 86 | 87 | if strcmp(camType, 'depth') 88 | x2d = double(cam3_1depthProj)'; 89 | elseif strcmp(camType, 'color') 90 | x2d = double(cam3_1colorProj)'; 91 | end 92 | elseif camNum == 4 93 | X3d = cam4_1Matches'; 94 | 95 | if strcmp(camType, 'depth') 96 | x2d = double(cam4_1depthProj)'; 97 | elseif strcmp(camType, 'color') 98 | x2d = double(cam4_1colorProj)'; 99 | end 100 | end 101 | 102 | % Remove outliers from X3d in both matrices 103 | % Find columns with invalid values 104 | x2dValidCols = ~any( isnan( x2d ) | isinf( x2d ) | x2d > width | x2d < 0, 1 ); 105 | x3dValidCols = ~any( isnan( X3d ) | isinf( X3d ) | X3d > 8, 1 ); 106 | validCols = x2dValidCols & x3dValidCols; 107 | 108 | x2d = x2d(:,validCols); 109 | X3d = X3d(:,validCols); 110 | 111 | % x2d(:,any(X3d > 8)) = []; % remove columns with values greater than 8 meters 112 | % x2d(:,any(X3d == inf)) = []; 113 | % x2d(:,any(X3d == -inf)) = []; 114 | % 115 | % X3d(:,any(X3d > 8)) = []; % remove columns with values greater than 8 meters 116 | % X3d(:,any(X3d == inf)) = []; 117 | % X3d(:,any(X3d == -inf)) = []; 118 | % 119 | % % Now remove outliers from x2d in both matrices 120 | % X3d(:,any(x2d > width)) = []; 121 | % X3d(:,any(x2d < 0)) = []; % remove columns with negative values 122 | % X3d(:,any(x2d == inf)) = []; 123 | % X3d(:,any(x2d == -inf)) = []; 124 | % 125 | % x2d(:,any(x2d > width)) = []; 126 | % x2d(:,any(x2d < 0)) = []; % remove columns with negative values 127 | % x2d(:,any(x2d == inf)) = []; 128 | % x2d(:,any(x2d == -inf)) = []; 129 | 130 | end 131 | 132 | f = x0(1); % focal length 133 | cx = x0(2); % principal point 134 | cy = x0(3); 135 | 136 | % Rotation 137 | R = eul2r(x0(4),x0(5),x0(6)); 138 | % Rx=[1 0 0;0 cos(x0(4)) sin(x0(4));0 -sin(x0(4)) cos(x0(4))]; 139 | % Ry=[cos(x0(5)) 0 -sin(x0(5));0 1 0;sin(x0(5)) 0 cos(x0(5))]; 140 | % Rz=[cos(x0(6)) sin(x0(6)) 0;-sin(x0(6)) cos(x0(6)) 0;0 0 1]; 141 | % R = Rx*Ry*Rz; 142 | 143 | % Translation 144 | t = [x0(7);x0(8);x0(9)]; 145 | 146 | 147 | if distRad == 2 148 | k1 = x0(10); 149 | k2 = x0(11); 150 |
if distTan 151 | p1 = x0(12); 152 | p2 = x0(13); 153 | end 154 | elseif distRad == 3 155 | k1 = x0(10); 156 | k2 = x0(11); 157 | k3 = x0(12); 158 | if distTan 159 | p1 = x0(13); 160 | p2 = x0(14); 161 | end 162 | end 163 | 164 | 165 | if with_skew 166 | s = x0(end); 167 | else 168 | s = 0; 169 | end 170 | 171 | 172 | intrinsic = [f s cx; 173 | 0 f cy; 174 | 0 0 1]; 175 | 176 | 177 | N = size(X3d,2); 178 | 179 | Xw = X3d; 180 | xc = x2d; 181 | xc(2,:) = height - xc(2,:); 182 | 183 | % Apply extrinsic parameters 184 | proj = R * Xw + repmat(t,1,size(Xw,2)); 185 | 186 | % Apply Intrinsic parameters to get the projection 187 | proj = intrinsic * proj; 188 | proj = proj ./ repmat(proj(3,:),3,1); 189 | 190 | % Distortion correction 191 | if distRad > 0 192 | u = proj(1,:); 193 | v = proj(2,:); 194 | ud=xc(1,:); 195 | vd=xc(2,:); 196 | 197 | r = sqrt((u-cx).^2 + (v-cy).^2); 198 | 199 | if distRad == 2 200 | compRad(1,:) = 1 + k1*r.^2 + k2*r.^4; 201 | compRad(2,:) = 1 + k1*r.^2 + k2*r.^4; 202 | elseif distRad == 3 203 | compRad(1,:) = 1 + k1*r.^2 + k2*r.^4 + k3*r.^6; 204 | compRad(2,:) = 1 + k1*r.^2 + k2*r.^4 + k3*r.^6; 205 | end 206 | 207 | compTan = zeros(2,size(u,2)); 208 | if distTan 209 | compTan(1,:) = 2*p1*(u-cx).*(v-cy) + p2*(r.^2+2*(u-cx).^2); 210 | compTan(2,:) = p1*(r.^2+2*(v-cy).^2) + 2*p2*(u-cx).*(v-cy); 211 | end 212 | 213 | % Reprojection error with distortion 214 | fun(1,:)= ((u-cx).*compRad(1,:) + compTan(1,:)) - (ud-cx); 215 | fun(2,:)= ((v-cy).*compRad(2,:) + compTan(2,:)) - (vd-cy); 216 | else 217 | % Reprojection error without distortion 218 | fun = proj(1:2,:) - xc; 219 | end 220 | 221 | % Display the reprojection error 222 | err = fun .* fun; 223 | err = sum(err(:)); 224 | disp(sqrt(err/N)); 225 | 226 | -------------------------------------------------------------------------------- /Step02_PreCalibration.m: -------------------------------------------------------------------------------- 1 | % function: 2 | % Step02_PreCalibration(camCount,dataAcqFile,preCalibResultsFile) 3 | % 4 | % Description: 5 | % Perform a pre-calibration of the extrinsic parameters of all the 6 | % Kinect cameras. 7 | % 8 | % Dependencies: 9 | % - function CostFunPreCalib: this function is the one we wish to 10 | % minimize. 11 | % - file 'calibParameters.mat' created with proj0_ServerMultiKinectCalib. 12 | % From this file we use the variables: 'dataDir', 'minFunType', 'pointsToConsider' 13 | % - file dataAcqFile created with proj01_ServerCapturePointsFromCalibObject.m 14 | % From this file we get the calibration camera points of the camera 15 | % we wish to calibrate i.e. cam2.camPoints, cam3.camPoints, etc. 16 | % 17 | % Inputs: 18 | % - camCount: Number of cameras to calibrate 19 | % - dataAcqFile: file containing the data from the acquisition step. 20 | % - preCalibResultsFile: name of the output file containing the results 21 | % of the pre-calibration 22 | % 23 | % Return: 24 | % Save the results on the preCalibResultsFile specified in the arguments 25 | % 26 | % Authors: 27 | % Diana M. Cordova 28 | % Juan R.
Terven 29 | % Date: 16-Jan-2016 30 | function Step02_PreCalibration(camCount,dataAcqFile,preCalibResultsFile) 31 | 32 | disp('Step 2: Pre-calibration'); 33 | 34 | % Estimate extrinsic parameters between camera 1 and camera 2 35 | disp('Estimate extrinsic parameters between camera 1 and camera 2'); 36 | [R1_2, t1_2, T1_2] = PreCalib(1,2,dataAcqFile); 37 | save(preCalibResultsFile,'R1_2','t1_2'); 38 | 39 | if camCount > 2 40 | % Estimate extrinsic parameters between camera 1 and camera 3 41 | disp('Estimate extrinsic parameters between camera 1 and camera 3'); 42 | [R1_3, t1_3, T1_3] = PreCalib(1,3,dataAcqFile); 43 | save(preCalibResultsFile,'R1_3','t1_3', '-append'); 44 | 45 | disp('Estimate extrinsic parameters between camera 2 and camera 3'); 46 | [R2_3, t2_3, T2_3] = PreCalib(2,3,dataAcqFile); 47 | save(preCalibResultsFile,'R2_3','t2_3', '-append'); 48 | end 49 | 50 | if camCount > 3 51 | % Estimate extrinsic parameters between camera 1 and camera 4 52 | disp('Estimate extrinsic parameters between camera 1 and camera 4'); 53 | [R1_4, t1_4, T1_4] = PreCalib(1,4,dataAcqFile); 54 | save(preCalibResultsFile,'R1_4','t1_4', '-append'); 55 | end -------------------------------------------------------------------------------- /Step03_Find3DMatches.m: -------------------------------------------------------------------------------- 1 | % function: 2 | % Find3DMatches 3 | % 4 | % Description: 5 | % Find 3D matches 6 | % 7 | % Dependencies: 8 | % 9 | % Inputs: 10 | % 11 | % Usage: 12 | % 13 | % Return: 14 | % 15 | % Authors: 16 | % Diana M. Cordova 17 | % Juan R. Terven 18 | % Date: 16-Jan-2016 19 | % 20 | function [cam2_1Matches,cam2_1depthProj,cam2_1colorProj] = Find3DMatches(pc1, pc2, T2_1, ... 21 | cam2DepthProj, cam2ColorProj, minDist3D) 22 | 23 | % k2 = Kin2('color','depth'); 24 | % 25 | % while true 26 | % validData = k2.updateKin2; 27 | % 28 | % if validData 29 | % break; 30 | % end 31 | % pause(0.03); 32 | % end 33 | 34 | 35 | % Transform cam2 points to cam1 in order to find matches by distance 36 | pc2 = pc2'; 37 | pc2h = [pc2; ones(1,size(pc2,2))]; 38 | pc2_1 = T2_1 \ pc2h; 39 | pc2_1 = pc2_1(1:end-1,1:end); 40 | pc2_1 = pc2_1'; 41 | 42 | % Find matching points between camera 1 and 2 43 | %disp('Searching for Matching Points between cameras'); 44 | [cam2_1Matches,cam2_1depthProj,cam2_1colorProj] = matching3DNN(pc1, pc2_1, cam2DepthProj, cam2ColorProj, minDist3D/1000); 45 | disp('Finished finding matching points'); 46 | 47 | % Transform the matches back to camera 2 and get its 2D projections 48 | % on the depth camera 49 | % cam2PCh = [cam2_1Matches'; ones(1,size(cam2_1Matches,1))]; 50 | % cam2PCh = T2_1 * cam2PCh; 51 | % cam2PCh = cam2PCh(1:end-1,1:end)'; 52 | % cam2_1depthProj = k2.mapCameraPoints2Depth(cam2PCh); 53 | 54 | % and the 2D projections on the color camera 55 | % cam2_1colorProj = k2.mapCameraPoints2Color(cam2PCh); 56 | 57 | % k2.delete; 58 | end -------------------------------------------------------------------------------- /Step03_Matching.m: -------------------------------------------------------------------------------- 1 | % function: 2 | % Step03_Matching(camCount,dataAcqFile,minDist3D,matchingResultsFile) 3 | % 4 | % Description: 5 | % Perform the point cloud matching step between all pairs of cameras. 6 | % 7 | % Dependencies: 8 | % - function Find3DMatches: performs the actual matching between a pair of 9 | % pointclouds.
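% (An added minimal sketch of the transform step performed inside
% Find3DMatches before the nearest-neighbor search: the second camera's
% n x 3 point cloud pc2 is made homogeneous and mapped into camera 1's
% frame with the 4 x 4 rigid transform T2_1.)
%
%   pc2h  = [pc2'; ones(1,size(pc2,1))];  % 4 x n homogeneous points
%   pc2_1 = T2_1 \ pc2h;                  % express the points in camera 1's frame
%   pc2_1 = pc2_1(1:3,:)';                % back to n x 3 for matching3DNN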
10 | % 11 | % Inputs: 12 | % - camCount: Number of cameras to calibrate 13 | % - dataAcqFile: file containing the data from the acquisition step. 14 | % (Note: the rigid transforms T1_2, T2_3 and T1_4 used below are 15 | % expected to be loaded from dataAcqFile.) 16 | % - minDist3D: matching distance in millimeters 17 | % - matchingResultsFile: output file containing the matching results. 18 | % 19 | % Return: 20 | % Save the results on the matchingResultsFile specified in the arguments 21 | % 22 | % Authors: 23 | % Diana M. Cordova 24 | % Juan R. Terven 25 | % Date: 16-Jan-2016 26 | function Step03_Matching(camCount,dataAcqFile,minDist3D,matchingResultsFile) 27 | load(dataAcqFile); 28 | 29 | disp('Step 3: 3D Pointcloud Matching'); 30 | 31 | % Finds the 3D matches between pointclouds 32 | % Generates a file called matchingResultsFile 33 | disp('Searching for matches between Cam1 and Cam2'); 34 | [cam2_1Matches,cam2_1depthProj,cam2_1colorProj] = Find3DMatches( ... 35 | pc1, pc2, T1_2, cam2.depthProj, cam2.colorProj, minDist3D); 36 | 37 | save(matchingResultsFile,'cam2_1Matches','cam2_1depthProj','cam2_1colorProj'); 38 | 39 | if camCount > 2 40 | disp('Searching for matches between Cam1 and Cam3'); 41 | [cam3_1Matches,cam3_1depthProj,cam3_1colorProj] = Find3DMatches( ... 42 | pc1, pc3, T1_3, cam3.depthProj, cam3.colorProj, minDist3D); 43 | save(matchingResultsFile,'cam3_1Matches', ... 44 | 'cam3_1depthProj','cam3_1colorProj','-append'); 45 | 46 | disp('Searching for matches between Cam2 and Cam3'); 47 | [cam3_2Matches,cam3_2depthProj,cam3_2colorProj] = Find3DMatches( ... 48 | pc2, pc3, T2_3, cam3.depthProj, cam3.colorProj, minDist3D); 49 | save(matchingResultsFile,'cam3_2Matches', ... 50 | 'cam3_2depthProj','cam3_2colorProj','-append'); 51 | end 52 | 53 | if camCount > 3 54 | disp('Searching for matches between Cam1 and Cam4'); 55 | [cam4_1Matches,cam4_1depthProj,cam4_1colorProj] = Find3DMatches( ... 56 | pc1, pc4, T1_4, cam4.depthProj, cam4.colorProj, minDist3D); 57 | save(matchingResultsFile,'cam4_1Matches', ... 58 | 'cam4_1depthProj','cam4_1colorProj','-append'); 59 | end -------------------------------------------------------------------------------- /Step04_IntrinsicParametersEstimation.m: -------------------------------------------------------------------------------- 1 | % function: 2 | % Step04_IntrinsicParametersEstimation(camCount,dataAcqFile,preCalibResultsFile,matchingResultsFile,initIntrinsicsFile) 3 | % 4 | % Description: 5 | % Estimates intrinsic parameters for all the cameras (depth and color 6 | % for each Kinect). 7 | % 8 | % Dependencies: 9 | % - function EstimateIntrins: estimates the camera intrinsics using the method from 10 | % Prince, Simon JD. Computer vision: models, learning, and inference. 11 | % Cambridge University Press, 2012. 12 | % 13 | % Inputs: 14 | % - camCount: Number of cameras to calibrate 15 | % - dataAcqFile: file containing the data from the acquisition step. 16 | % - preCalibResultsFile: name of the output file containing the results 17 | % of the pre-calibration 18 | % - matchingResultsFile: output file containing the matching results. 19 | % - initIntrinsicsFile: output file with the estimated intrinsic parameters. 20 | % 21 | % Return: 22 | % Save the results on the initIntrinsicsFile specified in the arguments. 23 | % 24 | % Authors: 25 | % Diana M. Cordova 26 | % Juan R.
Terven 27 | % Date: Feb-2016 28 | function Step04_IntrinsicParametersEstimation(camCount,dataAcqFile,preCalibResultsFile,matchingResultsFile,initIntrinsicsFile) 29 | % Initialize Intrinsics of depth cam1 using its pointcloud 30 | preIntrinsicsD1 = EstimateIntrins(dataAcqFile,matchingResultsFile,preCalibResultsFile,1, 'depth'); 31 | 32 | % Initialize Intrinsics of color cam1 using its pointcloud 33 | preIntrinsicsC1 = EstimateIntrins(dataAcqFile,matchingResultsFile,preCalibResultsFile,1, 'color'); 34 | 35 | save(initIntrinsicsFile,'preIntrinsicsD1','preIntrinsicsC1'); 36 | 37 | if camCount > 1 38 | % Initialize Intrinsics of depth cam2 using the pointcloud matches with cam1 39 | preIntrinsicsD2 = EstimateIntrins(dataAcqFile,matchingResultsFile,preCalibResultsFile,2, 'depth'); 40 | 41 | % Initialize Intrinsics of color cam2 using the pointcloud matches with cam1 42 | preIntrinsicsC2 = EstimateIntrins(dataAcqFile,matchingResultsFile,preCalibResultsFile,2, 'color'); 43 | 44 | save(initIntrinsicsFile,'preIntrinsicsD2','preIntrinsicsC2','-append'); 45 | end 46 | 47 | if camCount > 2 48 | % Initialize Intrinsics of depth cam3 using the pointcloud matches with cam1 49 | preIntrinsicsD3 = EstimateIntrins(dataAcqFile,matchingResultsFile,preCalibResultsFile,3, 'depth'); 50 | 51 | % Initialize Intrinsics of color cam3 using the pointcloud matches with cam1 52 | preIntrinsicsC3 = EstimateIntrins(dataAcqFile,matchingResultsFile,preCalibResultsFile,3, 'color'); 53 | 54 | save(initIntrinsicsFile,'preIntrinsicsD3','preIntrinsicsC3','-append'); 55 | end 56 | 57 | if camCount > 3 58 | % Initialize Intrinsics of depth cam4 using the pointcloud matches with cam1 59 | preIntrinsicsD4 = EstimateIntrins(dataAcqFile,matchingResultsFile,preCalibResultsFile,4, 'depth'); 60 | 61 | % Initialize Intrinsics of color cam4 using the pointcloud matches with cam1 62 | preIntrinsicsC4 = EstimateIntrins(dataAcqFile,matchingResultsFile,preCalibResultsFile,4, 'color'); 63 | 64 | save(initIntrinsicsFile,'preIntrinsicsD4','preIntrinsicsC4','-append'); 65 | end -------------------------------------------------------------------------------- /Step05_FinalCalibration.m: -------------------------------------------------------------------------------- 1 | % function: 2 | % Step05_FinalCalibration(camCount,preCalibResultsFile,initIntrinsicsFile,finalCalibResults) 3 | % 4 | % Description: 5 | % Perform the final calibration of all the cameras using a non-linear 6 | % optimization. 7 | % 8 | % Dependencies: 9 | % - function FinalCalibration: performs the calibration of a single 10 | % camera. 11 | % - file 'calibParameters.mat' created with proj0_ServerMultiKinectCalib. 12 | % From this file we use the variables: 'dataDir', 'minFunType', 'pointsToConsider' 13 | % 14 | % Inputs: 15 | % - camCount: Number of cameras to calibrate 16 | % - preCalibResultsFile: name of the output file containing the results 17 | % of the pre-calibration 18 | % - initIntrinsicsFile: output file with the estimated intrinsic parameters. 19 | % 20 | % Return: 21 | % Save the results on the finalCalibResults specified in the arguments 22 | % 23 | % Authors: 24 | % Diana M. Cordova 25 | % Juan R. Terven 26 | % Date: Feb-2016 27 | function Step05_FinalCalibration(camCount,preCalibResultsFile,initIntrinsicsFile,finalCalibResults) 28 | disp('Step 5: Final Joint Calibration'); 29 | 30 | % Calibrate camera 1 31 | disp('Calibrating Depth camera 1...') 32 | [result1, result2] = FinalCalibration(preCalibResultsFile, ...
33 | initIntrinsicsFile, 1, 'depth'); 34 | save(finalCalibResults,'-struct','result1'); % Save results to a .mat file 35 | 36 | disp('Calibrating Color camera 1...') 37 | [result1, result2] = FinalCalibration(preCalibResultsFile, ... 38 | initIntrinsicsFile, 1, 'color'); 39 | save(finalCalibResults,'-struct','result1','-append'); 40 | 41 | if camCount > 1 42 | % Calibrate camera 2 43 | disp('Calibrating Depth camera 2...') 44 | [result1, result2] = FinalCalibration(preCalibResultsFile, ... 45 | initIntrinsicsFile, 2, 'depth'); 46 | save(finalCalibResults,'-struct','result1','-append'); 47 | 48 | disp('Calibrating Color camera 2...') 49 | [result1, result2] = FinalCalibration(preCalibResultsFile, ... 50 | initIntrinsicsFile, 2, 'color'); 51 | save(finalCalibResults,'-struct','result1','-append'); 52 | end 53 | 54 | if camCount > 2 55 | % Calibrate camera 3 56 | disp('Calibrating Depth camera 3...') 57 | [result1, result2] = FinalCalibration(preCalibResultsFile, ... 58 | initIntrinsicsFile, 3, 'depth'); 59 | save(finalCalibResults,'-struct','result1','-append') 60 | 61 | disp('Calibrating Color camera 3...') 62 | [result1, result2] = FinalCalibration(preCalibResultsFile, ... 63 | initIntrinsicsFile, 3, 'color'); 64 | save(finalCalibResults,'-struct','result1','-append') 65 | end 66 | 67 | if camCount > 3 68 | % Calibrate camera 4 69 | disp('Calibrating Depth camera 4...') 70 | [result1, result2] = FinalCalibration(preCalibResultsFile, ... 71 | initIntrinsicsFile, 4, 'depth'); 72 | save(finalCalibResults,'-struct','result1','-append'); 73 | 74 | disp('Calibrating Color camera 4...') 75 | [result1, result2] = FinalCalibration(preCalibResultsFile, ... 76 | initIntrinsicsFile, 4, 'color'); 77 | save(finalCalibResults,'-struct','result1','-append') 78 | end -------------------------------------------------------------------------------- /TCPIPCommands.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jrterven/MultiKinCalib/6b797c9c5926ff4e45fde11d015bb7e95a6bf570/TCPIPCommands.mat -------------------------------------------------------------------------------- /TCPIPbroadcastCommand.m: -------------------------------------------------------------------------------- 1 | function ack = TCPIPbroadcastCommand(tcpCons, numCon, command, message) 2 | % If any of the commands fail, ack will be false 3 | ack = true; 4 | 5 | % Network Client 1: 6 | % Send command 7 | if numCon > 0 8 | if TCPIPsendCommand(tcpCons{1},command, ['client 1: ' message]) == false 9 | ack = false; 10 | return; 11 | end 12 | end 13 | 14 | % Network Client 2: 15 | % Send command 16 | if numCon > 1 17 | if TCPIPsendCommand(tcpCons{2},command, ['client 2: ' message]) == false 18 | ack = false; 19 | return; 20 | end 21 | end 22 | 23 | % Network Client 3: 24 | % Send command 25 | if numCon > 2 26 | if TCPIPsendCommand(tcpCons{3},command, ['client 3: ' message]) == false 27 | ack = false; 28 | return; 29 | end 30 | end 31 | end -------------------------------------------------------------------------------- /TCPIPgetResponses.m: -------------------------------------------------------------------------------- 1 | function resp = TCPIPgetResponses(tcpCons, numCon, errorMsg) 2 | % Camera Network Commands 3 | load('TCPIPCommands.mat'); 4 | 5 | resp = VALID_FRAME; 6 | 7 | if numCon > 0 8 | r1 = fread(tcpCons{1},1,'double'); 9 | if r1 == ERROR 10 | disp(['client 1:' errorMsg]) 11 | resp = ERROR; 12 | end 13 | end 14 | 15 | if numCon > 1 16 | r2 = fread(tcpCons{2},1,'double'); 17 |
if r2 == ERROR 18 | disp(['client 2:' errorMsg]) 19 | resp = ERROR; 20 | end 21 | end 22 | 23 | if numCon > 2 24 | r3 = fread(tcpCons{3},1,'double'); 25 | if r3 == ERROR 26 | disp(['client 3:' errorMsg]) 27 | resp = ERROR; 28 | end 29 | end 30 | end -------------------------------------------------------------------------------- /TCPIPsendCommand.m: -------------------------------------------------------------------------------- 1 | function ack = TCPIPsendCommand(connection,command, message) 2 | % Camera Network Commands 3 | load('TCPIPCommands.mat'); 4 | 5 | fwrite(connection,command,'double'); 6 | response = fread(connection,1,'double'); 7 | 8 | if response == ACK 9 | disp(message); 10 | ack = true; 11 | else 12 | disp('Communication error!') 13 | ack = false; 14 | end 15 | end -------------------------------------------------------------------------------- /dataAcq.fig: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jrterven/MultiKinCalib/6b797c9c5926ff4e45fde11d015bb7e95a6bf570/dataAcq.fig -------------------------------------------------------------------------------- /findPointAfromInfrared.m: -------------------------------------------------------------------------------- 1 | % Function: 2 | % findPointAfromInfrared 3 | % 4 | % Description: 5 | % Finds the nearest 3D world point to the marker. 6 | % Point A is the fixed point; we place reflective tape on the 7 | % ground near this point. So when detecting the red points, the nearest 8 | % point to this (refAw) will be point A. 9 | % 10 | % Dependencies: 11 | % Kin2 class 12 | % 13 | % 14 | % Inputs: 15 | % kinect: Kin2 object 16 | % 17 | % Usage: 18 | % This function is called at the beginning of the image acquisition for 19 | % calibration. This function returns the location of the infrared marker 20 | % (reflective tape) so that this point can be used to detect the 21 | % reference point on the calibration object. 22 | % 23 | % Returns: 24 | % refAw: position of the infrared marker in camera space (X,Y,Z). 25 | % refAc: position of the infrared marker in color space (x,y) 26 | % 27 | % Authors: 28 | % Diana M. Cordova 29 | % Juan R.
Terven 30 | % 31 | % Date: 05-June-2016 32 | 33 | function [refAw, refAc] = findPointAfromInfrared(kinect, infrared) 34 | imgBW = im2bw(infrared); 35 | % Remove connected components smaller than 5 pixels 36 | imgBW = bwareaopen(imgBW,5); 37 | 38 | % Calculate the centroid 39 | stat = regionprops(imgBW,'centroid'); 40 | 41 | refAw = zeros(1,3); 42 | refAc = zeros(1,2); 43 | 44 | if ~isempty(stat) 45 | refA = stat(1).Centroid; 46 | 47 | % Search for the nearest neighbor that can be mapped to 48 | % camera space 49 | movement = 1; % move 1 pixel 50 | n1 = refA; n2 = refA; n3 = refA; n4 = refA; 51 | for i=1:100 52 | % Move refA on the 4 neighbors 53 | n1(1) = n1(1) + movement; % move in positive x direction 54 | n2(1) = n2(1) - movement; % move in negative x direction 55 | n3(2) = n3(2) + movement; 56 | n4(2) = n4(2) - movement; 57 | 58 | ns = [n1;n2;n3;n4]; 59 | refAws = kinect.mapDepthPoints2Camera(ns); 60 | if ~isinf(refAws(1,1)) 61 | refAw = refAws(1,:); 62 | break; 63 | elseif ~isinf(refAws(2,1)) 64 | refAw = refAws(2,:); 65 | break; 66 | elseif ~isinf(refAws(3,1)) 67 | refAw = refAws(3,:); 68 | break; 69 | elseif ~isinf(refAws(4,1)) 70 | refAw = refAws(4,:); 71 | break; 72 | end 73 | 74 | movement = movement + 1; 75 | end 76 | 77 | refAc = kinect.mapCameraPoints2Color(refAw); 78 | %plot(refAc(1),refAc(2),'y*') 79 | 80 | end 81 | end % findPointAfromInfrared function -------------------------------------------------------------------------------- /getCalibDataFromClient.m: -------------------------------------------------------------------------------- 1 | function [camData, pc, depthPr, colorPr] = getCalibDataFromClient(tcpCons, camCount, ... 2 | camDataSize, pointcloudSize, depthProjSize, colorProjSize) 3 | % Load Camera Network Commands and port numbers 4 | load('TCPIPCommands.mat'); 5 | 6 | connections = camCount; 7 | 8 | camData = cell(camCount,1); 9 | pc = cell(camCount,1); 10 | 11 | if connections > 0 12 | fwrite(tcpCons{1},REQUEST,'double'); 13 | response = fread(tcpCons{1},1,'double'); % read ack 14 | 15 | disp('Reading cam data from Client1'); 16 | camraw = fread(tcpCons{1},prod(camDataSize),'double'); 17 | fwrite(tcpCons{1},ACK,'double'); 18 | camData{1} = reshape(camraw,camDataSize); 19 | 20 | disp('Reading pointcloud from Client1'); 21 | pcraw = fread(tcpCons{1},prod(pointcloudSize),'double'); 22 | fwrite(tcpCons{1},ACK,'double'); 23 | pc{1} = reshape(pcraw,pointcloudSize); 24 | 25 | disp('Reading depthProj from Client1'); 26 | depthraw = fread(tcpCons{1},prod(depthProjSize),'double'); 27 | fwrite(tcpCons{1},ACK,'double'); 28 | depthPr{1} = reshape(depthraw,depthProjSize); 29 | 30 | disp('Reading colorProj from Client1'); 31 | colorraw = fread(tcpCons{1},prod(colorProjSize),'double'); 32 | fwrite(tcpCons{1},ACK,'double'); 33 | colorPr{1} = reshape(colorraw,colorProjSize); 34 | end 35 | 36 | if connections > 1 37 | fwrite(tcpCons{2},REQUEST,'double'); 38 | response = fread(tcpCons{2},1,'double'); % read ack 39 | 40 | disp('Reading cam data from Client2'); 41 | camraw = fread(tcpCons{2},prod(camDataSize),'double'); 42 | fwrite(tcpCons{2},ACK,'double'); 43 | camData{2} = reshape(camraw,camDataSize); 44 | 45 | disp('Reading pointcloud from Client2'); 46 | pcraw = fread(tcpCons{2},prod(pointcloudSize),'double'); 47 | fwrite(tcpCons{2},ACK,'double'); 48 | pc{2} = reshape(pcraw,pointcloudSize); 49 | 50 | disp('Reading depthProj from Client2'); 51 | depthraw = fread(tcpCons{2},prod(depthProjSize),'double'); 52 | fwrite(tcpCons{2},ACK,'double'); 53 | depthPr{2} =
reshape(depthraw,depthProjSize); 54 | 55 | disp('Reading colorProj from Client2'); 56 | colorraw = fread(tcpCons{2},prod(colorProjSize),'double'); 57 | fwrite(tcpCons{2},ACK,'double'); 58 | colorPr{2} = reshape(colorraw,colorProjSize); 59 | end 60 | 61 | if connections > 2 62 | fwrite(tcpCons{3},REQUEST,'double'); 63 | response = fread(tcpCons{3},1,'double'); % read ack 64 | 65 | disp('Reading cam data from Client3'); 66 | camraw = fread(tcpCons{3},prod(camDataSize),'double'); 67 | fwrite(tcpCons{3},ACK,'double'); 68 | camData{3} = reshape(camraw,camDataSize); 69 | 70 | disp('Reading pointcloud from Client3'); 71 | pcraw = fread(tcpCons{3},prod(pointcloudSize),'double'); 72 | fwrite(tcpCons{3},ACK,'double'); 73 | pc{3} = reshape(pcraw,pointcloudSize); 74 | 75 | disp('Reading depthProj from Client3'); 76 | depthraw = fread(tcpCons{3},prod(depthProjSize),'double'); 77 | fwrite(tcpCons{3},ACK,'double'); 78 | depthPr{3} = reshape(depthraw,depthProjSize); 79 | 80 | disp('Reading colorProj from Client3'); 81 | colorraw = fread(tcpCons{3},prod(colorProjSize),'double'); 82 | fwrite(tcpCons{3},ACK,'double'); 83 | colorPr{3} = reshape(colorraw,colorProjSize); 84 | end 85 | end -------------------------------------------------------------------------------- /initialization.fig: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jrterven/MultiKinCalib/6b797c9c5926ff4e45fde11d015bb7e95a6bf570/initialization.fig -------------------------------------------------------------------------------- /knnsearch.m: -------------------------------------------------------------------------------- 1 | function [idx,D]=knnsearch(varargin) 2 | % KNNSEARCH Linear k-nearest neighbor (KNN) search 3 | % IDX = knnsearch(Q,R,K) searches the reference data set R (n x d array 4 | % representing n points in a d-dimensional space) to find the k-nearest 5 | % neighbors of each query point represented by each row of Q (m x d array). 6 | % The results are stored in the (m x K) index array, IDX. 7 | % 8 | % IDX = knnsearch(Q,R) takes the default value K=1. 9 | % 10 | % IDX = knnsearch(Q) or IDX = knnsearch(Q,[],K) does the search for R = Q. 11 | % 12 | % Rationale 13 | % Linear KNN search is the simplest approach to KNN. The search is based on 14 | % calculation of all distances. Therefore, it is normally believed only 15 | % suitable for small data sets. However, other advanced approaches, such as 16 | % kd-tree and delaunay, become inefficient when d is large compared to the 17 | % number of data points. On the other hand, the linear search in MATLAB is 18 | % relatively insensitive to d due to the vectorization. In this code, the 19 | % efficiency of linear search is further improved by using the JIT 20 | % acceleration of MATLAB. Numerical example shows that its performance is 21 | % comparable with the kd-tree algorithm in mex.
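% Example (a small added sketch showing the optional second output D,
% which holds the Euclidean distances to the matched neighbors):
%{
R = rand(50,3);
Q = rand(4,3);
[idx,D] = knnsearch(Q,R);  % nearest neighbor in R for each row of Q
disp([idx D]);             % index and distance, one row per query point
%}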
22 | % 23 | % See also, kdtree, nnsearch, delaunay, dsearch 24 | 25 | % By Yi Cao at Cranfield University on 25 March 2008 26 | 27 | % Example 1: small data sets 28 | %{ 29 | R=randn(100,2); 30 | Q=randn(3,2); 31 | idx=knnsearch(Q,R); 32 | plot(R(:,1),R(:,2),'b.',Q(:,1),Q(:,2),'ro',R(idx,1),R(idx,2),'gx'); 33 | %} 34 | 35 | % Example 2: ten nearest points to [0 0] 36 | %{ 37 | R=rand(100,2); 38 | Q=[0 0]; 39 | K=10; 40 | idx=knnsearch(Q,R,10); 41 | r=max(sqrt(sum(R(idx,:).^2,2))); 42 | theta=0:0.01:pi/2; 43 | x=r*cos(theta); 44 | y=r*sin(theta); 45 | plot(R(:,1),R(:,2),'b.',Q(:,1),Q(:,2),'co',R(idx,1),R(idx,2),'gx',x,y,'r-','linewidth',2); 46 | %} 47 | 48 | % Example 3: cputime comparison with delaunay+dsearch I, a few to look up 49 | %{ 50 | R=randn(10000,4); 51 | Q=randn(500,4); 52 | t0=cputime; 53 | idx=knnsearch(Q,R); 54 | t1=cputime; 55 | T=delaunayn(R); 56 | idx1=dsearchn(R,T,Q); 57 | t2=cputime; 58 | fprintf('Are both indices the same? %d\n',isequal(idx,idx1)); 59 | fprintf('CPU time for knnsearch = %g\n',t1-t0); 60 | fprintf('CPU time for delaunay = %g\n',t2-t1); 61 | %} 62 | % Example 4: cputime comparison with delaunay+dsearch II, lots to look up 63 | %{ 64 | Q=randn(10000,4); 65 | R=randn(500,4); 66 | t0=cputime; 67 | idx=knnsearch(Q,R); 68 | t1=cputime; 69 | T=delaunayn(R); 70 | idx1=dsearchn(R,T,Q); 71 | t2=cputime; 72 | fprintf('Are both indices the same? %d\n',isequal(idx,idx1)); 73 | fprintf('CPU time for knnsearch = %g\n',t1-t0); 74 | fprintf('CPU time for delaunay = %g\n',t2-t1); 75 | %} 76 | % Example 5: cputime comparison with kd-tree by Steven Michael (mex file) 77 | % kd-tree by Steven Michael 78 | %{ 79 | Q=randn(10000,10); 80 | R=randn(500,10); 81 | t0=cputime; 82 | idx=knnsearch(Q,R); 83 | t1=cputime; 84 | tree=kdtree(R); 85 | idx1=kdtree_closestpoint(tree,Q); 86 | t2=cputime; 87 | fprintf('Are both indices the same?
%d\n',isequal(idx,idx1)); 88 | fprintf('CPU time for knnsearch = %g\n',t1-t0); 89 | fprintf('CPU time for kd-tree = %g\n',t2-t1); 90 | %} 91 | 92 | % Check inputs 93 | [Q,R,K,fident] = parseinputs(varargin{:}); 94 | 95 | % Check outputs 96 | error(nargoutchk(0,2,nargout)); 97 | 98 | % C2 = sum(C.*C,2)'; 99 | [N,M] = size(Q); 100 | L=size(R,1); 101 | idx = zeros(N,K); 102 | D = idx; 103 | 104 | if K==1 105 | % Loop for each query point 106 | for k=1:N 107 | d=zeros(L,1); 108 | for t=1:M 109 | d=d+(R(:,t)-Q(k,t)).^2; 110 | end 111 | if fident 112 | d(k)=inf; 113 | end 114 | [D(k),idx(k)]=min(d); 115 | end 116 | else 117 | for k=1:N 118 | d=zeros(L,1); 119 | for t=1:M 120 | d=d+(R(:,t)-Q(k,t)).^2; 121 | end 122 | if fident 123 | d(k)=inf; 124 | end 125 | [s,t]=sort(d); 126 | idx(k,:)=t(1:K); 127 | D(k,:)=s(1:K); 128 | end 129 | end 130 | if nargout>1 131 | D=sqrt(D); 132 | end 133 | 134 | function [Q,R,K,fident] = parseinputs(varargin) 135 | % Check input and output 136 | error(nargchk(1,3,nargin)); 137 | 138 | Q=varargin{1}; 139 | 140 | if nargin<2 141 | R=Q; 142 | fident = true; 143 | else 144 | fident = false; 145 | R=varargin{2}; 146 | end 147 | 148 | if isempty(R) 149 | fident = true; 150 | R=Q; 151 | end 152 | 153 | if ~fident 154 | fident = isequal(Q,R); 155 | end 156 | 157 | if nargin<3 158 | K=1; 159 | else 160 | K=varargin{3}; 161 | end 162 | -------------------------------------------------------------------------------- /main.fig: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jrterven/MultiKinCalib/6b797c9c5926ff4e45fde11d015bb7e95a6bf570/main.fig -------------------------------------------------------------------------------- /main.m: -------------------------------------------------------------------------------- 1 | function varargout = main(varargin) 2 | % MAIN MATLAB code for main.fig 3 | % MAIN, by itself, creates a new MAIN or raises the existing 4 | % singleton*. 5 | % 6 | % H = MAIN returns the handle to a new MAIN or the handle to 7 | % the existing singleton*. 8 | % 9 | % MAIN('CALLBACK',hObject,eventData,handles,...) calls the local 10 | % function named CALLBACK in MAIN.M with the given input arguments. 11 | % 12 | % MAIN('Property','Value',...) creates a new MAIN or raises the 13 | % existing singleton*. Starting from the left, property value pairs are 14 | % applied to the GUI before main_OpeningFcn gets called. An 15 | % unrecognized property name or invalid value makes property application 16 | % stop. All inputs are passed to main_OpeningFcn via varargin. 17 | % 18 | % *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one 19 | % instance to run (singleton)". 20 | % 21 | % See also: GUIDE, GUIDATA, GUIHANDLES 22 | 23 | % Edit the above text to modify the response to help main 24 | 25 | % Last Modified by GUIDE v2.5 18-Aug-2016 11:21:17 26 | 27 | % Begin initialization code - DO NOT EDIT 28 | gui_Singleton = 1; 29 | gui_State = struct('gui_Name', mfilename, ... 30 | 'gui_Singleton', gui_Singleton, ... 31 | 'gui_OpeningFcn', @main_OpeningFcn, ... 32 | 'gui_OutputFcn', @main_OutputFcn, ... 33 | 'gui_LayoutFcn', [] , ...
34 |                    'gui_Callback',   []);
35 | if nargin && ischar(varargin{1})
36 |     gui_State.gui_Callback = str2func(varargin{1});
37 | end
38 | 
39 | if nargout
40 |     [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:});
41 | else
42 |     gui_mainfcn(gui_State, varargin{:});
43 | end
44 | % End initialization code - DO NOT EDIT
45 | 
46 | % --- Executes just before main is made visible.
47 | function main_OpeningFcn(hObject, eventdata, handles, varargin)
48 | % This function has no output args, see OutputFcn.
49 | % hObject    handle to figure
50 | % eventdata  reserved - to be defined in a future version of MATLAB
51 | % handles    structure with handles and user data (see GUIDATA)
52 | % varargin   command line arguments to main (see VARARGIN)
53 | 
54 | % Choose default command line output for main
55 | handles.output = hObject;
56 | 
57 | % Update handles structure
58 | guidata(hObject, handles);
59 | 
60 | % UIWAIT makes main wait for user response (see UIRESUME)
61 | % uiwait(handles.figure1);
62 | 
63 | displaySetupParams(handles);
64 | 
65 | % --- Outputs from this function are returned to the command line.
66 | function varargout = main_OutputFcn(hObject, eventdata, handles)
67 | % varargout  cell array for returning output args (see VARARGOUT);
68 | % hObject    handle to figure
69 | % eventdata  reserved - to be defined in a future version of MATLAB
70 | % handles    structure with handles and user data (see GUIDATA)
71 | 
72 | % Get default command line output from handles structure
73 | varargout{1} = handles.output;
74 | 
75 | 
76 | % --- Executes on button press in buttonSetup.
77 | function buttonSetup_Callback(hObject, eventdata, handles)
78 | % hObject    handle to buttonSetup (see GCBO)
79 | % eventdata  reserved - to be defined in a future version of MATLAB
80 | % handles    structure with handles and user data (see GUIDATA)
81 | initialization
82 | 
83 | % --- Executes on button press in buttonDataAcquisition.
84 | function buttonDataAcquisition_Callback(hObject, eventdata, handles)
85 | % hObject    handle to buttonDataAcquisition (see GCBO)
86 | % eventdata  reserved - to be defined in a future version of MATLAB
87 | % handles    structure with handles and user data (see GUIDATA)
88 | 
89 | dataAcq
90 | 
91 | % --- Executes on button press in buttonPreCalibration.
92 | function buttonPreCalibration_Callback(hObject, eventdata, handles)
93 | % hObject    handle to buttonPreCalibration (see GCBO)
94 | % eventdata  reserved - to be defined in a future version of MATLAB
95 | % handles    structure with handles and user data (see GUIDATA)
96 | Step02_PreCalibration(camCount,dataAcqFile,preCalibResultsFile)
97 | 
98 | % --- Executes on button press in buttonMatching.
99 | function buttonMatching_Callback(hObject, eventdata, handles)
100 | % hObject    handle to buttonMatching (see GCBO)
101 | % eventdata  reserved - to be defined in a future version of MATLAB
102 | % handles    structure with handles and user data (see GUIDATA)
103 | Step03_Matching(camCount,dataAcqFile,minDist3D,matchingResultsFile)
104 | 
105 | % --- Executes on button press in buttonIntrinsicInit.
106 | function buttonIntrinsicInit_Callback(hObject, eventdata, handles)
107 | % hObject    handle to buttonIntrinsicInit (see GCBO)
108 | % eventdata  reserved - to be defined in a future version of MATLAB
109 | % handles    structure with handles and user data (see GUIDATA)
110 | Step04_IntrinsicParametersEstimation(camCount,dataAcqFile, ...
111 |     preCalibResultsFile,matchingResultsFile,initIntrinsicsFile)
112 | 
113 | % --- Executes on button press in buttonNonLinearOptim.
114 | function buttonNonLinearOptim_Callback(hObject, eventdata, handles)
115 | % hObject    handle to buttonNonLinearOptim (see GCBO)
116 | % eventdata  reserved - to be defined in a future version of MATLAB
117 | % handles    structure with handles and user data (see GUIDATA)
118 | Step05_FinalCalibration(camCount,preCalibResultsFile,initIntrinsicsFile,finalCalibResults)
119 | 
120 | % --- Executes on button press in buttonExit.
121 | function buttonExit_Callback(hObject, eventdata, handles)
122 | % hObject    handle to buttonExit (see GCBO)
123 | % eventdata  reserved - to be defined in a future version of MATLAB
124 | % handles    structure with handles and user data (see GUIDATA)
125 | close all
126 | 
127 | function setupData = getSetupData()
128 | role = getappdata(0,'role');
129 | 
130 | % If server data
131 | if strcmp(role,'server')
132 |     camCount = getappdata(0,'clientsCount') + 1;
133 |     dataDir = getappdata(0,'dataDir');
134 |     countImagesToSave = getappdata(0,'countImagesToSave');
135 |     minDist3D = getappdata(0,'minDist3D');
136 |     withSkew = getappdata(0,'withSkew');
137 |     distortRad = getappdata(0,'distortRad');
138 |     distortTan = getappdata(0,'distortTan');
139 |     pointsOnStick = getappdata(0,'pointsOnStick');
140 |     sizeStick = getappdata(0,'sizeStick');
141 | 
142 |     setupData = struct('role',role,'camCount',camCount, ...
143 |         'dataDir',dataDir,'countImagesToSave',countImagesToSave, ...
144 |         'minDist3D',minDist3D,'withSkew',withSkew, ...
145 |         'distortRad',distortRad,'distortTan',distortTan, ...
146 |         'pointsOnStick',pointsOnStick,'sizeStick',sizeStick);
147 | % If client data
148 | else
149 |     clientId = getappdata(0,'clientId');
150 |     serverIP = getappdata(0,'serverIP');
151 | 
152 |     setupData = struct('role',role,'clientId',clientId,'serverIP',serverIP);
153 | end
154 | 
155 | function displaySetupParams(handles)
156 | 
157 | setupDataAvail = getappdata(0,'setupDataAvail');
158 | 
159 | % If no setup data is available, use and store these defaults
160 | if ~setupDataAvail
161 |     role = 'server';
162 |     camCount = 2;
163 |     dataDir = strrep(pwd,'\','/');
164 |     countImagesToSave = 10;
165 |     minDist3D = 2;
166 |     withSkew = true;
167 |     distortRad = 0;
168 |     distortTan = false;
169 |     pointsOnStick = 3;
170 |     sizeStick = 30;
171 | 
172 |     % Save data to root directory
173 |     setappdata(0,'role',role);
174 |     setappdata(0,'camCount',camCount);
175 |     setappdata(0,'dataDir',dataDir);
176 |     setappdata(0,'countImagesToSave',countImagesToSave);
177 |     setappdata(0,'minDist3D',minDist3D);
178 |     setappdata(0,'withSkew',withSkew);
179 |     setappdata(0,'distortRad',distortRad);
180 |     setappdata(0,'distortTan',distortTan);
181 |     setappdata(0,'pointsOnStick',pointsOnStick);
182 |     setappdata(0,'sizeStick',sizeStick);
183 | 
184 |     [~, name] = fileparts(dataDir);
185 |     dataDirToShow = name;
186 | 
187 |     sd = struct('role',role,'camCount',camCount, ...
188 |         'dataDir',dataDirToShow,'countImagesToSave',countImagesToSave, ...
189 |         'minDist3D',minDist3D,'withSkew',withSkew, ...
190 |         'distortRad',distortRad,'distortTan',distortTan, ...
191 |         'pointsOnStick',pointsOnStick,'sizeStick',sizeStick);
192 | else
193 |     sd = getSetupData;
194 | end
195 | 
196 | % If server
197 | msg = '';
198 | if strcmp(sd.role,'server')
199 |     [~, name] = fileparts(sd.dataDir);
200 |     dataDirToShow = name;
201 |     msg = sprintf(['Computer Role: ' sd.role '\n' ...
202 |         'Cameras: ' num2str(sd.camCount) '\n' ...
203 |         'Images to save: ' num2str(sd.countImagesToSave) '\n' ...
204 |         'Output dir: ' dataDirToShow '\n' ...
205 |         'Matching distance: ' num2str(sd.minDist3D) '\n' ...
206 |         'Skew: ' logical2strYN(sd.withSkew) '\n' ...
207 |         'Radial dist coeff: ' num2str(sd.distortRad) '\n' ...
208 |         'Tangential dist: ' logical2strYN(sd.distortTan) '\n' ...
209 |         'Points on calib object: ' num2str(sd.pointsOnStick) '\n' ...
210 |         'Calib object length: ' num2str(sd.sizeStick) 'cm\n']);
211 | % if client
212 | else
213 |     msg = sprintf(['Computer Role: ' sd.role '\n' ...
214 |         'Client Id: ' num2str(sd.clientId) '\n' ...
215 |         'Server IP: ' sd.serverIP '\n']);
216 | end
217 | 
218 | set(handles.textSetupParams,'string',msg);
219 | 
220 | function str = logical2strYN(l)
221 | if l
222 |     str = 'yes';
223 | else
224 |     str = 'no';
225 | end
226 | 
227 | 
228 | % --- Executes during object creation, after setting all properties.
229 | function uipanel1_CreateFcn(hObject, eventdata, handles)
230 | % hObject    handle to uipanel1 (see GCBO)
231 | % eventdata  reserved - to be defined in a future version of MATLAB
232 | % handles    empty - handles not created until after all CreateFcns called
233 | 
234 | 
235 | % --- Executes during object creation, after setting all properties.
236 | function figure1_CreateFcn(hObject, eventdata, handles)
237 | % hObject    handle to figure1 (see GCBO)
238 | % eventdata  reserved - to be defined in a future version of MATLAB
239 | % handles    empty - handles not created until after all CreateFcns called
240 | % Initialize setupDataAvailable variable
241 | setappdata(0,'setupDataAvail',false);
242 | 
243 | 
244 | % --- Executes on button press in buttonPointCloudVis.
245 | function buttonPointCloudVis_Callback(hObject, eventdata, handles)
246 | % hObject    handle to buttonPointCloudVis (see GCBO)
247 | % eventdata  reserved - to be defined in a future version of MATLAB
248 | % handles    structure with handles and user data (see GUIDATA)
249 | 
-------------------------------------------------------------------------------- /matching3DNN.m: --------------------------------------------------------------------------------
1 | % Function:
2 | %   matching3DNN
3 | %
4 | % Description:
5 | %   Given two input pointclouds (cam1PC, cam2PC), finds the matching points.
6 | %   Matching points are searched using 1-Nearest Neighbor with a threshold
7 | %   value of epsilon, in the same units as the point clouds (meters).
8 | %
9 | % Usage:
10 | %   [cam2_1MatchedPoints, matchedDepthProj, matchedColorProj] = ...
11 | %       matching3DNN(cam1PC, cam2PC, cam2DepthProj, cam2ColorProj, epsilon);
12 | % Params:
13 | %   cam1PC : Pointcloud from camera 1 in n x 3
14 | %   cam2PC : Pointcloud from camera 2 in n x 3, with its n x 2 depth and
15 | %            color projections cam2DepthProj and cam2ColorProj
16 | %   epsilon: Max distance between corresponding points in meters
17 | %
18 | % Return:
19 | %   cam2_1MatchedPoints : Matched points of camera 2 in m x 3
20 | %   matchedDepthProj, matchedColorProj : projections of the matched
21 | %            points in depth and color space, each m x 2
22 | % Authors:
23 | %   Diana M. Cordova
24 | %   Juan R. Terven
25 | %
26 | % Citation:
27 | %   Put the paper here!
28 | %
29 | % Date: 09-Jan-2016
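% Example: a minimal usage sketch. The synthetic point clouds, the fake
% projections, and the 5 mm threshold below are illustrative stand-ins,
% not project data:
%{
pc1 = rand(1000,3);                  % point cloud from camera 1 (meters)
pc2 = pc1 + 0.002*randn(1000,3);     % same scene seen from camera 2, with noise
dProj = rand(1000,2);                % stand-in depth-image projections of pc2
cProj = rand(1000,2);                % stand-in color-image projections of pc2
[matched, mD, mC] = matching3DNN(pc1, pc2, dProj, cProj, 0.005);
fprintf('%d of %d points matched\n', size(matched,1), size(pc2,1));
%}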
30 | function [cam2_1MatchedPoints, matchedDepthProj, matchedColorProj] = matching3DNN( ...
31 |                     cam1PC, cam2PC, cam2DepthProj, cam2ColorProj, epsilon)
32 | 
33 | % Remove inf and NaN values from input pointclouds
34 | cam1PCvalidRows = ~any( isnan( cam1PC ) | isinf( cam1PC ), 2 );
35 | cam2PCvalidRows = ~any( isnan( cam2PC ) | isinf( cam2PC ), 2 );
36 | 
37 | cam1PC = cam1PC(cam1PCvalidRows,:);
38 | cam2PC = cam2PC(cam2PCvalidRows,:);
39 | cam2DepthProj = cam2DepthProj(cam2PCvalidRows,:);
40 | cam2ColorProj = cam2ColorProj(cam2PCvalidRows,:);
41 | 
42 | % VERSION 3: Using knnsearch from
43 | % http://www.mathworks.com/matlabcentral/fileexchange/19345-efficient-k-nearest-neighbor-search-using-jit
44 | [idx, D] = knnsearch(cam1PC,cam2PC);
45 | % idx(i) is the index of the cam2PC point nearest to cam1PC(i,:), and
46 | % D(i) is the corresponding distance
47 | % Keep the matches with distance less than epsilon
48 | matchedIdx = idx(D <= epsilon);
49 | cam2_1MatchedPoints = cam2PC(matchedIdx,:);
50 | matchedDepthProj = cam2DepthProj(matchedIdx,:);
51 | matchedColorProj = cam2ColorProj(matchedIdx,:);
52 | 
53 | % VERSION 2: Using the pdist2 MATLAB function, partitioning the pointclouds
54 | % into 100 parts to limit memory use
55 | % Brute search for the nearest point to each point from cam1PC in cam2PC
56 | % iout = 1;
57 | %
58 | % elemPerPartition = floor(size(cam1PC,1)/100);
59 | %
60 | % for i1=0:99
61 | %     idx = uint32(floor(i1*elemPerPartition + 1:elemPerPartition*(i1+1)));
62 | %     cam1PCpart = cam1PC(idx,:);
63 | %     D = pdist2(cam1PCpart, cam2PC);
64 | %     [r,c] = find(D <= epsilon);
-------------------------------------------------------------------------------- /serverGetData.m: --------------------------------------------------------------------------------
108 |         if CON_NUM > 1
109 |             cam2 = struct('camPoints',camPoints{1}, ...
110 |                 'pointcloud',pc{1}, 'depthProj',depthPr{1}, ...
111 |                 'colorProj',colorPr{1});
112 |             camData{2} = cam2;
113 |         end
114 |         if CON_NUM > 2
115 |             cam3 = struct('camPoints',camPoints{2}, ...
116 |                 'pointcloud',pc{2}, 'depthProj',depthPr{2}, ...
117 |                 'colorProj',colorPr{2});
118 |             camData{3} = cam3;
119 |         end
120 |         if CON_NUM > 3
121 |             cam4 = struct('camPoints',camPoints{3}, ...
122 |                 'pointcloud',pc{3},'depthProj',depthPr{3}, ...
123 |                 'colorProj',colorPr{3});
124 |             camData{4} = cam4;
125 |         end
126 |         end
127 |     else
128 |         dataSaved = false;
129 |         return;
130 |     end
131 | else % if some client was not able to capture data
132 |     dataSaved = false;
133 |     return;
134 | end
135 | end
-------------------------------------------------------------------------------- /setup1.mat: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/jrterven/MultiKinCalib/6b797c9c5926ff4e45fde11d015bb7e95a6bf570/setup1.mat
-------------------------------------------------------------------------------- /trackCalibPoints.m: --------------------------------------------------------------------------------
1 | % Function:
2 | %   trackCalibPoints
3 | %
4 | % Description:
5 | %   Searches for three or six red points on a stick of length SIZE_AF. It uses
6 | %   color space and camera space to detect the points in the color image
7 | %   together with their 3D coordinates, in order to verify that the points lie
8 | %   on a stick and that the dimensions of the stick are correct.
9 | %
10 | %    a  b  c  d  e  f
11 | %    o--o--o--o--o--o
12 | %
13 | % Dependencies:
14 | %   Kin2 class
15 | %
16 | % Inputs:
17 | %   kinect: Kin2 object
18 | %   colorFrame: color camera frame
19 | %   a_refw: Infrared marker position. Used to find point A
20 | %   pointsOnStick: number of points on the stick (3 or 6); SIZE_AF: stick length
21 | %
22 | % Usage:
23 | %   This function is called during the calibration images acquisition step.
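%   Example: a minimal sketch of a standalone call. The Kin2 setup and the
%   marker position a_refw below are illustrative assumptions (in this
%   project a_refw comes from findPointAfromInfrared):
%{
k2 = Kin2('color','depth','infrared');
if k2.updateData
    colorFrame = k2.getColor;
    imshow(colorFrame);           % the function plots onto the current axes
    a_refw = [0.1 0.2 1.5];       % hypothetical marker position in camera space (m)
    [validBalls, points] = trackCalibPoints(k2, colorFrame, 6, a_refw, 1.5);
end
%}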
24 | %
25 | % Returns:
26 | %   validBalls: flag indicating a successful detection
27 | %   points: pointsOnStick x 2 vector of 2D ball positions in color space
28 | %
29 | % Author:
30 | %   Juan R. Terven
31 | % Date: 15-Jan-2016
32 | 
33 | function [validBalls, points] = trackCalibPoints(kinect, colorFrame, ...
34 |     pointsOnStick, a_refw, SIZE_AF)
35 | 
36 | points = zeros(pointsOnStick,2);
37 | validBalls = false;
38 | 
39 | % To track the red objects, subtract the grayscale image
40 | % from the red channel so that only the red components
41 | % of the image remain.
42 | diff_im = imsubtract(colorFrame(:,:,1), rgb2gray(colorFrame));
43 | 
44 | % Use a median filter to filter out noise
45 | diff_im = medfilt2(diff_im, [3 3]);
46 | % Convert the resulting grayscale image into a binary image.
47 | diff_im = im2bw(diff_im,0.18);
48 | 
49 | % Remove all connected components smaller than 30 pixels
50 | diff_im = bwareaopen(diff_im,30);
51 | 
52 | % Label all the connected components in the image.
53 | bw = bwlabel(diff_im, 8);
54 | 
55 | % Here we do the image blob analysis.
56 | % We get a set of properties for each labeled region.
57 | stats = regionprops(bw, 'BoundingBox', 'Centroid');
58 | numObjects = length(stats);
59 | 
60 | temp = zeros(numObjects,2);
61 | 
62 | for i=1:numObjects
63 |     temp(i,:) = stats(i).Centroid;
64 | end
65 | 
66 | if ~isempty(temp)
67 |     hold on
68 |     plot(temp(:,1),temp(:,2),'k*', 'MarkerSize',10)
69 |     hold off
70 | end
71 | 
72 | % Continue if it found at least pointsOnStick objects
73 | if numObjects >= pointsOnStick
74 |     %disp(['numObjects:' num2str(numObjects)]);
75 | 
76 |     % Map these points from color space to camera space
77 |     wtemp = kinect.mapColorPoints2Camera(temp);
78 | 
79 |     % Calculate the distance from each point (in camera coordinates)
80 |     % to the reference point (extracted from the infrared image)
81 |     dist_2_Aw = zeros(numObjects,1);
82 | 
83 |     for i=1:numObjects
84 |         dist_2_Aw(i) = sqrt((a_refw(1)-wtemp(i,1))^2 + ...
85 |             (a_refw(2)-wtemp(i,2))^2 + ...
86 |             (a_refw(3)-wtemp(i,3))^2);
87 |     end
88 | 
89 |     % Find a,b,c,d,e,f in order where a is the closest point to a_refw,
90 |     % then b, and so on
91 |     [~, idx] = sort(dist_2_Aw);
92 |     points = temp(idx,:);
93 |     pointsw = wtemp(idx,:);
94 |     points = points(1:pointsOnStick,:); % select only the first pointsOnStick
95 | 
96 |     % Plot the possible balls in yellow
97 |     hold on
98 |     plot(points(:,1),points(:,2),'y*', 'MarkerSize',15)
99 |     hold off
100 | 
101 |     % Check that all the points lie on the line between a and f
102 |     ai = double(points(1,:)); % point a in the image (ai)
103 |     bi = double(points(2,:));
104 |     ci = double(points(3,:));
105 | 
106 |     if pointsOnStick == 6
107 |         di = double(points(4,:));
108 |         ei = double(points(5,:));
109 |         fi = double(points(6,:));
110 |     end
111 | 
112 |     % For this we calculate the slope of the line
113 |     if pointsOnStick == 3
114 |         m = (ci(2)-ai(2))/(ci(1)-ai(1));
115 |     elseif pointsOnStick == 6
116 |         m = (fi(2)-ai(2))/(fi(1)-ai(1));
117 |     end
118 | 
119 |     % given the point-slope equation of a line
120 |     % y - y1 = m(x - x1)
121 |     % (x1, y1) will be the coordinates of the A extreme of the line
122 |     % and we will calculate the y component of b,c,d,e
123 |     yb = m * (bi(1) - ai(1)) + ai(2);
124 | 
125 |     if pointsOnStick == 6
126 |         yc = m * (ci(1) - ai(1)) + ai(2);
127 |         yd = m * (di(1) - ai(1)) + ai(2);
128 |         ye = m * (ei(1) - ai(1)) + ai(2);
129 |     end
130 | 
131 |     % The calculated y components of the points (yb, yc, yd, ye) must be
132 |     % equal (ideally) to the actual y components of the points (bi(2),
133 |     % ci(2), di(2) and ei(2)).
134 |     % Allow alpha pixels of error
135 |     alpha = 20;
136 |     if pointsOnStick == 3
137 |         if abs(yb-bi(2)) > alpha
138 |             disp('Points are not on a line')
139 |             return;
140 |         end
141 |     elseif pointsOnStick == 6
142 |         if abs(yb-bi(2)) > alpha || abs(yc-ci(2)) > alpha || ...
143 |                 abs(yd-di(2)) > alpha || abs(ye-ei(2)) > alpha
144 |             disp('Points are not on a line')
145 |             return;
146 |         end
147 |     end
148 | 
149 |     % Validate the coordinates of the points by size using their
150 |     % camera-space values (X,Y,Z)
151 |     sizeAB = norm(pointsw(1,:) - pointsw(2,:));
152 |     sizeAC = norm(pointsw(1,:) - pointsw(3,:));
153 | 
154 |     if pointsOnStick == 6
155 |         sizeAD = norm(pointsw(1,:) - pointsw(4,:));
156 |         sizeAE = norm(pointsw(1,:) - pointsw(5,:));
157 |         sizeAF = norm(pointsw(1,:) - pointsw(6,:));
158 |     end
159 | 
160 |     if pointsOnStick == 3
161 |         if sizeAC > (SIZE_AF - 0.2) && sizeAC < (SIZE_AF + 0.2) && ...
162 |                 sizeAB < sizeAC
163 |             validBalls = true;
164 |             disp('VALID balls')
165 |         end
166 |     elseif pointsOnStick == 6
167 |         if sizeAF > (SIZE_AF - 0.2) && sizeAF < (SIZE_AF + 0.2) && ...
168 |                 sizeAB < sizeAF && sizeAC < sizeAF && sizeAD < sizeAF && ...
169 |                 sizeAE < sizeAF
170 |             validBalls = true;
171 |             disp('VALID balls')
172 |         end
173 |     end
174 | 
175 |     %disp(['Distances btw points: ' num2str(sizeAB) ' ' num2str(sizeAC) ' ' num2str(sizeAD) ' ' num2str(sizeAE) ' ' num2str(sizeAF) ' ' ])
176 | 
177 | else
178 |     disp(['Did not find at least ' num2str(pointsOnStick) ' objects']);
179 | end % if numObjects >= pointsOnStick
180 | end % end trackCalibPoints function
--------------------------------------------------------------------------------