├── Codes
│   ├── Abhi
│   │   ├── Hand_segmentation_abhi.py
│   │   ├── kinect_body_abhi.py
│   │   └── main_abhi.py
│   ├── Anuj
│   │   └── main_anuj.py
│   ├── Ash
│   │   ├── Hand_segmentation_ash.py
│   │   ├── kinect_body_ash.py
│   │   ├── main_ash.py
│   │   ├── main_ash.txt
│   │   ├── research.py
│   │   └── tp
│   └── PyKinectBodyGame.py
├── Documentation
│   ├── MATLAB_approach
│   └── gestures
├── Drivers
│   ├── pykinect2_v1.0.1
│   │   ├── PyKinectRuntime.py
│   │   ├── PyKinectRuntime.pyc
│   │   ├── PyKinectV2.py
│   │   ├── PyKinectV2.pyc
│   │   ├── __init__.py
│   │   └── __init__.pyc
│   └── pykinect2_v1.0.2
│       ├── PyKinectRuntime.py
│       ├── PyKinectRuntime.pyc
│       ├── PyKinectV2.py
│       ├── PyKinectV2.pyc
│       ├── __init__.py
│       └── __init__.pyc
├── Images
│   ├── hand.bmp
│   ├── hand.jpg
│   ├── hand1.jpg
│   ├── hand2.png
│   ├── openhand.png
│   ├── pointer.png
│   └── right_hand_filtered.png
├── README.md
├── Test Data
│   └── data_dump
├── Test
│   ├── centroid.py
│   ├── hand.bmp
│   ├── test_blob.py
│   ├── test_infrared.py
│   ├── test_longExposureInfrared.py
│   └── testiter.py
├── countors.txt
└── countors_defects.txt
/Codes/Abhi/Hand_segmentation_abhi.py:
--------------------------------------------------------------------------------
1 | from pykinect2 import PyKinectV2
2 | from pykinect2.PyKinectV2 import *
3 | from pykinect2 import PyKinectRuntime
4 | from matplotlib import pyplot as plt
5 | from mpl_toolkits.mplot3d import Axes3D
6 | import scipy
7 | import numpy as np
8 | import cv2
9 | 
10 | class HandGestureObjectClass(object):
11 |     def __init__(self):
12 | 
13 |         # Kinect runtime object, we want only depth and body frames
14 |         self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body)
15 | 
16 |         self._bodies = None
17 | 
18 | 
19 |     def neighbourhood(self, array, radius, seed):
20 | 
21 |         neighbour = np.array(array)
22 |         neighbour *= 0
23 | 
24 |         temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8)
25 |         ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
26 |         # temp = cv2.Color(temp,cv2.COLOR_GRAY2RGB)
27 |         # mask = np.zeros(np.shape(temp), dtype = np.uint8)
28 |         # mask[radius,radius] = 1
29 |         # bgdModel = np.zeros((1,65),np.float64)
30 |         # fgdModel = np.zeros((1,65),np.float64)
31 |         # rect = (0,0,100,100)
32 |         # mask, bgdModel, fgdModel=cv2.grabCut(temp,mask,rect,bgdModel,fgdModel,4,mode = cv2.GC_INIT_WITH_RECT)
33 |         # print np.shape(mask)
34 |         # temp = np.bitwise_and(mask,temp)
35 |         return temp
36 | 
37 |     def merge(self, array_big, array_small, seed ):
38 |         [a,b] = np.shape(array_small)
39 |         array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small
40 |         return array_big
41 | 
42 |     def max_hist_depth(self, frame):
43 |         #print 'FRAME_MAX = ' + str(frame.max())
44 |         binaries = int(frame.max())
45 |         if binaries <= 0:
46 |             return 0
47 |         histogram, bins = np.histogram(frame, bins = binaries)
48 |         histogram = histogram.tolist(); bins = bins.tolist()
49 |         histogram[0 : 1] = [0, 0]
50 |         max_hist = bins[histogram.index( max(histogram) )]
51 |         return max_hist
52 | 
53 |     def run(self):
54 |         print_frame = None
55 | 
56 |         # -------- Main Program Loop -----------
57 |         while (True):
58 |             # --- Main event loop
59 | 
60 |             if self._kinect.has_new_body_frame():
61 |                 print 'has body'
62 |                 depth_frame = self._kinect.get_last_depth_frame()
63 | 
64 |                 depth_frame = np.array(depth_frame/16, dtype= np.uint8)
65 |                 depth_frame = depth_frame.reshape(424,512)
66 | 
67 |                 self._bodies = self._kinect.get_last_body_frame()
68 | 
69 |                 if self._bodies is not None:
70 |                     # first detected body taken
71 |                     body = self._bodies.bodies[0]
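                    # Note: only body slot 0 is inspected here; the sensor can
                    # report a tracked body in any of its six slots, which is why
                    # PyKinectBodyGame.py below iterates over max_body_count. The
                    # `continue` also skips the cv2.waitKey handler for the frame
                    # whenever slot 0 is untracked.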
72 |                     if not body.is_tracked:
73 |                         continue
74 | 
75 |                     joints = body.joints
76 | 
77 |                     # convert joint coordinates to depth space
78 |                     joint_points = self._kinect.body_joints_to_depth_space(joints)
79 | 
80 |                     right_x = int(joint_points[PyKinectV2.JointType_HandRight].x)
81 |                     right_y = int(joint_points[PyKinectV2.JointType_HandRight].y)
82 |                     left_x = int(joint_points[PyKinectV2.JointType_HandLeft].x)
83 |                     left_y = int(joint_points[PyKinectV2.JointType_HandLeft].y)
84 | 
85 |                     right_x = right_x if right_x < 512 else 511
86 |                     right_y = right_y if right_y < 424 else 423
87 |                     left_x = left_x if left_x < 512 else 511
88 |                     left_y = left_y if left_y < 424 else 423
89 | 
90 |                     right_hand_depth = depth_frame[right_y, right_x]
91 |                     left_hand_depth = depth_frame[left_y, left_x]
92 |                     print 'ld:' + str(left_hand_depth) + '\trd:' + str(right_hand_depth)
93 | 
94 |                     right_hand = [right_x,right_y]
95 |                     left_hand = [left_x,left_y]
96 | 
97 |                     #print type(c)
98 | 
99 |                     d = 50
100 |                     if depth_frame is not None:
101 |                         right_hand_filtered = self.neighbourhood(depth_frame,d,right_hand)
102 |                         left_hand_filtered = self.neighbourhood(depth_frame,d,left_hand)
103 | 
104 | 
105 |                         neighbour = np.array(depth_frame)
106 |                         neighbour *= 0
107 | 
108 |                         right_hand_filtered_depth_frame = self.merge(neighbour, right_hand_filtered,right_hand)
109 |                         left_hand_filtered_depth_frame = self.merge(neighbour, left_hand_filtered, left_hand)
110 | 
111 | 
112 |                         # right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered,right_hand),depth_frame)
113 |                         # left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand),depth_frame)
114 |                         # ret,right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY)
115 |                         # ret,left_hand_filtered_depth_frame = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY)
116 | 
117 |                         print_frame = right_hand_filtered_depth_frame + left_hand_filtered_depth_frame
118 | 
119 | 
120 | 
121 |             if print_frame is not None:
122 |                 dpt = depth_frame
123 |                 cv2.imshow('Hand Filtered',print_frame)
124 |                 cv2.imshow('OG',depth_frame)
125 |                 # fig = plt.figure()
126 |                 # ax = fig.add_subplot(111, projection = '3d')
127 |                 # ax.plot([1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5])
128 |                 # plt.show()
129 | 
130 |             if cv2.waitKey(1) & 0xFF == ord('q'):
131 |                 break
132 | 
133 | 
134 | 
135 | 
136 | 
137 |         # Close our Kinect sensor, close the window and quit.
138 | self._kinect.close() 139 | 140 | 141 | 142 | HandGestureObject = HandGestureObjectClass(); 143 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Abhi/kinect_body_abhi.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import scipy 6 | import numpy as np 7 | import cv2 8 | 9 | class HandGestureObjectClass(object): 10 | def __init__(self): 11 | 12 | # Kinect runtime object, we want only color and body frames 13 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 14 | 15 | # here we will store skeleton data 16 | self._bodies = None 17 | 18 | 19 | def neighbourhood(self, array, radius, seed): 20 | 21 | 22 | neighbour = np.array(array) 23 | neighbour *= 0 24 | 25 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8) 26 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 27 | return temp 28 | 29 | def merge(self, array_big, array_small, seed ): 30 | [a,b] = np.shape(array_small) 31 | array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small 32 | return array_big 33 | 34 | def max_hist_depth(self, frame): 35 | #print 'FRAME_MAX = ' + str(frame.max()) 36 | binaries = int(frame.max()) 37 | if binaries <= 0: 38 | return 0 39 | histogram, bins = np.histogram(frame, bins = binaries) 40 | histogram = histogram.tolist(); bins = bins.tolist(); 41 | histogram[0 : 1] = [0, 0] 42 | max_hist = bins[histogram.index( max(histogram) )] 43 | return max_hist 44 | 45 | def max_area_contour(self, contours): 46 | max_area = 0 47 | # ci = 0 48 | for i in range(len(contours)): 49 | cnt=contours[i] 50 | area = cv2.contourArea(cnt) 51 | if(area>max_area): 52 | max_area=area 53 | ci=i 54 | return contours[ci] 55 | 56 | def run(self): 57 | print_frame=None 58 | 59 | # -------- Main Program Loop ----------- 60 | while (True): 61 | # --- Main event loop 62 | 63 | if self._kinect.has_new_depth_frame() or self._kinect.has_new_body_frame(): 64 | 65 | depth_frame = self._kinect.get_last_depth_frame() 66 | 67 | depth_frame = np.array(depth_frame/16, dtype= np.uint8) 68 | depth_frame = depth_frame.reshape(424,512) 69 | 70 | self._bodies = self._kinect.get_last_body_frame() 71 | 72 | if self._bodies is not None: 73 | #first detected body taken 74 | body = self._bodies.bodies[0] 75 | if not body.is_tracked: 76 | continue 77 | 78 | joints = body.joints 79 | 80 | # convert joint coordinates to color space 81 | joint_points = self._kinect.body_joints_to_depth_space(joints) 82 | 83 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 84 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 85 | left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 86 | left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 87 | 88 | right_x = right_x if right_x < 424 else 423 89 | right_y = right_y if right_y < 512 else 511 90 | left_x = left_x if left_x < 424 else 423 91 | left_y = left_y if left_y < 512 else 511 92 | 93 | right_hand_depth = depth_frame[right_x,right_y] 94 | left_hand_depth = depth_frame[left_x,left_y] 95 | print 'ld:' + str(left_hand_depth)+'\trd:' + str(right_hand_depth) 96 | 97 | right_hand = [right_x,right_y] 98 | left_hand = [left_x,left_y] 99 | 100 | #print type(c) 101 | 102 | d = 50 
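                    # What neighbourhood() above computes, shown as a minimal
                    # standalone sketch (hypothetical names; assumes a uint8 depth
                    # image and cv2/np imported as in this file): crop a
                    # (2*radius)-pixel square around the hand joint, then let Otsu
                    # split the near hand pixels from the farther background.
                    def hand_mask_sketch(depth_u8, seed_xy, radius=50):
                        x, y = seed_xy          # seed is (column, row)
                        roi = depth_u8[y - radius:y + radius, x - radius:x + radius]
                        # THRESH_BINARY_INV + THRESH_OTSU: nearer (darker) pixels become 255
                        _, mask = cv2.threshold(roi, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
                        return mask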
103 | if depth_frame != None: 104 | right_hand_filtered = self.neighbourhood(depth_frame,d,right_hand) 105 | left_hand_filtered = self.neighbourhood(depth_frame,d,left_hand) 106 | 107 | kernel = np.ones((3,3),np.uint8) 108 | right_hand_filtered = cv2.erode(right_hand_filtered, kernel,iterations = 1) 109 | left_hand_filtered = cv2.erode(left_hand_filtered, kernel,iterations = 1) 110 | 111 | neighbour = np.array(depth_frame) 112 | neighbour *= 0 113 | 114 | print_frame = np.zeros(np.shape(depth_frame)) 115 | 116 | 117 | 118 | if right_hand_filtered != None: 119 | 120 | img1,contours1, hierarchy1 = cv2.findContours(right_hand_filtered,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE) 121 | cnt = self.max_area_contour(contours1) 122 | hull = cv2.convexHull(cnt) 123 | drawing = np.zeros(right_hand_filtered.shape,np.uint8) 124 | drawing = cv2.drawContours(drawing,[cnt],0,150,1) 125 | drawing = cv2.drawContours(drawing,[hull],0,200,1) 126 | cv2.imshow('contours1',drawing) 127 | 128 | right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered,right_hand),depth_frame) 129 | 130 | ret,right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 131 | print_frame += right_hand_filtered_depth_frame 132 | 133 | if left_hand_filtered != None: 134 | 135 | 136 | left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand),depth_frame) 137 | ret,left_hand_filtered_depth_frame = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 138 | print_frame += left_hand_filtered_depth_frame 139 | 140 | if print_frame != None: 141 | dpt = depth_frame 142 | cv2.imshow('Hand Filtered',print_frame) 143 | 144 | 145 | 146 | if cv2.waitKey(1) & 0xFF == ord('q'): 147 | break 148 | 149 | 150 | # --- Limit to 60 frames per second 151 | 152 | 153 | # Close our Kinect sensor, close the window and quit. 
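        # A safer variant of max_area_contour() for reference: the version above
        # leaves `ci` unbound (NameError) when `contours` is empty or every
        # contour has zero area, because its `ci = 0` line is commented out.
        # Minimal sketch, illustration only, not wired into run():
        def largest_contour(contours):
            if not contours:
                return None
            return max(contours, key=cv2.contourArea)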
154 | self._kinect.close() 155 | 156 | 157 | 158 | HandGestureObject = HandGestureObjectClass(); 159 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Abhi/main_abhi.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | 5 | from matplotlib import pyplot as plt 6 | 7 | import scipy 8 | import numpy as np 9 | import cv2 10 | 11 | class HandGestureObjectClass(object): 12 | 13 | def __init__(self): 14 | 15 | # Kinect runtime object, we want only depth and body frames 16 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth) 17 | 18 | def max_hist_depth(self, frame): 19 | #print 'FRAME_MAX = ' + str(frame.max()) 20 | binaries = int(frame.max()) 21 | if binaries <= 0: 22 | return 0 23 | histogram, bins = np.histogram(frame, bins = binaries) 24 | histogram = histogram.tolist(); bins = bins.tolist(); 25 | histogram[0 : 1] = [0, 0] 26 | max_hist = bins[histogram.index( max(histogram) )] 27 | return max_hist 28 | 29 | def run(self): 30 | print ':IN_RUN:Pulling Frames' 31 | previous_frame = None 32 | 33 | 34 | while(True): 35 | #Main event loop 36 | if self._kinect.has_new_depth_frame(): 37 | 38 | frame = self._kinect.get_last_depth_frame() 39 | frame=frame/32 40 | frame = frame.reshape(424,512) 41 | 42 | if previous_frame != None and not np.array_equal(frame,previous_frame): 43 | 44 | #Foreground Detection 45 | frame_foregnd = cv2.subtract(frame,previous_frame) 46 | frame_denoised = np.where(frame_foregnd>0,frame_foregnd,0) 47 | 48 | #Denoising by erosion 49 | kernel = np.ones((5,5),np.uint8) 50 | frame_denoised = cv2.erode(frame_denoised,kernel,iterations=1) 51 | frame_denoised = cv2.dilate(frame_denoised,kernel,iterations=1) 52 | 53 | # Depth frame XOR Denoised Frame 54 | frame_xored = np.where(frame_denoised != 0, frame, 0) 55 | 56 | #Depth of the closest object 57 | hand_depth = self.max_hist_depth(frame_xored) 58 | print "Hand Depth: " + str(hand_depth) 59 | hand_filtered_frame = np.where(previous_frame > (hand_depth + 5),0 , previous_frame) 60 | hand_filtered_frame = np.where(hand_filtered_frame < (hand_depth - 5),0 , hand_filtered_frame) 61 | 62 | cv2.imshow('Kinect',hand_filtered_frame) 63 | 64 | else: 65 | print "Move your hand" 66 | 67 | previous_frame = frame 68 | 69 | if cv2.waitKey(1) & 0xFF == ord('q'): 70 | break 71 | 72 | # Close our Kinect sensor, close the window and quit. 73 | self._kinect.close() 74 | 75 | 76 | HandGestureObject = HandGestureObjectClass(); 77 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Anuj/main_anuj.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | from PIL import Image 6 | from skimage import data 7 | from skimage.filters import threshold_otsu, threshold_adaptive 8 | import scipy 9 | import ctypes 10 | import _ctypes 11 | import pygame 12 | import sys 13 | import numpy 14 | from scipy.misc import toimage 15 | from scipy import ndimage 16 | import cv2 17 | 18 | mindepth = 500 19 | maxdepth = 65535 20 | mapdepthtobyte = 8000/256; 21 | 22 | class BodyGameRuntime(object): 23 | def __init__(self): 24 | # Loop until the user clicks the close button. 
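        # The frame-differencing pipeline that /Codes/Abhi/main_abhi.py above
        # implements (and Documentation/MATLAB_approach below outlines):
        # subtract -> erode/dilate -> mask -> histogram mode -> depth band.
        # Standalone sketch with hypothetical names, assuming `frame` and
        # `previous_frame` are uint8 424x512 depth images and cv2/np are
        # imported as in that file:
        def closest_moving_region(frame, previous_frame, tol=5):
            moving = cv2.subtract(frame, previous_frame)       # changed pixels only
            kernel = np.ones((5, 5), np.uint8)
            moving = cv2.erode(moving, kernel, iterations=1)   # drop speckle noise
            moving = cv2.dilate(moving, kernel, iterations=1)  # regrow surviving blobs
            masked = np.where(moving != 0, frame, 0)           # depths of moving pixels
            hist, bins = np.histogram(masked, bins=int(masked.max()) or 1)
            hist[0] = 0                                        # ignore the zero/no-data bin
            hand_depth = bins[int(np.argmax(hist))]            # most common moving depth
            band = np.abs(frame.astype(np.int16) - hand_depth) <= tol
            return np.where(band, frame, 0)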
25 |         self._done = False
26 |         # Kinect runtime object, we want only depth frames
27 |         self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)
28 | 
29 |     def max_hist_depth(self, frame):
30 | 
31 |         #print 'FRAME_MAX = ' + str(frame.max())
32 | 
33 |         binaries = int(frame.max())
34 | 
35 |         if binaries <= 0:
36 | 
37 |             return 0
38 | 
39 |         histogram, bins = numpy.histogram(frame, bins = binaries)
40 | 
41 |         histogram = histogram.tolist(); bins = bins.tolist()
42 | 
43 |         histogram[0 : 1] = [0, 0]
44 | 
45 |         max_hist = bins[ histogram.index( max(histogram) ) ]
46 | 
47 |         return max_hist
48 | 
49 |     def run(self):
50 |         exit = 0
51 |         print 'IN_RUN'
52 |         previous_frame = None
53 | 
54 |         while(True):
55 | 
56 |             if self._kinect.has_new_depth_frame():
57 | 
58 |                 raw_frame = self._kinect.get_last_depth_frame()
59 |                 frame = raw_frame
60 | 
61 |                 frame = frame.reshape(424,512)
62 | 
63 |                 if previous_frame is not None:
64 |                     frame = frame.reshape(424,512)
65 |                     previous_frame = previous_frame.reshape(424,512)
66 |                     frame_foregnd = cv2.subtract(frame,previous_frame)
67 |                     frame_denoised = scipy.ndimage.morphology.binary_erosion(frame_foregnd,iterations=3)
68 |                     frame_foregnd = numpy.where(frame_denoised, frame_foregnd, 0)
69 | 
70 |                     cv2.imshow('display',frame_foregnd)
71 | 
72 |                 previous_frame = frame
73 | 
74 |                 if cv2.waitKey(1) & 0xFF == ord('q'):
75 |                     break
76 | 
77 |         # Close our Kinect sensor, close the window and quit.
78 | 
79 |         self._kinect.close()
80 | 
81 | __main__ = "Kinect v2 Body Game"
82 | 
83 | game = BodyGameRuntime();
84 | 
85 | game.run();
--------------------------------------------------------------------------------
/Codes/Ash/Hand_segmentation_ash.py:
--------------------------------------------------------------------------------
1 | from pykinect2 import PyKinectV2
2 | from pykinect2.PyKinectV2 import *
3 | from pykinect2 import PyKinectRuntime
4 | from matplotlib import pyplot as plt
5 | import scipy
6 | import numpy as np
7 | import cv2
8 | 
9 | class HandGestureObjectClass(object):
10 |     def __init__(self):
11 | 
12 |         # Kinect runtime object, we want only depth and body frames
13 |         self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body)
14 | 
15 |         # here we will store skeleton data
16 |         self._bodies = None
17 | 
18 | 
19 |     def neighbourhood(self, array, radius, seed):
20 | 
21 | 
22 |         neighbour = np.array(array)
23 |         neighbour *= 0
24 | 
25 |         temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8)
26 |         ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
27 |         return temp
28 | 
29 |     def merge(self, array_big, array_small, seed ):
30 |         [a,b] = np.shape(array_small)
31 |         array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small
32 |         return array_big
33 | 
34 |     def max_hist_depth(self, frame):
35 |         #print 'FRAME_MAX = ' + str(frame.max())
36 |         binaries = int(frame.max())
37 |         if binaries <= 0:
38 |             return 0
39 |         histogram, bins = np.histogram(frame, bins = binaries)
40 |         histogram = histogram.tolist(); bins = bins.tolist()
41 |         histogram[0 : 1] = [0, 0]
42 |         max_hist = bins[histogram.index( max(histogram) )]
43 |         return max_hist
44 | 
45 |     def max_area_contour(self, contours):
46 |         max_area = 0; ci = 0
47 |         for i in range(len(contours)):
48 |             cnt = contours[i]
49 |             area = cv2.contourArea(cnt)
50 |             if(area > max_area):
51 |                 max_area = area
52 |                 ci = i
53 |         return contours[ci]
54 | 
55 |     def run(self):
56 |         print_frame = None
57 | 
58 |         # -------- Main Program Loop -----------
59 |         while (True):
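            # Scaling note for the frames pulled below: the raw Kinect v2 depth
            # image is uint16 millimetres. `depth_frame/16` cast to uint8 keeps
            # depths up to 255*16 = 4080 mm; anything farther wraps around in the
            # cast, which presumably matters little here since the tracked hands
            # sit well inside 4 m.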
60 | # --- Main event loop 61 | 62 | if self._kinect.has_new_depth_frame() or self._kinect.has_new_body_frame(): 63 | 64 | depth_frame = self._kinect.get_last_depth_frame() 65 | 66 | depth_frame = np.array(depth_frame/16, dtype= np.uint8) 67 | depth_frame = depth_frame.reshape(424,512) 68 | 69 | self._bodies = self._kinect.get_last_body_frame() 70 | 71 | if self._bodies is not None: 72 | #first detected body taken 73 | body = self._bodies.bodies[0] 74 | if not body.is_tracked: 75 | continue 76 | 77 | joints = body.joints 78 | 79 | # convert joint coordinates to color space 80 | joint_points = self._kinect.body_joints_to_depth_space(joints) 81 | 82 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 83 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 84 | left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 85 | left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 86 | 87 | right_x = right_x if right_x < 424 else 423 88 | right_y = right_y if right_y < 512 else 511 89 | left_x = left_x if left_x < 424 else 423 90 | left_y = left_y if left_y < 512 else 511 91 | 92 | right_hand_depth = depth_frame[right_x,right_y] 93 | left_hand_depth = depth_frame[left_x,left_y] 94 | print 'ld:' + str(left_hand_depth)+'\trd:' + str(right_hand_depth) 95 | 96 | right_hand = [right_x,right_y] 97 | left_hand = [left_x,left_y] 98 | 99 | #print type(c) 100 | 101 | d = 50 102 | if depth_frame != None: 103 | right_hand_filtered = self.neighbourhood(depth_frame,d,right_hand) 104 | left_hand_filtered = self.neighbourhood(depth_frame,d,left_hand) 105 | 106 | neighbour = np.array(depth_frame) 107 | neighbour *= 0 108 | 109 | print_frame = np.zeros(np.shape(depth_frame)) 110 | 111 | 112 | 113 | if right_hand_filtered != None: 114 | 115 | img1,contours1, hierarchy1 = cv2.findContours(right_hand_filtered,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE) 116 | cnt = self.max_area_contour(contours1) 117 | hull = cv2.convexHull(cnt) 118 | drawing = np.zeros(right_hand_filtered.shape,np.uint8) 119 | drawing = cv2.drawContours(drawing,[cnt],0,150,1) 120 | drawing = cv2.drawContours(drawing,[hull],0,200,1) 121 | cv2.imshow('contours1',drawing) 122 | 123 | right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered,right_hand),depth_frame) 124 | 125 | ret,right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 126 | print_frame += right_hand_filtered_depth_frame 127 | 128 | if left_hand_filtered != None: 129 | 130 | 131 | left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand),depth_frame) 132 | ret,left_hand_filtered_depth_frame = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 133 | print_frame += left_hand_filtered_depth_frame 134 | 135 | if print_frame != None: 136 | dpt = depth_frame 137 | cv2.imshow('Hand Filtered',print_frame) 138 | 139 | 140 | 141 | if cv2.waitKey(1) & 0xFF == ord('q'): 142 | break 143 | 144 | 145 | # --- Limit to 60 frames per second 146 | 147 | 148 | # Close our Kinect sensor, close the window and quit. 
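        # How the merge()/bitwise_and step above isolates a hand, as a minimal
        # standalone sketch (hypothetical names; assumes a 424x512 uint8 depth
        # frame and a square Otsu mask from neighbourhood()):
        def isolate_hand(depth_u8, hand_mask, seed_xy):
            canvas = np.zeros_like(depth_u8)           # blank full-size frame
            h, w = hand_mask.shape
            x, y = seed_xy
            canvas[y - h // 2:y + h // 2, x - w // 2:x + w // 2] = hand_mask
            return cv2.bitwise_and(canvas, depth_u8)   # keep depth only under the mask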
149 | self._kinect.close() 150 | 151 | 152 | 153 | HandGestureObject = HandGestureObjectClass(); 154 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/kinect_body_ash.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import scipy 6 | import numpy as np 7 | import cv2 8 | 9 | class HandGestureObjectClass(object): 10 | def __init__(self): 11 | 12 | # Kinect runtime object, we want only color and body frames 13 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 14 | 15 | # here we will store skeleton data 16 | self._bodies = None 17 | 18 | def neighbourhood_old(self, array, radius, seed, depth): 19 | # temp = np.nditer(array, flags = ['multi_index'], op_flags = ['readwrite']) 20 | #cv2.imshow('neigh', array) 21 | # print 'in neighbour' 22 | temp = 0 23 | [a,b] = np.shape(array) 24 | neighbour = np.array(array) 25 | neighbour *= 0 26 | for i in range(seed[0]-radius, seed[0]+radius): 27 | for j in range(seed[1]-radius, seed[1]+radius): 28 | temp+=array[j,i] 29 | if array[j,i] < depth+3: 30 | 31 | neighbour[j,i] = array[j,i] 32 | else: 33 | neighbour[j,i] = 0 34 | 35 | # cv2.imshow('neigh', array) 36 | return neighbour,temp/(2*radius+1)^2 37 | 38 | def neighbourhood(self, array, radius, seed, depth): 39 | [a,b] = np.shape(array) 40 | neighbour = np.array(array) 41 | neighbour *= 0 42 | # for i in range(seed[0]-radius, seed[0]+radius): 43 | # for j in range(seed[1]-radius, seed[1]+radius): 44 | # neighbour[j,i] = array[j,i] 45 | 46 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8) 47 | 48 | # temp = temp.reshape(2*radius,2*radius) 49 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 50 | 51 | neighbour[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius] = temp 52 | return neighbour 53 | 54 | def max_hist_depth(self, frame): 55 | #print 'FRAME_MAX = ' + str(frame.max()) 56 | binaries = int(frame.max()) 57 | if binaries <= 0: 58 | return 0 59 | histogram, bins = np.histogram(frame, bins = binaries) 60 | histogram = histogram.tolist(); bins = bins.tolist(); 61 | histogram[0 : 1] = [0, 0] 62 | max_hist = bins[histogram.index( max(histogram) )] 63 | return max_hist 64 | 65 | def run(self): 66 | print_frame=None 67 | 68 | # -------- Main Program Loop ----------- 69 | while (True): 70 | # --- Main event loop 71 | 72 | if self._kinect.has_new_depth_frame() or self._kinect.has_new_body_frame(): 73 | 74 | depth_frame = self._kinect.get_last_depth_frame() 75 | depth_frame = np.array(depth_frame/16, dtype= np.uint8) 76 | print depth_frame.max() 77 | print '_' 78 | depth_frame = depth_frame.reshape(424,512) 79 | self._bodies = self._kinect.get_last_body_frame() 80 | 81 | # --- draw skeletons to _frame_surface 82 | if self._bodies is not None: 83 | #for i in range(0, self._kinect.max_body_count): 84 | body = self._bodies.bodies[0] 85 | if not body.is_tracked: 86 | continue 87 | 88 | joints = body.joints 89 | # convert joint coordinates to color space 90 | joint_points = self._kinect.body_joints_to_depth_space(joints) 91 | 92 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 93 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 94 | 
left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 95 | left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 96 | #print right_x 97 | right_x = right_x if right_x < 424 else 423 98 | right_y = right_y if right_y < 512 else 511 99 | left_x = left_x if left_x < 424 else 423 100 | left_y = left_y if left_y < 512 else 511 101 | 102 | right_hand_depth = depth_frame[right_x,right_y] 103 | left_hand_depth = depth_frame[left_x,left_y] 104 | print 'ld:' + str(left_hand_depth)+'\trd:' + str(right_hand_depth) 105 | 106 | 107 | # hand_filtered_depth_frame = np.where(depth_frame< (left_hand_depth + 20) ,0 , depth_frame) 108 | # hand_filtered_depth_frame = np.where(depth_frame> (left_hand_depth - 20) ,0 , depth_frame) 109 | # hand_filtered_depth_frame = np.where(hand_filtered_depth_frame>100, 65535, 0) 110 | 111 | 112 | # print_frame=4*depth_frame 113 | # print_frame=cv2.circle(print_frame,(right_x,right_y), 10,(255,0,0),5) 114 | # print_frame=cv2.circle(print_frame,(left_x,left_y), 10,(255,0,0),5) 115 | 116 | right_hand = [right_x,right_y] 117 | left_hand = [left_x,left_y] 118 | 119 | #print type(c) 120 | 121 | d = 50 122 | if depth_frame != None: 123 | right_hand_filtered_depth_frame = self.neighbourhood(depth_frame,d,right_hand,right_hand_depth) 124 | left_hand_filtered_depth_frame = self.neighbourhood(depth_frame,d,left_hand,left_hand_depth) 125 | tp = depth_frame 126 | #img_grey = cv2.cvtColor(hand_filtered_depth_frame, cv2.COLOR_BGR2GRAY) 127 | # right_hand_filtered_depth_frame = np.array(right_hand_filtered_depth_frame/16, dtype = np.uint8) 128 | # left_hand_filtered_depth_frame = np.array(left_hand_filtered_depth_frame/16, dtype = np.uint8) 129 | 130 | # blur1 = cv2.GaussianBlur(right_hand_filtered_depth_frame,(5,5),0) 131 | # ret1,thresh1 = cv2.threshold(blur1,right_hand_depth-10,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 132 | # # thresh1 = cv2.adaptiveThreshold(blur1,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,11,2) 133 | 134 | # # kernel = np.ones((3,3),np.uint8) 135 | # # opening1 = cv2.morphologyEx(thresh1,cv2.MORPH_OPEN,kernel, iterations = 2) 136 | 137 | # # dist_transform1 = cv2.distanceTransform(opening1,cv2.DIST_L2,5) 138 | # # ret1, sure_fg1 = cv2.threshold(dist_transform1,0.3*dist_transform1.max(),255,0) 139 | 140 | # blur2 = cv2.GaussianBlur(left_hand_filtered_depth_frame,(5,5),0) 141 | # ret2,thresh2 = cv2.threshold(blur2,avg2,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 142 | 143 | 144 | # ret1,thresh1 = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY_INV) 145 | # ret2,thresh2 = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY_INV) 146 | 147 | # thresh = cv2.adaptiveThreshold(img_grey, 0, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2) 148 | # print_frame = thresh1+thresh2 149 | 150 | contours1, hierarchy1 = cv2.findContours(right_hand_filtered_depth_frame,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) 151 | 152 | 153 | print_frame = right_hand_filtered_depth_frame+left_hand_filtered_depth_frame 154 | 155 | # it = np.nditer(print_frame, flags=['multi_index'],op_flags=['readwrite']) 156 | # while not it.finished: 157 | # p=it.multi_index 158 | 159 | # if (p[0]>c[0]+d or p[0]c[1]+d or p[1]" % (it[0], it.multi_index), 163 | # it.iternext() 164 | 165 | # hand_filtered_depth_frame = np.where(depth_frame < (left_hand_depth + 2), depth_frame, 0) 166 | # hand_filtered_depth_frame = np.where(depth_frame > (left_hand_depth - 2), depth_frame, 0) 167 | # # hand_filtered_depth_frame = np.where(hand_filtered_depth_frame > 0, 65535, 0) 168 | 
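                    # Version note for the findContours call above: OpenCV 2.x and
                    # 4.x return (contours, hierarchy), while 3.x returns
                    # (image, contours, hierarchy) -- the other scripts under
                    # /Codes unpack three values. A version-agnostic idiom:
                    #     cnts = cv2.findContours(mask, cv2.RETR_TREE,
                    #                             cv2.CHAIN_APPROX_SIMPLE)[-2]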
# print_frame=cv2.circle(print_frame,(right_x,right_y), 10,(255,0,0),5) 169 | # print_frame=cv2.circle(print_frame,(left_x,left_y), 10,(255,0,0),5) 170 | 171 | 172 | if print_frame != None: 173 | dpt = depth_frame 174 | cv2.imshow('Hand Filtered',print_frame) 175 | 176 | 177 | 178 | if cv2.waitKey(1) & 0xFF == ord('q'): 179 | break 180 | 181 | 182 | cv2.imshow('OG',tp) 183 | 184 | 185 | # --- Limit to 60 frames per second 186 | 187 | 188 | # Close our Kinect sensor, close the window and quit. 189 | self._kinect.close() 190 | 191 | 192 | 193 | HandGestureObject = HandGestureObjectClass(); 194 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/main_ash.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | 5 | from matplotlib import pyplot as plt 6 | 7 | import scipy 8 | import numpy as np 9 | import cv2 10 | 11 | class HandGestureObjectClass(object): 12 | 13 | 14 | def __init__(self): 15 | 16 | # Kinect runtime object, we want only depth and body depth_frames 17 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth|PyKinectV2.FrameSourceTypes_Color) 18 | 19 | def max_hist_depth(self, frame): 20 | #print 'FRAME_MAX = ' + str(frame.max()) 21 | binaries = int(frame.max()) 22 | if binaries <= 0: 23 | return 0 24 | histogram, bins = np.histogram(frame, bins = binaries) 25 | histogram = histogram.tolist(); bins = bins.tolist(); 26 | histogram[0 : 1] = [0, 0] 27 | max_hist = bins[histogram.index( max(histogram) )] 28 | return max_hist 29 | 30 | def map_depth_to_color(self,depth_x,depth_y): 31 | color_x = depth_x * 1920 / 512 32 | color_y = depth_y * 1080 / 424 33 | 34 | 35 | return 36 | def run(self): 37 | print ':IN_RUN:Pulling Frames' 38 | previous_depth_frame = None 39 | cx=0 40 | cy=0 41 | 42 | 43 | while(True): 44 | #Main event loop 45 | if self._kinect.has_new_depth_frame() or self._kinect.has_new_color_frame(): 46 | 47 | depth_frame = self._kinect.get_last_depth_frame() 48 | 49 | depth_frame = depth_frame.reshape(424,512) 50 | #depth_frame = depth_frame.resize((424*2,512*2)) 51 | 52 | color_frame = self._kinect.get_last_color_frame() 53 | #print 'color' 54 | color_frame = color_frame.reshape(1080,1920,4) 55 | #color_frame = np.resize(color_frame,(1080,1920,4)) 56 | #color_frame = color_frame.resize((1080/2,1920/2,4)) 57 | 58 | if previous_depth_frame != None and not np.array_equal(depth_frame,previous_depth_frame): 59 | 60 | # Foreground Detection 61 | depth_frame_foregnd = cv2.subtract(depth_frame,previous_depth_frame) 62 | depth_frame_denoised = np.where(depth_frame_foregnd>=100,depth_frame_foregnd,0) 63 | depth_frame_denoised = cv2.medianBlur(depth_frame_denoised,5) 64 | 65 | # Denoising by erosion 66 | kernel = np.ones((5,5),np.uint8) 67 | depth_frame_denoised = cv2.erode(depth_frame_denoised,kernel,iterations=1) 68 | depth_frame_denoised = cv2.dilate(depth_frame_denoised,kernel,iterations=1) 69 | 70 | # Depth depth_frame XOR Denoised depth_frame 71 | depth_frame_xored = np.where(depth_frame_denoised != 0, previous_depth_frame, 0) 72 | 73 | # Depth of the closest object 74 | hand_depth = self.max_hist_depth(depth_frame_xored) 75 | # print "Hand Depth: " + str(hand_depth) 76 | hand_filtered_depth_frame = np.where(depth_frame> (hand_depth + 20),0 , depth_frame) 77 | hand_filtered_depth_frame = np.where(hand_filtered_depth_frame < (hand_depth - 
20), 0 , hand_filtered_depth_frame) 78 | 79 | 80 | im = np.array(hand_filtered_depth_frame * 255, dtype = np.uint8) 81 | 82 | ret,thresh = cv2.threshold(im,100,255,cv2.THRESH_BINARY) 83 | image,contours,hierarchy = cv2.findContours(thresh, 1, 2) 84 | #print type(contours) 85 | if contours: 86 | 87 | cnt = contours[0] 88 | M = cv2.moments(cnt) 89 | # print M 90 | if M['m00'] != 0: 91 | # print ':' 92 | 93 | cx = int(M['m10']/M['m00']) 94 | # print cx 95 | cy = int(M['m01']/M['m00']) 96 | # print cy 97 | 98 | 99 | thresh = cv2.circle(thresh,(cx,cy), 10,(255,0,0),1) 100 | 101 | #Printing depth_frame 102 | hand_filtered_depth_frame=depth_frame 103 | hand_filtered_depth_frame *= 32 104 | cv2.imshow('Kinect',hand_filtered_depth_frame) 105 | cv2.imshow('COLOR',color_frame) 106 | 107 | previous_depth_frame = depth_frame 108 | 109 | if cv2.waitKey(1) & 0xFF == ord('q'): 110 | break 111 | 112 | # Close our Kinect sensor, close the window and quit. 113 | self._kinect.close() 114 | 115 | 116 | HandGestureObject = HandGestureObjectClass(); 117 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/main_ash.txt: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | 5 | from matplotlib import pyplot as plt 6 | 7 | import scipy 8 | import numpy as np 9 | import cv2 10 | 11 | class HandGestureObjectClass(object): 12 | 13 | def __init__(self): 14 | 15 | # Kinect runtime object, we want only depth and body frames 16 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth) 17 | 18 | def max_hist_depth(self, frame): 19 | #print 'FRAME_MAX = ' + str(frame.max()) 20 | binaries = int(frame.max()) 21 | if binaries <= 0: 22 | return 0 23 | histogram, bins = numpy.histogram(frame, bins = binaries) 24 | histogram = histogram.tolist(); bins = bins.tolist(); 25 | histogram[0 : 1] = [0, 0] 26 | max_hist = bins[ histogram.index( max(histogram) ) ] 27 | return max_hist 28 | 29 | def run(self): 30 | previous_frame = None 31 | 32 | while(True): 33 | #Main event loop 34 | if self._kinect.has_new_depth_frame(): 35 | frame = self._kinect.get_last_depth_frame() 36 | frame = frame.reshape(424,512) 37 | if previous_frame != None: 38 | frame_foregnd = cv2.subtract(frame,previous_frame) 39 | frame_denoised = np.where(frame_foregnd>=200,frame_foregnd,0) 40 | kernel = np.ones((5,5),np.uint8) 41 | frame_denoised = cv2.erode(frame_denoised,kernel,iterations=1) 42 | #frame_denoised = cv2.dilate(frame_denoised,kernel,iterations=1) 43 | frame_xored = np.where(frame_denoised != 0, frame, 0) 44 | print_frame = frame_xored << 8 45 | cv2.imshow('Kinect',print_frame) 46 | previous_frame=frame 47 | if cv2.waitKey(1) & 0xFF == ord('q'): 48 | break 49 | # Close our Kinect sensor, close the window and quit. 
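        # The centroid step in /Codes/Ash/main_ash.py above comes from the image
        # moments of the contour: cx = M10/M00 and cy = M01/M00. Minimal sketch,
        # illustration only:
        def contour_centroid(cnt):
            M = cv2.moments(cnt)
            if M['m00'] == 0:          # degenerate contour: zero area
                return None
            return int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])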
50 | self._kinect.close() 51 | 52 | HandGestureObject = HandGestureObjectClass(); 53 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/research.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import scipy 6 | import numpy as np 7 | import cv2 8 | 9 | class HandGestureObjectClass(object): 10 | def __init__(self): 11 | 12 | # Kinect runtime object, we want only color and body frames 13 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 14 | 15 | # here we will store skeleton data 16 | self._bodies = None 17 | 18 | 19 | def neighbourhood(self, array, radius, seed): 20 | 21 | 22 | neighbour = np.array(array) 23 | neighbour *= 0 24 | 25 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8) 26 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 27 | return temp 28 | 29 | def merge(self, array_big, array_small, seed ): 30 | [a,b] = np.shape(array_small) 31 | array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small 32 | return array_big 33 | 34 | def max_hist_depth(self, frame): 35 | #print 'FRAME_MAX = ' + str(frame.max()) 36 | binaries = int(frame.max()) 37 | if binaries <= 0: 38 | return 0 39 | histogram, bins = np.histogram(frame, bins = binaries) 40 | histogram = histogram.tolist(); bins = bins.tolist(); 41 | histogram[0 : 1] = [0, 0] 42 | max_hist = bins[histogram.index( max(histogram) )] 43 | return max_hist 44 | 45 | def max_area_contour(self, contours): 46 | max_area = 0 47 | ci = 0 48 | for i in range(len(contours)): 49 | cnt=contours[i] 50 | area = cv2.contourArea(cnt) 51 | if(area>max_area): 52 | max_area=area 53 | ci=i 54 | return contours[ci] 55 | 56 | def min_area_contour(self, contours): 57 | min_area = 0 58 | ci = 0 59 | for i in range(len(contours)): 60 | cnt=contours[i] 61 | area = cv2.contourArea(cnt) 62 | if(area 5: 159 | gesture = 2 160 | print 'Pointer' 161 | else: 162 | print 'Hand Closed' 163 | gesture = 1 164 | else: 165 | print 'Hand Open' 166 | gesture = 0 167 | 168 | 169 | # k = cv2.isContourConvex(cnt) 170 | # if k: 171 | # print 'convex' 172 | # # else: 173 | # print 'concave' 174 | 175 | right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered,right_hand),depth_frame) 176 | 177 | ret,right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 178 | print_frame += right_hand_filtered_depth_frame 179 | 180 | 181 | if left_hand_filtered != None: 182 | 183 | 184 | left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand),depth_frame) 185 | ret,left_hand_filtered_depth_frame = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 186 | print_frame += left_hand_filtered_depth_frame 187 | 188 | print_frame = np.array(print_frame, dtype = np.uint8) 189 | print_frame = cv2.cvtColor(print_frame, cv2.COLOR_GRAY2RGB) 190 | 191 | font = cv2.FONT_HERSHEY_SIMPLEX 192 | cv2.putText(print_frame, 'Gesture:',(50,320), font, 0.5, (150,150,150),1, cv2.LINE_AA) 193 | if gesture == 0: 194 | cv2.putText(print_frame, 'Hand Open',(50,350), font, 0.5, (200,0,0),1, cv2.LINE_AA) 195 | else: 196 | 
cv2.putText(print_frame, 'Hand Closed',(50,350), font, 0.5, (0,200,0),1, cv2.LINE_AA) 197 | 198 | if print_frame != None: 199 | dpt = depth_frame 200 | cv2.imshow('Hand Filtered',print_frame) 201 | 202 | 203 | 204 | if cv2.waitKey(1) & 0xFF == ord('q'): 205 | break 206 | 207 | 208 | # --- Limit to 60 frames per second 209 | 210 | 211 | # Close our Kinect sensor, close the window and quit. 212 | self._kinect.close() 213 | 214 | 215 | 216 | HandGestureObject = HandGestureObjectClass(); 217 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/tp: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import scipy 6 | import numpy as np 7 | import cv2 8 | import math 9 | class HandGestureObjectClass(object): 10 | def __init__(self): 11 | 12 | # Kinect runtime object, we want only color and body frames 13 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 14 | 15 | # here we will store skeleton data 16 | self._bodies = None 17 | 18 | 19 | def neighbourhood(self, array, radius, seed): 20 | 21 | 22 | neighbour = np.array(array) 23 | neighbour *= 0 24 | 25 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8) 26 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 27 | return temp 28 | 29 | def merge(self, array_big, array_small, seed ): 30 | [a,b] = np.shape(array_small) 31 | array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small 32 | return array_big 33 | 34 | def max_hist_depth(self, frame): 35 | #print 'FRAME_MAX = ' + str(frame.max()) 36 | binaries = int(frame.max()) 37 | if binaries <= 0: 38 | return 0 39 | histogram, bins = np.histogram(frame, bins = binaries) 40 | histogram = histogram.tolist(); bins = bins.tolist(); 41 | histogram[0 : 1] = [0, 0] 42 | max_hist = bins[histogram.index( max(histogram) )] 43 | return max_hist 44 | 45 | def max_area_contour(self, contours): 46 | max_area = 0 47 | ci = 0 48 | for i in range(len(contours)): 49 | cnt=contours[i] 50 | area = cv2.contourArea(cnt) 51 | if(area>max_area): 52 | max_area=area 53 | ci=i 54 | return contours[ci] 55 | 56 | def run(self): 57 | print_frame=None 58 | 59 | # -------- Main Program Loop ----------- 60 | while (True): 61 | # --- Main event loop 62 | print '0:in_main' 63 | 64 | if self._kinect.has_new_body_frame(): 65 | self._bodies = self._kinect.get_last_body_frame() 66 | 67 | if self._kinect.has_new_depth_frame(): 68 | depth_frame = self._kinect.get_last_depth_frame() 69 | 70 | depth_frame = np.array(depth_frame/16, dtype= np.uint8) 71 | depth_frame = depth_frame.reshape(424,512) 72 | cv2.imshow('depth_frame',depth_frame) 73 | 74 | i = 0 75 | if self._bodies is not None: 76 | # print "Bodies"+str(self._bodies) 77 | #first detected body taken 78 | 79 | body = self._bodies.bodies[i] 80 | if not body.is_tracked: 81 | i = i + 1 82 | print i 83 | continue 84 | 85 | joints = body.joints 86 | 87 | # convert joint coordinates to color space 88 | joint_points = self._kinect.body_joints_to_depth_space(joints) 89 | 90 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 91 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 92 | left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 93 | 
left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 94 | 95 | right_x = right_x if right_x < 424 else 423 96 | right_y = right_y if right_y < 512 else 511 97 | left_x = left_x if left_x < 424 else 423 98 | left_y = left_y if left_y < 512 else 511 99 | 100 | right_hand_depth = depth_frame[right_x,right_y] 101 | left_hand_depth = depth_frame[left_x,left_y] 102 | print 'ld:' + str(left_hand_depth)+'\trd:' + str(right_hand_depth) 103 | 104 | right_hand = [right_x,right_y] 105 | left_hand = [left_x,left_y] 106 | 107 | #print type(c) 108 | 109 | # radius_right =int( math.sqrt((int(joint_points[PyKinectV2.JointType_WristRight].x)-int(joint_points[PyKinectV2.JointType_HandTipRight].x))**2+(int(joint_points[PyKinectV2.JointType_WristRight].y)-int(joint_points[PyKinectV2.JointType_HandTipRight].y))**2))+1 110 | # radius_left =int( math.sqrt((int(joint_points[PyKinectV2.JointType_WristLeft].x)-int(joint_points[PyKinectV2.JointType_HandTipLeft].x))**2+(int(joint_points[PyKinectV2.JointType_WristLeft].y)-int(joint_points[PyKinectV2.JointType_HandTipLeft].y))**2))+1 111 | # print d 112 | 113 | radius_right = 50 114 | radius_left = 50 115 | 116 | if depth_frame != None: 117 | right_hand_filtered = self.neighbourhood(depth_frame,radius_right,right_hand) 118 | left_hand_filtered = self.neighbourhood(depth_frame,radius_left,left_hand) 119 | 120 | neighbour = np.array(depth_frame) 121 | neighbour *= 0 122 | 123 | print_frame = np.zeros(np.shape(depth_frame)) 124 | 125 | 126 | 127 | if right_hand_filtered != None: 128 | right = np.array(right_hand_filtered) 129 | # cv2.imwrite('pointer.png',right_hand_filtered) 130 | img1,contours1, hierarchy1 = cv2.findContours(right,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE) 131 | cnt = self.max_area_contour(contours1) 132 | x,y,w,h = cv2.boundingRect(cnt) 133 | 134 | right = np.array(self.neighbourhood(depth_frame,max(w,h),right_hand)) 135 | img1,contours1, hierarchy1 = cv2.findContours(right,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE) 136 | cnt = self.max_area_contour(contours1) 137 | 138 | hull = cv2.convexHull(cnt) 139 | # print hull 140 | # defects = cv2.convexityDefects(cnt,hull) 141 | # print defects.shape[0] 142 | drawing = np.zeros(right_hand_filtered.shape,np.uint8) 143 | drawing = cv2.drawContours(drawing,[cnt],0,150,1) 144 | drawing = cv2.drawContours(drawing,[hull],0,200,1) 145 | cv2.imshow('contours1',drawing) 146 | 147 | # img2 = cv2.imread('right_hand_filtered.png',0) 148 | # # img3 = cv2.imread('pointer.png',0) 149 | # # ret, thresh2 = cv2.threshold(img2, 127, 255,0) 150 | # im2,contours2,hierarchy2 = cv2.findContours(img2,2,1) 151 | # cnt2 = self.max_area_contour(contours2) 152 | 153 | # ret = cv2.matchShapes(cnt,cnt2,1,0.0) 154 | # # print ret 155 | # gesture = -1 156 | # if ret <= 0.1: 157 | # print 'Hand Closed' 158 | # gesture = 1 159 | # else: 160 | # print 'Hand Open' 161 | # gesture = 0 162 | 163 | 164 | # k = cv2.isContourConvex(cnt) 165 | # if k: 166 | # print 'convex' 167 | # # else: 168 | # print 'concave' 169 | 170 | right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered,right_hand),depth_frame) 171 | 172 | ret,right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 173 | print_frame += right_hand_filtered_depth_frame 174 | 175 | 176 | if left_hand_filtered != None: 177 | 178 | 179 | left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand),depth_frame) 180 | ret,left_hand_filtered_depth_frame = 
cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 181 | print_frame += left_hand_filtered_depth_frame 182 | 183 | print_frame = np.array(print_frame, dtype = np.uint8) 184 | # print_frame = cv2.cvtColor(print_frame, cv2.COLOR_GRAY2RGB) 185 | 186 | # font = cv2.FONT_HERSHEY_SIMPLEX 187 | # cv2.putText(print_frame, 'Gesture:',(50,320), font, 0.5, (150,150,150),1, cv2.LINE_AA) 188 | # if gesture == 0: 189 | # cv2.putText(print_frame, 'Hand Open',(50,350), font, 0.5, (200,0,0),1, cv2.LINE_AA) 190 | # else: 191 | # cv2.putText(print_frame, 'Hand Closed',(50,350), font, 0.5, (0,200,0),1, cv2.LINE_AA) 192 | 193 | if print_frame != None: 194 | dpt = depth_frame 195 | cv2.imshow('Hand Filtered',print_frame) 196 | 197 | 198 | 199 | if cv2.waitKey(1) & 0xFF == ord('q'): 200 | break 201 | 202 | 203 | # --- Limit to 60 frames per second 204 | 205 | 206 | # Close our Kinect sensor, close the window and quit. 207 | self._kinect.close() 208 | 209 | 210 | 211 | HandGestureObject = HandGestureObjectClass(); 212 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/PyKinectBodyGame.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | 5 | import ctypes 6 | import _ctypes 7 | import pygame 8 | import sys 9 | 10 | if sys.hexversion >= 0x03000000: 11 | import _thread as thread 12 | else: 13 | import thread 14 | 15 | # colors for drawing different bodies 16 | SKELETON_COLORS = [pygame.color.THECOLORS["red"], 17 | pygame.color.THECOLORS["blue"], 18 | pygame.color.THECOLORS["green"], 19 | pygame.color.THECOLORS["orange"], 20 | pygame.color.THECOLORS["purple"], 21 | pygame.color.THECOLORS["yellow"], 22 | pygame.color.THECOLORS["violet"]] 23 | 24 | 25 | class BodyGameRuntime(object): 26 | def __init__(self): 27 | pygame.init() 28 | 29 | # Used to manage how fast the screen updates 30 | self._clock = pygame.time.Clock() 31 | 32 | # Set the width and height of the screen [width, height] 33 | self._infoObject = pygame.display.Info() 34 | self._screen = pygame.display.set_mode((self._infoObject.current_w >> 1, self._infoObject.current_h >> 1), 35 | pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32) 36 | 37 | pygame.display.set_caption("Kinect for Windows v2 Body Game") 38 | 39 | # Loop until the user clicks the close button. 
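# Two-pass window sizing, as used in /Codes/Ash/tp above: segment once with a
# fixed radius, measure the blob with boundingRect, then re-segment with a
# radius fitted to the hand so Otsu sees less background. Minimal sketch with a
# hypothetical `segment` callable standing in for neighbourhood():
def adaptive_hand_window(depth_u8, seed_xy, segment, first_radius=50):
    mask = segment(depth_u8, seed_xy, first_radius)    # fixed-size first pass
    cnts = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if not cnts:
        return mask
    x, y, w, h = cv2.boundingRect(max(cnts, key=cv2.contourArea))
    return segment(depth_u8, seed_xy, max(w, h))       # fitted second pass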
40 | self._done = False 41 | 42 | # Used to manage how fast the screen updates 43 | self._clock = pygame.time.Clock() 44 | 45 | # Kinect runtime object, we want only color and body frames 46 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body) 47 | 48 | # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size 49 | self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32) 50 | 51 | # here we will store skeleton data 52 | self._bodies = None 53 | 54 | 55 | def draw_body_bone(self, joints, jointPoints, color, joint0, joint1): 56 | joint0State = joints[joint0].TrackingState; 57 | joint1State = joints[joint1].TrackingState; 58 | 59 | # both joints are not tracked 60 | if (joint0State == PyKinectV2.TrackingState_NotTracked) or (joint1State == PyKinectV2.TrackingState_NotTracked): 61 | return 62 | 63 | # both joints are not *really* tracked 64 | if (joint0State == PyKinectV2.TrackingState_Inferred) and (joint1State == PyKinectV2.TrackingState_Inferred): 65 | return 66 | 67 | # ok, at least one is good 68 | start = (jointPoints[joint0].x, jointPoints[joint0].y) 69 | end = (jointPoints[joint1].x, jointPoints[joint1].y) 70 | 71 | try: 72 | pygame.draw.line(self._frame_surface, color, start, end, 8) 73 | except: # need to catch it due to possible invalid positions (with inf) 74 | pass 75 | 76 | def draw_body(self, joints, jointPoints, color): 77 | # Torso 78 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_Head, PyKinectV2.JointType_Neck); 79 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_Neck, PyKinectV2.JointType_SpineShoulder); 80 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_SpineMid); 81 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineMid, PyKinectV2.JointType_SpineBase); 82 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderRight); 83 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderLeft); 84 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipRight); 85 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipLeft); 86 | 87 | # Right Arm 88 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ShoulderRight, PyKinectV2.JointType_ElbowRight); 89 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ElbowRight, PyKinectV2.JointType_WristRight); 90 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_HandRight); 91 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HandRight, PyKinectV2.JointType_HandTipRight); 92 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_ThumbRight); 93 | 94 | # Left Arm 95 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ShoulderLeft, PyKinectV2.JointType_ElbowLeft); 96 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ElbowLeft, PyKinectV2.JointType_WristLeft); 97 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_HandLeft); 98 | self.draw_body_bone(joints, 
jointPoints, color, PyKinectV2.JointType_HandLeft, PyKinectV2.JointType_HandTipLeft); 99 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_ThumbLeft); 100 | 101 | # Right Leg 102 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HipRight, PyKinectV2.JointType_KneeRight); 103 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_KneeRight, PyKinectV2.JointType_AnkleRight); 104 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_AnkleRight, PyKinectV2.JointType_FootRight); 105 | 106 | # Left Leg 107 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HipLeft, PyKinectV2.JointType_KneeLeft); 108 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_KneeLeft, PyKinectV2.JointType_AnkleLeft); 109 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_AnkleLeft, PyKinectV2.JointType_FootLeft); 110 | 111 | 112 | def draw_color_frame(self, frame, target_surface): 113 | target_surface.lock() 114 | address = self._kinect.surface_as_array(target_surface.get_buffer()) 115 | ctypes.memmove(address, frame.ctypes.data, frame.size) 116 | del address 117 | target_surface.unlock() 118 | 119 | def run(self): 120 | # -------- Main Program Loop ----------- 121 | while not self._done: 122 | # --- Main event loop 123 | for event in pygame.event.get(): # User did something 124 | if event.type == pygame.QUIT: # If user clicked close 125 | self._done = True # Flag that we are done so we exit this loop 126 | 127 | elif event.type == pygame.VIDEORESIZE: # window resized 128 | self._screen = pygame.display.set_mode(event.dict['size'], 129 | pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32) 130 | 131 | # --- Game logic should go here 132 | 133 | # --- Getting frames and drawing 134 | # --- Woohoo! We've got a color frame! Let's fill out back buffer surface with frame's data 135 | if self._kinect.has_new_color_frame(): 136 | frame = self._kinect.get_last_color_frame() 137 | self.draw_color_frame(frame, self._frame_surface) 138 | frame = None 139 | 140 | # --- Cool! We have a body frame, so can get skeletons 141 | if self._kinect.has_new_body_frame(): 142 | self._bodies = self._kinect.get_last_body_frame() 143 | 144 | # --- draw skeletons to _frame_surface 145 | if self._bodies is not None: 146 | for i in range(0, self._kinect.max_body_count): 147 | body = self._bodies.bodies[i] 148 | if not body.is_tracked: 149 | continue 150 | 151 | joints = body.joints 152 | # convert joint coordinates to color space 153 | joint_points = self._kinect.body_joints_to_color_space(joints) 154 | self.draw_body(joints, joint_points, SKELETON_COLORS[i]) 155 | 156 | # --- copy back buffer surface pixels to the screen, resize it if needed and keep aspect ratio 157 | # --- (screen size may be different from Kinect's color frame size) 158 | h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width() 159 | target_height = int(h_to_w * self._screen.get_width()) 160 | surface_to_draw = pygame.transform.scale(self._frame_surface, (self._screen.get_width(), target_height)); 161 | self._screen.blit(surface_to_draw, (0,0)) 162 | surface_to_draw = None 163 | pygame.display.update() 164 | 165 | # --- Go ahead and update the screen with what we've drawn. 166 | pygame.display.flip() 167 | 168 | # --- Limit to 60 frames per second 169 | self._clock.tick(60) 170 | 171 | # Close our Kinect sensor, close the window and quit. 
172 | self._kinect.close() 173 | pygame.quit() 174 | 175 | 176 | __main__ = "Kinect v2 Body Game" 177 | game = BodyGameRuntime(); 178 | game.run(); 179 | 180 | -------------------------------------------------------------------------------- /Documentation/MATLAB_approach: -------------------------------------------------------------------------------- 1 | # MATLAB APPROACH # 2 | -SUBTRACT 3 | -DENOISE(EROSION) 4 | -XOR WITH IMAGE 5 | -HISTOGRAM 6 | -MAX VALUE=DEPTH OF HAND 7 | -FILL REGION WITH TOLERANCE 8 | -------------------------------------------------------------------------------- /Documentation/gestures: -------------------------------------------------------------------------------- 1 | ::static gestures:: 2 | 1]hand open 3 | 2]hand closed 4 | 3]peace 5 | 4]thumbs up 6 | 7 | ::single hand motion:: 8 | 1]grab 9 | 2]pinch to zoom 10 | 3]rotate 11 | 4]wave to shuffle 12 | 5]drag and drop 13 | 6]tap select 14 | 15 | ::double hand motion:: 16 | 1]double hand zoom 17 | 2] -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.1/PyKinectRuntime.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | 4 | import ctypes 5 | import _ctypes 6 | from _ctypes import COMError 7 | import comtypes 8 | import sys 9 | import numpy 10 | import time 11 | 12 | import importlib 13 | 14 | if sys.hexversion >= 0x03000000: 15 | import _thread as thread 16 | else: 17 | import thread 18 | 19 | KINECT_MAX_BODY_COUNT = 6 20 | 21 | class PyKinectRuntime(object): 22 | """manages Kinect objects and simplifying access to them""" 23 | def __init__(self, frame_source_types): 24 | # recipe to get address of surface: http://archives.seul.org/pygame/users/Apr-2008/msg00218.html 25 | is_64bits = sys.maxsize > 2**32 26 | if not is_64bits: 27 | self.Py_ssize_t = ctypes.c_int 28 | else: 29 | self.Py_ssize_t = ctypes.c_int64 30 | 31 | self._PyObject_AsWriteBuffer = ctypes.pythonapi.PyObject_AsWriteBuffer 32 | self._PyObject_AsWriteBuffer.restype = ctypes.c_int 33 | self._PyObject_AsWriteBuffer.argtypes = [ctypes.py_object, 34 | ctypes.POINTER(ctypes.c_void_p), 35 | ctypes.POINTER(self.Py_ssize_t)] 36 | 37 | #self._color_frame_ready = PyKinectV2._event() 38 | #self._depth_frame_ready = PyKinectV2._event() 39 | #self._body_frame_ready = PyKinectV2._event() 40 | #self._body_index_frame_ready = PyKinectV2._event() 41 | #self._infrared_frame_ready = PyKinectV2._event() 42 | #self._long_exposure_infrared_frame_ready = PyKinectV2._event() 43 | #self._audio_frame_ready = PyKinectV2._event() 44 | 45 | self._close_event = ctypes.windll.kernel32.CreateEventW(None, False, False, None) 46 | 47 | self._color_frame_arrived_event = 0 48 | self._depth_frame_arrived_event = 0 49 | self._body_frame_arrived_event = 0 50 | self._body_index_frame_arrived_event = 0 51 | self._infrared_frame_arrived_event = 0 52 | self._long_exposure_infrared_frame_arrived_event = 0 53 | self._audio_frame_arrived_event = 0 54 | 55 | self._color_frame_lock = thread.allocate() 56 | self._depth_frame_lock = thread.allocate() 57 | self._body_frame_lock = thread.allocate() 58 | self._body_index_frame_lock = thread.allocate() 59 | self._infrared_frame_lock = thread.allocate() 60 | self._long_exposure_infrared_frame_lock = thread.allocate() 61 | self._audio_frame_lock = thread.allocate() 62 | 63 | #initialize sensor 64 | self._sensor = ctypes.POINTER(PyKinectV2.IKinectSensor)() 65 | hres = 
ctypes.windll.kinect20.GetDefaultKinectSensor(ctypes.byref(self._sensor)) 66 | hres = self._sensor.Open() 67 | 68 | self._mapper = self._sensor.CoordinateMapper 69 | 70 | self.frame_source_types = frame_source_types 71 | self.max_body_count = KINECT_MAX_BODY_COUNT 72 | 73 | self._handles = (ctypes.c_voidp * 8)() 74 | self._handles[0] = self._close_event 75 | self._handles[1] = self._close_event 76 | self._handles[2] = self._close_event 77 | self._handles[3] = self._close_event 78 | self._handles[4] = self._close_event 79 | self._handles[5] = self._close_event 80 | self._handles[6] = self._close_event 81 | self._handles[7] = self._close_event 82 | 83 | self._waitHandleCount = 1 84 | 85 | self._color_source = self._sensor.ColorFrameSource 86 | self.color_frame_desc = self._color_source.FrameDescription 87 | self._depth_source = self._sensor.DepthFrameSource 88 | self.depth_frame_desc = self._depth_source.FrameDescription 89 | self._body_index_source = self._sensor.BodyIndexFrameSource 90 | self.body_index_frame_desc = self._body_index_source.FrameDescription 91 | self._body_source = self._sensor.BodyFrameSource 92 | self._body_frame_data = ctypes.POINTER(ctypes.POINTER(IBody)) 93 | self.max_body_count = self._body_source.BodyCount 94 | 95 | #Changed:{ 96 | self._infrared_source = self._sensor.InfraredFrameSource 97 | self.infrared_frame_desc = self._infrared_source.FrameDescription 98 | #Changed:} 99 | 100 | #Changed:{ 101 | self._long_exposure_infrared_source = self._sensor.LongExposureInfraredFrameSource 102 | self._long_exposure_infrared_frame_desc = self._long_exposure_infrared_source.FrameDescription 103 | #Changed:} 104 | 105 | self._color_frame_data = None 106 | self._depth_frame_data = None 107 | self._body_frame_data = None 108 | self._body_index_frame_data = None 109 | self._infrared_frame_data = None 110 | self._long_exposure_infrared_frame_data = None 111 | self._audio_frame_data = None 112 | 113 | if(self.frame_source_types & FrameSourceTypes_Color): 114 | self._color_frame_data = ctypes.POINTER(ctypes.c_ubyte) 115 | self._color_frame_data_capacity = ctypes.c_uint(self.color_frame_desc.Width * self.color_frame_desc.Height * 4) 116 | self._color_frame_data_type = ctypes.c_ubyte * self._color_frame_data_capacity.value 117 | self._color_frame_data = ctypes.cast(self._color_frame_data_type(), ctypes.POINTER(ctypes.c_ubyte)) 118 | self._color_frame_reader = self._color_source.OpenReader() 119 | self._color_frame_arrived_event = self._color_frame_reader.SubscribeFrameArrived() 120 | self._handles[self._waitHandleCount] = self._color_frame_arrived_event 121 | self._waitHandleCount += 1 122 | 123 | if(self.frame_source_types & FrameSourceTypes_Depth): 124 | self._depth_frame_data = ctypes.POINTER(ctypes.c_ushort) 125 | self._depth_frame_data_capacity = ctypes.c_uint(self.depth_frame_desc.Width * self.depth_frame_desc.Height) 126 | self._depth_frame_data_type = ctypes.c_ushort * self._depth_frame_data_capacity.value 127 | self._depth_frame_data = ctypes.cast(self._depth_frame_data_type(), ctypes.POINTER(ctypes.c_ushort)) 128 | self._depth_frame_reader = self._depth_source.OpenReader() 129 | self._depth_frame_arrived_event = self._depth_frame_reader.SubscribeFrameArrived() 130 | self._handles[self._waitHandleCount] = self._depth_frame_arrived_event 131 | self._waitHandleCount += 1 132 | 133 | #Changed:{ 134 | if(self.frame_source_types & FrameSourceTypes_Infrared): 135 | self._infrared_frame_data = ctypes.POINTER(ctypes.c_ushort) 136 | self._infrared_frame_data_capacity = 
ctypes.c_uint(self.infrared_frame_desc.Width * self.infrared_frame_desc.Height) 137 | self._infrared_frame_data_type = ctypes.c_ushort * self._infrared_frame_data_capacity.value 138 | self._infrared_frame_data = ctypes.cast(self._infrared_frame_data_type(), ctypes.POINTER(ctypes.c_ushort)) 139 | self._infrared_frame_reader = self._infrared_source.OpenReader() 140 | self._infrared_frame_arrived_event = self._infrared_frame_reader.SubscribeFrameArrived() 141 | self._handles[self._waitHandleCount] = self._infrared_frame_arrived_event 142 | self._waitHandleCount += 1 143 | #Changed:} 144 | 145 | #Changed:{ 146 | if(self.frame_source_types & FrameSourceTypes_LongExposureInfrared): 147 | self._long_exposure_infrared_frame_data = ctypes.POINTER(ctypes.c_ushort) 148 | self._long_exposure_infrared_frame_data_capacity = ctypes.c_uint(self.infrared_frame_desc.Width * self.infrared_frame_desc.Height) 149 | self._long_exposure_infrared_frame_data_type = ctypes.c_ushort * self._long_exposure_infrared_frame_data_capacity.value 150 | self._long_exposure_infrared_frame_data = ctypes.cast(self._long_exposure_infrared_frame_data_type(), ctypes.POINTER(ctypes.c_ushort)) 151 | self._long_exposure_infrared_frame_reader = self._long_exposure_infrared_source.OpenReader() 152 | self._long_exposure_infrared_frame_arrived_event = self._long_exposure_infrared_frame_reader.SubscribeFrameArrived() 153 | self._handles[self._waitHandleCount] = self._long_exposure_infrared_frame_arrived_event 154 | self._waitHandleCount += 1 155 | #Changed:} 156 | 157 | if(self.frame_source_types & FrameSourceTypes_BodyIndex): 158 | self._body_index_frame_data = ctypes.POINTER(ctypes.c_ubyte) 159 | self._body_index_frame_data_capacity = ctypes.c_uint(self.body_index_frame_desc.Width * self.body_index_frame_desc.Height) 160 | self._body_index_frame_data_type = ctypes.c_ubyte * self._body_index_frame_data_capacity.value 161 | self._body_index_frame_data = ctypes.cast(self._body_index_frame_data_type(), ctypes.POINTER(ctypes.c_ubyte)) 162 | self._body_index_frame_reader = self._body_index_source.OpenReader() 163 | self._body_index_frame_arrived_event = self._body_index_frame_reader.SubscribeFrameArrived() 164 | self._handles[self._waitHandleCount] = self._body_index_frame_arrived_event 165 | self._waitHandleCount += 1 166 | 167 | self._body_frame_data = None 168 | if(self.frame_source_types & FrameSourceTypes_Body): 169 | self._body_frame_data_capacity = ctypes.c_uint(self.max_body_count) 170 | self._body_frame_data_type = ctypes.POINTER(IBody) * self._body_frame_data_capacity.value 171 | self._body_frame_data = ctypes.cast(self._body_frame_data_type(), ctypes.POINTER(ctypes.POINTER(IBody))) 172 | self._body_frame_reader = self._body_source.OpenReader() 173 | self._body_frame_arrived_event = self._body_frame_reader.SubscribeFrameArrived() 174 | self._body_frame_bodies = None 175 | self._handles[self._waitHandleCount] = self._body_frame_arrived_event 176 | self._waitHandleCount += 1 177 | 178 | thread.start_new_thread(self.kinect_frame_thread, ()) 179 | 180 | self._last_color_frame = None 181 | self._last_depth_frame = None 182 | self._last_body_frame = None 183 | self._last_body_index_frame = None 184 | self._last_infrared_frame = None 185 | self._last_long_exposure_infrared_frame = None 186 | self._last_audio_frame = None 187 | 188 | start_clock = time.clock() 189 | self._last_color_frame_access = self._last_color_frame_time = start_clock 190 | self._last_body_frame_access = self._last_body_frame_time = start_clock 191 | 
self._last_body_index_frame_access = self._last_body_index_frame_time = start_clock 192 | self._last_depth_frame_access = self._last_depth_frame_time = start_clock 193 | self._last_infrared_frame_access = self._last_infrared_frame_time = start_clock 194 | self._last_long_exposure_infrared_frame_access = self._last_long_exposure_infrared_frame_time = start_clock 195 | self._last_audio_frame_access = self._last_audio_frame_time = start_clock 196 | 197 | def close(self): 198 | if self._sensor is not None: 199 | ctypes.windll.kernel32.SetEvent(self._close_event) 200 | ctypes.windll.kernel32.CloseHandle(self._close_event) 201 | 202 | self._color_frame_reader = None 203 | self._depth_frame_reader = None 204 | self._body_index_frame_reader = None 205 | self._body_frame_reader = None 206 | self._infrared_frame_reader = None 207 | 208 | self._color_source = None 209 | self._depth_source = None 210 | self._body_index_source = None 211 | self._body_source = None 212 | self._infrared_source = None 213 | 214 | self._body_frame_data = None 215 | 216 | self._sensor.Close() 217 | self._sensor = None 218 | 219 | def __del__(self): 220 | self.close() 221 | 222 | def __enter__(self): 223 | return self 224 | 225 | def __exit__(self, *args): 226 | self.close() 227 | 228 | def surface_as_array(self, surface_buffer_interface): 229 | address = ctypes.c_void_p() 230 | size = self.Py_ssize_t() 231 | self._PyObject_AsWriteBuffer(surface_buffer_interface, 232 | ctypes.byref(address), ctypes.byref(size)) 233 | bytes = (ctypes.c_byte * size.value).from_address(address.value) 234 | bytes.object = surface_buffer_interface 235 | return bytes 236 | 237 | def has_new_color_frame(self): 238 | has = (self._last_color_frame_time > self._last_color_frame_access) 239 | return has 240 | 241 | def has_new_depth_frame(self): 242 | has = (self._last_depth_frame_time > self._last_depth_frame_access) 243 | return has 244 | 245 | def has_new_body_frame(self): 246 | has = (self._last_body_frame_time > self._last_body_frame_access) 247 | return has 248 | 249 | def has_new_body_index_frame(self): 250 | has = (self._last_body_index_frame_time > self._last_body_index_frame_access) 251 | return has 252 | 253 | def has_new_infrared_frame(self): 254 | has = (self._last_infrared_frame_time > self._last_infrared_frame_access) 255 | return has 256 | 257 | def has_new_long_exposure_infrared_frame(self): 258 | has = (self._last_long_exposure_infrared_frame_time > self._last_long_exposure_infrared_frame_access) 259 | return has 260 | 261 | def has_new_audio_frame(self): 262 | has = (self._last_audio_frame_time > self._last_audio_frame_access) 263 | return has 264 | 265 | 266 | def get_last_color_frame(self): 267 | with self._color_frame_lock: 268 | if self._color_frame_data is not None: 269 | data = numpy.copy(numpy.ctypeslib.as_array(self._color_frame_data, shape=(self._color_frame_data_capacity.value,))) 270 | _last_color_frame_access = time.clock() 271 | return data 272 | else: 273 | return None 274 | 275 | def get_last_depth_frame(self): 276 | with self._depth_frame_lock: 277 | if self._depth_frame_data is not None: 278 | data = numpy.copy(numpy.ctypeslib.as_array(self._depth_frame_data, shape=(self._depth_frame_data_capacity.value,))) 279 | _last_color_frame_access = time.clock() 280 | return data 281 | else: 282 | return None 283 | 284 | #Changed:{ 285 | def get_last_infrared_frame(self): 286 | with self._infrared_frame_lock: 287 | if self._infrared_frame_data is not None: 288 | data = 
numpy.copy(numpy.ctypeslib.as_array(self._infrared_frame_data, shape=(self._infrared_frame_data_capacity.value,))) 289 | _last_infrared_frame_access = time.clock() 290 | return data 291 | else: 292 | return None 293 | #Changed:} 294 | 295 | #Changed:{ 296 | def get_last_long_exposure_infrared_frame(self): 297 | with self._long_exposure_infrared_frame_lock: 298 | if self._long_exposure_infrared_frame_data is not None: 299 | data = numpy.copy(numpy.ctypeslib.as_array(self._long_exposure_infrared_frame_data, shape=(self._long_exposure_infrared_frame_data_capacity.value,))) 300 | _last_long_exposure_infrared_frame_access = time.clock() 301 | return data 302 | else: 303 | return None 304 | #Changed:} 305 | 306 | def get_last_body_index_frame(self): 307 | with self._body_index_frame_lock: 308 | if self._body_index_frame_data is not None: 309 | data = numpy.copy(numpy.ctypeslib.as_array(self._body_index_frame_data, shape=(self._body_index_frame_data_capacity.value,))) 310 | _last_color_frame_access = time.clock() 311 | return data 312 | else: 313 | return None 314 | 315 | def get_last_body_frame(self): 316 | with self._body_frame_lock: 317 | if self._body_frame_bodies is not None: 318 | _last_body_frame_access = time.clock() 319 | return self._body_frame_bodies.copy() 320 | else: 321 | return None 322 | 323 | 324 | def body_joint_to_color_space(self, joint): 325 | return self._mapper.MapCameraPointToColorSpace(joint.Position) 326 | 327 | 328 | def body_joints_to_color_space(self, joints): 329 | joint_points = numpy.ndarray((PyKinectV2.JointType_Count), dtype=numpy.object) 330 | 331 | for j in range(0, PyKinectV2.JointType_Count): 332 | joint_points[j] = self.body_joint_to_color_space(joints[j]) 333 | 334 | return joint_points 335 | 336 | #Changed:{ 337 | def body_joint_to_depth_space(self,joint): 338 | return self._mapper.MapCameraPointToDepthSpace(joint.Position) 339 | 340 | def body_joints_to_depth_space(self, joints): 341 | joint_points = numpy.ndarray((PyKinectV2.JointType_Count), dtype=numpy.object) 342 | 343 | for j in range(0, PyKinectV2.JointType_Count): 344 | joint_points[j] = self.body_joint_to_depth_space(joints[j]) 345 | 346 | return joint_points 347 | #Changed:} 348 | 349 | def kinect_frame_thread(self): 350 | while 1: 351 | wait = ctypes.windll.kernel32.WaitForMultipleObjects(self._waitHandleCount, self._handles, False, PyKinectV2._INFINITE) 352 | 353 | if wait == 0: 354 | break 355 | 356 | if self._handles[wait] == self._color_frame_arrived_event: 357 | self.handle_color_arrived(wait) 358 | elif self._handles[wait] == self._depth_frame_arrived_event: 359 | self.handle_depth_arrived(wait) 360 | elif self._handles[wait] == self._body_frame_arrived_event: 361 | self.handle_body_arrived(wait) 362 | elif self._handles[wait] == self._body_index_frame_arrived_event: 363 | self.handle_body_index_arrived(wait) 364 | elif self._handles[wait] == self._infrared_frame_arrived_event: 365 | self.handle_infrared_arrived(wait) 366 | elif self._handles[wait] == self._long_exposure_infrared_frame_arrived_event: 367 | self.handle_long_exposure_infrared_arrived(wait) 368 | elif self._handles[wait] == self._audio_frame_arrived_event: 369 | self.handle_audio_arrived(wait) 370 | else: 371 | break 372 | 373 | 374 | def handle_color_arrived(self, handle_index): 375 | colorFrameEventData = self._color_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 376 | colorFrameRef = colorFrameEventData.FrameReference 377 | try: 378 | colorFrame = colorFrameRef.AcquireFrame() 379 | try: 380 | with 
self._color_frame_lock: 381 | colorFrame.CopyConvertedFrameDataToArray(self._color_frame_data_capacity, self._color_frame_data, PyKinectV2.ColorImageFormat_Bgra) 382 | self._last_color_frame_time = time.clock() 383 | except: 384 | pass 385 | colorFrame = None 386 | except: 387 | pass 388 | colorFrameRef = None 389 | colorFrameEventData = None 390 | 391 | 392 | def handle_depth_arrived(self, handle_index): 393 | depthFrameEventData = self._depth_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 394 | depthFrameRef = depthFrameEventData.FrameReference 395 | try: 396 | depthFrame = depthFrameRef.AcquireFrame() 397 | try: 398 | with self._depth_frame_lock: 399 | depthFrame.CopyFrameDataToArray(self._depth_frame_data_capacity, self._depth_frame_data) 400 | self._last_depth_frame_time = time.clock() 401 | except: 402 | pass 403 | depthFrame = None 404 | except: 405 | pass 406 | depthFrameRef = None 407 | depthFrameEventData = None 408 | 409 | 410 | def handle_body_arrived(self, handle_index): 411 | bodyFrameEventData = self._body_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 412 | bofyFrameRef = bodyFrameEventData.FrameReference 413 | try: 414 | bodyFrame = bofyFrameRef.AcquireFrame() 415 | 416 | try: 417 | with self._body_frame_lock: 418 | bodyFrame.GetAndRefreshBodyData(self._body_frame_data_capacity, self._body_frame_data) 419 | self._body_frame_bodies = KinectBodyFrameData(bodyFrame, self._body_frame_data, self.max_body_count) 420 | self._last_body_frame_time = time.clock() 421 | 422 | # need these 2 lines as a workaround for handling IBody referencing exception 423 | self._body_frame_data = None 424 | self._body_frame_data = ctypes.cast(self._body_frame_data_type(), ctypes.POINTER(ctypes.POINTER(IBody))) 425 | 426 | except: 427 | pass 428 | 429 | bodyFrame = None 430 | except: 431 | pass 432 | bofyFrameRef = None 433 | bodyFrameEventData = None 434 | 435 | 436 | def handle_body_index_arrived(self, handle_index): 437 | bodyIndexFrameEventData = self._body_index_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 438 | bodyIndexFrameRef = bodyIndexFrameEventData.FrameReference 439 | try: 440 | bodyIndexFrame = bodyIndexFrameRef.AcquireFrame() 441 | try: 442 | with self._body_index_frame_lock: 443 | bodyIndexFrame.CopyFrameDataToArray(self._body_index_frame_data_capacity, self._body_index_frame_data) 444 | self._last_body_index_frame_time = time.clock() 445 | except: 446 | pass 447 | bodyIndexFrame = None 448 | except: 449 | pass 450 | bodyIndexFrame = None 451 | bodyIndexFrameEventData = None 452 | 453 | 454 | #Changed:{ 455 | def handle_infrared_arrived(self, handle_index): 456 | infraredFrameEventData = self._infrared_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 457 | infraredFrameRef = infraredFrameEventData.FrameReference 458 | try: 459 | infraredFrame = infraredFrameRef.AcquireFrame() 460 | try: 461 | with self._infrared_frame_lock: 462 | infraredFrame.CopyFrameDataToArray(self._infrared_frame_data_capacity, self._infrared_frame_data) 463 | self._last_infrared_frame_time = time.clock() 464 | except: 465 | pass 466 | infraredFrame = None 467 | except: 468 | pass 469 | infraredFrameRef = None 470 | infraredFrameEventData = None 471 | #Changed:} 472 | 473 | #Changed:{ 474 | def handle_long_exposure_infrared_arrived(self, handle_index): 475 | longExposureInfraredFrameEventData = self._long_exposure_infrared_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 476 | longExposureInfraredFrameRef 
=longExposureInfraredFrameEventData.FrameReference 477 | try: 478 | longExposureInfraredFrame =longExposureInfraredFrameRef.AcquireFrame() 479 | try: 480 | with self._long_exposure_infrared_frame_lock: 481 | longExposureInfraredFrame.CopyFrameDataToArray(self._long_exposure_infrared_frame_data_capacity, self._long_exposure_infrared_frame_data) 482 | self._last_long_exposure_infrared_frame_time = time.clock() 483 | except: 484 | pass 485 | longExposureInfraredFrame = None 486 | except: 487 | pass 488 | longExposureInfraredFrameRef = None 489 | longExposureInfraredFrameEventData = None 490 | #Changed:} 491 | 492 | def handle_audio_arrived(self, handle_index): 493 | pass 494 | 495 | 496 | 497 | class KinectBody(object): 498 | def __init__(self, body = None): 499 | self.is_restricted = False 500 | self.tracking_id = -1 501 | 502 | self.is_tracked = False 503 | 504 | if body is not None: 505 | self.is_tracked = body.IsTracked 506 | 507 | if self.is_tracked: 508 | self.is_restricted = body.IsRestricted 509 | self.tracking_id = body.TrackingId 510 | self.engaged = body.Engaged 511 | self.lean = body.Lean 512 | self.lean_tracking_state = body.LeanTrackingState 513 | self.hand_left_state = body.HandLeftState 514 | self.hand_left_confidence = body.HandLeftConfidence 515 | self.hand_right_state = body.HandRightState 516 | self.hand_right_confidence = body.HandRightConfidence 517 | self.clipped_edges = body.ClippedEdges 518 | 519 | joints = ctypes.POINTER(PyKinectV2._Joint) 520 | joints_capacity = ctypes.c_uint(PyKinectV2.JointType_Count) 521 | joints_data_type = PyKinectV2._Joint * joints_capacity.value 522 | joints = ctypes.cast(joints_data_type(), ctypes.POINTER(PyKinectV2._Joint)) 523 | body.GetJoints(PyKinectV2.JointType_Count, joints) 524 | self.joints = joints 525 | 526 | joint_orientations = ctypes.POINTER(PyKinectV2._JointOrientation) 527 | joint_orientations_data_type = PyKinectV2._JointOrientation * joints_capacity.value 528 | joint_orientations = ctypes.cast(joint_orientations_data_type(), ctypes.POINTER(PyKinectV2._JointOrientation)) 529 | body.GetJointOrientations(PyKinectV2.JointType_Count, joint_orientations) 530 | self.joint_orientations = joint_orientations 531 | 532 | 533 | class KinectBodyFrameData(object): 534 | def __init__(self, bodyFrame, body_frame_data, max_body_count): 535 | self.bodies = None 536 | self.floor_clip_plane = None 537 | if bodyFrame is not None: 538 | self.floor_clip_plane = bodyFrame.FloorClipPlane 539 | 540 | self.bodies = numpy.ndarray((max_body_count), dtype=numpy.object) 541 | for i in range(0, max_body_count): 542 | self.bodies[i] = KinectBody(body_frame_data[i]) 543 | 544 | def copy(self): 545 | res = KinectBodyFrameData(None, None, 0) 546 | res.floor_clip_plane = self.floor_clip_plane 547 | res.bodies = numpy.copy(self.bodies) 548 | return res 549 | 550 | 551 | -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.1/PyKinectRuntime.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Drivers/pykinect2_v1.0.1/PyKinectRuntime.pyc -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.1/PyKinectV2.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Drivers/pykinect2_v1.0.1/PyKinectV2.pyc 
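The driver above exposes a simple polling contract: kinect_frame_thread blocks in WaitForMultipleObjects on the subscribed event handles, the matching handle_*_arrived copies the native buffer into the preallocated ctypes array under its lock and stamps _last_*_frame_time, and callers compare that stamp against their last access via has_new_*_frame() before pulling a numpy copy with get_last_*_frame(). (Note that the getters assign _last_*_frame_access to a local rather than to self, so once the first frame lands has_new_*_frame() effectively reports "a frame exists" rather than "a strictly newer frame exists".) A minimal depth consumer built on that contract -- a sketch, assuming a Kinect v2 is attached and pykinect2 is importable; the 512x424 size comes from the depth FrameDescription:

from pykinect2 import PyKinectV2
from pykinect2 import PyKinectRuntime

kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)
try:
    frames = 0
    while frames < 100:  # grab a bounded number of frames for the sketch
        if kinect.has_new_depth_frame():
            depth = kinect.get_last_depth_frame()  # flat uint16 copy made under the frame lock
            depth = depth.reshape((424, 512))      # 424 rows x 512 columns
            # ... threshold / segment the depth image here ...
            frames += 1
finally:
    kinect.close()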
-------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Drivers/pykinect2_v1.0.1/__init__.py -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.1/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Drivers/pykinect2_v1.0.1/__init__.pyc -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.2/PyKinectRuntime.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | 4 | import ctypes 5 | import _ctypes 6 | from _ctypes import COMError 7 | import comtypes 8 | import sys 9 | import numpy 10 | import time 11 | 12 | import importlib 13 | 14 | if sys.hexversion >= 0x03000000: 15 | import _thread as thread 16 | else: 17 | import thread 18 | 19 | KINECT_MAX_BODY_COUNT = 6 20 | 21 | class PyKinectRuntime(object): 22 | """manages Kinect objects and simplifying access to them""" 23 | def __init__(self, frame_source_types): 24 | # recipe to get address of surface: http://archives.seul.org/pygame/users/Apr-2008/msg00218.html 25 | is_64bits = sys.maxsize > 2**32 26 | if not is_64bits: 27 | self.Py_ssize_t = ctypes.c_int 28 | else: 29 | self.Py_ssize_t = ctypes.c_int64 30 | 31 | self._PyObject_AsWriteBuffer = ctypes.pythonapi.PyObject_AsWriteBuffer 32 | self._PyObject_AsWriteBuffer.restype = ctypes.c_int 33 | self._PyObject_AsWriteBuffer.argtypes = [ctypes.py_object, 34 | ctypes.POINTER(ctypes.c_void_p), 35 | ctypes.POINTER(self.Py_ssize_t)] 36 | 37 | #self._color_frame_ready = PyKinectV2._event() 38 | #self._depth_frame_ready = PyKinectV2._event() 39 | #self._body_frame_ready = PyKinectV2._event() 40 | #self._body_index_frame_ready = PyKinectV2._event() 41 | #self._infrared_frame_ready = PyKinectV2._event() 42 | #self._long_exposure_infrared_frame_ready = PyKinectV2._event() 43 | #self._audio_frame_ready = PyKinectV2._event() 44 | 45 | self._close_event = ctypes.windll.kernel32.CreateEventW(None, False, False, None) 46 | 47 | self._color_frame_arrived_event = 0 48 | self._depth_frame_arrived_event = 0 49 | self._body_frame_arrived_event = 0 50 | self._body_index_frame_arrived_event = 0 51 | self._infrared_frame_arrived_event = 0 52 | self._long_exposure_infrared_frame_arrived_event = 0 53 | self._audio_frame_arrived_event = 0 54 | 55 | self._color_frame_lock = thread.allocate() 56 | self._depth_frame_lock = thread.allocate() 57 | self._body_frame_lock = thread.allocate() 58 | self._body_index_frame_lock = thread.allocate() 59 | self._infrared_frame_lock = thread.allocate() 60 | self._long_exposure_infrared_frame_lock = thread.allocate() 61 | self._audio_frame_lock = thread.allocate() 62 | 63 | #initialize sensor 64 | self._sensor = ctypes.POINTER(PyKinectV2.IKinectSensor)() 65 | hres = ctypes.windll.kinect20.GetDefaultKinectSensor(ctypes.byref(self._sensor)) 66 | hres = self._sensor.Open() 67 | 68 | self._mapper = self._sensor.CoordinateMapper 69 | 70 | self.frame_source_types = frame_source_types 71 | self.max_body_count = KINECT_MAX_BODY_COUNT 72 | 73 | self._handles = (ctypes.c_voidp * 8)() 74 | self._handles[0] = 
self._close_event 75 | self._handles[1] = self._close_event 76 | self._handles[2] = self._close_event 77 | self._handles[3] = self._close_event 78 | self._handles[4] = self._close_event 79 | self._handles[5] = self._close_event 80 | self._handles[6] = self._close_event 81 | self._handles[7] = self._close_event 82 | 83 | self._waitHandleCount = 1 84 | 85 | self._color_source = self._sensor.ColorFrameSource 86 | self.color_frame_desc = self._color_source.FrameDescription 87 | self._depth_source = self._sensor.DepthFrameSource 88 | self.depth_frame_desc = self._depth_source.FrameDescription 89 | self._body_index_source = self._sensor.BodyIndexFrameSource 90 | self.body_index_frame_desc = self._body_index_source.FrameDescription 91 | self._body_source = self._sensor.BodyFrameSource 92 | self._body_frame_data = ctypes.POINTER(ctypes.POINTER(IBody)) 93 | self.max_body_count = self._body_source.BodyCount 94 | 95 | #Changed:{ 96 | self._infrared_source = self._sensor.InfraredFrameSource 97 | self.infrared_frame_desc = self._infrared_source.FrameDescription 98 | #Changed:} 99 | 100 | #Changed:{ 101 | self._long_exposure_infrared_source = self._sensor.LongExposureInfraredFrameSource 102 | self._long_exposure_infrared_frame_desc = self._long_exposure_infrared_source.FrameDescription 103 | #Changed:} 104 | 105 | self._color_frame_data = None 106 | self._depth_frame_data = None 107 | self._body_frame_data = None 108 | self._body_index_frame_data = None 109 | self._infrared_frame_data = None 110 | self._long_exposure_infrared_frame_data = None 111 | self._audio_frame_data = None 112 | 113 | if(self.frame_source_types & FrameSourceTypes_Color): 114 | self._color_frame_data = ctypes.POINTER(ctypes.c_ubyte) 115 | self._color_frame_data_capacity = ctypes.c_uint(self.color_frame_desc.Width * self.color_frame_desc.Height * 4) 116 | self._color_frame_data_type = ctypes.c_ubyte * self._color_frame_data_capacity.value 117 | self._color_frame_data = ctypes.cast(self._color_frame_data_type(), ctypes.POINTER(ctypes.c_ubyte)) 118 | self._color_frame_reader = self._color_source.OpenReader() 119 | self._color_frame_arrived_event = self._color_frame_reader.SubscribeFrameArrived() 120 | self._handles[self._waitHandleCount] = self._color_frame_arrived_event 121 | self._waitHandleCount += 1 122 | 123 | if(self.frame_source_types & FrameSourceTypes_Depth): 124 | self._depth_frame_data = ctypes.POINTER(ctypes.c_ushort) 125 | self._depth_frame_data_capacity = ctypes.c_uint(self.depth_frame_desc.Width * self.depth_frame_desc.Height) 126 | self._depth_frame_data_type = ctypes.c_ushort * self._depth_frame_data_capacity.value 127 | self._depth_frame_data = ctypes.cast(self._depth_frame_data_type(), ctypes.POINTER(ctypes.c_ushort)) 128 | self._depth_frame_reader = self._depth_source.OpenReader() 129 | self._depth_frame_arrived_event = self._depth_frame_reader.SubscribeFrameArrived() 130 | self._handles[self._waitHandleCount] = self._depth_frame_arrived_event 131 | self._waitHandleCount += 1 132 | 133 | #Changed:{ 134 | if(self.frame_source_types & FrameSourceTypes_Infrared): 135 | self._infrared_frame_data = ctypes.POINTER(ctypes.c_ushort) 136 | self._infrared_frame_data_capacity = ctypes.c_uint(self.infrared_frame_desc.Width * self.infrared_frame_desc.Height) 137 | self._infrared_frame_data_type = ctypes.c_ushort * self._infrared_frame_data_capacity.value 138 | self._infrared_frame_data = ctypes.cast(self._infrared_frame_data_type(), ctypes.POINTER(ctypes.c_ushort)) 139 | self._infrared_frame_reader = 
self._infrared_source.OpenReader() 140 | self._infrared_frame_arrived_event = self._infrared_frame_reader.SubscribeFrameArrived() 141 | self._handles[self._waitHandleCount] = self._infrared_frame_arrived_event 142 | self._waitHandleCount += 1 143 | #Changed:} 144 | 145 | #Changed:{ 146 | if(self.frame_source_types & FrameSourceTypes_LongExposureInfrared): 147 | self._long_exposure_infrared_frame_data = ctypes.POINTER(ctypes.c_ushort) 148 | self._long_exposure_infrared_frame_data_capacity = ctypes.c_uint(self.infrared_frame_desc.Width * self.infrared_frame_desc.Height) 149 | self._long_exposure_infrared_frame_data_type = ctypes.c_ushort * self._long_exposure_infrared_frame_data_capacity.value 150 | self._long_exposure_infrared_frame_data = ctypes.cast(self._long_exposure_infrared_frame_data_type(), ctypes.POINTER(ctypes.c_ushort)) 151 | self._long_exposure_infrared_frame_reader = self._long_exposure_infrared_source.OpenReader() 152 | self._long_exposure_infrared_frame_arrived_event = self._long_exposure_infrared_frame_reader.SubscribeFrameArrived() 153 | self._handles[self._waitHandleCount] = self._long_exposure_infrared_frame_arrived_event 154 | self._waitHandleCount += 1 155 | #Changed:} 156 | 157 | if(self.frame_source_types & FrameSourceTypes_BodyIndex): 158 | self._body_index_frame_data = ctypes.POINTER(ctypes.c_ubyte) 159 | self._body_index_frame_data_capacity = ctypes.c_uint(self.body_index_frame_desc.Width * self.body_index_frame_desc.Height) 160 | self._body_index_frame_data_type = ctypes.c_ubyte * self._body_index_frame_data_capacity.value 161 | self._body_index_frame_data = ctypes.cast(self._body_index_frame_data_type(), ctypes.POINTER(ctypes.c_ubyte)) 162 | self._body_index_frame_reader = self._body_index_source.OpenReader() 163 | self._body_index_frame_arrived_event = self._body_index_frame_reader.SubscribeFrameArrived() 164 | self._handles[self._waitHandleCount] = self._body_index_frame_arrived_event 165 | self._waitHandleCount += 1 166 | 167 | self._body_frame_data = None 168 | if(self.frame_source_types & FrameSourceTypes_Body): 169 | self._body_frame_data_capacity = ctypes.c_uint(self.max_body_count) 170 | self._body_frame_data_type = ctypes.POINTER(IBody) * self._body_frame_data_capacity.value 171 | self._body_frame_data = ctypes.cast(self._body_frame_data_type(), ctypes.POINTER(ctypes.POINTER(IBody))) 172 | self._body_frame_reader = self._body_source.OpenReader() 173 | self._body_frame_arrived_event = self._body_frame_reader.SubscribeFrameArrived() 174 | self._body_frame_bodies = None 175 | self._handles[self._waitHandleCount] = self._body_frame_arrived_event 176 | self._waitHandleCount += 1 177 | 178 | thread.start_new_thread(self.kinect_frame_thread, ()) 179 | 180 | self._last_color_frame = None 181 | self._last_depth_frame = None 182 | self._last_body_frame = None 183 | self._last_body_index_frame = None 184 | self._last_infrared_frame = None 185 | self._last_long_exposure_infrared_frame = None 186 | self._last_audio_frame = None 187 | 188 | start_clock = time.clock() 189 | self._last_color_frame_access = self._last_color_frame_time = start_clock 190 | self._last_body_frame_access = self._last_body_frame_time = start_clock 191 | self._last_body_index_frame_access = self._last_body_index_frame_time = start_clock 192 | self._last_depth_frame_access = self._last_depth_frame_time = start_clock 193 | self._last_infrared_frame_access = self._last_infrared_frame_time = start_clock 194 | self._last_long_exposure_infrared_frame_access = 
self._last_long_exposure_infrared_frame_time = start_clock 195 | self._last_audio_frame_access = self._last_audio_frame_time = start_clock 196 | 197 | def close(self): 198 | if self._sensor is not None: 199 | ctypes.windll.kernel32.SetEvent(self._close_event) 200 | ctypes.windll.kernel32.CloseHandle(self._close_event) 201 | 202 | self._color_frame_reader = None 203 | self._depth_frame_reader = None 204 | self._body_index_frame_reader = None 205 | self._body_frame_reader = None 206 | self._infrared_frame_reader = None 207 | 208 | self._color_source = None 209 | self._depth_source = None 210 | self._body_index_source = None 211 | self._body_source = None 212 | self._infrared_source = None 213 | 214 | self._body_frame_data = None 215 | 216 | self._sensor.Close() 217 | self._sensor = None 218 | 219 | def __del__(self): 220 | self.close() 221 | 222 | def __enter__(self): 223 | return self 224 | 225 | def __exit__(self, *args): 226 | self.close() 227 | 228 | def surface_as_array(self, surface_buffer_interface): 229 | address = ctypes.c_void_p() 230 | size = self.Py_ssize_t() 231 | self._PyObject_AsWriteBuffer(surface_buffer_interface, 232 | ctypes.byref(address), ctypes.byref(size)) 233 | bytes = (ctypes.c_byte * size.value).from_address(address.value) 234 | bytes.object = surface_buffer_interface 235 | return bytes 236 | 237 | def has_new_color_frame(self): 238 | has = (self._last_color_frame_time > self._last_color_frame_access) 239 | return has 240 | 241 | def has_new_depth_frame(self): 242 | has = (self._last_depth_frame_time > self._last_depth_frame_access) 243 | return has 244 | 245 | def has_new_body_frame(self): 246 | has = (self._last_body_frame_time > self._last_body_frame_access) 247 | return has 248 | 249 | def has_new_body_index_frame(self): 250 | has = (self._last_body_index_frame_time > self._last_body_index_frame_access) 251 | return has 252 | 253 | def has_new_infrared_frame(self): 254 | has = (self._last_infrared_frame_time > self._last_infrared_frame_access) 255 | return has 256 | 257 | def has_new_long_exposure_infrared_frame(self): 258 | has = (self._last_long_exposure_infrared_frame_time > self._last_long_exposure_infrared_frame_access) 259 | return has 260 | 261 | def has_new_audio_frame(self): 262 | has = (self._last_audio_frame_time > self._last_audio_frame_access) 263 | return has 264 | 265 | 266 | def get_last_color_frame(self): 267 | with self._color_frame_lock: 268 | if self._color_frame_data is not None: 269 | data = numpy.copy(numpy.ctypeslib.as_array(self._color_frame_data, shape=(self._color_frame_data_capacity.value,))) 270 | _last_color_frame_access = time.clock() 271 | return data 272 | else: 273 | return None 274 | 275 | def get_last_depth_frame(self): 276 | with self._depth_frame_lock: 277 | if self._depth_frame_data is not None: 278 | data = numpy.copy(numpy.ctypeslib.as_array(self._depth_frame_data, shape=(self._depth_frame_data_capacity.value,))) 279 | _last_color_frame_access = time.clock() 280 | return data 281 | else: 282 | return None 283 | 284 | #Changed:{ 285 | def get_last_infrared_frame(self): 286 | with self._infrared_frame_lock: 287 | if self._infrared_frame_data is not None: 288 | data = numpy.copy(numpy.ctypeslib.as_array(self._infrared_frame_data, shape=(self._infrared_frame_data_capacity.value,))) 289 | _last_infrared_frame_access = time.clock() 290 | return data 291 | else: 292 | return None 293 | #Changed:} 294 | 295 | #Changed:{ 296 | def get_last_long_exposure_infrared_frame(self): 297 | with self._long_exposure_infrared_frame_lock: 298 
| if self._long_exposure_infrared_frame_data is not None: 299 | data = numpy.copy(numpy.ctypeslib.as_array(self._long_exposure_infrared_frame_data, shape=(self._long_exposure_infrared_frame_data_capacity.value,))) 300 | _last_long_exposure_infrared_frame_access = time.clock() 301 | return data 302 | else: 303 | return None 304 | #Changed:} 305 | 306 | def get_last_body_index_frame(self): 307 | with self._body_index_frame_lock: 308 | if self._body_index_frame_data is not None: 309 | data = numpy.copy(numpy.ctypeslib.as_array(self._body_index_frame_data, shape=(self._body_index_frame_data_capacity.value,))) 310 | _last_color_frame_access = time.clock() 311 | return data 312 | else: 313 | return None 314 | 315 | def get_last_body_frame(self): 316 | with self._body_frame_lock: 317 | if self._body_frame_bodies is not None: 318 | _last_body_frame_access = time.clock() 319 | return self._body_frame_bodies.copy() 320 | else: 321 | return None 322 | 323 | 324 | def body_joint_to_color_space(self, joint): 325 | return self._mapper.MapCameraPointToColorSpace(joint.Position) 326 | 327 | 328 | def body_joints_to_color_space(self, joints): 329 | joint_points = numpy.ndarray((PyKinectV2.JointType_Count), dtype=numpy.object) 330 | 331 | for j in range(0, PyKinectV2.JointType_Count): 332 | joint_points[j] = self.body_joint_to_color_space(joints[j]) 333 | 334 | return joint_points 335 | 336 | #Changed:{ 337 | def body_joint_to_depth_space(self,joint): 338 | return self._mapper.MapCameraPointToDepthSpace(joint.Position) 339 | 340 | def body_joints_to_depth_space(self, joints): 341 | joint_points = numpy.ndarray((PyKinectV2.JointType_Count), dtype=numpy.object) 342 | 343 | for j in range(0, PyKinectV2.JointType_Count): 344 | joint_points[j] = self.body_joint_to_depth_space(joints[j]) 345 | 346 | return joint_points 347 | #Changed:} 348 | 349 | def kinect_frame_thread(self): 350 | while 1: 351 | wait = ctypes.windll.kernel32.WaitForMultipleObjects(self._waitHandleCount, self._handles, False, PyKinectV2._INFINITE) 352 | 353 | if wait == 0: 354 | break 355 | 356 | if self._handles[wait] == self._color_frame_arrived_event: 357 | self.handle_color_arrived(wait) 358 | elif self._handles[wait] == self._depth_frame_arrived_event: 359 | self.handle_depth_arrived(wait) 360 | elif self._handles[wait] == self._body_frame_arrived_event: 361 | self.handle_body_arrived(wait) 362 | elif self._handles[wait] == self._body_index_frame_arrived_event: 363 | self.handle_body_index_arrived(wait) 364 | elif self._handles[wait] == self._infrared_frame_arrived_event: 365 | self.handle_infrared_arrived(wait) 366 | elif self._handles[wait] == self._long_exposure_infrared_frame_arrived_event: 367 | self.handle_long_exposure_infrared_arrived(wait) 368 | elif self._handles[wait] == self._audio_frame_arrived_event: 369 | self.handle_audio_arrived(wait) 370 | else: 371 | break 372 | 373 | 374 | def handle_color_arrived(self, handle_index): 375 | colorFrameEventData = self._color_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 376 | colorFrameRef = colorFrameEventData.FrameReference 377 | try: 378 | colorFrame = colorFrameRef.AcquireFrame() 379 | try: 380 | with self._color_frame_lock: 381 | colorFrame.CopyConvertedFrameDataToArray(self._color_frame_data_capacity, self._color_frame_data, PyKinectV2.ColorImageFormat_Bgra) 382 | self._last_color_frame_time = time.clock() 383 | except: 384 | pass 385 | colorFrame = None 386 | except: 387 | pass 388 | colorFrameRef = None 389 | colorFrameEventData = None 390 | 391 | 392 | def 
handle_depth_arrived(self, handle_index): 393 | depthFrameEventData = self._depth_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 394 | depthFrameRef = depthFrameEventData.FrameReference 395 | try: 396 | depthFrame = depthFrameRef.AcquireFrame() 397 | try: 398 | with self._depth_frame_lock: 399 | depthFrame.CopyFrameDataToArray(self._depth_frame_data_capacity, self._depth_frame_data) 400 | self._last_depth_frame_time = time.clock() 401 | except: 402 | pass 403 | depthFrame = None 404 | except: 405 | pass 406 | depthFrameRef = None 407 | depthFrameEventData = None 408 | 409 | 410 | def handle_body_arrived(self, handle_index): 411 | bodyFrameEventData = self._body_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 412 | bofyFrameRef = bodyFrameEventData.FrameReference 413 | try: 414 | bodyFrame = bofyFrameRef.AcquireFrame() 415 | 416 | try: 417 | with self._body_frame_lock: 418 | bodyFrame.GetAndRefreshBodyData(self._body_frame_data_capacity, self._body_frame_data) 419 | self._body_frame_bodies = KinectBodyFrameData(bodyFrame, self._body_frame_data, self.max_body_count) 420 | self._last_body_frame_time = time.clock() 421 | 422 | # need these 2 lines as a workaround for handling IBody referencing exception 423 | self._body_frame_data = None 424 | self._body_frame_data = ctypes.cast(self._body_frame_data_type(), ctypes.POINTER(ctypes.POINTER(IBody))) 425 | 426 | except: 427 | pass 428 | 429 | bodyFrame = None 430 | except: 431 | pass 432 | bofyFrameRef = None 433 | bodyFrameEventData = None 434 | 435 | 436 | def handle_body_index_arrived(self, handle_index): 437 | bodyIndexFrameEventData = self._body_index_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 438 | bodyIndexFrameRef = bodyIndexFrameEventData.FrameReference 439 | try: 440 | bodyIndexFrame = bodyIndexFrameRef.AcquireFrame() 441 | try: 442 | with self._body_index_frame_lock: 443 | bodyIndexFrame.CopyFrameDataToArray(self._body_index_frame_data_capacity, self._body_index_frame_data) 444 | self._last_body_index_frame_time = time.clock() 445 | except: 446 | pass 447 | bodyIndexFrame = None 448 | except: 449 | pass 450 | bodyIndexFrame = None 451 | bodyIndexFrameEventData = None 452 | 453 | 454 | #Changed:{ 455 | def handle_infrared_arrived(self, handle_index): 456 | infraredFrameEventData = self._infrared_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 457 | infraredFrameRef = infraredFrameEventData.FrameReference 458 | try: 459 | infraredFrame = infraredFrameRef.AcquireFrame() 460 | try: 461 | with self._infrared_frame_lock: 462 | infraredFrame.CopyFrameDataToArray(self._infrared_frame_data_capacity, self._infrared_frame_data) 463 | self._last_infrared_frame_time = time.clock() 464 | except: 465 | pass 466 | infraredFrame = None 467 | except: 468 | pass 469 | infraredFrameRef = None 470 | infraredFrameEventData = None 471 | #Changed:} 472 | 473 | #Changed:{ 474 | def handle_long_exposure_infrared_arrived(self, handle_index): 475 | longExposureInfraredFrameEventData = self._long_exposure_infrared_frame_reader.GetFrameArrivedEventData(self._handles[handle_index]) 476 | longExposureInfraredFrameRef =longExposureInfraredFrameEventData.FrameReference 477 | try: 478 | longExposureInfraredFrame =longExposureInfraredFrameRef.AcquireFrame() 479 | try: 480 | with self._long_exposure_infrared_frame_lock: 481 | longExposureInfraredFrame.CopyFrameDataToArray(self._long_exposure_infrared_frame_data_capacity, self._long_exposure_infrared_frame_data) 482 | 
self._last_long_exposure_infrared_frame_time = time.clock() 483 | except: 484 | pass 485 | longExposureInfraredFrame = None 486 | except: 487 | pass 488 | longExposureInfraredFrameRef = None 489 | longExposureInfraredFrameEventData = None 490 | #Changed:} 491 | 492 | def handle_audio_arrived(self, handle_index): 493 | pass 494 | 495 | 496 | 497 | class KinectBody(object): 498 | def __init__(self, body = None): 499 | self.is_restricted = False 500 | self.tracking_id = -1 501 | 502 | self.is_tracked = False 503 | 504 | if body is not None: 505 | self.is_tracked = body.IsTracked 506 | 507 | if self.is_tracked: 508 | self.is_restricted = body.IsRestricted 509 | self.tracking_id = body.TrackingId 510 | self.engaged = body.Engaged 511 | self.lean = body.Lean 512 | self.lean_tracking_state = body.LeanTrackingState 513 | self.hand_left_state = body.HandLeftState 514 | self.hand_left_confidence = body.HandLeftConfidence 515 | self.hand_right_state = body.HandRightState 516 | self.hand_right_confidence = body.HandRightConfidence 517 | self.clipped_edges = body.ClippedEdges 518 | 519 | joints = ctypes.POINTER(PyKinectV2._Joint) 520 | joints_capacity = ctypes.c_uint(PyKinectV2.JointType_Count) 521 | joints_data_type = PyKinectV2._Joint * joints_capacity.value 522 | joints = ctypes.cast(joints_data_type(), ctypes.POINTER(PyKinectV2._Joint)) 523 | body.GetJoints(PyKinectV2.JointType_Count, joints) 524 | self.joints = joints 525 | 526 | joint_orientations = ctypes.POINTER(PyKinectV2._JointOrientation) 527 | joint_orientations_data_type = PyKinectV2._JointOrientation * joints_capacity.value 528 | joint_orientations = ctypes.cast(joint_orientations_data_type(), ctypes.POINTER(PyKinectV2._JointOrientation)) 529 | body.GetJointOrientations(PyKinectV2.JointType_Count, joint_orientations) 530 | self.joint_orientations = joint_orientations 531 | 532 | 533 | class KinectBodyFrameData(object): 534 | def __init__(self, bodyFrame, body_frame_data, max_body_count): 535 | self.bodies = None 536 | self.floor_clip_plane = None 537 | if bodyFrame is not None: 538 | self.floor_clip_plane = bodyFrame.FloorClipPlane 539 | 540 | self.bodies = numpy.ndarray((max_body_count), dtype=numpy.object) 541 | for i in range(0, max_body_count): 542 | self.bodies[i] = KinectBody(body_frame_data[i]) 543 | 544 | def copy(self): 545 | res = KinectBodyFrameData(None, None, 0) 546 | res.floor_clip_plane = self.floor_clip_plane 547 | res.bodies = numpy.copy(self.bodies) 548 | return res 549 | 550 | 551 | -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.2/PyKinectRuntime.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Drivers/pykinect2_v1.0.2/PyKinectRuntime.pyc -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.2/PyKinectV2.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Drivers/pykinect2_v1.0.2/PyKinectV2.pyc -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Drivers/pykinect2_v1.0.2/__init__.py 
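The #Changed blocks in both driver revisions add the depth-space mapping path (body_joint_to_depth_space wraps ICoordinateMapper.MapCameraPointToDepthSpace), which is what the hand-segmentation code in this repo relies on to locate the hands in the depth image. A sketch of turning a tracked body into safe pixel indices for the 512x424 depth frame -- hand_pixels and its clamping policy are illustrative, not part of the driver:

from pykinect2 import PyKinectV2

def hand_pixels(kinect, body):
    # Map all camera-space joints into depth-frame coordinates.
    joint_points = kinect.body_joints_to_depth_space(body.joints)

    def clamp(point):
        # Joints can map slightly off-frame (or to -inf while untracked),
        # so clamp the floats before converting to indices.
        x = int(min(max(point.x, 0), 511))  # depth frame is 512 wide
        y = int(min(max(point.y, 0), 423))  # and 424 tall
        return x, y

    return (clamp(joint_points[PyKinectV2.JointType_HandRight]),
            clamp(joint_points[PyKinectV2.JointType_HandLeft]))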
-------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.2/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Drivers/pykinect2_v1.0.2/__init__.pyc -------------------------------------------------------------------------------- /Images/hand.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Images/hand.bmp -------------------------------------------------------------------------------- /Images/hand.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Images/hand.jpg -------------------------------------------------------------------------------- /Images/hand1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Images/hand1.jpg -------------------------------------------------------------------------------- /Images/hand2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Images/hand2.png -------------------------------------------------------------------------------- /Images/openhand.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Images/openhand.png -------------------------------------------------------------------------------- /Images/pointer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Images/pointer.png -------------------------------------------------------------------------------- /Images/right_hand_filtered.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Images/right_hand_filtered.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Things To-Do # 2 | - Skeleton map 3 | - Feature extraction 4 | - Gesture classification 5 | - Augmented reality engine 6 | - Modularize everything (API) 7 | - Finish everything up to gesture classification by 1-2-2016 8 | 9 | ### What is this repository for? ### 10 | 11 | * Quick summary 12 | 13 | * Version: 1.0.0 14 | 15 | ### How do I get set up? ### 16 | 17 | * Dependencies: PyKinect2, NumPy, OpenCV 18 | * Configuration: 19 | It's simple: clone the repository, cd into the directory and run the main_*.py of your choice with python in cmd/terminal. (Do SSPY :P) 20 | 21 | ### Contribution guidelines ### 22 | We appreciate contributions in the following forms: 23 | 24 | - Writing tests 25 | - Code review 26 | 27 | If you want to become one of us, you are always welcome. Contact us by any means you find comfortable. 28 | 29 | ### Who do I talk to? ### 30 | * Feel free to contact any of the admins (A, A or A) at their corresponding email IDs.
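Before launching any of the mains, a quick dependency sanity check can save a confusing traceback -- a sketch; these are simply the standard import names for the packages listed above:

import numpy
import cv2
from pykinect2 import PyKinectV2, PyKinectRuntime  # needs the Kinect v2 SDK/driver installed

print('numpy %s / opencv %s / pykinect2 importable' % (numpy.__version__, cv2.__version__))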
-------------------------------------------------------------------------------- /Test Data/data_dump: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Test Data/data_dump -------------------------------------------------------------------------------- /Test/centroid.py: -------------------------------------------------------------------------------- 1 | 2 | import cv2 3 | import numpy as np 4 | 5 | 6 | img=cv2.imread('hand1.jpg',0) 7 | cv2.imshow('image',img) 8 | cv2.waitKey(0) 9 | 10 | ret2,thresh = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 11 | kernel = np.ones((5,5),np.uint8) 12 | thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel) 13 | cv2.imshow('threshold',thresh) 14 | cv2.waitKey(0) 15 | 16 | image,contours,hierarchy = cv2.findContours(thresh, 1, 2) 17 | print len(contours) 18 | thresh = cv2.drawContours(thresh, contours, -1, (0,255,0), 10) 19 | cv2.imshow('centroid',thresh) 20 | cv2.waitKey(0) 21 | # #print type(contours) 22 | # if contours: 23 | 24 | # cnt = contours[1] 25 | # convex_Hull=cv2.convexHull(cnt) 26 | # convexity_Defects = cv2.convexityDefects(cnt,convex_Hull) 27 | # M = cv2.moments(cnt) 28 | # print M 29 | # if M['m00'] != 0: 30 | # print ':' 31 | 32 | # cx = int(M['m10']/M['m00']) 33 | # print cx 34 | # cy = int(M['m01']/M['m00']) 35 | # print cy 36 | # frame = cv2.circle(img,(cx,cy), 10,(255,0,0),5) 37 | # cv2.imshow('centroid',frame) 38 | # cv2.waitKey(0) -------------------------------------------------------------------------------- /Test/hand.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/3f148183a7bee8d0d9aa910b01d4407b671ed941/Test/hand.bmp -------------------------------------------------------------------------------- /Test/test_blob.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import math 6 | import scipy 7 | import numpy as np 8 | import cv2 9 | import pygame 10 | 11 | 12 | class HandGestureObjectClass(object): 13 | def __init__(self): 14 | 15 | # Kinect runtime object, we want only color and body frames 16 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 17 | 18 | 19 | # here we will store skeleton data 20 | self._bodies = None 21 | 22 | 23 | 24 | 25 | def run(self): 26 | print_frame=None 27 | 28 | print ':IN_RUN:Pulling Frames' 29 | 30 | 31 | 32 | # -------- Main Program Loop ----------- 33 | while (True): 34 | # --- Main event loop 35 | 36 | if self._kinect.has_new_depth_frame() or self._kinect.has_new_body_frame(): 37 | print ':IN_RUN:depth_frame received' 38 | 39 | depth_frame = self._kinect.get_last_depth_frame() 40 | print_frame = 32*depth_frame.reshape(424,512) 41 | 42 | 43 | self._bodies = self._kinect.get_last_body_frame() 44 | 45 | # --- draw skeletons to _frame_surface 46 | if self._bodies is not None: 47 | print ':IN_RUN:body received' 48 | for i in range(0, self._kinect.max_body_count): 49 | body = self._bodies.bodies[i] 50 | if not body.is_tracked: 51 | continue 52 | 53 | joints = body.joints 54 | # convert joint coordinates to color space 55 | joint_points = self._kinect.body_joints_to_depth_space(joints) 56 | print ':' 57 | 
rx=joint_points[PyKinectV2.JointType_HandRight].x 58 | ry=joint_points[PyKinectV2.JointType_HandRight].y 59 | lx=joint_points[PyKinectV2.JointType_HandLeft].x 60 | ly=joint_points[PyKinectV2.JointType_HandLeft].y 61 | rx=math.floor(rx) 62 | ry=math.floor(ry) 63 | lx=math.floor(lx) 64 | ly=math.floor(ly) 65 | print_frame=cv2.circle(print_frame,(int(rx),int(ry)), 10,(255,0,0),5) 66 | print_frame=cv2.circle(print_frame,(int(lx),int(ly)), 10,(255,0,0),5) 67 | ############# 68 | # Read image 69 | im = cv2.imread("hand.bmp", cv2.IMREAD_GRAYSCALE) 70 | #im=print_frame 71 | # Setup SimpleBlobDetector parameters. 72 | params = cv2.SimpleBlobDetector_Params() 73 | # Change thresholds 74 | params.minThreshold = 10 75 | params.maxThreshold = 200 76 | # Filter by Area. 77 | #params.filterByArea = True 78 | #params.minArea = 1500 79 | # Filter by Circularity 80 | #params.filterByCircularity = True 81 | #params.minCircularity = 0.1 82 | # Filter by Convexity 83 | #params.filterByConvexity = True 84 | #params.minConvexity = 0.87 85 | # Filter by Inertia 86 | #params.filterByInertia = True 87 | #params.minInertiaRatio = 0.01 88 | # Create a detector with the parameters 89 | ver = (cv2.__version__).split('.') 90 | if int(ver[0]) < 3 : 91 | detector = cv2.SimpleBlobDetector(params) 92 | else : 93 | detector = cv2.SimpleBlobDetector_create(params) 94 | # Detect blobs. 95 | keypoints = detector.detect(im) 96 | # Draw detected blobs as red circles. 97 | # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures 98 | # the size of the circle corresponds to the size of the blob 99 | im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) 100 | # Show blobs 101 | cv2.imshow("Keypoints", im_with_keypoints) 102 | cv2.waitKey(0) 103 | 104 | 105 | if print_frame is not None: 106 | 107 | cv2.imshow('Depthimage',print_frame) 108 | 109 | if cv2.waitKey(1) & 0xFF == ord('q'): 110 | break 111 | 112 | 113 | # --- Limit to 60 frames per second 114 | 115 | 116 | # Close our Kinect sensor, close the window and quit.
117 | self._kinect.close() 118 | 119 | 120 | 121 | HandGestureObject = HandGestureObjectClass(); 122 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Test/test_infrared.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | 5 | from matplotlib import pyplot as plt 6 | import SimpleCV.ImageClass as scv 7 | 8 | import scipy 9 | import numpy as np 10 | import cv2 11 | 12 | class Kinect_infrared(object): 13 | 14 | def __init__(self): 15 | 16 | # Kinect runtime object, we want only infrared and depth frames 17 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Infrared|PyKinectV2.FrameSourceTypes_Depth) 18 | 19 | def max_hist_depth(self, frame): 20 | #print 'FRAME_MAX = ' + str(frame.max()) 21 | binaries = int(frame.max()) 22 | if binaries <= 0: 23 | return 0 24 | histogram, bins = np.histogram(frame, bins = binaries) 25 | histogram = histogram.tolist(); bins = bins.tolist(); 26 | histogram[0 : 1] = [0, 0] 27 | max_hist = bins[histogram.index( max(histogram) )] 28 | return max_hist 29 | 30 | def run(self): 31 | 32 | 33 | print ':IN_RUN:Pulling Frames' 34 | 35 | while(True): 36 | #Main event loop 37 | if self._kinect.has_new_infrared_frame() or self._kinect.has_new_depth_frame(): 38 | 39 | iframe = self._kinect.get_last_infrared_frame() 40 | iframe *= 1 41 | iframe = iframe.reshape(424,512) 42 | cv2.imshow('Infrared',iframe) 43 | 44 | dframe = self._kinect.get_last_depth_frame() 45 | dframe = dframe.reshape(424,512) 46 | dframe = np.array(dframe/16, dtype = np.uint8) 47 | gaussian_thresh = cv2.adaptiveThreshold(dframe, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV,11,2) 48 | blur = cv2.GaussianBlur(dframe,(5,5),0) 49 | ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 50 | # dframe = cv2.findContours(dframe, mode = cv2.RETR_EXTERNAL, method = cv2.CHAIN_APPROX_NONE) 51 | cv2.imshow('Gaussian_thresh', gaussian_thresh) 52 | cv2.imshow('Gaussian_blur', blur) 53 | cv2.imshow('Gaussian_otsu', th3) 54 | 55 | 56 | 57 | if cv2.waitKey(1) & 0xFF == ord('q'): 58 | break 59 | 60 | # Close our Kinect sensor, close the window and quit.
--------------------------------------------------------------------------------
/Test/test_longExposureInfrared.py:
--------------------------------------------------------------------------------
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime

from matplotlib import pyplot as plt

import scipy
import numpy as np
import cv2

class Kinect_LongExposureInfrared(object):

    def __init__(self):

        # Kinect runtime object, we want only long-exposure infrared frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_LongExposureInfrared)

    def run(self):
        print(':IN_RUN:Pulling Frames')

        while (True):
            # Main event loop
            if self._kinect.has_new_long_exposure_infrared_frame():

                frame = self._kinect.get_last_long_exposure_infrared_frame()
                # frame = np.array(frame, dtype=np.uint8)
                frame = frame.reshape(424, 512)
                cv2.imshow('Long_Exposure_Infrared', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # Close our Kinect sensor, close the window and quit.
        self._kinect.close()


HandGestureObject = Kinect_LongExposureInfrared()
HandGestureObject.run()
--------------------------------------------------------------------------------
/Test/testiter.py:
--------------------------------------------------------------------------------
import numpy as np

# zero out every element of a 5x5 array that lies outside a
# (2*d+1)-wide window centred on c, using an explicit nditer loop
a = np.arange(25).reshape(5, 5)
d = 0
c = [2, 3]
print(a)
it = np.nditer(a, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
    p = it.multi_index
    if (p[0] > c[0] + d or p[0] < c[0] - d or
            p[1] > c[1] + d or p[1] < c[1] - d):
        it[0] = 0
    # print("<%s %s>" % (it[0], it.multi_index))
    it.iternext()
print(a)
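testiter.py walks the array element by element with np.nditer. The same windowing is usually written with slicing, which avoids the explicit loop entirely; a sketch under the same a, c and d values (it builds a new array rather than modifying a in place):

# Vectorized version of the testiter.py windowing.
import numpy as np

a = np.arange(25).reshape(5, 5)
c, d = [2, 3], 0

out = np.zeros_like(a)
# Keep only the (2*d+1)-wide window centred on c; clip so the slice stays in bounds.
r0, r1 = max(c[0] - d, 0), min(c[0] + d + 1, a.shape[0])
s0, s1 = max(c[1] - d, 0), min(c[1] + d + 1, a.shape[1])
out[r0:r1, s0:s1] = a[r0:r1, s0:s1]
print(out)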
--------------------------------------------------------------------------------
/countors.txt:
--------------------------------------------------------------------------------
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from matplotlib import pyplot as plt
import math
import scipy
import numpy as np
import cv2
import pygame


# colors for drawing different bodies
SKELETON_COLORS = [pygame.color.THECOLORS["red"],
                   pygame.color.THECOLORS["blue"],
                   pygame.color.THECOLORS["green"],
                   pygame.color.THECOLORS["orange"],
                   pygame.color.THECOLORS["purple"],
                   pygame.color.THECOLORS["yellow"],
                   pygame.color.THECOLORS["violet"]]


class HandGestureObjectClass(object):
    def __init__(self):

        # Kinect runtime object, we want only depth and body frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body)

        # here we will store skeleton data
        self._bodies = None

    def subimage(self, image, centre, theta, width, height):
        # crop a rotated patch: rotate about the patch centre, then take the
        # axis-aligned sub-rectangle (cv2 replacement for the legacy
        # cv.GetQuadrangleSubPix call; theta in degrees, as cv2.minAreaRect returns it)
        mapping = cv2.getRotationMatrix2D(centre, theta, 1.0)
        rotated = cv2.warpAffine(image, mapping, (image.shape[1], image.shape[0]))
        return cv2.getRectSubPix(rotated, (int(width), int(height)), centre)

    def run(self):
        print_frame = None

        # -------- Main Program Loop -----------
        while (True):

            # --- Main event loop

            if self._kinect.has_new_depth_frame() or self._kinect.has_new_body_frame():
                # print(':IN_RUN:depth_frame received')

                depth_frame = self._kinect.get_last_depth_frame()
                print_frame = 32 * depth_frame.reshape(424, 512)

                self._bodies = self._kinect.get_last_body_frame()

                # --- draw skeletons to _frame_surface
                if self._bodies is not None:
                    # print(':IN_RUN:body received')
                    for i in range(0, self._kinect.max_body_count):
                        body = self._bodies.bodies[i]
                        if not body.is_tracked:
                            continue

                        joints = body.joints
                        # convert joint coordinates to depth space
                        joint_points = self._kinect.body_joints_to_depth_space(joints)
                        rx = math.floor(joint_points[PyKinectV2.JointType_HandRight].x)
                        ry = math.floor(joint_points[PyKinectV2.JointType_HandRight].y)
                        lx = math.floor(joint_points[PyKinectV2.JointType_HandLeft].x)
                        ly = math.floor(joint_points[PyKinectV2.JointType_HandLeft].y)
                        print_frame = cv2.circle(print_frame, (int(rx), int(ry)), 10, (255, 0, 0), 5)
                        print_frame = cv2.circle(print_frame, (int(lx), int(ly)), 10, (255, 0, 0), 5)

                        figure = cv2.imread('last1.png')
                        if figure is None:
                            continue
                        imgray1 = cv2.cvtColor(figure, cv2.COLOR_BGR2GRAY)
                        ret1, thresh1 = cv2.threshold(imgray1, 127, 255, 0)
                        im2, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                        if len(contours) < 2:
                            continue
                        cnt = contours[1]
                        rect = cv2.minAreaRect(cnt)     # ((cx, cy), (w, h), angle)
                        angle = rect[2]
                        width, height = rect[1]
                        box = cv2.boxPoints(rect)
                        box = box.astype(np.int32)
                        figure1 = cv2.drawContours(figure, [box], 0, (0, 0, 255), 2)
                        figure2 = cv2.drawContours(figure, [cnt], 0, (0, 255, 0), 1)
                        cv2.imshow('figure1', figure1)
                        cv2.imshow('figure2', figure2)
                        # patch = self.subimage(figure, rect[0], angle, width, height)
                        # cv2.imwrite('patch.jpg', patch)
                        # rotate the image so the min-area box becomes axis-aligned
                        M = cv2.getRotationMatrix2D(rect[0], angle, 1)
                        dst = cv2.warpAffine(figure, M, (424, 512))
                        cv2.imshow('figure3', dst)

            # if print_frame is not None:
            #     cv2.imshow('Depthimage', print_frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # --- Limit to 60 frames per second

        # Close our Kinect sensor, close the window and quit.
        self._kinect.close()


HandGestureObject = HandGestureObjectClass()
HandGestureObject.run()
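countors.txt orients a detected contour by rotating the whole image with the angle from cv2.minAreaRect. A self-contained sketch of that deskew step, assuming a synthetic tilted blob in place of last1.png (note the sign convention of the minAreaRect angle changed around OpenCV 4.5, so the rotation direction may differ between versions):

# Deskewing a contour with minAreaRect on a synthetic input.
import cv2
import numpy as np

img = np.zeros((200, 200), dtype=np.uint8)
cv2.ellipse(img, (100, 100), (60, 25), 30, 0, 360, 255, -1)   # tilted blob

contours = cv2.findContours(img, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]       # works on OpenCV 3 and 4
rect = cv2.minAreaRect(contours[0])         # ((cx, cy), (w, h), angle)
centre, (w, h), angle = rect

# Rotate the whole image so the box becomes axis-aligned, then crop the patch out.
M = cv2.getRotationMatrix2D(centre, angle, 1.0)
upright = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
patch = cv2.getRectSubPix(upright, (int(w), int(h)), centre)
cv2.imshow('patch', patch)
cv2.waitKey(0)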
--------------------------------------------------------------------------------
/countors_defects.txt:
--------------------------------------------------------------------------------
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from matplotlib import pyplot as plt
import scipy
import numpy as np
import cv2

class HandGestureObjectClass(object):
    def __init__(self):

        # Kinect runtime object, we want only depth and body frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body)

        # here we will store skeleton data
        self._bodies = None

    def neighbourhood(self, array, radius, seed):
        # Otsu-threshold the square window of side 2*radius centred on seed
        temp = np.array(array[seed[1] - radius:seed[1] + radius, seed[0] - radius:seed[0] + radius], dtype=np.uint8)
        ret, temp = cv2.threshold(temp, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        return temp

    def merge(self, array_big, array_small, seed):
        # paste the window back into a full-size frame at the seed position
        [a, b] = np.shape(array_small)
        array_big[seed[1] - b // 2:seed[1] + b // 2, seed[0] - a // 2:seed[0] + a // 2] = array_small
        return array_big

    def max_hist_depth(self, frame):
        # depth value that occurs most often in the frame (mode of the histogram)
        binaries = int(frame.max())
        if binaries <= 0:
            return 0
        histogram, bins = np.histogram(frame, bins=binaries)
        histogram = histogram.tolist()
        bins = bins.tolist()
        histogram[0:1] = [0, 0]   # ignore the zero (no-reading) bin
        max_hist = bins[histogram.index(max(histogram))]
        return max_hist

    def max_area_contour(self, contours):
        # index of the contour with the largest area
        max_area = 0
        ci = 0
        for i in range(len(contours)):
            area = cv2.contourArea(contours[i])
            if area > max_area:
                max_area = area
                ci = i
        return ci

    def run(self):
        print_frame = None
        depth_frame = None

        # -------- Main Program Loop -----------
        while (True):
            # --- Main event loop

            if self._kinect.has_new_depth_frame():
                print('frame acquired')
                depth_frame = self._kinect.get_last_depth_frame()

                depth_frame = np.array(depth_frame / 16, dtype=np.uint8)
                depth_frame = depth_frame.reshape(424, 512)
                cv2.imshow('lol', depth_frame)

            if self._kinect.has_new_body_frame():
                self._bodies = self._kinect.get_last_body_frame()

            if self._bodies is not None and depth_frame is not None:
                # take the first tracked body, if any
                body = None
                for i in range(0, self._kinect.max_body_count):
                    if self._bodies.bodies[i].is_tracked:
                        body = self._bodies.bodies[i]
                        break
                if body is None:
                    continue

                joints = body.joints

                # convert joint coordinates to depth space
                joint_points = self._kinect.body_joints_to_depth_space(joints)

                right_x = int(joint_points[PyKinectV2.JointType_HandRight].x)
                right_y = int(joint_points[PyKinectV2.JointType_HandRight].y)
                left_x = int(joint_points[PyKinectV2.JointType_HandLeft].x)
                left_y = int(joint_points[PyKinectV2.JointType_HandLeft].y)

                # clamp to the 512x424 depth frame: x is the column (max 511),
                # y is the row (max 423)
                right_x = right_x if right_x < 512 else 511
                right_y = right_y if right_y < 424 else 423
                left_x = left_x if left_x < 512 else 511
                left_y = left_y if left_y < 424 else 423

                # NumPy indexing is [row, column], i.e. [y, x]
                right_hand_depth = depth_frame[right_y, right_x]
                left_hand_depth = depth_frame[left_y, left_x]
                right_hand = [right_x, right_y]
                left_hand = [left_x, left_y]

                d = 50
                if depth_frame is not None:
                    right_hand_filtered = self.neighbourhood(depth_frame, d, right_hand)
                    left_hand_filtered = self.neighbourhood(depth_frame, d, left_hand)
                    neighbour = np.array(depth_frame)
                    neighbour *= 0

                    print_frame = np.zeros(np.shape(depth_frame))

                    if right_hand_filtered is not None:
                        img1, contours1, hierarchy1 = cv2.findContours(right_hand_filtered, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
                        cnt = contours1[self.max_area_contour(contours1)]
                        hull = cv2.convexHull(cnt, returnPoints=False)
                        defects = cv2.convexityDefects(cnt, hull)
                        drawing = np.zeros(right_hand_filtered.shape, np.uint8)
                        drawing = cv2.cvtColor(drawing, cv2.COLOR_GRAY2RGB)
                        # draw each defect: hull edge in green, farthest point in red
                        if defects is not None:
                            for i in range(defects.shape[0]):
                                s, e, f, dd = defects[i, 0]
                                start = tuple(cnt[s][0])
                                end = tuple(cnt[e][0])
                                far = tuple(cnt[f][0])
                                cv2.line(drawing, start, end, [0, 255, 0], 2)
                                cv2.circle(drawing, far, 5, [0, 0, 255], -1)
                        drawing = cv2.drawContours(drawing, [cnt], -1, 150, 1)
                        cv2.imshow('contours1', drawing)
                        right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered, right_hand), depth_frame)
                        ret, right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                        print_frame += right_hand_filtered_depth_frame
                    if left_hand_filtered is not None:
                        left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand), depth_frame)
                        ret, left_hand_filtered_depth_frame = cv2.threshold(left_hand_filtered_depth_frame, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                        print_frame += left_hand_filtered_depth_frame
                    cv2.imshow('Hand Filtered', print_frame)

                if print_frame is not None:
                    dpt = depth_frame

            if depth_frame is not None:
                cv2.imshow('OG', depth_frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # --- Limit to 60 frames per second

        # Close our Kinect sensor, close the window and quit.
        self._kinect.close()


HandGestureObject = HandGestureObjectClass()
HandGestureObject.run()
--------------------------------------------------------------------------------
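countors_defects.txt draws the convexity defects but stops short of interpreting them. A common heuristic is to treat deep, sharp defects as the valleys between fingers; below is a self-contained sketch, assuming a synthetic star shape in place of the segmented hand mask (the angle and depth thresholds are illustrative choices, not values from this repo).

# Counting sharp convexity defects on a synthetic shape.
import math
import cv2
import numpy as np

# 5-pointed star: its 5 inner vertices behave like the valleys between fingers.
mask = np.zeros((300, 300), dtype=np.uint8)
pts = []
for k in range(10):
    r = 130 if k % 2 == 0 else 20
    a = k * math.pi / 5
    pts.append((150 + int(r * math.sin(a)), 150 - int(r * math.cos(a))))
cv2.fillPoly(mask, [np.array(pts, dtype=np.int32)], 255)

contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = max(contours, key=cv2.contourArea)
hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)

valleys = 0
if defects is not None:
    for i in range(defects.shape[0]):
        s, e, f, depth = defects[i, 0]
        start, end, far = cnt[s][0], cnt[e][0], cnt[f][0]
        # A valley between fingers is deep and forms a sharp angle at 'far'.
        a = np.linalg.norm(end - start)
        b = np.linalg.norm(far - start)
        c = np.linalg.norm(end - far)
        angle = math.acos(max(-1.0, min(1.0, (b * b + c * c - a * a) / (2 * b * c))))
        if angle < math.pi / 2 and depth > 2000:   # depth is in 1/256-pixel units
            valleys += 1
# For a real hand mask, a fingertip count of valleys + 1 is the usual reading.
print('sharp defects found: %d' % valleys)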