├── .DS_Store
├── Codes
├── .DS_Store
├── 3D
│ ├── .DS_Store
│ ├── Hello_world_panda.py
│ ├── carousel
│ │ ├── ManualPage.url
│ │ ├── Run main.py.lnk
│ │ ├── env_sky.jpg
│ │ ├── main.py
│ │ └── models
│ │ │ ├── carousel_base.egg.pz
│ │ │ ├── carousel_base.jpg
│ │ │ ├── carousel_lights.egg.pz
│ │ │ ├── carousel_lights_off.jpg
│ │ │ ├── carousel_lights_on.jpg
│ │ │ ├── carousel_panda.egg.pz
│ │ │ ├── carousel_panda.jpg
│ │ │ ├── env.egg.pz
│ │ │ ├── env_ground.jpg
│ │ │ └── env_sky.jpg
│ ├── glow-filter
│ │ ├── advanced.py
│ │ ├── basic.py
│ │ ├── models
│ │ │ ├── tron-color.png
│ │ │ ├── tron-glow.png
│ │ │ ├── tron.egg.pz
│ │ │ └── tron_anim.egg.pz
│ │ └── shaders
│ │ │ ├── XBlurShader.sha
│ │ │ ├── YBlurShader.sha
│ │ │ └── glowShader.sha
│ ├── roaming-ralph
│ │ ├── ManualPage.url
│ │ ├── Run main.py.lnk
│ │ ├── main.py
│ │ └── models
│ │ │ ├── ground.jpg
│ │ │ ├── hedge.jpg
│ │ │ ├── ralph-run.egg.pz
│ │ │ ├── ralph-walk.egg.pz
│ │ │ ├── ralph.egg.pz
│ │ │ ├── ralph.jpg
│ │ │ ├── rock03.jpg
│ │ │ ├── tree.jpg
│ │ │ └── world.egg.pz
│ └── test.py
├── Abhi
│ ├── .DS_Store
│ ├── 3D
│ │ ├── .DS_Store
│ │ ├── Hello_world_panda.py
│ │ ├── carousel
│ │ │ ├── ManualPage.url
│ │ │ ├── Run main.py.lnk
│ │ │ ├── env_sky.jpg
│ │ │ ├── main.py
│ │ │ └── models
│ │ │ │ ├── carousel_base.egg.pz
│ │ │ │ ├── carousel_base.jpg
│ │ │ │ ├── carousel_lights.egg.pz
│ │ │ │ ├── carousel_lights_off.jpg
│ │ │ │ ├── carousel_lights_on.jpg
│ │ │ │ ├── carousel_panda.egg.pz
│ │ │ │ ├── carousel_panda.jpg
│ │ │ │ ├── env.egg.pz
│ │ │ │ ├── env_ground.jpg
│ │ │ │ └── env_sky.jpg
│ │ ├── glow-filter
│ │ │ ├── advanced.py
│ │ │ ├── basic.py
│ │ │ ├── models
│ │ │ │ ├── tron-color.png
│ │ │ │ ├── tron-glow.png
│ │ │ │ ├── tron.egg.pz
│ │ │ │ └── tron_anim.egg.pz
│ │ │ └── shaders
│ │ │ │ ├── XBlurShader.sha
│ │ │ │ ├── YBlurShader.sha
│ │ │ │ └── glowShader.sha
│ │ ├── roaming-ralph
│ │ │ ├── ManualPage.url
│ │ │ ├── Run main.py.lnk
│ │ │ ├── main.py
│ │ │ └── models
│ │ │ │ ├── ground.jpg
│ │ │ │ ├── hedge.jpg
│ │ │ │ ├── ralph-run.egg.pz
│ │ │ │ ├── ralph-walk.egg.pz
│ │ │ │ ├── ralph.egg.pz
│ │ │ │ ├── ralph.jpg
│ │ │ │ ├── rock03.jpg
│ │ │ │ ├── tree.jpg
│ │ │ │ └── world.egg.pz
│ │ └── test.py
│ ├── Hand_segmentation_abhi.py
│ ├── README.doc
│ ├── dump
│ ├── feature
│ ├── feature_closed_hand
│ ├── feature_open_hand
│ ├── hand_body.jpg
│ ├── kinect_body_abhi.py
│ ├── main_abhi.py
│ ├── nn.pkl
│ ├── test.py
│ └── test_ANN.py
├── Anuj
│ ├── countors.txt
│ ├── countors_defects.txt
│ ├── display.py
│ ├── feature_closed_hand
│ ├── feature_open_hand
│ ├── final_app.txt
│ ├── main_anuj.py
│ ├── test.py
│ ├── test_abhi.py
│ ├── train_ip.txt
│ └── train_op.txt
├── Ash
│ ├── clean_hand_filtered_ash.py
│ ├── feature
│ ├── old
│ │ ├── Hand_segmentation_ash.py
│ │ ├── kinect_body_ash.py
│ │ ├── main_ash.py
│ │ ├── main_ash.txt
│ │ ├── research.py
│ │ └── tp
│ └── topViewHand.jpg
├── Finals
│ ├── clean_hand_filtered'.py
│ ├── final_app.py
│ └── nn.pkl
├── Hand_segmentation_abhi.py
├── PyKinectBodyGame.py
├── dump
├── feature
├── feature_closed_hand
├── feature_open_hand
├── hand_body.jpg
├── kinect_body_abhi.py
├── main_abhi.py
├── nn.pkl
├── test.py
└── test_ANN.py
├── Documentation
├── MATLAB_approach
└── gestures
├── Drivers
├── .DS_Store
├── pykinect2_v1.0.1
│ ├── PyKinectRuntime.py
│ ├── PyKinectRuntime.pyc
│ ├── PyKinectV2.py
│ ├── PyKinectV2.pyc
│ ├── __init__.py
│ └── __init__.pyc
└── pykinect2_v1.0.2
│ ├── PyKinectRuntime.py
│ ├── PyKinectRuntime.pyc
│ ├── PyKinectV2.py
│ ├── PyKinectV2.pyc
│ ├── __init__.py
│ └── __init__.pyc
├── Images
├── Test.p
├── hand.bmp
├── hand.jpg
├── hand1.jpg
├── hand2.png
├── openhand.png
├── pointer.png
└── right_hand_filtered.png
├── Papers
├── Hand Tracking Using Detection.pdf
├── PSO_Kinect Hand Tracking.pdf
├── Shape based hand recog.pdf
├── Vision based hand pose extimation Review.pdf
└── cvpr14_handtracking.pdf
├── PyKinectRuntime.py
├── PyKinectV2.py
├── README.md
├── Test Data
└── data_dump
└── Test
├── centroid.py
├── hand.bmp
├── test_blob.py
├── test_infrared.py
├── test_longExposureInfrared.py
└── testiter.py

/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/.DS_Store
--------------------------------------------------------------------------------
/Codes/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/.DS_Store
--------------------------------------------------------------------------------
/Codes/3D/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/.DS_Store
--------------------------------------------------------------------------------
/Codes/3D/Hello_world_panda.py:
--------------------------------------------------------------------------------
 1 | from direct.showbase.ShowBase import ShowBase
 2 | 
 3 | class MyApp(ShowBase):
 4 | 
 5 |     def __init__(self):
 6 |         ShowBase.__init__(self)
 7 | 
 8 |         # Load the environment model.
 9 |         self.scene = self.loader.loadModel("environment")
10 |         # Reparent the model to render.
11 |         self.scene.reparentTo(self.render)
12 |         # Apply scale and position transforms on the model.
13 | self.scene.setScale(0.25, 0.25, 0.25) 14 | self.scene.setPos(-8, 42, 0) 15 | 16 | 17 | app = MyApp() 18 | app.run() -------------------------------------------------------------------------------- /Codes/3D/carousel/ManualPage.url: -------------------------------------------------------------------------------- 1 | [InternetShortcut] 2 | URL=http://panda3d.org/wiki/index.php/Sample_Programs:_carousel 3 | -------------------------------------------------------------------------------- /Codes/3D/carousel/Run main.py.lnk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/Run main.py.lnk -------------------------------------------------------------------------------- /Codes/3D/carousel/env_sky.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/env_sky.jpg -------------------------------------------------------------------------------- /Codes/3D/carousel/main.py: -------------------------------------------------------------------------------- 1 | from direct.showbase.ShowBase import ShowBase 2 | from panda3d.core import AmbientLight, DirectionalLight, LightAttrib 3 | from panda3d.core import NodePath 4 | from panda3d.core import LVector3 5 | from direct.interval.IntervalGlobal import * # Needed to use Intervals 6 | from direct.gui.DirectGui import * 7 | from direct.gui.OnscreenImage import OnscreenImage 8 | from panda3d.core import TransparencyAttrib 9 | # Kinect Libraries 10 | from pykinect2 import PyKinectV2 11 | from pykinect2.PyKinectV2 import * 12 | from pykinect2 import PyKinectRuntime 13 | 14 | import sys 15 | import os 16 | import numpy as np 17 | import cv2 18 | 19 | # Importing math constants and functions 20 | from math import pi, sin, cos 21 | 22 | 23 | class CarouselDemo(ShowBase): 24 | 25 | def __init__(self): 26 | # Initialize the ShowBase class from which we inherit, which will 27 | # create a window and set up everything we need for rendering into it. 
28 | ShowBase.__init__(self) 29 | 30 | base.disableMouse() # Allow manual positioning of the camera 31 | camera.setPos(0, -10, 1) # Set the cameras' position 32 | camera.setHpr(0, 0, 0) # and orientation 33 | 34 | self.keyMap = { 35 | "left": 0, "right": 0, "up": 0, "down": 0} 36 | 37 | taskMgr.add(self.startCarousel, "moveTask") 38 | 39 | imageObject = OnscreenImage(image = 'env_sky.jpg', pos = (-10,0,-10)) 40 | imageObject.setImage('env_sky.jpg') 41 | imageObject.setTransparency(TransparencyAttrib.MAlpha) 42 | 43 | self.loadModels() # Load and position our models 44 | self.setupLights() # Add some basic lighting 45 | 46 | 47 | self.accept("escape", sys.exit) 48 | self.accept("arrow_left", self.setKey, ["left", True]) 49 | self.accept("arrow_right", self.setKey, ["right", True]) 50 | self.accept("arrow_up", self.setKey, ["up", True]) 51 | self.accept("arrow_down", self.setKey, ["down", True]) 52 | self.accept("arrow_left-up", self.setKey, ["left", False]) 53 | self.accept("arrow_right-up", self.setKey, ["right", False]) 54 | self.accept("arrow_up-up", self.setKey, ["up", False]) 55 | self.accept("arrow_down-up", self.setKey, ["down", False]) 56 | 57 | 58 | def setKey(self, key, value): 59 | self.keyMap[key] = value 60 | 61 | def loadModels(self): 62 | # Load the carousel base 63 | self.carousel = loader.loadModel("models/carousel_base") 64 | self.carousel.reparentTo(render) # Attach it to render 65 | 66 | 67 | def setupLights(self): 68 | ambientLight = AmbientLight("ambientLight") 69 | ambientLight.setColor((1,1,1, 1)) 70 | render.setLight(render.attachNewNode(ambientLight)) 71 | 72 | def startCarousel(self,task): 73 | h = self.carousel.getH() 74 | p = self.carousel.getP() 75 | 76 | if self.keyMap["left"]: 77 | self.carouselSpin = self.carousel.setH(h+1) 78 | if self.keyMap["right"]: 79 | self.carouselSpin = self.carousel.setH(h-1) 80 | if self.keyMap["up"]: 81 | self.carouselSpin = self.carousel.setP(p-1) 82 | if self.keyMap["down"]: 83 | self.carouselSpin = self.carousel.setP(p+1) 84 | 85 | return task.cont 86 | 87 | 88 | def startCarousels(self,task): 89 | if self.keyMap["left"]: 90 | angleDegrees = task.time * 100.0 91 | angleRadians = angleDegrees * (pi / 180.0) 92 | self.camera.setPos(10 * sin(angleRadians), -10.0 * cos(angleRadians), 2) 93 | self.camera.setHpr(angleDegrees, 0, 0) 94 | 95 | if self.keyMap["right"]: 96 | angleDegrees = task.time * 10.0 97 | angleRadians = angleDegrees * (pi / 180.0) 98 | self.camera.setPos(10 * sin(angleRadians), -10.0 * cos(angleRadians), 2) 99 | self.camera.setHpr(angleDegrees, 0, 0) 100 | return task.cont 101 | 102 | demo = CarouselDemo() 103 | demo.run() -------------------------------------------------------------------------------- /Codes/3D/carousel/models/carousel_base.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/models/carousel_base.egg.pz -------------------------------------------------------------------------------- /Codes/3D/carousel/models/carousel_base.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/models/carousel_base.jpg -------------------------------------------------------------------------------- /Codes/3D/carousel/models/carousel_lights.egg.pz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/models/carousel_lights.egg.pz -------------------------------------------------------------------------------- /Codes/3D/carousel/models/carousel_lights_off.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/models/carousel_lights_off.jpg -------------------------------------------------------------------------------- /Codes/3D/carousel/models/carousel_lights_on.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/models/carousel_lights_on.jpg -------------------------------------------------------------------------------- /Codes/3D/carousel/models/carousel_panda.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/models/carousel_panda.egg.pz -------------------------------------------------------------------------------- /Codes/3D/carousel/models/carousel_panda.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/models/carousel_panda.jpg -------------------------------------------------------------------------------- /Codes/3D/carousel/models/env.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/models/env.egg.pz -------------------------------------------------------------------------------- /Codes/3D/carousel/models/env_ground.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/models/env_ground.jpg -------------------------------------------------------------------------------- /Codes/3D/carousel/models/env_sky.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/carousel/models/env_sky.jpg -------------------------------------------------------------------------------- /Codes/3D/glow-filter/advanced.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Author: Kwasi Mensah (kmensah@andrew.cmu.edu) 4 | # Date: 7/25/2005 5 | 6 | from direct.showbase.ShowBase import ShowBase 7 | from panda3d.core import Filename, Shader 8 | from panda3d.core import PandaNode, NodePath 9 | from panda3d.core import ColorBlendAttrib 10 | from panda3d.core import AmbientLight, DirectionalLight 11 | from panda3d.core import TextNode, LPoint3, LVector4 12 | from direct.showbase.DirectObject import DirectObject 13 | from direct.gui.OnscreenText import OnscreenText 14 | from direct.actor.Actor import Actor 15 | import sys 16 | import os 17 | 18 | # Function to put instructions on the screen. 
19 | def addInstructions(pos, msg): 20 | return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), 21 | parent=base.a2dTopLeft, align=TextNode.ALeft, 22 | pos=(0.08, -pos - 0.04), scale=.05) 23 | 24 | # Function to put title on the screen. 25 | def addTitle(text): 26 | return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), 27 | parent=base.a2dBottomRight, align=TextNode.ARight, 28 | pos=(-0.1, 0.09), scale=.08) 29 | 30 | 31 | # This function is responsible for setting up the two blur filters. 32 | # It just makes a temp Buffer, puts a screen aligned card, and then sets 33 | # the appropiate shader to do all the work. Gaussian blurs are decomposable 34 | # into a two-pass algorithm which is faster than the equivalent one-pass 35 | # algorithm, so we do it in two passes: one pass that blurs in the horizontal 36 | # direction, and one in the vertical direction. 37 | def makeFilterBuffer(srcbuffer, name, sort, prog): 38 | blurBuffer = base.win.makeTextureBuffer(name, 512, 512) 39 | blurBuffer.setSort(sort) 40 | blurBuffer.setClearColor(LVector4(1, 0, 0, 1)) 41 | blurCamera = base.makeCamera2d(blurBuffer) 42 | blurScene = NodePath("new Scene") 43 | blurCamera.node().setScene(blurScene) 44 | shader = loader.loadShader(prog) 45 | card = srcbuffer.getTextureCard() 46 | card.reparentTo(blurScene) 47 | card.setShader(shader) 48 | return blurBuffer 49 | 50 | 51 | class GlowDemo(ShowBase): 52 | 53 | def __init__(self): 54 | # Initialize the ShowBase class from which we inherit, which will 55 | # create a window and set up everything we need for rendering into it. 56 | ShowBase.__init__(self) 57 | 58 | base.disableMouse() 59 | base.setBackgroundColor(0, 0, 0) 60 | camera.setPos(0, -50, 0) 61 | 62 | # Check video card capabilities. 63 | if not base.win.getGsg().getSupportsBasicShaders(): 64 | addTitle( 65 | "Glow Filter: Video driver reports that Cg shaders are not supported.") 66 | return 67 | 68 | # Post the instructions 69 | self.title = addTitle("Panda3D: Tutorial - Glow Filter") 70 | self.inst1 = addInstructions(0.06, "ESC: Quit") 71 | self.inst2 = addInstructions(0.12, "Space: Toggle Glow Filter On/Off") 72 | self.inst3 = addInstructions(0.18, "Enter: Toggle Running/Spinning") 73 | self.inst4 = addInstructions(0.24, "V: View the render-to-texture results") 74 | 75 | # Create the shader that will determime what parts of the scene will 76 | # glow 77 | glowShader = loader.loadShader("shaders/glowShader.sha") 78 | 79 | # load our model 80 | self.tron = Actor() 81 | self.tron.loadModel("models/tron") 82 | self.tron.loadAnims({"running": "models/tron_anim"}) 83 | self.tron.reparentTo(render) 84 | self.interval = self.tron.hprInterval(60, LPoint3(360, 0, 0)) 85 | self.interval.loop() 86 | self.isRunning = False 87 | 88 | # put some lighting on the tron model 89 | dlight = DirectionalLight('dlight') 90 | alight = AmbientLight('alight') 91 | dlnp = render.attachNewNode(dlight) 92 | alnp = render.attachNewNode(alight) 93 | dlight.setColor(LVector4(1.0, 0.7, 0.2, 1)) 94 | alight.setColor(LVector4(0.2, 0.2, 0.2, 1)) 95 | dlnp.setHpr(0, -60, 0) 96 | render.setLight(dlnp) 97 | render.setLight(alnp) 98 | 99 | # create the glow buffer. This buffer renders like a normal scene, 100 | # except that only the glowing materials should show up nonblack. 101 | glowBuffer = base.win.makeTextureBuffer("Glow scene", 512, 512) 102 | glowBuffer.setSort(-3) 103 | glowBuffer.setClearColor(LVector4(0, 0, 0, 1)) 104 | 105 | # We have to attach a camera to the glow buffer. 
The glow camera 106 | # must have the same frustum as the main camera. As long as the aspect 107 | # ratios match, the rest will take care of itself. 108 | glowCamera = base.makeCamera( 109 | glowBuffer, lens=base.cam.node().getLens()) 110 | 111 | # Tell the glow camera to use the glow shader 112 | tempnode = NodePath(PandaNode("temp node")) 113 | tempnode.setShader(glowShader) 114 | glowCamera.node().setInitialState(tempnode.getState()) 115 | 116 | # set up the pipeline: from glow scene to blur x to blur y to main 117 | # window. 118 | blurXBuffer = makeFilterBuffer( 119 | glowBuffer, "Blur X", -2, "shaders/XBlurShader.sha") 120 | blurYBuffer = makeFilterBuffer( 121 | blurXBuffer, "Blur Y", -1, "shaders/YBlurShader.sha") 122 | self.finalcard = blurYBuffer.getTextureCard() 123 | self.finalcard.reparentTo(render2d) 124 | 125 | # This attribute is used to add the results of the post-processing 126 | # effects to the existing framebuffer image, rather than replace it. 127 | # This is mainly useful for glow effects like ours. 128 | self.finalcard.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd)) 129 | 130 | # Panda contains a built-in viewer that lets you view the results of 131 | # your render-to-texture operations. This code configures the viewer. 132 | self.accept("v", base.bufferViewer.toggleEnable) 133 | self.accept("V", base.bufferViewer.toggleEnable) 134 | base.bufferViewer.setPosition("llcorner") 135 | base.bufferViewer.setLayout("hline") 136 | base.bufferViewer.setCardSize(0.652, 0) 137 | 138 | # event handling 139 | self.accept("space", self.toggleGlow) 140 | self.accept("enter", self.toggleDisplay) 141 | self.accept("escape", sys.exit, [0]) 142 | 143 | self.glowOn = True 144 | 145 | def toggleGlow(self): 146 | if self.glowOn: 147 | self.finalcard.reparentTo(hidden) 148 | else: 149 | self.finalcard.reparentTo(render2d) 150 | self.glowOn = not(self.glowOn) 151 | 152 | def toggleDisplay(self): 153 | self.isRunning = not(self.isRunning) 154 | if not(self.isRunning): 155 | camera.setPos(0, -50, 0) 156 | self.tron.stop("running") 157 | self.tron.pose("running", 0) 158 | self.interval.loop() 159 | else: 160 | camera.setPos(0, -170, 3) 161 | self.interval.finish() 162 | self.tron.setHpr(0, 0, 0) 163 | self.tron.loop("running") 164 | 165 | demo = GlowDemo() 166 | demo.run() 167 | -------------------------------------------------------------------------------- /Codes/3D/glow-filter/basic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Author: Kwasi Mensah (kmensah@andrew.cmu.edu) 4 | # Date: 7/25/2005 5 | 6 | from direct.showbase.ShowBase import ShowBase 7 | from panda3d.core import Filename, Shader 8 | from panda3d.core import PandaNode, NodePath 9 | from panda3d.core import AmbientLight, DirectionalLight 10 | from panda3d.core import TextNode, LPoint3 11 | from direct.showbase.DirectObject import DirectObject 12 | from direct.filter.CommonFilters import CommonFilters 13 | from direct.gui.OnscreenText import OnscreenText 14 | from direct.actor.Actor import Actor 15 | import sys 16 | import os 17 | 18 | # Function to put instructions on the screen. 19 | def addInstructions(pos, msg): 20 | return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), 21 | parent=base.a2dTopLeft, align=TextNode.ALeft, 22 | pos=(0.08, -pos - 0.04), scale=.05) 23 | 24 | # Function to put title on the screen. 
25 | def addTitle(text): 26 | return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), 27 | parent=base.a2dBottomRight, align=TextNode.ARight, 28 | pos=(-0.1, 0.09), scale=.08) 29 | 30 | 31 | class GlowDemo(ShowBase): 32 | def __init__(self): 33 | # Initialize the ShowBase class from which we inherit, which will 34 | # create a window and set up everything we need for rendering into it. 35 | ShowBase.__init__(self) 36 | 37 | base.disableMouse() 38 | base.setBackgroundColor(0, 0, 0) 39 | camera.setPos(0, -50, 0) 40 | 41 | # Check video card capabilities. 42 | if not base.win.getGsg().getSupportsBasicShaders(): 43 | addTitle( 44 | "Glow Filter: Video driver reports that Cg shaders are not supported.") 45 | return 46 | 47 | # Use class 'CommonFilters' to enable a bloom filter. 48 | # The brightness of a pixel is measured using a weighted average 49 | # of R,G,B,A. We put all the weight on Alpha, meaning that for 50 | # us, the framebuffer's alpha channel alpha controls bloom. 51 | 52 | self.filters = CommonFilters(base.win, base.cam) 53 | filterok = self.filters.setBloom( 54 | blend=(0, 0, 0, 1), desat=-0.5, intensity=3.0, size="small") 55 | if (filterok == False): 56 | addTitle( 57 | "Toon Shader: Video card not powerful enough to do image postprocessing") 58 | return 59 | self.glowSize = 1 60 | 61 | # Post the instructions 62 | self.title = addTitle("Panda3D: Tutorial - Glow Filter") 63 | self.inst1 = addInstructions(0.06, "ESC: Quit") 64 | self.inst2 = addInstructions(0.12, "Space: Toggle Glow Filter Small/Med/Large/Off") 65 | self.inst3 = addInstructions(0.18, "Enter: Toggle Running/Spinning") 66 | self.inst4 = addInstructions(0.24, "V: View the render-to-texture results") 67 | 68 | # load our model 69 | 70 | self.tron = Actor() 71 | self.tron.loadModel("models/tron") 72 | self.tron.loadAnims({"running": "models/tron_anim"}) 73 | self.tron.reparentTo(render) 74 | self.interval = self.tron.hprInterval(60, LPoint3(360, 0, 0)) 75 | self.interval.loop() 76 | self.isRunning = False 77 | 78 | # put some lighting on the model 79 | 80 | dlight = DirectionalLight('dlight') 81 | alight = AmbientLight('alight') 82 | dlnp = render.attachNewNode(dlight) 83 | alnp = render.attachNewNode(alight) 84 | dlight.setColor((1.0, 0.7, 0.2, 1)) 85 | alight.setColor((0.2, 0.2, 0.2, 1)) 86 | dlnp.setHpr(0, -60, 0) 87 | render.setLight(dlnp) 88 | render.setLight(alnp) 89 | 90 | # Panda contains a built-in viewer that lets you view the results of 91 | # your render-to-texture operations. This code configures the viewer. 
92 | self.accept("v", base.bufferViewer.toggleEnable) 93 | self.accept("V", base.bufferViewer.toggleEnable) 94 | base.bufferViewer.setPosition("llcorner") 95 | base.bufferViewer.setLayout("hline") 96 | # base.camLens.setFov(100) 97 | # event handling 98 | self.accept("space", self.toggleGlow) 99 | self.accept("enter", self.toggleDisplay) 100 | self.accept("escape", sys.exit, [0]) 101 | 102 | def toggleGlow(self): 103 | self.glowSize = self.glowSize + 1 104 | if self.glowSize == 4: 105 | self.glowSize = 0 106 | self.filters.setBloom(blend=(0, 0, 0, 1), desat=-0.5, intensity=3.0, 107 | size=self.glowSize) 108 | 109 | def toggleDisplay(self): 110 | self.isRunning = not self.isRunning 111 | if not self.isRunning: 112 | camera.setPos(0, -50, 0) 113 | self.tron.stop("running") 114 | self.tron.pose("running", 0) 115 | self.interval.loop() 116 | else: 117 | camera.setPos(0, -170, 3) 118 | self.interval.finish() 119 | self.tron.setHpr(0, 0, 0) 120 | self.tron.loop("running") 121 | 122 | demo = GlowDemo() 123 | demo.run() 124 | -------------------------------------------------------------------------------- /Codes/3D/glow-filter/models/tron-color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/glow-filter/models/tron-color.png -------------------------------------------------------------------------------- /Codes/3D/glow-filter/models/tron-glow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/glow-filter/models/tron-glow.png -------------------------------------------------------------------------------- /Codes/3D/glow-filter/models/tron.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/glow-filter/models/tron.egg.pz -------------------------------------------------------------------------------- /Codes/3D/glow-filter/models/tron_anim.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/glow-filter/models/tron_anim.egg.pz -------------------------------------------------------------------------------- /Codes/3D/glow-filter/shaders/XBlurShader.sha: -------------------------------------------------------------------------------- 1 | //Cg 2 | // 3 | //Cg profile arbvp1 arbfp1 4 | 5 | void vshader(float4 vtx_position : POSITION, 6 | float2 vtx_texcoord0 : TEXCOORD0, 7 | out float4 l_position : POSITION, 8 | out float2 l_texcoord0 : TEXCOORD0, 9 | uniform float4x4 mat_modelproj) 10 | { 11 | l_position=mul(mat_modelproj, vtx_position); 12 | l_texcoord0=vtx_texcoord0; 13 | } 14 | 15 | 16 | void fshader(float2 l_texcoord0 : TEXCOORD0, 17 | out float4 o_color : COLOR, 18 | uniform sampler2D tex_0 : TEXUNIT0) 19 | { 20 | float3 offset = float3(1.0/1024.0, 5.0/1024.0, 9.0/1024.0); 21 | o_color = tex2D(tex_0, float2(l_texcoord0.x - offset.z, l_texcoord0.y)) * 5.0; 22 | o_color += tex2D(tex_0, float2(l_texcoord0.x - offset.y, l_texcoord0.y)) * 8.0; 23 | o_color += tex2D(tex_0, float2(l_texcoord0.x - offset.x, l_texcoord0.y)) * 10.0; 24 | o_color += tex2D(tex_0, float2(l_texcoord0.x + offset.x, l_texcoord0.y)) * 10.0; 25 | o_color += tex2D(tex_0, float2(l_texcoord0.x + 
offset.y, l_texcoord0.y)) * 8.0; 26 | o_color += tex2D(tex_0, float2(l_texcoord0.x + offset.z, l_texcoord0.y)) * 5.0; 27 | o_color = o_color * 0.030; 28 | } 29 | -------------------------------------------------------------------------------- /Codes/3D/glow-filter/shaders/YBlurShader.sha: -------------------------------------------------------------------------------- 1 | //Cg 2 | // 3 | //Cg profile arbvp1 arbfp1 4 | 5 | void vshader(float4 vtx_position : POSITION, 6 | float2 vtx_texcoord0 : TEXCOORD0, 7 | out float4 l_position : POSITION, 8 | out float2 l_texcoord0 : TEXCOORD0, 9 | uniform float4x4 mat_modelproj) 10 | { 11 | l_position=mul(mat_modelproj, vtx_position); 12 | l_texcoord0=vtx_texcoord0; 13 | } 14 | 15 | 16 | void fshader(float2 l_texcoord0 : TEXCOORD0, 17 | out float4 o_color : COLOR, 18 | uniform sampler2D tex_0 : TEXUNIT0) 19 | { 20 | float3 offset = float3(1.0/1024.0, 5.0/1024.0, 9.0/1024.0); 21 | o_color = tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y - offset.z)) * 5.0; 22 | o_color += tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y - offset.y)) * 8.0; 23 | o_color += tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y - offset.x)) * 10.0; 24 | o_color += tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y + offset.x)) * 10.0; 25 | o_color += tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y + offset.y)) * 8.0; 26 | o_color += tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y + offset.z)) * 5.0; 27 | o_color = o_color * 0.030; 28 | } 29 | 30 | 31 | -------------------------------------------------------------------------------- /Codes/3D/glow-filter/shaders/glowShader.sha: -------------------------------------------------------------------------------- 1 | //Cg 2 | // 3 | 4 | void vshader(float4 vtx_position : POSITION, 5 | float2 vtx_texcoord0 : TEXCOORD0, 6 | uniform float4x4 mat_modelproj, 7 | out float4 l_position : POSITION, 8 | out float2 l_texcoord0 : TEXCOORD0) 9 | { 10 | l_position=mul(mat_modelproj, vtx_position); 11 | l_texcoord0=vtx_texcoord0; 12 | } 13 | 14 | void fshader(float2 l_texcoord0 : TEXCOORD0, 15 | uniform sampler2D tex_0 : TEXUNIT0, 16 | out float4 o_color : COLOR) 17 | { 18 | float4 texColor=tex2D(tex_0, l_texcoord0); 19 | o_color=texColor*2*(texColor.w - 0.5); 20 | } 21 | 22 | -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/ManualPage.url: -------------------------------------------------------------------------------- 1 | [InternetShortcut] 2 | URL=http://panda3d.org/wiki/index.php/Sample_Programs:_roaming_ralph 3 | -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/Run main.py.lnk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/roaming-ralph/Run main.py.lnk -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Author: Ryan Myers 4 | # Models: Jeff Styers, Reagan Heller 5 | # 6 | # Last Updated: 2015-03-13 7 | # 8 | # This tutorial provides an example of creating a character 9 | # and having it walk around on uneven terrain, as well 10 | # as implementing a fully rotatable camera. 
11 | 12 | from direct.showbase.ShowBase import ShowBase 13 | from panda3d.core import CollisionTraverser, CollisionNode 14 | from panda3d.core import CollisionHandlerQueue, CollisionRay 15 | from panda3d.core import Filename, AmbientLight, DirectionalLight 16 | from panda3d.core import PandaNode, NodePath, Camera, TextNode 17 | from panda3d.core import CollideMask 18 | from direct.gui.OnscreenText import OnscreenText 19 | from direct.actor.Actor import Actor 20 | import random 21 | import sys 22 | import os 23 | import math 24 | 25 | # Function to put instructions on the screen. 26 | def addInstructions(pos, msg): 27 | return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), scale=.05, 28 | shadow=(0, 0, 0, 1), parent=base.a2dTopLeft, 29 | pos=(0.08, -pos - 0.04), align=TextNode.ALeft) 30 | 31 | # Function to put title on the screen. 32 | def addTitle(text): 33 | return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), scale=.07, 34 | parent=base.a2dBottomRight, align=TextNode.ARight, 35 | pos=(-0.1, 0.09), shadow=(0, 0, 0, 1)) 36 | 37 | 38 | class RoamingRalphDemo(ShowBase): 39 | def __init__(self): 40 | # Set up the window, camera, etc. 41 | ShowBase.__init__(self) 42 | 43 | # Set the background color to black 44 | self.win.setClearColor((0, 0, 0, 1)) 45 | 46 | # This is used to store which keys are currently pressed. 47 | self.keyMap = { 48 | "left": 0, "right": 0, "forward": 0, "cam-left": 0, "cam-right": 0} 49 | 50 | # Post the instructions 51 | self.title = addTitle( 52 | "Panda3D Tutorial: Roaming Ralph (Walking on Uneven Terrain)") 53 | self.inst1 = addInstructions(0.06, "[ESC]: Quit") 54 | self.inst2 = addInstructions(0.12, "[Left Arrow]: Rotate Ralph Left") 55 | self.inst3 = addInstructions(0.18, "[Right Arrow]: Rotate Ralph Right") 56 | self.inst4 = addInstructions(0.24, "[Up Arrow]: Run Ralph Forward") 57 | self.inst6 = addInstructions(0.30, "[A]: Rotate Camera Left") 58 | self.inst7 = addInstructions(0.36, "[S]: Rotate Camera Right") 59 | 60 | # Set up the environment 61 | # 62 | # This environment model contains collision meshes. If you look 63 | # in the egg file, you will see the following: 64 | # 65 | # { Polyset keep descend } 66 | # 67 | # This tag causes the following mesh to be converted to a collision 68 | # mesh -- a mesh which is optimized for collision, not rendering. 69 | # It also keeps the original mesh, so there are now two copies --- 70 | # one optimized for rendering, one for collisions. 71 | 72 | self.environ = loader.loadModel("models/world") 73 | self.environ.reparentTo(render) 74 | 75 | # Create the main character, Ralph 76 | 77 | ralphStartPos = self.environ.find("**/start_point").getPos() 78 | self.ralph = Actor("models/ralph", 79 | {"run": "models/ralph-run", 80 | "walk": "models/ralph-walk"}) 81 | self.ralph.reparentTo(render) 82 | self.ralph.setScale(.2) 83 | self.ralph.setPos(ralphStartPos + (0, 0, 0.5)) 84 | 85 | # Create a floater object, which floats 2 units above ralph. We 86 | # use this as a target for the camera to look at. 
87 | 88 | self.floater = NodePath(PandaNode("floater")) 89 | self.floater.reparentTo(self.ralph) 90 | self.floater.setZ(2.0) 91 | 92 | # Accept the control keys for movement and rotation 93 | 94 | self.accept("escape", sys.exit) 95 | self.accept("arrow_left", self.setKey, ["left", True]) 96 | self.accept("arrow_right", self.setKey, ["right", True]) 97 | self.accept("arrow_up", self.setKey, ["forward", True]) 98 | self.accept("a", self.setKey, ["cam-left", True]) 99 | self.accept("s", self.setKey, ["cam-right", True]) 100 | self.accept("arrow_left-up", self.setKey, ["left", False]) 101 | self.accept("arrow_right-up", self.setKey, ["right", False]) 102 | self.accept("arrow_up-up", self.setKey, ["forward", False]) 103 | self.accept("a-up", self.setKey, ["cam-left", False]) 104 | self.accept("s-up", self.setKey, ["cam-right", False]) 105 | 106 | taskMgr.add(self.move, "moveTask") 107 | 108 | # Game state variables 109 | self.isMoving = False 110 | 111 | # Set up the camera 112 | self.disableMouse() 113 | self.camera.setPos(self.ralph.getX(), self.ralph.getY() + 10, 2) 114 | 115 | 116 | 117 | # Records the state of the arrow keys 118 | def setKey(self, key, value): 119 | self.keyMap[key] = value 120 | 121 | # Accepts arrow keys to move either the player or the menu cursor, 122 | # Also deals with grid checking and collision detection 123 | def move(self, task): 124 | 125 | startpos = self.ralph.getPos() 126 | 127 | # If a move-key is pressed, move ralph in the specified direction. 128 | 129 | if self.keyMap["left"]: 130 | self.ralph.setH(self.ralph.getH() + 300 * dt) 131 | # If ralph is moving, loop the run animation. 132 | 133 | 134 | 135 | return task.cont 136 | 137 | 138 | demo = RoamingRalphDemo() 139 | demo.run() 140 | -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/models/ground.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/roaming-ralph/models/ground.jpg -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/models/hedge.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/roaming-ralph/models/hedge.jpg -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/models/ralph-run.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/roaming-ralph/models/ralph-run.egg.pz -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/models/ralph-walk.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/roaming-ralph/models/ralph-walk.egg.pz -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/models/ralph.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/roaming-ralph/models/ralph.egg.pz -------------------------------------------------------------------------------- 
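Note on the move() task in /Codes/3D/roaming-ralph/main.py above: the listing is cut down and references dt without ever computing it, so the rotation line cannot run as written. A minimal sketch of how that task is usually completed, following the stock Panda3D "Roaming Ralph" sample rather than anything specific to this repository, and assuming the keyMap, ralph and isMoving attributes already defined above:

    def move(self, task):
        # Time elapsed since the last frame; globalClock is a builtin
        # exposed by ShowBase.
        dt = globalClock.getDt()

        # Turn with the arrow keys, advance along Ralph's own Y axis.
        if self.keyMap["left"]:
            self.ralph.setH(self.ralph.getH() + 300 * dt)
        if self.keyMap["right"]:
            self.ralph.setH(self.ralph.getH() - 300 * dt)
        if self.keyMap["forward"]:
            self.ralph.setY(self.ralph, -25 * dt)

        # Loop the run animation only while a movement key is held.
        if self.keyMap["forward"] or self.keyMap["left"] or self.keyMap["right"]:
            if not self.isMoving:
                self.ralph.loop("run")
                self.isMoving = True
        elif self.isMoving:
            self.ralph.stop()
            self.ralph.pose("walk", 5)
            self.isMoving = False

        return task.cont

The full sample also ray-casts against the collision meshes mentioned in the comments above to keep Ralph on the terrain; that part is omitted here.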
/Codes/3D/roaming-ralph/models/ralph.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/roaming-ralph/models/ralph.jpg -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/models/rock03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/roaming-ralph/models/rock03.jpg -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/models/tree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/roaming-ralph/models/tree.jpg -------------------------------------------------------------------------------- /Codes/3D/roaming-ralph/models/world.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/3D/roaming-ralph/models/world.egg.pz -------------------------------------------------------------------------------- /Codes/3D/test.py: -------------------------------------------------------------------------------- 1 | import direct.directbase.DirectStart 2 | from direct.gui.OnscreenText import OnscreenText 3 | from direct.gui.DirectGui import * 4 | from panda3d.core import * 5 | 6 | v = [0] 7 | # Add some text 8 | bk_text = "This is my Demo" 9 | textObject = OnscreenText(text = bk_text, pos = (0.95,-0.95), 10 | scale = 0.07,fg=(1,0.5,0.5,1),align=TextNode.ACenter,mayChange=1) 11 | 12 | # Callback function to set text 13 | def setText(status=None): 14 | bk_text = "CurrentValue : %s"%v 15 | textObject.setText(bk_text) 16 | 17 | # Add button 18 | buttons = [ 19 | DirectRadioButton(text = 'RadioButton0', variable=v, value=[0], scale=0.05, pos=(-0.4,0,0), command=setText), 20 | DirectRadioButton(text = 'RadioButton1', variable=v, value=[1], scale=0.05, pos=(0,0,0), command=setText), 21 | DirectRadioButton(text = 'RadioButton2', variable=v, value=[2], scale=0.05, pos=(0.4,0,0), command=setText) 22 | ] 23 | 24 | for button in buttons: 25 | button.setOthers(buttons) 26 | 27 | # Run the tutorial 28 | run() -------------------------------------------------------------------------------- /Codes/Abhi/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/.DS_Store -------------------------------------------------------------------------------- /Codes/Abhi/3D/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/.DS_Store -------------------------------------------------------------------------------- /Codes/Abhi/3D/Hello_world_panda.py: -------------------------------------------------------------------------------- 1 | from direct.showbase.ShowBase import ShowBase 2 | 3 | class MyApp(ShowBase): 4 | 5 | def __init__(self): 6 | ShowBase.__init__(self) 7 | 8 | # Load the environment model. 9 | self.scene = self.loader.loadModel("environment") 10 | # Reparent the model to render. 
11 | self.scene.reparentTo(self.render) 12 | # Apply scale and position transforms on the model. 13 | self.scene.setScale(0.25, 0.25, 0.25) 14 | self.scene.setPos(-8, 42, 0) 15 | 16 | 17 | app = MyApp() 18 | app.run() -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/ManualPage.url: -------------------------------------------------------------------------------- 1 | [InternetShortcut] 2 | URL=http://panda3d.org/wiki/index.php/Sample_Programs:_carousel 3 | -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/Run main.py.lnk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/Run main.py.lnk -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/env_sky.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/env_sky.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/main.py: -------------------------------------------------------------------------------- 1 | from direct.showbase.ShowBase import ShowBase 2 | from panda3d.core import AmbientLight, DirectionalLight, LightAttrib 3 | from panda3d.core import NodePath 4 | from panda3d.core import LVector3 5 | from direct.interval.IntervalGlobal import * # Needed to use Intervals 6 | from direct.gui.DirectGui import * 7 | from direct.gui.OnscreenImage import OnscreenImage 8 | from panda3d.core import TransparencyAttrib 9 | # Kinect Libraries 10 | from pykinect2 import PyKinectV2 11 | from pykinect2.PyKinectV2 import * 12 | from pykinect2 import PyKinectRuntime 13 | 14 | import sys 15 | import os 16 | import numpy as np 17 | import cv2 18 | 19 | # Importing math constants and functions 20 | from math import pi, sin, cos 21 | 22 | 23 | class CarouselDemo(ShowBase): 24 | 25 | def __init__(self): 26 | # Initialize the ShowBase class from which we inherit, which will 27 | # create a window and set up everything we need for rendering into it. 
28 | ShowBase.__init__(self) 29 | 30 | base.disableMouse() # Allow manual positioning of the camera 31 | camera.setPos(0, -10, 1) # Set the cameras' position 32 | camera.setHpr(0, 0, 0) # and orientation 33 | 34 | self.keyMap = { 35 | "left": 0, "right": 0, "up": 0, "down": 0} 36 | 37 | taskMgr.add(self.startCarousel, "moveTask") 38 | 39 | imageObject = OnscreenImage(image = 'env_sky.jpg', pos = (-10,0,-10)) 40 | imageObject.setImage('env_sky.jpg') 41 | imageObject.setTransparency(TransparencyAttrib.MAlpha) 42 | 43 | self.loadModels() # Load and position our models 44 | self.setupLights() # Add some basic lighting 45 | 46 | 47 | self.accept("escape", sys.exit) 48 | self.accept("arrow_left", self.setKey, ["left", True]) 49 | self.accept("arrow_right", self.setKey, ["right", True]) 50 | self.accept("arrow_up", self.setKey, ["up", True]) 51 | self.accept("arrow_down", self.setKey, ["down", True]) 52 | self.accept("arrow_left-up", self.setKey, ["left", False]) 53 | self.accept("arrow_right-up", self.setKey, ["right", False]) 54 | self.accept("arrow_up-up", self.setKey, ["up", False]) 55 | self.accept("arrow_down-up", self.setKey, ["down", False]) 56 | 57 | 58 | def setKey(self, key, value): 59 | self.keyMap[key] = value 60 | 61 | def loadModels(self): 62 | # Load the carousel base 63 | self.carousel = loader.loadModel("models/carousel_base") 64 | self.carousel.reparentTo(render) # Attach it to render 65 | 66 | 67 | def setupLights(self): 68 | ambientLight = AmbientLight("ambientLight") 69 | ambientLight.setColor((1,1,1, 1)) 70 | render.setLight(render.attachNewNode(ambientLight)) 71 | 72 | def startCarousel(self,task): 73 | h = self.carousel.getH() 74 | p = self.carousel.getP() 75 | 76 | if self.keyMap["left"]: 77 | self.carouselSpin = self.carousel.setH(h+1) 78 | if self.keyMap["right"]: 79 | self.carouselSpin = self.carousel.setH(h-1) 80 | if self.keyMap["up"]: 81 | self.carouselSpin = self.carousel.setP(p-1) 82 | if self.keyMap["down"]: 83 | self.carouselSpin = self.carousel.setP(p+1) 84 | 85 | return task.cont 86 | 87 | 88 | def startCarousels(self,task): 89 | if self.keyMap["left"]: 90 | angleDegrees = task.time * 100.0 91 | angleRadians = angleDegrees * (pi / 180.0) 92 | self.camera.setPos(10 * sin(angleRadians), -10.0 * cos(angleRadians), 2) 93 | self.camera.setHpr(angleDegrees, 0, 0) 94 | 95 | if self.keyMap["right"]: 96 | angleDegrees = task.time * 10.0 97 | angleRadians = angleDegrees * (pi / 180.0) 98 | self.camera.setPos(10 * sin(angleRadians), -10.0 * cos(angleRadians), 2) 99 | self.camera.setHpr(angleDegrees, 0, 0) 100 | return task.cont 101 | 102 | demo = CarouselDemo() 103 | demo.run() -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/models/carousel_base.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/models/carousel_base.egg.pz -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/models/carousel_base.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/models/carousel_base.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/models/carousel_lights.egg.pz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/models/carousel_lights.egg.pz -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/models/carousel_lights_off.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/models/carousel_lights_off.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/models/carousel_lights_on.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/models/carousel_lights_on.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/models/carousel_panda.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/models/carousel_panda.egg.pz -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/models/carousel_panda.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/models/carousel_panda.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/models/env.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/models/env.egg.pz -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/models/env_ground.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/models/env_ground.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/carousel/models/env_sky.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/carousel/models/env_sky.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/glow-filter/advanced.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Author: Kwasi Mensah (kmensah@andrew.cmu.edu) 4 | # Date: 7/25/2005 5 | 6 | from direct.showbase.ShowBase import ShowBase 7 | from panda3d.core import Filename, Shader 8 | from panda3d.core import PandaNode, NodePath 9 | from panda3d.core import ColorBlendAttrib 10 | from panda3d.core import AmbientLight, DirectionalLight 11 | from panda3d.core import TextNode, LPoint3, LVector4 12 | from direct.showbase.DirectObject import DirectObject 13 | from direct.gui.OnscreenText import OnscreenText 14 | from direct.actor.Actor import Actor 15 | import sys 16 | import os 17 | 18 | # Function to put instructions on the screen. 
19 | def addInstructions(pos, msg): 20 | return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), 21 | parent=base.a2dTopLeft, align=TextNode.ALeft, 22 | pos=(0.08, -pos - 0.04), scale=.05) 23 | 24 | # Function to put title on the screen. 25 | def addTitle(text): 26 | return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), 27 | parent=base.a2dBottomRight, align=TextNode.ARight, 28 | pos=(-0.1, 0.09), scale=.08) 29 | 30 | 31 | # This function is responsible for setting up the two blur filters. 32 | # It just makes a temp Buffer, puts a screen aligned card, and then sets 33 | # the appropiate shader to do all the work. Gaussian blurs are decomposable 34 | # into a two-pass algorithm which is faster than the equivalent one-pass 35 | # algorithm, so we do it in two passes: one pass that blurs in the horizontal 36 | # direction, and one in the vertical direction. 37 | def makeFilterBuffer(srcbuffer, name, sort, prog): 38 | blurBuffer = base.win.makeTextureBuffer(name, 512, 512) 39 | blurBuffer.setSort(sort) 40 | blurBuffer.setClearColor(LVector4(1, 0, 0, 1)) 41 | blurCamera = base.makeCamera2d(blurBuffer) 42 | blurScene = NodePath("new Scene") 43 | blurCamera.node().setScene(blurScene) 44 | shader = loader.loadShader(prog) 45 | card = srcbuffer.getTextureCard() 46 | card.reparentTo(blurScene) 47 | card.setShader(shader) 48 | return blurBuffer 49 | 50 | 51 | class GlowDemo(ShowBase): 52 | 53 | def __init__(self): 54 | # Initialize the ShowBase class from which we inherit, which will 55 | # create a window and set up everything we need for rendering into it. 56 | ShowBase.__init__(self) 57 | 58 | base.disableMouse() 59 | base.setBackgroundColor(0, 0, 0) 60 | camera.setPos(0, -50, 0) 61 | 62 | # Check video card capabilities. 63 | if not base.win.getGsg().getSupportsBasicShaders(): 64 | addTitle( 65 | "Glow Filter: Video driver reports that Cg shaders are not supported.") 66 | return 67 | 68 | # Post the instructions 69 | self.title = addTitle("Panda3D: Tutorial - Glow Filter") 70 | self.inst1 = addInstructions(0.06, "ESC: Quit") 71 | self.inst2 = addInstructions(0.12, "Space: Toggle Glow Filter On/Off") 72 | self.inst3 = addInstructions(0.18, "Enter: Toggle Running/Spinning") 73 | self.inst4 = addInstructions(0.24, "V: View the render-to-texture results") 74 | 75 | # Create the shader that will determime what parts of the scene will 76 | # glow 77 | glowShader = loader.loadShader("shaders/glowShader.sha") 78 | 79 | # load our model 80 | self.tron = Actor() 81 | self.tron.loadModel("models/tron") 82 | self.tron.loadAnims({"running": "models/tron_anim"}) 83 | self.tron.reparentTo(render) 84 | self.interval = self.tron.hprInterval(60, LPoint3(360, 0, 0)) 85 | self.interval.loop() 86 | self.isRunning = False 87 | 88 | # put some lighting on the tron model 89 | dlight = DirectionalLight('dlight') 90 | alight = AmbientLight('alight') 91 | dlnp = render.attachNewNode(dlight) 92 | alnp = render.attachNewNode(alight) 93 | dlight.setColor(LVector4(1.0, 0.7, 0.2, 1)) 94 | alight.setColor(LVector4(0.2, 0.2, 0.2, 1)) 95 | dlnp.setHpr(0, -60, 0) 96 | render.setLight(dlnp) 97 | render.setLight(alnp) 98 | 99 | # create the glow buffer. This buffer renders like a normal scene, 100 | # except that only the glowing materials should show up nonblack. 101 | glowBuffer = base.win.makeTextureBuffer("Glow scene", 512, 512) 102 | glowBuffer.setSort(-3) 103 | glowBuffer.setClearColor(LVector4(0, 0, 0, 1)) 104 | 105 | # We have to attach a camera to the glow buffer. 
The glow camera 106 | # must have the same frustum as the main camera. As long as the aspect 107 | # ratios match, the rest will take care of itself. 108 | glowCamera = base.makeCamera( 109 | glowBuffer, lens=base.cam.node().getLens()) 110 | 111 | # Tell the glow camera to use the glow shader 112 | tempnode = NodePath(PandaNode("temp node")) 113 | tempnode.setShader(glowShader) 114 | glowCamera.node().setInitialState(tempnode.getState()) 115 | 116 | # set up the pipeline: from glow scene to blur x to blur y to main 117 | # window. 118 | blurXBuffer = makeFilterBuffer( 119 | glowBuffer, "Blur X", -2, "shaders/XBlurShader.sha") 120 | blurYBuffer = makeFilterBuffer( 121 | blurXBuffer, "Blur Y", -1, "shaders/YBlurShader.sha") 122 | self.finalcard = blurYBuffer.getTextureCard() 123 | self.finalcard.reparentTo(render2d) 124 | 125 | # This attribute is used to add the results of the post-processing 126 | # effects to the existing framebuffer image, rather than replace it. 127 | # This is mainly useful for glow effects like ours. 128 | self.finalcard.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd)) 129 | 130 | # Panda contains a built-in viewer that lets you view the results of 131 | # your render-to-texture operations. This code configures the viewer. 132 | self.accept("v", base.bufferViewer.toggleEnable) 133 | self.accept("V", base.bufferViewer.toggleEnable) 134 | base.bufferViewer.setPosition("llcorner") 135 | base.bufferViewer.setLayout("hline") 136 | base.bufferViewer.setCardSize(0.652, 0) 137 | 138 | # event handling 139 | self.accept("space", self.toggleGlow) 140 | self.accept("enter", self.toggleDisplay) 141 | self.accept("escape", sys.exit, [0]) 142 | 143 | self.glowOn = True 144 | 145 | def toggleGlow(self): 146 | if self.glowOn: 147 | self.finalcard.reparentTo(hidden) 148 | else: 149 | self.finalcard.reparentTo(render2d) 150 | self.glowOn = not(self.glowOn) 151 | 152 | def toggleDisplay(self): 153 | self.isRunning = not(self.isRunning) 154 | if not(self.isRunning): 155 | camera.setPos(0, -50, 0) 156 | self.tron.stop("running") 157 | self.tron.pose("running", 0) 158 | self.interval.loop() 159 | else: 160 | camera.setPos(0, -170, 3) 161 | self.interval.finish() 162 | self.tron.setHpr(0, 0, 0) 163 | self.tron.loop("running") 164 | 165 | demo = GlowDemo() 166 | demo.run() 167 | -------------------------------------------------------------------------------- /Codes/Abhi/3D/glow-filter/basic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Author: Kwasi Mensah (kmensah@andrew.cmu.edu) 4 | # Date: 7/25/2005 5 | 6 | from direct.showbase.ShowBase import ShowBase 7 | from panda3d.core import Filename, Shader 8 | from panda3d.core import PandaNode, NodePath 9 | from panda3d.core import AmbientLight, DirectionalLight 10 | from panda3d.core import TextNode, LPoint3 11 | from direct.showbase.DirectObject import DirectObject 12 | from direct.filter.CommonFilters import CommonFilters 13 | from direct.gui.OnscreenText import OnscreenText 14 | from direct.actor.Actor import Actor 15 | import sys 16 | import os 17 | 18 | # Function to put instructions on the screen. 19 | def addInstructions(pos, msg): 20 | return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), 21 | parent=base.a2dTopLeft, align=TextNode.ALeft, 22 | pos=(0.08, -pos - 0.04), scale=.05) 23 | 24 | # Function to put title on the screen. 
25 | def addTitle(text): 26 | return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), 27 | parent=base.a2dBottomRight, align=TextNode.ARight, 28 | pos=(-0.1, 0.09), scale=.08) 29 | 30 | 31 | class GlowDemo(ShowBase): 32 | def __init__(self): 33 | # Initialize the ShowBase class from which we inherit, which will 34 | # create a window and set up everything we need for rendering into it. 35 | ShowBase.__init__(self) 36 | 37 | base.disableMouse() 38 | base.setBackgroundColor(0, 0, 0) 39 | camera.setPos(0, -50, 0) 40 | 41 | # Check video card capabilities. 42 | if not base.win.getGsg().getSupportsBasicShaders(): 43 | addTitle( 44 | "Glow Filter: Video driver reports that Cg shaders are not supported.") 45 | return 46 | 47 | # Use class 'CommonFilters' to enable a bloom filter. 48 | # The brightness of a pixel is measured using a weighted average 49 | # of R,G,B,A. We put all the weight on Alpha, meaning that for 50 | # us, the framebuffer's alpha channel alpha controls bloom. 51 | 52 | self.filters = CommonFilters(base.win, base.cam) 53 | filterok = self.filters.setBloom( 54 | blend=(0, 0, 0, 1), desat=-0.5, intensity=3.0, size="small") 55 | if (filterok == False): 56 | addTitle( 57 | "Toon Shader: Video card not powerful enough to do image postprocessing") 58 | return 59 | self.glowSize = 1 60 | 61 | # Post the instructions 62 | self.title = addTitle("Panda3D: Tutorial - Glow Filter") 63 | self.inst1 = addInstructions(0.06, "ESC: Quit") 64 | self.inst2 = addInstructions(0.12, "Space: Toggle Glow Filter Small/Med/Large/Off") 65 | self.inst3 = addInstructions(0.18, "Enter: Toggle Running/Spinning") 66 | self.inst4 = addInstructions(0.24, "V: View the render-to-texture results") 67 | 68 | # load our model 69 | 70 | self.tron = Actor() 71 | self.tron.loadModel("models/tron") 72 | self.tron.loadAnims({"running": "models/tron_anim"}) 73 | self.tron.reparentTo(render) 74 | self.interval = self.tron.hprInterval(60, LPoint3(360, 0, 0)) 75 | self.interval.loop() 76 | self.isRunning = False 77 | 78 | # put some lighting on the model 79 | 80 | dlight = DirectionalLight('dlight') 81 | alight = AmbientLight('alight') 82 | dlnp = render.attachNewNode(dlight) 83 | alnp = render.attachNewNode(alight) 84 | dlight.setColor((1.0, 0.7, 0.2, 1)) 85 | alight.setColor((0.2, 0.2, 0.2, 1)) 86 | dlnp.setHpr(0, -60, 0) 87 | render.setLight(dlnp) 88 | render.setLight(alnp) 89 | 90 | # Panda contains a built-in viewer that lets you view the results of 91 | # your render-to-texture operations. This code configures the viewer. 
92 | self.accept("v", base.bufferViewer.toggleEnable) 93 | self.accept("V", base.bufferViewer.toggleEnable) 94 | base.bufferViewer.setPosition("llcorner") 95 | base.bufferViewer.setLayout("hline") 96 | # base.camLens.setFov(100) 97 | # event handling 98 | self.accept("space", self.toggleGlow) 99 | self.accept("enter", self.toggleDisplay) 100 | self.accept("escape", sys.exit, [0]) 101 | 102 | def toggleGlow(self): 103 | self.glowSize = self.glowSize + 1 104 | if self.glowSize == 4: 105 | self.glowSize = 0 106 | self.filters.setBloom(blend=(0, 0, 0, 1), desat=-0.5, intensity=3.0, 107 | size=self.glowSize) 108 | 109 | def toggleDisplay(self): 110 | self.isRunning = not self.isRunning 111 | if not self.isRunning: 112 | camera.setPos(0, -50, 0) 113 | self.tron.stop("running") 114 | self.tron.pose("running", 0) 115 | self.interval.loop() 116 | else: 117 | camera.setPos(0, -170, 3) 118 | self.interval.finish() 119 | self.tron.setHpr(0, 0, 0) 120 | self.tron.loop("running") 121 | 122 | demo = GlowDemo() 123 | demo.run() 124 | -------------------------------------------------------------------------------- /Codes/Abhi/3D/glow-filter/models/tron-color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/glow-filter/models/tron-color.png -------------------------------------------------------------------------------- /Codes/Abhi/3D/glow-filter/models/tron-glow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/glow-filter/models/tron-glow.png -------------------------------------------------------------------------------- /Codes/Abhi/3D/glow-filter/models/tron.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/glow-filter/models/tron.egg.pz -------------------------------------------------------------------------------- /Codes/Abhi/3D/glow-filter/models/tron_anim.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/glow-filter/models/tron_anim.egg.pz -------------------------------------------------------------------------------- /Codes/Abhi/3D/glow-filter/shaders/XBlurShader.sha: -------------------------------------------------------------------------------- 1 | //Cg 2 | // 3 | //Cg profile arbvp1 arbfp1 4 | 5 | void vshader(float4 vtx_position : POSITION, 6 | float2 vtx_texcoord0 : TEXCOORD0, 7 | out float4 l_position : POSITION, 8 | out float2 l_texcoord0 : TEXCOORD0, 9 | uniform float4x4 mat_modelproj) 10 | { 11 | l_position=mul(mat_modelproj, vtx_position); 12 | l_texcoord0=vtx_texcoord0; 13 | } 14 | 15 | 16 | void fshader(float2 l_texcoord0 : TEXCOORD0, 17 | out float4 o_color : COLOR, 18 | uniform sampler2D tex_0 : TEXUNIT0) 19 | { 20 | float3 offset = float3(1.0/1024.0, 5.0/1024.0, 9.0/1024.0); 21 | o_color = tex2D(tex_0, float2(l_texcoord0.x - offset.z, l_texcoord0.y)) * 5.0; 22 | o_color += tex2D(tex_0, float2(l_texcoord0.x - offset.y, l_texcoord0.y)) * 8.0; 23 | o_color += tex2D(tex_0, float2(l_texcoord0.x - offset.x, l_texcoord0.y)) * 10.0; 24 | o_color += tex2D(tex_0, float2(l_texcoord0.x + offset.x, l_texcoord0.y)) * 10.0; 25 | 
o_color += tex2D(tex_0, float2(l_texcoord0.x + offset.y, l_texcoord0.y)) * 8.0; 26 | o_color += tex2D(tex_0, float2(l_texcoord0.x + offset.z, l_texcoord0.y)) * 5.0; 27 | o_color = o_color * 0.030; 28 | } 29 | -------------------------------------------------------------------------------- /Codes/Abhi/3D/glow-filter/shaders/YBlurShader.sha: -------------------------------------------------------------------------------- 1 | //Cg 2 | // 3 | //Cg profile arbvp1 arbfp1 4 | 5 | void vshader(float4 vtx_position : POSITION, 6 | float2 vtx_texcoord0 : TEXCOORD0, 7 | out float4 l_position : POSITION, 8 | out float2 l_texcoord0 : TEXCOORD0, 9 | uniform float4x4 mat_modelproj) 10 | { 11 | l_position=mul(mat_modelproj, vtx_position); 12 | l_texcoord0=vtx_texcoord0; 13 | } 14 | 15 | 16 | void fshader(float2 l_texcoord0 : TEXCOORD0, 17 | out float4 o_color : COLOR, 18 | uniform sampler2D tex_0 : TEXUNIT0) 19 | { 20 | float3 offset = float3(1.0/1024.0, 5.0/1024.0, 9.0/1024.0); 21 | o_color = tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y - offset.z)) * 5.0; 22 | o_color += tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y - offset.y)) * 8.0; 23 | o_color += tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y - offset.x)) * 10.0; 24 | o_color += tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y + offset.x)) * 10.0; 25 | o_color += tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y + offset.y)) * 8.0; 26 | o_color += tex2D(tex_0, float2(l_texcoord0.x, l_texcoord0.y + offset.z)) * 5.0; 27 | o_color = o_color * 0.030; 28 | } 29 | 30 | 31 | -------------------------------------------------------------------------------- /Codes/Abhi/3D/glow-filter/shaders/glowShader.sha: -------------------------------------------------------------------------------- 1 | //Cg 2 | // 3 | 4 | void vshader(float4 vtx_position : POSITION, 5 | float2 vtx_texcoord0 : TEXCOORD0, 6 | uniform float4x4 mat_modelproj, 7 | out float4 l_position : POSITION, 8 | out float2 l_texcoord0 : TEXCOORD0) 9 | { 10 | l_position=mul(mat_modelproj, vtx_position); 11 | l_texcoord0=vtx_texcoord0; 12 | } 13 | 14 | void fshader(float2 l_texcoord0 : TEXCOORD0, 15 | uniform sampler2D tex_0 : TEXUNIT0, 16 | out float4 o_color : COLOR) 17 | { 18 | float4 texColor=tex2D(tex_0, l_texcoord0); 19 | o_color=texColor*2*(texColor.w - 0.5); 20 | } 21 | 22 | -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/ManualPage.url: -------------------------------------------------------------------------------- 1 | [InternetShortcut] 2 | URL=http://panda3d.org/wiki/index.php/Sample_Programs:_roaming_ralph 3 | -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/Run main.py.lnk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/roaming-ralph/Run main.py.lnk -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Author: Ryan Myers 4 | # Models: Jeff Styers, Reagan Heller 5 | # 6 | # Last Updated: 2015-03-13 7 | # 8 | # This tutorial provides an example of creating a character 9 | # and having it walk around on uneven terrain, as well 10 | # as implementing a fully rotatable camera. 
11 | 12 | from direct.showbase.ShowBase import ShowBase 13 | from panda3d.core import CollisionTraverser, CollisionNode 14 | from panda3d.core import CollisionHandlerQueue, CollisionRay 15 | from panda3d.core import Filename, AmbientLight, DirectionalLight 16 | from panda3d.core import PandaNode, NodePath, Camera, TextNode 17 | from panda3d.core import CollideMask 18 | from direct.gui.OnscreenText import OnscreenText 19 | from direct.actor.Actor import Actor 20 | import random 21 | import sys 22 | import os 23 | import math 24 | 25 | # Function to put instructions on the screen. 26 | def addInstructions(pos, msg): 27 | return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), scale=.05, 28 | shadow=(0, 0, 0, 1), parent=base.a2dTopLeft, 29 | pos=(0.08, -pos - 0.04), align=TextNode.ALeft) 30 | 31 | # Function to put title on the screen. 32 | def addTitle(text): 33 | return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), scale=.07, 34 | parent=base.a2dBottomRight, align=TextNode.ARight, 35 | pos=(-0.1, 0.09), shadow=(0, 0, 0, 1)) 36 | 37 | 38 | class RoamingRalphDemo(ShowBase): 39 | def __init__(self): 40 | # Set up the window, camera, etc. 41 | ShowBase.__init__(self) 42 | 43 | # Set the background color to black 44 | self.win.setClearColor((0, 0, 0, 1)) 45 | 46 | # This is used to store which keys are currently pressed. 47 | self.keyMap = { 48 | "left": 0, "right": 0, "forward": 0, "cam-left": 0, "cam-right": 0} 49 | 50 | # Post the instructions 51 | self.title = addTitle( 52 | "Panda3D Tutorial: Roaming Ralph (Walking on Uneven Terrain)") 53 | self.inst1 = addInstructions(0.06, "[ESC]: Quit") 54 | self.inst2 = addInstructions(0.12, "[Left Arrow]: Rotate Ralph Left") 55 | self.inst3 = addInstructions(0.18, "[Right Arrow]: Rotate Ralph Right") 56 | self.inst4 = addInstructions(0.24, "[Up Arrow]: Run Ralph Forward") 57 | self.inst6 = addInstructions(0.30, "[A]: Rotate Camera Left") 58 | self.inst7 = addInstructions(0.36, "[S]: Rotate Camera Right") 59 | 60 | # Set up the environment 61 | # 62 | # This environment model contains collision meshes. If you look 63 | # in the egg file, you will see the following: 64 | # 65 | # { Polyset keep descend } 66 | # 67 | # This tag causes the following mesh to be converted to a collision 68 | # mesh -- a mesh which is optimized for collision, not rendering. 69 | # It also keeps the original mesh, so there are now two copies --- 70 | # one optimized for rendering, one for collisions. 71 | 72 | self.environ = loader.loadModel("models/world") 73 | self.environ.reparentTo(render) 74 | 75 | # Create the main character, Ralph 76 | 77 | ralphStartPos = self.environ.find("**/start_point").getPos() 78 | self.ralph = Actor("models/ralph", 79 | {"run": "models/ralph-run", 80 | "walk": "models/ralph-walk"}) 81 | self.ralph.reparentTo(render) 82 | self.ralph.setScale(.2) 83 | self.ralph.setPos(ralphStartPos + (0, 0, 0.5)) 84 | 85 | # Create a floater object, which floats 2 units above ralph. We 86 | # use this as a target for the camera to look at. 
87 | 88 | self.floater = NodePath(PandaNode("floater")) 89 | self.floater.reparentTo(self.ralph) 90 | self.floater.setZ(2.0) 91 | 92 | # Accept the control keys for movement and rotation 93 | 94 | self.accept("escape", sys.exit) 95 | self.accept("arrow_left", self.setKey, ["left", True]) 96 | self.accept("arrow_right", self.setKey, ["right", True]) 97 | self.accept("arrow_up", self.setKey, ["forward", True]) 98 | self.accept("a", self.setKey, ["cam-left", True]) 99 | self.accept("s", self.setKey, ["cam-right", True]) 100 | self.accept("arrow_left-up", self.setKey, ["left", False]) 101 | self.accept("arrow_right-up", self.setKey, ["right", False]) 102 | self.accept("arrow_up-up", self.setKey, ["forward", False]) 103 | self.accept("a-up", self.setKey, ["cam-left", False]) 104 | self.accept("s-up", self.setKey, ["cam-right", False]) 105 | 106 | taskMgr.add(self.move, "moveTask") 107 | 108 | # Game state variables 109 | self.isMoving = False 110 | 111 | # Set up the camera 112 | self.disableMouse() 113 | self.camera.setPos(self.ralph.getX(), self.ralph.getY() + 10, 2) 114 | 115 | 116 | 117 | # Records the state of the arrow keys 118 | def setKey(self, key, value): 119 | self.keyMap[key] = value 120 | 121 | # Accepts arrow keys to move either the player or the menu cursor, 122 | # Also deals with grid checking and collision detection 123 | def move(self, task): 124 | 125 | startpos = self.ralph.getPos() 126 | 127 | # If a move-key is pressed, move ralph in the specified direction. 128 | 129 | if self.keyMap["left"]: 130 | self.ralph.setH(self.ralph.getH() + 300 * dt) 131 | # If ralph is moving, loop the run animation. 132 | 133 | 134 | 135 | return task.cont 136 | 137 | 138 | demo = RoamingRalphDemo() 139 | demo.run() 140 | -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/models/ground.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/roaming-ralph/models/ground.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/models/hedge.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/roaming-ralph/models/hedge.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/models/ralph-run.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/roaming-ralph/models/ralph-run.egg.pz -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/models/ralph-walk.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/roaming-ralph/models/ralph-walk.egg.pz -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/models/ralph.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/roaming-ralph/models/ralph.egg.pz 
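The move() task in the roaming-ralph/main.py listing above is abridged: it references dt without defining it and never uses the collision classes it imports. Below is a minimal sketch of the usual per-frame logic, assuming the stock Panda3D "Roaming Ralph" sample's setup (globalClock for frame time, plus a downward CollisionRay registered with self.cTrav and self.ralphGroundHandler in __init__; those two names are not defined in this copy).

    # Sketch only; assumes self.cTrav and self.ralphGroundHandler were created
    # in __init__ as in the stock Panda3D "Roaming Ralph" sample.
    def move(self, task):
        dt = globalClock.getDt()      # per-frame delta time ("300 * dt" needs this)
        startpos = self.ralph.getPos()

        if self.keyMap["left"]:
            self.ralph.setH(self.ralph.getH() + 300 * dt)
        if self.keyMap["right"]:
            self.ralph.setH(self.ralph.getH() - 300 * dt)
        if self.keyMap["forward"]:
            self.ralph.setY(self.ralph, -25 * dt)

        # Loop the run animation while moving, otherwise freeze in a walk pose.
        if self.keyMap["forward"] or self.keyMap["left"] or self.keyMap["right"]:
            if not self.isMoving:
                self.ralph.loop("run")
                self.isMoving = True
        elif self.isMoving:
            self.ralph.stop()
            self.ralph.pose("walk", 5)
            self.isMoving = False

        # Keep Ralph on the uneven terrain: cast the downward CollisionRay and
        # use the highest "terrain" hit as his new Z; otherwise undo the move.
        self.cTrav.traverse(render)
        entries = list(self.ralphGroundHandler.getEntries())
        entries.sort(key=lambda e: e.getSurfacePoint(render).getZ(), reverse=True)
        if entries and entries[0].getIntoNode().getName() == "terrain":
            self.ralph.setZ(entries[0].getSurfacePoint(render).getZ())
        else:
            self.ralph.setPos(startpos)

        return task.cont

Camera rotation for the cam-left and cam-right keys is handled the same way in the full sample and is omitted here.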
-------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/models/ralph.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/roaming-ralph/models/ralph.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/models/rock03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/roaming-ralph/models/rock03.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/models/tree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/roaming-ralph/models/tree.jpg -------------------------------------------------------------------------------- /Codes/Abhi/3D/roaming-ralph/models/world.egg.pz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/3D/roaming-ralph/models/world.egg.pz -------------------------------------------------------------------------------- /Codes/Abhi/3D/test.py: -------------------------------------------------------------------------------- 1 | import direct.directbase.DirectStart 2 | from direct.gui.OnscreenText import OnscreenText 3 | from direct.gui.DirectGui import * 4 | from panda3d.core import * 5 | 6 | v = [0] 7 | # Add some text 8 | bk_text = "This is my Demo" 9 | textObject = OnscreenText(text = bk_text, pos = (0.95,-0.95), 10 | scale = 0.07,fg=(1,0.5,0.5,1),align=TextNode.ACenter,mayChange=1) 11 | 12 | # Callback function to set text 13 | def setText(status=None): 14 | bk_text = "CurrentValue : %s"%v 15 | textObject.setText(bk_text) 16 | 17 | # Add button 18 | buttons = [ 19 | DirectRadioButton(text = 'RadioButton0', variable=v, value=[0], scale=0.05, pos=(-0.4,0,0), command=setText), 20 | DirectRadioButton(text = 'RadioButton1', variable=v, value=[1], scale=0.05, pos=(0,0,0), command=setText), 21 | DirectRadioButton(text = 'RadioButton2', variable=v, value=[2], scale=0.05, pos=(0.4,0,0), command=setText) 22 | ] 23 | 24 | for button in buttons: 25 | button.setOthers(buttons) 26 | 27 | # Run the tutorial 28 | run() -------------------------------------------------------------------------------- /Codes/Abhi/Hand_segmentation_abhi.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | from mpl_toolkits.mplot3d import Axes3D 6 | import scipy 7 | import numpy as np 8 | import cv2 9 | 10 | class HandGestureObjectClass(object): 11 | def __init__(self): 12 | 13 | # Kinect runtime object, we want only color and body frames 14 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 15 | 16 | self._bodies = None 17 | 18 | 19 | def neighbourhood(self, array, radius, seed): 20 | 21 | neighbour = np.array(array) 22 | neighbour *= 0 23 | 24 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = 
np.uint8) 25 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 26 | # temp = cv2.Color(temp,cv2.COLOR_GRAY2RGB) 27 | # mask = np.zeros(np.shape(temp), dtype = np.uint8) 28 | # mask[radius,radius] = 1 29 | # bgdModel = np.zeros((1,65),np.float64) 30 | # fgdModel = np.zeros((1,65),np.float64) 31 | # rect = (0,0,100,100) 32 | # mask, bgdModel, fgdModel=cv2.grabCut(temp,mask,rect,bgdModel,fgdModel,4,mode = cv2.GC_INIT_WITH_RECT) 33 | # print np.shape(mask) 34 | # temp = np.bitwise_and(mask,temp) 35 | return temp 36 | 37 | def merge(self, array_big, array_small, seed ): 38 | [a,b] = np.shape(array_small) 39 | array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small 40 | return array_big 41 | 42 | def max_hist_depth(self, frame): 43 | #print 'FRAME_MAX = ' + str(frame.max()) 44 | binaries = int(frame.max()) 45 | if binaries <= 0: 46 | return 0 47 | histogram, bins = np.histogram(frame, bins = binaries) 48 | histogram = histogram.tolist(); bins = bins.tolist(); 49 | histogram[0 : 1] = [0, 0] 50 | max_hist = bins[histogram.index( max(histogram) )] 51 | return max_hist 52 | 53 | def run(self): 54 | print_frame=None 55 | 56 | # -------- Main Program Loop ----------- 57 | while (True): 58 | # --- Main event loop 59 | 60 | if self._kinect.has_new_body_frame(): 61 | print 'has body' 62 | depth_frame = self._kinect.get_last_depth_frame() 63 | 64 | depth_frame = np.array(depth_frame/16, dtype= np.uint8) 65 | depth_frame = depth_frame.reshape(424,512) 66 | 67 | self._bodies = self._kinect.get_last_body_frame() 68 | 69 | if self._bodies is not None: 70 | #first detected body taken 71 | body = self._bodies.bodies[0] 72 | if not body.is_tracked: 73 | continue 74 | 75 | joints = body.joints 76 | 77 | # convert joint coordinates to color space 78 | joint_points = self._kinect.body_joints_to_depth_space(joints) 79 | 80 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 81 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 82 | left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 83 | left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 84 | 85 | right_x = right_x if right_x < 424 else 423 86 | right_y = right_y if right_y < 512 else 511 87 | left_x = left_x if left_x < 424 else 423 88 | left_y = left_y if left_y < 512 else 511 89 | 90 | right_hand_depth = depth_frame[right_x,right_y] 91 | left_hand_depth = depth_frame[left_x,left_y] 92 | print 'ld:' + str(left_hand_depth)+'\trd:' + str(right_hand_depth) 93 | 94 | right_hand = [right_x,right_y] 95 | left_hand = [left_x,left_y] 96 | 97 | #print type(c) 98 | 99 | d = 50 100 | if depth_frame != None: 101 | right_hand_filtered = self.neighbourhood(depth_frame,d,right_hand) 102 | left_hand_filtered = self.neighbourhood(depth_frame,d,left_hand) 103 | 104 | 105 | neighbour = np.array(depth_frame) 106 | neighbour *= 0 107 | 108 | right_hand_filtered_depth_frame = self.merge(neighbour, right_hand_filtered,right_hand) 109 | left_hand_filtered_depth_frame = self.merge(neighbour, left_hand_filtered, left_hand) 110 | 111 | 112 | # right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered,right_hand),depth_frame) 113 | # left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand),depth_frame) 114 | # ret,right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY) 115 | # ret,left_hand_filtered_depth_frame = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY) 116 | 
117 | print_frame = right_hand_filtered_depth_frame+left_hand_filtered_depth_frame 118 | 119 | 120 | 121 | if print_frame != None: 122 | dpt = depth_frame 123 | cv2.imshow('Hand Filtered',print_frame) 124 | cv2.imshow('OG',depth_frame) 125 | # fig = plt.figure() 126 | # ax = fig.add_subplot(111, projection = '3d') 127 | # ax.plot([1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5]) 128 | # plt.show() 129 | 130 | if cv2.waitKey(1) & 0xFF == ord('q'): 131 | break 132 | 133 | 134 | 135 | 136 | 137 | # Close our Kinect sensor, close the window and quit. 138 | self._kinect.close() 139 | 140 | 141 | 142 | HandGestureObject = HandGestureObjectClass(); 143 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Abhi/README.doc: -------------------------------------------------------------------------------- 1 | Tried Algorithms: 2 | :Filtering 3 | -GrabCut: Failed (slow and inaccurate) 4 | -Watershed: In Progress (Idiotic docs, seems buggy) -------------------------------------------------------------------------------- /Codes/Abhi/feature: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/feature -------------------------------------------------------------------------------- /Codes/Abhi/feature_closed_hand: -------------------------------------------------------------------------------- 1 | [3, 83] 2 | [3, 97] 3 | [3, 80] 4 | [3, 41] 5 | [3, 49] 6 | [3, 42] 7 | [2, 56] 8 | [3, 55] 9 | [2, 53] 10 | [2, 93] 11 | [3, 42] 12 | [2, 108] 13 | [4, 101] 14 | [3, 123] 15 | [3, 97] 16 | [3, 110] 17 | [3, 83] 18 | [3, 80] 19 | [3, 79] 20 | [3, 96] 21 | [3, 87] 22 | [2, 66] 23 | [2, 57] 24 | [3, 65] 25 | [2, 68] 26 | [3, 73] 27 | [3, 58] 28 | [3, 75] 29 | [3, 65] 30 | [5, 66] 31 | [3, 60] 32 | [3, 62] 33 | [3, 56] 34 | [2, 25] 35 | [2, 35] 36 | [2, 40] 37 | [3, 38] 38 | [3, 51] 39 | [2, 42] 40 | [2, 87] 41 | [2, 105] 42 | [3, 136] 43 | [2, 172] 44 | [2, 97] 45 | [3, 38] 46 | [3, 47] 47 | [3, 40] 48 | [3, 38] 49 | [2, 128] 50 | [3, 113] -------------------------------------------------------------------------------- /Codes/Abhi/feature_open_hand: -------------------------------------------------------------------------------- 1 | [7, 246] 2 | [4, 113] 3 | [6, 244] 4 | [6, 248] 5 | [6, 245] 6 | [5, 234] 7 | [5, 253] 8 | [7, 253] 9 | [6, 251] 10 | [5, 255] 11 | [6, 272] 12 | [6, 271] 13 | [6, 277] 14 | [7, 269] 15 | [7, 276] 16 | [5, 274] 17 | [6, 290] 18 | [5, 277] 19 | [7, 290] 20 | [6, 288] 21 | [5, 285] 22 | [4, 292] 23 | [4, 304] 24 | [5, 296] 25 | [5, 307] 26 | [5, 324] 27 | [7, 336] 28 | [6, 344] 29 | [6, 349] 30 | [4, 375] 31 | [5, 376] 32 | [6, 402] 33 | [6, 406] 34 | [5, 412] 35 | [5, 407] 36 | [5, 418] 37 | [5, 432] 38 | [6, 393] 39 | [5, 372] 40 | [5, 375] 41 | [6, 371] 42 | [6, 375] 43 | [5, 359] 44 | [5, 393] 45 | [6, 424] 46 | [6, 405] 47 | [5, 432] 48 | [5, 431] 49 | [6, 439] 50 | [5, 444] -------------------------------------------------------------------------------- /Codes/Abhi/hand_body.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Abhi/hand_body.jpg -------------------------------------------------------------------------------- /Codes/Abhi/nn.pkl: -------------------------------------------------------------------------------- 1 | ccopy_reg 2 | _reconstructor 3 | p0 4 | 
(csknn.mlp 5 | Classifier 6 | p1 7 | c__builtin__ 8 | object 9 | p2 10 | Ntp3 11 | Rp4 12 | (dp5 13 | S'loss_type' 14 | p6 15 | NsS'dropout_rate' 16 | p7 17 | NsS'verbose' 18 | p8 19 | NsS'valid_set' 20 | p9 21 | NsS'regularize' 22 | p10 23 | NsS'learning_rate' 24 | p11 25 | F0.5 26 | sS'batch_size' 27 | p12 28 | I1 29 | sS'valid_size' 30 | p13 31 | F0.0 32 | sS'debug' 33 | p14 34 | I00 35 | sS'learning_momentum' 36 | p15 37 | F0.9 38 | sS'learning_rule' 39 | p16 40 | Vsgd 41 | p17 42 | sS'unit_counts' 43 | p18 44 | (lp19 45 | I2 46 | aI2 47 | aI2 48 | asS'weight_decay' 49 | p20 50 | NsS'layers' 51 | p21 52 | (lp22 53 | g0 54 | (csknn.nn 55 | Layer 56 | p23 57 | g2 58 | Ntp24 59 | Rp25 60 | (dp26 61 | S'name' 62 | p27 63 | Vhidden0 64 | p28 65 | sS'frozen' 66 | p29 67 | I00 68 | sS'dropout' 69 | p30 70 | NsS'pieces' 71 | p31 72 | NsS'units' 73 | p32 74 | I2 75 | sg20 76 | NsS'type' 77 | p33 78 | S'Sigmoid' 79 | p34 80 | sbag0 81 | (g23 82 | g2 83 | Ntp35 84 | Rp36 85 | (dp37 86 | g27 87 | Voutput 88 | p38 89 | sg29 90 | I00 91 | sg30 92 | Nsg31 93 | Nsg32 94 | I2 95 | sg20 96 | Nsg33 97 | S'Softmax' 98 | p39 99 | sbasS'n_iter' 100 | p40 101 | I5 102 | sS'f_stable' 103 | p41 104 | F0.001 105 | sS'n_stable' 106 | p42 107 | I10 108 | sS'callback' 109 | p43 110 | NsS'random_state' 111 | p44 112 | NsS'weights' 113 | p45 114 | (lp46 115 | (cnumpy.core.multiarray 116 | _reconstruct 117 | p47 118 | (cnumpy 119 | ndarray 120 | p48 121 | (I0 122 | tp49 123 | S'b' 124 | p50 125 | tp51 126 | Rp52 127 | (I1 128 | (I2 129 | I2 130 | tp53 131 | cnumpy 132 | dtype 133 | p54 134 | (S'f8' 135 | p55 136 | I0 137 | I1 138 | tp56 139 | Rp57 140 | (I3 141 | S'<' 142 | p58 143 | NNNI-1 144 | I-1 145 | I0 146 | tp59 147 | bI00 148 | S'\x9a#\xf9]\x9c\x1c\x05@j\xfb\x02D\xa3\x18\x07\xc0;\x024}E\x81\x0b@N{q\xf7\xb4N\x10\xc0' 149 | p60 150 | tp61 151 | bg47 152 | (g48 153 | (I0 154 | tp62 155 | g50 156 | tp63 157 | Rp64 158 | (I1 159 | (I2 160 | tp65 161 | g57 162 | I00 163 | S'\x18\xa7d\xdf\xb7q\x04\xc0@\xa2\xbd/z%\x07@' 164 | p66 165 | tp67 166 | btp68 167 | a(g47 168 | (g48 169 | (I0 170 | tp69 171 | g50 172 | tp70 173 | Rp71 174 | (I1 175 | (I2 176 | I2 177 | tp72 178 | g57 179 | I00 180 | S'?H\x97r\xa6\x85\x0e\xc0\xc0J+\xca\xf9E\t@C\x14\x06\x92\xc4,\x0f@\xafu\x96p[\xac\x10\xc0' 181 | p73 182 | tp74 183 | bg47 184 | (g48 185 | (I0 186 | tp75 187 | g50 188 | tp76 189 | Rp77 190 | (I1 191 | (I2 192 | tp78 193 | g57 194 | I00 195 | S'\x8e\x84LBG\x86\xc4?m\x84LBG\x86\xc4\xbf' 196 | p79 197 | tp80 198 | btp81 199 | asS'label_binarizers' 200 | p82 201 | (lp83 202 | g0 203 | (csklearn.preprocessing.label 204 | LabelBinarizer 205 | p84 206 | g2 207 | Ntp85 208 | Rp86 209 | (dp87 210 | S'neg_label' 211 | p88 212 | I0 213 | sS'sparse_input_' 214 | p89 215 | I00 216 | sS'sparse_output' 217 | p90 218 | I00 219 | sS'classes_' 220 | p91 221 | g47 222 | (g48 223 | (I0 224 | tp92 225 | g50 226 | tp93 227 | Rp94 228 | (I1 229 | (I2 230 | tp95 231 | g54 232 | (S'i4' 233 | p96 234 | I0 235 | I1 236 | tp97 237 | Rp98 238 | (I3 239 | S'<' 240 | p99 241 | NNNI-1 242 | I-1 243 | I0 244 | tp100 245 | bI00 246 | S'\x00\x00\x00\x00\x01\x00\x00\x00' 247 | p101 248 | tp102 249 | bsS'y_type_' 250 | p103 251 | Vmulticlass 252 | p104 253 | sS'pos_label' 254 | p105 255 | I1 256 | sbasb. 
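The nn.pkl listing above is a pickled sknn.mlp Classifier; Codes/Abhi/test_ANN.py (below) trains it on the feature_open_hand / feature_closed_hand pairs, scales the first value of each pair by 1/7 and the second by 1/500, and writes it with pickle.dump. A minimal sketch of loading and querying it, assuming the same Python/sknn environment that produced the pickle (the raw feature values here are illustrative, not taken from the data files):

import pickle

import numpy as np

# Restore the trained classifier; sknn must be importable for unpickling to work.
with open('nn.pkl', 'rb') as f:
    nn = pickle.load(f)

# One feature pair, scaled exactly as in test_ANN.py: first value / 7, second / 500.
# The raw values (5, 400) are made up for illustration.
sample = np.asarray([[5 / 7.0, 400 / 500.0]])

# test_ANN.py labels open-hand rows 1 and closed-hand rows 0, so predict()
# returns 1 for "open" and 0 for "closed".
print(nn.predict(sample))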
-------------------------------------------------------------------------------- /Codes/Abhi/test.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | from scipy import ndimage 6 | import numpy as np 7 | import cv2 8 | from os import system as cmd 9 | import math 10 | 11 | while(True): 12 | img = cv2.imread('hand_body.jpg',0) 13 | img = np.array(img, dtype = np.uint8) 14 | cv2.imshow('oring',img) 15 | val = img[40,40]+3 16 | 17 | img[img>val] /=3 18 | 19 | 20 | cv2.imshow('orig2',img) 21 | ret, img = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 22 | cv2.imshow('thresh', img) 23 | # im2, contours, hierarchy = cv2.findContours(img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) 24 | 25 | # cv2.drawContours(img, contours, 3, 255, 3) 26 | # cv2.imshow('cont',img) 27 | 28 | if cv2.waitKey(1) & 0xFF == ord('q'): 29 | break -------------------------------------------------------------------------------- /Codes/Abhi/test_ANN.py: -------------------------------------------------------------------------------- 1 | # Import datasets, classifiers and performance metrics 2 | from sknn.mlp import Classifier, Layer 3 | import numpy as np 4 | import logging 5 | import pickle 6 | logging.basicConfig() 7 | 8 | # The digits dataset 9 | opt = [] 10 | f = open("feature_open_hand", "r+") 11 | ipt_open = f.read() 12 | f.close() 13 | ipt_open = ipt_open.split("\n") 14 | 15 | for i in range(0,len(ipt_open)-1): 16 | ipt_open[i] = ipt_open[i].strip("[]").split(",") 17 | # print ipt_open 18 | ipt_open[i][0] = float(ipt_open[i][0])/7 19 | ipt_open[i][1] = float(ipt_open[i][1])/500 20 | opt.append(1) 21 | 22 | f = open("feature_closed_hand", "r+") 23 | ipt_closed = f.read() 24 | f.close() 25 | ipt_closed = ipt_closed.split("\n") 26 | 27 | for i in range(0,len(ipt_closed)-1): 28 | ipt_closed[i] = ipt_closed[i].strip("[]").split(",") 29 | ipt_closed[i][0] = float(ipt_closed[i][0])/7 30 | ipt_closed[i][1] = float(ipt_closed[i][1])/500 31 | opt.append(0) 32 | 33 | ipt = ipt_open[:-1]+ipt_closed[:-1] 34 | 35 | # ipt = [[2/7,30/400],[6/7,400/400]] 36 | # opt = [0,1] 37 | ipt = np.asarray(ipt) 38 | opt = np.asarray(opt) 39 | 40 | print ":"+str(len(ipt)) 41 | print len(opt) 42 | print opt 43 | print ipt 44 | 45 | nn = Classifier( 46 | layers=[ 47 | Layer("Sigmoid", units=2), 48 | # Layer("Softmax",units=5), 49 | Layer("Softmax")], 50 | learning_rate=0.5, 51 | n_iter=5) 52 | 53 | # nn.set_parameters([([[-3.75906463, 1.26411728],[-5.44439202, 0.44432455]], [ 2.63582797, -0.23474542]), 54 | # ([[ 4.32310838, -5.46097277],[-1.114463 , 1.37638111]], [-2.13190273, 2.13190273])]) 55 | nn.fit(ipt,opt) 56 | 57 | a = np.asarray([[2/7,30/400],[4/7,30/400],[6/7,400/400],[4/7,400/400]]) 58 | # a = np.asarray([[2,30],[4,30],[6,400],[4,400]]) 59 | # a =a.reshape(2,-1) 60 | 61 | 62 | # params = nn.get_parameters() 63 | 64 | # print "::NEW::" 65 | 66 | # tnn = Classifier( 67 | # layers=[ 68 | # Layer("Sigmoid", units=2), 69 | # # Layer("Softmax",units=5), 70 | # Layer("Softmax")], 71 | # learning_rate=0.5, 72 | # n_iter=5) 73 | 74 | # tnn.set_parameters(params) 75 | 76 | op = nn.predict(a) 77 | pickle.dump(nn, open('nn.pkl','wb')) -------------------------------------------------------------------------------- /Codes/Anuj/countors.txt: -------------------------------------------------------------------------------- 1 | from pykinect2 import 
PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import math 6 | import scipy 7 | import numpy as np 8 | import cv2 9 | import pygame 10 | 11 | 12 | # colors for drawing different bodies 13 | SKELETON_COLORS = [pygame.color.THECOLORS["red"], 14 | pygame.color.THECOLORS["blue"], 15 | pygame.color.THECOLORS["green"], 16 | pygame.color.THECOLORS["orange"], 17 | pygame.color.THECOLORS["purple"], 18 | pygame.color.THECOLORS["yellow"], 19 | pygame.color.THECOLORS["violet"]] 20 | 21 | 22 | class HandGestureObjectClass(object): 23 | def __init__(self): 24 | 25 | # Kinect runtime object, we want only color and body frames 26 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 27 | 28 | 29 | # here we will store skeleton data 30 | self._bodies = None 31 | 32 | 33 | def subimage(self,image, centre, theta, width, height): 34 | output_image = cv.CreateImage((width, height), image.depth, image.nChannels) 35 | mapping = np.array([[np.cos(theta), -np.sin(theta), centre[0]],[np.sin(theta), np.cos(theta), centre[1]]]) 36 | map_matrix_cv = cv.fromarray(mapping) 37 | cv.GetQuadrangleSubPix(image, output_image, map_matrix_cv) 38 | return output_image 39 | 40 | 41 | def run(self): 42 | print_frame=None 43 | 44 | # -------- Main Program Loop ----------- 45 | while (True): 46 | 47 | # --- Main event loop 48 | 49 | if self._kinect.has_new_depth_frame() or self._kinect.has_new_body_frame(): 50 | #print ':IN_RUN:depth_frame received' 51 | 52 | depth_frame = self._kinect.get_last_depth_frame() 53 | print_frame = 32*depth_frame.reshape(424,512) 54 | 55 | 56 | self._bodies = self._kinect.get_last_body_frame() 57 | 58 | # --- draw skeletons to _frame_surface 59 | if self._bodies is not None: 60 | # print ':IN_RUN:body received' 61 | for i in range(0, self._kinect.max_body_count): 62 | body = self._bodies.bodies[i] 63 | if not body.is_tracked: 64 | continue 65 | 66 | joints = body.joints 67 | # convert joint coordinates to color space 68 | joint_points = self._kinect.body_joints_to_depth_space(joints) 69 | # print ':' 70 | rx=joint_points[PyKinectV2.JointType_HandRight].x 71 | ry=joint_points[PyKinectV2.JointType_HandRight].y 72 | lx=joint_points[PyKinectV2.JointType_HandLeft].x 73 | ly=joint_points[PyKinectV2.JointType_HandLeft].y 74 | rx=math.floor(rx) 75 | ry=math.floor(ry) 76 | lx=math.floor(lx) 77 | ly=math.floor(ly) 78 | print_frame=cv2.circle(print_frame,(int(rx),int(ry)), 10,(255,0,0),5) 79 | print_frame=cv2.circle(print_frame,(int(lx),int(ly)), 10,(255,0,0),5) 80 | figure=cv2.imread('last1.png') 81 | imgray1=cv2.cvtColor(figure,cv2.COLOR_BGR2GRAY) 82 | ret1,thresh1=cv2.threshold(imgray1,127,255,0) 83 | #print 'apnawala'+str(type(figure2)) 84 | im2, contours, hierarchy = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) 85 | #print contours 86 | cnt=contours[1] 87 | rect = cv2.minAreaRect(cnt) 88 | #print rect 89 | angle=rect[2] 90 | width,height=rect[1] 91 | #print angle 92 | box = cv2.boxPoints(rect) 93 | box = np.int0(box) 94 | figure1=cv2.drawContours(figure,[box],0,(0,0,255),2) 95 | figure2=cv2.drawContours(figure,[cnt],0, (0,255,0), 1) 96 | cv2.imshow('figure1',figure1) 97 | cv2.imshow('figure2',figure2) 98 | #patch = self.subimage(figure,rect[0],angle,width,height) 99 | #cv.SaveImage('patch.jpg', patch) 100 | M = cv2.getRotationMatrix2D(rect[0],angle,1) 101 | dst = cv2.warpAffine(figure,M,(424,512)) 102 | cv2.imshow('figure3',dst) 103 | #if 
print_frame != None: 104 | 105 | # cv2.imshow('Depthimage',print_frame) 106 | 107 | if cv2.waitKey(1) & 0xFF == ord('q'): 108 | break 109 | 110 | 111 | # --- Limit to 60 frames per second 112 | 113 | 114 | # Close our Kinect sensor, close the window and quit. 115 | self._kinect.close() 116 | 117 | 118 | 119 | HandGestureObject = HandGestureObjectClass(); 120 | HandGestureObject.run(); 121 | -------------------------------------------------------------------------------- /Codes/Anuj/countors_defects.txt: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import scipy 6 | import numpy as np 7 | import cv2 8 | 9 | class HandGestureObjectClass(object): 10 | def __init__(self): 11 | 12 | # Kinect runtime object, we want only color and body frames 13 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 14 | 15 | # here we will store skeleton data 16 | self._bodies = None 17 | 18 | 19 | def neighbourhood(self, array, radius, seed): 20 | 21 | 22 | neighbour = np.array(array) 23 | neighbour *= 0 24 | 25 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8) 26 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 27 | return temp 28 | 29 | def merge(self, array_big, array_small, seed ): 30 | [a,b] = np.shape(array_small) 31 | array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small 32 | return array_big 33 | 34 | def max_hist_depth(self, frame): 35 | #print 'FRAME_MAX = ' + str(frame.max()) 36 | binaries = int(frame.max()) 37 | if binaries <= 0: 38 | return 0 39 | histogram, bins = np.histogram(frame, bins = binaries) 40 | histogram = histogram.tolist(); bins = bins.tolist(); 41 | histogram[0 : 1] = [0, 0] 42 | max_hist = bins[histogram.index( max(histogram) )] 43 | return max_hist 44 | 45 | def max_area_contour(self, contours): 46 | max_area=0 47 | for i in range(len(contours)): 48 | cnt=contours[i] 49 | area = cv2.contourArea(cnt) 50 | if(area>max_area): 51 | max_area=area 52 | ci=i 53 | return ci 54 | 55 | def run(self): 56 | print_frame=None 57 | 58 | # -------- Main Program Loop ----------- 59 | while (True): 60 | # --- Main event loop 61 | 62 | if self._kinect.has_new_depth_frame(): 63 | print 'frame acquired' 64 | depth_frame = self._kinect.get_last_depth_frame() 65 | 66 | depth_frame = np.array(depth_frame/16, dtype= np.uint8) 67 | depth_frame = depth_frame.reshape(424,512) 68 | cv2.imshow('lol',depth_frame) 69 | 70 | if self._kinect.has_new_body_frame(): 71 | self._bodies = self._kinect.get_last_body_frame() 72 | 73 | i = 0 74 | 75 | if self._bodies is not None: 76 | #first detected body taken 77 | if i > 6: 78 | i=0 79 | 80 | body = self._bodies.bodies[i] 81 | if not body.is_tracked: 82 | i = i + 1 83 | continue 84 | 85 | # while not body.is_tracked: 86 | # body = self._ 87 | 88 | joints = body.joints 89 | 90 | # convert joint coordinates to color space 91 | joint_points = self._kinect.body_joints_to_depth_space(joints) 92 | 93 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 94 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 95 | left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 96 | left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 97 | 98 | right_x = right_x if right_x < 424 else 423 99 | right_y = 
right_y if right_y < 512 else 511 100 | left_x = left_x if left_x < 424 else 423 101 | left_y = left_y if left_y < 512 else 511 102 | 103 | right_hand_depth = depth_frame[right_x,right_y] 104 | left_hand_depth = depth_frame[left_x,left_y] 105 | right_hand = [right_x,right_y] 106 | left_hand = [left_x,left_y] 107 | 108 | d = 50 109 | if depth_frame != None: 110 | right_hand_filtered = self.neighbourhood(depth_frame,d,right_hand) 111 | left_hand_filtered = self.neighbourhood(depth_frame,d,left_hand) 112 | neighbour = np.array(depth_frame) 113 | neighbour *= 0 114 | 115 | print_frame = np.zeros(np.shape(depth_frame)) 116 | 117 | if right_hand_filtered != None: 118 | img1,contours1, hierarchy1 = cv2.findContours(right_hand_filtered,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE) 119 | cnt=contours1[self.max_area_contour(contours1)] 120 | hull = cv2.convexHull(cnt,returnPoints = False) 121 | defects = cv2.convexityDefects(cnt,hull) 122 | drawing = np.zeros(right_hand_filtered.shape,np.uint8) 123 | drawing = cv2.cvtColor(drawing,cv2.COLOR_GRAY2RGB) 124 | for i in range(defects.shape[0]): 125 | s,e,f,d = defects[i,0] 126 | start = tuple(cnt[s][0]) 127 | end = tuple(cnt[e][0]) 128 | far = tuple(cnt[f][0]) 129 | cv2.line(drawing,start,end,[0,255,0],2) 130 | cv2.circle(drawing,far,5,[0,0,255],-1) 131 | drawing = cv2.drawContours(drawing,[cnt],-1,150,1) 132 | cv2.imshow('contours1',drawing) 133 | right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered,right_hand),depth_frame) 134 | ret,right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 135 | print_frame += right_hand_filtered_depth_frame 136 | if left_hand_filtered != None: 137 | left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand),depth_frame) 138 | ret,left_hand_filtered_depth_frame = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 139 | print_frame += left_hand_filtered_depth_frame 140 | cv2.imshow('Hand Filtered',print_frame) 141 | 142 | if print_frame != None: 143 | dpt = depth_frame 144 | 145 | 146 | 147 | 148 | if cv2.waitKey(1) & 0xFF == ord('q'): 149 | break 150 | 151 | 152 | cv2.imshow('OG',tp) 153 | 154 | 155 | # --- Limit to 60 frames per second 156 | 157 | 158 | # Close our Kinect sensor, close the window and quit. 
159 | self._kinect.close() 160 | 161 | 162 | 163 | HandGestureObject = HandGestureObjectClass(); 164 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Anuj/feature_closed_hand: -------------------------------------------------------------------------------- 1 | [2, 121] 2 | [2, 173] 3 | [3, 173] 4 | [3, 163] 5 | [3, 134] 6 | [3, 147] 7 | [3, 83] 8 | [3, 97] 9 | [3, 80] 10 | [3, 41] 11 | [3, 49] 12 | [3, 42] 13 | [2, 56] 14 | [3, 55] 15 | [2, 53] 16 | [2, 93] 17 | [3, 42] 18 | [2, 108] 19 | [4, 101] 20 | [3, 123] 21 | [3, 97] 22 | [3, 110] 23 | [3, 83] 24 | [3, 80] 25 | [3, 79] 26 | [3, 96] 27 | [3, 87] 28 | [2, 66] 29 | [2, 57] 30 | [3, 65] 31 | [2, 68] 32 | [3, 73] 33 | [3, 58] 34 | [3, 75] 35 | [3, 65] 36 | [5, 66] 37 | [3, 60] 38 | [3, 62] 39 | [3, 56] 40 | [2, 25] 41 | [2, 35] 42 | [2, 40] 43 | [3, 38] 44 | [3, 51] 45 | [2, 42] 46 | [2, 87] 47 | [2, 105] 48 | [3, 136] 49 | [2, 172] 50 | [2, 97] 51 | [3, 38] 52 | [3, 47] 53 | [3, 40] 54 | [3, 38] 55 | [2, 128] 56 | [3, 113] 57 | [3, 122] 58 | [3, 90] 59 | [3, 91] 60 | [2, 102] 61 | [3, 65] 62 | [3, 69] 63 | [2, 51] 64 | [3, 53] 65 | [3, 60] 66 | [1, 44] 67 | [2, 55] 68 | [3, 45] 69 | [1, 30] 70 | [2, 42] 71 | [2, 40] 72 | [2, 43] 73 | [1, 43] 74 | [2, 42] 75 | [2, 35] 76 | [1, 34] 77 | [1, 56] 78 | [3, 61] 79 | [4, 65] 80 | [2, 46] 81 | [1, 49] 82 | [3, 45] 83 | [2, 57] 84 | [1, 50] 85 | [3, 50] 86 | [1, 46] 87 | [2, 34] 88 | [2, 39] 89 | [3, 38] 90 | [3, 56] 91 | [2, 35] 92 | [1, 25] 93 | [3, 37] 94 | [2, 34] 95 | [2, 38] 96 | [2, 52] 97 | [2, 34] 98 | [2, 31] 99 | [2, 35] 100 | [2, 38] 101 | [3, 37] 102 | [2, 30] 103 | [2, 41] 104 | [2, 43] 105 | [2, 64] 106 | [2, 50] 107 | [3, 58] 108 | [4, 44] 109 | [2, 64] 110 | [2, 74] 111 | [3, 33] 112 | [2, 37] 113 | [2, 23] 114 | [2, 27] 115 | [2, 40] 116 | [2, 31] 117 | [3, 38] 118 | [3, 43] 119 | [3, 31] 120 | [3, 45] 121 | [3, 46] 122 | [3, 39] 123 | [3, 45] 124 | [3, 56] 125 | [2, 43] 126 | [4, 48] 127 | [3, 56] 128 | [3, 57] 129 | [3, 56] 130 | [3, 64] 131 | [3, 52] 132 | [3, 53] 133 | [3, 53] 134 | [3, 49] 135 | [3, 31] 136 | [3, 16] 137 | [3, 35] 138 | [3, 161] 139 | [3, 158] 140 | [3, 170] 141 | [3, 159] 142 | [3, 79] 143 | [3, 79] 144 | [2, 81] 145 | [3, 181] 146 | [3, 172] 147 | [2, 162] 148 | [3, 141] 149 | [2, 49] 150 | [3, 50] 151 | [2, 29] 152 | [2, 34] 153 | [2, 22] 154 | [2, 22] 155 | [2, 23] 156 | [2, 17] 157 | [2, 40] 158 | [2, 31] 159 | [2, 63] 160 | [4, 30] 161 | [2, 33] 162 | [2, 64] 163 | [2, 45] 164 | [2, 42] 165 | [2, 50] 166 | [3, 14] 167 | [2, 28] 168 | [2, 56] 169 | [2, 80] 170 | [2, 75] 171 | [2, 53] 172 | [2, 39] 173 | [2, 35] 174 | [3, 26] 175 | [3, 31] 176 | [3, 33] 177 | [3, 30] 178 | [3, 42] 179 | [3, 38] 180 | [3, 48] 181 | [3, 59] 182 | [3, 44] 183 | [3, 47] 184 | [3, 44] 185 | [3, 49] 186 | [3, 43] 187 | [3, 38] 188 | [3, 38] 189 | [3, 41] 190 | [3, 29] 191 | [3, 33] 192 | [3, 28] 193 | [2, 33] 194 | [3, 23] 195 | [3, 40] 196 | [3, 21] 197 | [3, 34] 198 | [3, 26] 199 | [3, 29] 200 | [3, 28] 201 | [3, 39] 202 | [3, 27] 203 | [3, 31] 204 | [4, 42] 205 | [5, 62] 206 | [3, 43] 207 | [4, 58] 208 | [4, 69] 209 | [4, 58] 210 | [2, 68] 211 | [7, 60] 212 | [4, 61] 213 | [4, 48] 214 | [5, 61] 215 | [2, 51] 216 | [4, 46] 217 | [2, 58] 218 | [5, 40] 219 | [2, 36] 220 | [2, 52] 221 | [3, 73] 222 | [5, 39] 223 | [4, 56] 224 | [2, 53] 225 | [3, 66] 226 | [3, 52] 227 | [3, 54] 228 | [4, 60] 229 | [3, 37] 230 | [3, 39] 231 | [2, 49] 232 | [3, 42] 233 | [3, 37] 234 | [2, 35] 235 | [2, 40] 236 | [3, 32] 237 | [3, 
30] 238 | [3, 28] 239 | [3, 24] 240 | [3, 25] 241 | [3, 27] 242 | [3, 44] 243 | [3, 39] 244 | [2, 35] 245 | [4, 84] 246 | [2, 32] 247 | [3, 43] 248 | [3, 39] 249 | [2, 52] 250 | [3, 51] 251 | [2, 47] 252 | [2, 49] 253 | [2, 62] 254 | [2, 183] 255 | [2, 69] 256 | [2, 193] 257 | [2, 178] 258 | [3, 456] 259 | [2, 180] 260 | [2, 219] 261 | [2, 208] 262 | [2, 219] 263 | [2, 282] 264 | [2, 242] 265 | [2, 218] 266 | [2, 238] 267 | [2, 241] 268 | [2, 237] 269 | [2, 247] 270 | [2, 340] 271 | [2, 318] 272 | [2, 361] 273 | [2, 344] 274 | [2, 265] 275 | [2, 342] 276 | [2, 420] 277 | [2, 433] 278 | [2, 279] 279 | [2, 328] 280 | [2, 361] 281 | [2, 387] 282 | [2, 394] 283 | [2, 459] 284 | [2, 438] 285 | [2, 429] 286 | [2, 378] 287 | [2, 284] 288 | [2, 333] 289 | [2, 305] 290 | [2, 290] 291 | [2, 244] 292 | [2, 180] 293 | [2, 122] 294 | [2, 111] 295 | [2, 100] 296 | [2, 85] 297 | [2, 81] 298 | [2, 61] 299 | [2, 25] 300 | [2, 21] 301 | [3, 30] 302 | [2, 44] 303 | [2, 49] 304 | [2, 57] 305 | [2, 60] 306 | [3, 52] 307 | [2, 39] 308 | [2, 47] 309 | [2, 55] 310 | [2, 28] 311 | [2, 22] 312 | [2, 19] 313 | [3, 67] 314 | [2, 46] 315 | [2, 54] 316 | [2, 26] 317 | [2, 57] 318 | [2, 58] 319 | -------------------------------------------------------------------------------- /Codes/Anuj/feature_open_hand: -------------------------------------------------------------------------------- 1 | [7, 246] 2 | [4, 113] 3 | [6, 244] 4 | [6, 248] 5 | [6, 245] 6 | [5, 234] 7 | [5, 253] 8 | [7, 253] 9 | [6, 251] 10 | [5, 255] 11 | [6, 272] 12 | [6, 271] 13 | [6, 277] 14 | [7, 269] 15 | [7, 276] 16 | [5, 274] 17 | [6, 290] 18 | [5, 277] 19 | [7, 290] 20 | [6, 288] 21 | [5, 285] 22 | [4, 292] 23 | [4, 304] 24 | [5, 296] 25 | [5, 307] 26 | [5, 324] 27 | [7, 336] 28 | [6, 344] 29 | [6, 349] 30 | [4, 375] 31 | [5, 376] 32 | [6, 402] 33 | [6, 406] 34 | [5, 412] 35 | [5, 407] 36 | [5, 418] 37 | [5, 432] 38 | [6, 393] 39 | [5, 372] 40 | [5, 375] 41 | [6, 371] 42 | [6, 375] 43 | [5, 359] 44 | [5, 393] 45 | [6, 424] 46 | [6, 405] 47 | [5, 432] 48 | [5, 431] 49 | [6, 439] 50 | [5, 444] 51 | [6, 449] 52 | [5, 430] 53 | [5, 409] 54 | [7, 414] 55 | [6, 421] 56 | [5, 439] 57 | [6, 394] 58 | [6, 402] 59 | [6, 394] 60 | [5, 374] 61 | [5, 375] 62 | [5, 349] 63 | [5, 354] 64 | [5, 346] 65 | [5, 373] 66 | [5, 367] 67 | [5, 395] 68 | [5, 391] 69 | [5, 397] 70 | [5, 415] 71 | [5, 469] 72 | [5, 439] 73 | [5, 426] 74 | [5, 445] 75 | [5, 416] 76 | [5, 419] 77 | [4, 398] 78 | [4, 417] 79 | [5, 410] 80 | [6, 411] 81 | [6, 424] 82 | [6, 421] 83 | [6, 423] 84 | [5, 412] 85 | [6, 422] 86 | [5, 403] 87 | [5, 393] 88 | [7, 384] 89 | [6, 373] 90 | [5, 368] 91 | [6, 359] 92 | [7, 351] 93 | [6, 355] 94 | [6, 334] 95 | [4, 325] 96 | [5, 346] 97 | [6, 325] 98 | [6, 310] 99 | [5, 316] 100 | [4, 307] 101 | [6, 301] 102 | [5, 295] 103 | [5, 297] 104 | [6, 262] 105 | [6, 280] 106 | [6, 261] 107 | [6, 238] 108 | [6, 259] 109 | [6, 257] 110 | [6, 239] 111 | [5, 391] 112 | [6, 365] 113 | [6, 374] 114 | [4, 369] 115 | [6, 379] 116 | [5, 391] 117 | [5, 373] 118 | [5, 428] 119 | [4, 447] 120 | [4, 398] 121 | [3, 412] 122 | [4, 451] 123 | [5, 419] 124 | [4, 438] 125 | [5, 341] 126 | [4, 319] 127 | [4, 319] 128 | [4, 337] 129 | [4, 329] 130 | [4, 319] 131 | [4, 350] 132 | [4, 364] 133 | [4, 328] 134 | [5, 305] 135 | [4, 346] 136 | [4, 356] 137 | [4, 335] 138 | [5, 343] 139 | [5, 395] 140 | [5, 386] 141 | [6, 392] 142 | [5, 404] 143 | [6, 395] 144 | [6, 431] 145 | [6, 420] 146 | [6, 429] 147 | [6, 420] 148 | [6, 456] 149 | [6, 404] 150 | [5, 433] 151 | [6, 455] 152 | [6, 426] 153 
| [6, 280] 154 | [6, 396] 155 | [6, 267] 156 | [6, 271] 157 | [6, 312] 158 | [6, 350] 159 | [6, 318] 160 | [5, 311] 161 | [5, 275] 162 | [5, 298] 163 | [5, 300] 164 | [5, 305] 165 | [5, 312] 166 | [5, 310] 167 | [5, 338] 168 | [5, 343] 169 | [5, 347] 170 | [4, 334] 171 | [5, 373] 172 | [5, 368] 173 | [4, 365] 174 | [4, 292] 175 | [4, 317] 176 | [4, 336] 177 | [5, 401] 178 | [5, 421] 179 | [5, 402] 180 | [4, 425] 181 | [4, 409] 182 | [5, 427] 183 | [5, 437] 184 | [4, 437] 185 | [5, 436] 186 | [5, 445] 187 | [5, 473] 188 | [5, 470] 189 | [5, 508] 190 | [5, 530] 191 | [5, 543] 192 | [5, 563] 193 | [5, 596] 194 | [5, 600] 195 | [5, 645] 196 | [4, 669] 197 | [5, 662] 198 | [5, 711] 199 | [5, 700] 200 | [5, 697] 201 | [4, 735] 202 | [5, 749] 203 | [5, 784] 204 | [5, 829] 205 | [5, 763] 206 | [6, 795] 207 | [5, 756] 208 | [5, 754] 209 | [5, 870] 210 | [5, 827] 211 | [5, 799] 212 | [5, 822] 213 | [4, 773] 214 | [4, 811] 215 | [6, 783] 216 | [5, 792] 217 | [5, 749] 218 | [5, 787] 219 | [6, 798] 220 | [6, 804] 221 | [4, 629] 222 | [4, 629] 223 | [5, 658] 224 | [5, 679] 225 | [5, 705] 226 | [5, 680] 227 | [5, 666] 228 | [5, 678] 229 | [5, 697] 230 | [5, 670] 231 | [5, 694] 232 | [5, 724] 233 | [5, 644] 234 | [5, 637] 235 | [5, 595] 236 | [5, 602] 237 | [5, 618] 238 | [5, 591] 239 | [5, 613] 240 | [5, 636] 241 | [5, 639] 242 | [5, 605] 243 | [5, 692] 244 | [5, 678] 245 | [5, 716] 246 | [5, 718] 247 | [5, 753] 248 | [5, 727] 249 | [5, 754] 250 | [6, 751] 251 | [5, 769] 252 | [6, 761] 253 | [5, 731] 254 | [6, 793] 255 | [6, 773] 256 | [4, 707] 257 | [6, 809] 258 | [5, 758] 259 | [5, 783] 260 | [5, 731] 261 | [5, 680] 262 | [5, 707] 263 | [5, 675] 264 | [5, 688] 265 | [5, 717] 266 | [5, 704] 267 | [5, 666] 268 | [5, 671] 269 | [5, 669] 270 | [5, 665] 271 | [5, 634] 272 | [5, 634] 273 | [5, 676] 274 | [6, 668] 275 | [5, 662] 276 | [5, 655] 277 | [5, 659] 278 | [6, 664] 279 | [4, 645] 280 | [5, 650] 281 | [5, 666] 282 | [5, 663] 283 | [5, 675] 284 | [5, 624] 285 | [5, 636] 286 | [5, 628] 287 | [5, 653] 288 | [5, 650] 289 | [5, 655] 290 | [5, 642] 291 | [5, 670] 292 | [5, 642] 293 | [5, 637] 294 | [5, 616] 295 | [5, 569] 296 | [5, 606] 297 | [5, 609] 298 | [5, 593] 299 | [5, 598] 300 | [5, 554] 301 | [5, 625] 302 | [5, 639] 303 | [5, 639] 304 | [5, 646] 305 | [5, 684] 306 | [5, 666] 307 | [5, 664] 308 | [5, 706] 309 | [5, 702] 310 | [5, 722] 311 | [6, 663] 312 | [5, 679] 313 | [5, 619] 314 | [5, 687] 315 | [4, 672] 316 | [5, 675] 317 | [5, 668] 318 | [5, 629] 319 | [5, 636] 320 | [5, 645] 321 | [5, 637] 322 | [5, 644] 323 | [4, 633] 324 | [5, 612] 325 | [5, 601] 326 | [5, 636] 327 | [6, 640] 328 | [5, 653] 329 | [5, 675] 330 | [5, 668] 331 | [6, 633] 332 | [5, 660] 333 | [5, 703] 334 | [5, 694] 335 | [6, 727] 336 | [5, 746] 337 | [6, 691] 338 | [6, 684] 339 | [6, 643] 340 | [6, 637] 341 | [6, 599] 342 | [5, 574] 343 | [6, 593] 344 | [5, 535] 345 | [5, 515] 346 | [6, 567] 347 | [5, 546] 348 | [5, 551] 349 | [6, 562] 350 | [6, 578] 351 | [5, 575] 352 | [5, 555] 353 | [6, 570] 354 | [6, 606] 355 | [6, 644] 356 | [6, 662] 357 | [6, 626] 358 | [5, 641] 359 | [6, 655] 360 | [5, 656] 361 | [6, 694] 362 | [6, 688] 363 | [5, 653] 364 | [5, 653] 365 | [6, 651] 366 | [5, 656] 367 | [5, 641] 368 | [5, 633] 369 | [5, 623] 370 | [5, 679] 371 | [5, 587] 372 | [5, 583] 373 | [5, 529] 374 | [5, 528] 375 | [4, 515] 376 | [4, 508] 377 | [5, 559] 378 | [5, 540] 379 | [5, 696] 380 | [5, 707] 381 | [5, 751] 382 | [5, 776] 383 | [5, 767] 384 | [6, 749] 385 | [6, 773] 386 | [7, 783] 387 | [6, 705] 388 | [5, 743] 389 | [5, 735] 390 
| [7, 727] 391 | [5, 744] 392 | [5, 746] 393 | [5, 747] 394 | [5, 725] 395 | [5, 728] 396 | [5, 723] 397 | [6, 736] 398 | [5, 705] 399 | [5, 723] 400 | [5, 714] 401 | [5, 732] 402 | [5, 740] 403 | [7, 723] 404 | [6, 719] 405 | [5, 689] 406 | [5, 709] 407 | [6, 700] 408 | [5, 690] 409 | [5, 737] 410 | [5, 718] 411 | [7, 753] 412 | -------------------------------------------------------------------------------- /Codes/Anuj/test.py: -------------------------------------------------------------------------------- 1 | # Import datasets, classifiers and performance metrics 2 | from sknn.mlp import Classifier, Layer 3 | import numpy as np 4 | # The digits dataset 5 | opt = [] 6 | 7 | f = open("feature_open_hand", "r+") 8 | ipt_open = f.read() 9 | f.close() 10 | ipt_open = ipt_open.split("\n") 11 | for i in range(0,len(ipt_open)-1): 12 | ipt_open[i] = ipt_open[i].strip("[]").split(",") 13 | # print ipt_open 14 | ipt_open[i][0] = int(ipt_open[i][0]) 15 | ipt_open[i][1] = int(ipt_open[i][1]) 16 | opt.append(1) 17 | 18 | f = open("feature_closed_hand", "r+") 19 | ipt_closed = f.read() 20 | f.close() 21 | ipt_closed = ipt_closed.split("\n") 22 | for i in range(0,len(ipt_closed)-1): 23 | ipt_closed[i] = ipt_closed[i].strip("[]").split(",") 24 | ipt_closed[i][0] = int(ipt_closed[i][0]) 25 | ipt_closed[i][1] = int(ipt_closed[i][1]) 26 | opt.append(0) 27 | 28 | ipt = ipt_open[:-1]+ipt_closed[:-1] 29 | ipt = np.asarray(ipt) 30 | opt = np.asarray(opt) 31 | print ":"+str(len(ipt)) 32 | print len(opt) 33 | nn = Classifier( 34 | layers=[ 35 | Layer("Softmax", units=5), 36 | Layer("Softmax",units=2), 37 | Layer("Softmax")], 38 | learning_rate=0.05, 39 | n_iter=10) 40 | nn.fit(ipt,opt) 41 | 42 | a = np.asarray([[6,300]]) 43 | # a =a.reshape(2,-1) 44 | op = nn.predict(a) 45 | 46 | print op -------------------------------------------------------------------------------- /Codes/Anuj/test_abhi.py: -------------------------------------------------------------------------------- 1 | # Import datasets, classifiers and performance metrics 2 | from sknn.mlp import Classifier, Layer 3 | import numpy as np 4 | import logging 5 | logging.basicConfig() 6 | 7 | # The digits dataset 8 | opt = [] 9 | f = open("feature_open_hand", "r+") 10 | ipt_open = f.read() 11 | f.close() 12 | ipt_open = ipt_open.split("\n") 13 | 14 | for i in range(0,len(ipt_open)-1): 15 | ipt_open[i] = ipt_open[i].strip("[]").split(",") 16 | # print ipt_open 17 | ipt_open[i][0] = int(ipt_open[i][0]) 18 | ipt_open[i][1] = int(ipt_open[i][1]) 19 | opt.append(1) 20 | 21 | f = open("feature_closed_hand", "r+") 22 | ipt_closed = f.read() 23 | f.close() 24 | ipt_closed = ipt_closed.split("\n") 25 | 26 | for i in range(0,len(ipt_closed)-1): 27 | ipt_closed[i] = ipt_closed[i].strip("[]").split(",") 28 | ipt_closed[i][0] = int(ipt_closed[i][0]) 29 | ipt_closed[i][1] = int(ipt_closed[i][1]) 30 | opt.append(0) 31 | 32 | ipt = ipt_open[:-1]+ipt_closed[:-1] 33 | ipt = np.asarray(ipt) 34 | opt = np.asarray(opt) 35 | print ":"+str(len(ipt)) 36 | print len(opt) 37 | 38 | nn = Classifier( 39 | layers=[ 40 | Layer("Softmax", units=2), 41 | Layer("Softmax",units=2), 42 | Layer("Softmax", units = 2)], 43 | learning_rate=0.05, 44 | n_iter=15) 45 | 46 | nn.fit(ipt,opt) 47 | 48 | a = np.asarray([[4,30],[2,30],[6,300],[4,300]]) 49 | # a =a.reshape(2,-1) 50 | op = nn.predict(a) 51 | 52 | print op -------------------------------------------------------------------------------- /Codes/Anuj/train_ip.txt: -------------------------------------------------------------------------------- 
1 | 0 2 | 1 3 | 9 4 | 10 -------------------------------------------------------------------------------- /Codes/Anuj/train_op.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 0 3 | 1 4 | 1 -------------------------------------------------------------------------------- /Codes/Ash/feature: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Ash/feature -------------------------------------------------------------------------------- /Codes/Ash/old/Hand_segmentation_ash.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import scipy 6 | import numpy as np 7 | import cv2 8 | 9 | class HandGestureObjectClass(object): 10 | def __init__(self): 11 | 12 | # Kinect runtime object, we want only color and body frames 13 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 14 | 15 | # here we will store skeleton data 16 | self._bodies = None 17 | 18 | 19 | def neighbourhood(self, array, radius, seed): 20 | 21 | 22 | neighbour = np.array(array) 23 | neighbour *= 0 24 | 25 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8) 26 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 27 | return temp 28 | 29 | def merge(self, array_big, array_small, seed ): 30 | [a,b] = np.shape(array_small) 31 | array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small 32 | return array_big 33 | 34 | def max_hist_depth(self, frame): 35 | #print 'FRAME_MAX = ' + str(frame.max()) 36 | binaries = int(frame.max()) 37 | if binaries <= 0: 38 | return 0 39 | histogram, bins = np.histogram(frame, bins = binaries) 40 | histogram = histogram.tolist(); bins = bins.tolist(); 41 | histogram[0 : 1] = [0, 0] 42 | max_hist = bins[histogram.index( max(histogram) )] 43 | return max_hist 44 | 45 | def max_area_contour(self, contours): 46 | max_area = 0 47 | for i in range(len(contours)): 48 | cnt=contours[i] 49 | area = cv2.contourArea(cnt) 50 | if(area>max_area): 51 | max_area=area 52 | ci=i 53 | return contours[ci] 54 | 55 | def run(self): 56 | print_frame=None 57 | 58 | # -------- Main Program Loop ----------- 59 | while (True): 60 | # --- Main event loop 61 | 62 | if self._kinect.has_new_depth_frame() or self._kinect.has_new_body_frame(): 63 | 64 | depth_frame = self._kinect.get_last_depth_frame() 65 | 66 | depth_frame = np.array(depth_frame/16, dtype= np.uint8) 67 | depth_frame = depth_frame.reshape(424,512) 68 | 69 | self._bodies = self._kinect.get_last_body_frame() 70 | 71 | if self._bodies is not None: 72 | #first detected body taken 73 | body = self._bodies.bodies[0] 74 | if not body.is_tracked: 75 | continue 76 | 77 | joints = body.joints 78 | 79 | # convert joint coordinates to color space 80 | joint_points = self._kinect.body_joints_to_depth_space(joints) 81 | 82 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 83 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 84 | left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 85 | left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 86 | 87 | right_x = right_x if right_x < 424 else 423 88 | right_y = right_y if right_y < 512 else 
511 89 | left_x = left_x if left_x < 424 else 423 90 | left_y = left_y if left_y < 512 else 511 91 | 92 | right_hand_depth = depth_frame[right_x,right_y] 93 | left_hand_depth = depth_frame[left_x,left_y] 94 | print 'ld:' + str(left_hand_depth)+'\trd:' + str(right_hand_depth) 95 | 96 | right_hand = [right_x,right_y] 97 | left_hand = [left_x,left_y] 98 | 99 | #print type(c) 100 | 101 | d = 50 102 | if depth_frame != None: 103 | right_hand_filtered = self.neighbourhood(depth_frame,d,right_hand) 104 | left_hand_filtered = self.neighbourhood(depth_frame,d,left_hand) 105 | 106 | neighbour = np.array(depth_frame) 107 | neighbour *= 0 108 | 109 | print_frame = np.zeros(np.shape(depth_frame)) 110 | 111 | 112 | 113 | if right_hand_filtered != None: 114 | 115 | img1,contours1, hierarchy1 = cv2.findContours(right_hand_filtered,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE) 116 | cnt = self.max_area_contour(contours1) 117 | hull = cv2.convexHull(cnt) 118 | drawing = np.zeros(right_hand_filtered.shape,np.uint8) 119 | drawing = cv2.drawContours(drawing,[cnt],0,150,1) 120 | drawing = cv2.drawContours(drawing,[hull],0,200,1) 121 | cv2.imshow('contours1',drawing) 122 | 123 | right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered,right_hand),depth_frame) 124 | 125 | ret,right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 126 | print_frame += right_hand_filtered_depth_frame 127 | 128 | if left_hand_filtered != None: 129 | 130 | 131 | left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand),depth_frame) 132 | ret,left_hand_filtered_depth_frame = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 133 | print_frame += left_hand_filtered_depth_frame 134 | 135 | if print_frame != None: 136 | dpt = depth_frame 137 | cv2.imshow('Hand Filtered',print_frame) 138 | 139 | 140 | 141 | if cv2.waitKey(1) & 0xFF == ord('q'): 142 | break 143 | 144 | 145 | # --- Limit to 60 frames per second 146 | 147 | 148 | # Close our Kinect sensor, close the window and quit. 
149 | self._kinect.close() 150 | 151 | 152 | 153 | HandGestureObject = HandGestureObjectClass(); 154 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/old/kinect_body_ash.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import scipy 6 | import numpy as np 7 | import cv2 8 | 9 | class HandGestureObjectClass(object): 10 | def __init__(self): 11 | 12 | # Kinect runtime object, we want only color and body frames 13 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 14 | 15 | # here we will store skeleton data 16 | self._bodies = None 17 | 18 | def neighbourhood_old(self, array, radius, seed, depth): 19 | # temp = np.nditer(array, flags = ['multi_index'], op_flags = ['readwrite']) 20 | #cv2.imshow('neigh', array) 21 | # print 'in neighbour' 22 | temp = 0 23 | [a,b] = np.shape(array) 24 | neighbour = np.array(array) 25 | neighbour *= 0 26 | for i in range(seed[0]-radius, seed[0]+radius): 27 | for j in range(seed[1]-radius, seed[1]+radius): 28 | temp+=array[j,i] 29 | if array[j,i] < depth+3: 30 | 31 | neighbour[j,i] = array[j,i] 32 | else: 33 | neighbour[j,i] = 0 34 | 35 | # cv2.imshow('neigh', array) 36 | return neighbour,temp/(2*radius+1)^2 37 | 38 | def neighbourhood(self, array, radius, seed, depth): 39 | [a,b] = np.shape(array) 40 | neighbour = np.array(array) 41 | neighbour *= 0 42 | # for i in range(seed[0]-radius, seed[0]+radius): 43 | # for j in range(seed[1]-radius, seed[1]+radius): 44 | # neighbour[j,i] = array[j,i] 45 | 46 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8) 47 | 48 | # temp = temp.reshape(2*radius,2*radius) 49 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 50 | 51 | neighbour[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius] = temp 52 | return neighbour 53 | 54 | def max_hist_depth(self, frame): 55 | #print 'FRAME_MAX = ' + str(frame.max()) 56 | binaries = int(frame.max()) 57 | if binaries <= 0: 58 | return 0 59 | histogram, bins = np.histogram(frame, bins = binaries) 60 | histogram = histogram.tolist(); bins = bins.tolist(); 61 | histogram[0 : 1] = [0, 0] 62 | max_hist = bins[histogram.index( max(histogram) )] 63 | return max_hist 64 | 65 | def run(self): 66 | print_frame=None 67 | 68 | # -------- Main Program Loop ----------- 69 | while (True): 70 | # --- Main event loop 71 | 72 | if self._kinect.has_new_depth_frame() or self._kinect.has_new_body_frame(): 73 | 74 | depth_frame = self._kinect.get_last_depth_frame() 75 | depth_frame = np.array(depth_frame/16, dtype= np.uint8) 76 | print depth_frame.max() 77 | print '_' 78 | depth_frame = depth_frame.reshape(424,512) 79 | self._bodies = self._kinect.get_last_body_frame() 80 | 81 | # --- draw skeletons to _frame_surface 82 | if self._bodies is not None: 83 | #for i in range(0, self._kinect.max_body_count): 84 | body = self._bodies.bodies[0] 85 | if not body.is_tracked: 86 | continue 87 | 88 | joints = body.joints 89 | # convert joint coordinates to color space 90 | joint_points = self._kinect.body_joints_to_depth_space(joints) 91 | 92 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 93 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 94 | 
left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 95 | left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 96 | #print right_x 97 | right_x = right_x if right_x < 424 else 423 98 | right_y = right_y if right_y < 512 else 511 99 | left_x = left_x if left_x < 424 else 423 100 | left_y = left_y if left_y < 512 else 511 101 | 102 | right_hand_depth = depth_frame[right_x,right_y] 103 | left_hand_depth = depth_frame[left_x,left_y] 104 | print 'ld:' + str(left_hand_depth)+'\trd:' + str(right_hand_depth) 105 | 106 | 107 | # hand_filtered_depth_frame = np.where(depth_frame< (left_hand_depth + 20) ,0 , depth_frame) 108 | # hand_filtered_depth_frame = np.where(depth_frame> (left_hand_depth - 20) ,0 , depth_frame) 109 | # hand_filtered_depth_frame = np.where(hand_filtered_depth_frame>100, 65535, 0) 110 | 111 | 112 | # print_frame=4*depth_frame 113 | # print_frame=cv2.circle(print_frame,(right_x,right_y), 10,(255,0,0),5) 114 | # print_frame=cv2.circle(print_frame,(left_x,left_y), 10,(255,0,0),5) 115 | 116 | right_hand = [right_x,right_y] 117 | left_hand = [left_x,left_y] 118 | 119 | #print type(c) 120 | 121 | d = 50 122 | if depth_frame != None: 123 | right_hand_filtered_depth_frame = self.neighbourhood(depth_frame,d,right_hand,right_hand_depth) 124 | left_hand_filtered_depth_frame = self.neighbourhood(depth_frame,d,left_hand,left_hand_depth) 125 | tp = depth_frame 126 | #img_grey = cv2.cvtColor(hand_filtered_depth_frame, cv2.COLOR_BGR2GRAY) 127 | # right_hand_filtered_depth_frame = np.array(right_hand_filtered_depth_frame/16, dtype = np.uint8) 128 | # left_hand_filtered_depth_frame = np.array(left_hand_filtered_depth_frame/16, dtype = np.uint8) 129 | 130 | # blur1 = cv2.GaussianBlur(right_hand_filtered_depth_frame,(5,5),0) 131 | # ret1,thresh1 = cv2.threshold(blur1,right_hand_depth-10,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 132 | # # thresh1 = cv2.adaptiveThreshold(blur1,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,11,2) 133 | 134 | # # kernel = np.ones((3,3),np.uint8) 135 | # # opening1 = cv2.morphologyEx(thresh1,cv2.MORPH_OPEN,kernel, iterations = 2) 136 | 137 | # # dist_transform1 = cv2.distanceTransform(opening1,cv2.DIST_L2,5) 138 | # # ret1, sure_fg1 = cv2.threshold(dist_transform1,0.3*dist_transform1.max(),255,0) 139 | 140 | # blur2 = cv2.GaussianBlur(left_hand_filtered_depth_frame,(5,5),0) 141 | # ret2,thresh2 = cv2.threshold(blur2,avg2,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 142 | 143 | 144 | # ret1,thresh1 = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY_INV) 145 | # ret2,thresh2 = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY_INV) 146 | 147 | # thresh = cv2.adaptiveThreshold(img_grey, 0, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2) 148 | # print_frame = thresh1+thresh2 149 | 150 | contours1, hierarchy1 = cv2.findContours(right_hand_filtered_depth_frame,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) 151 | 152 | 153 | print_frame = right_hand_filtered_depth_frame+left_hand_filtered_depth_frame 154 | 155 | # it = np.nditer(print_frame, flags=['multi_index'],op_flags=['readwrite']) 156 | # while not it.finished: 157 | # p=it.multi_index 158 | 159 | # if (p[0]>c[0]+d or p[0]c[1]+d or p[1]" % (it[0], it.multi_index), 163 | # it.iternext() 164 | 165 | # hand_filtered_depth_frame = np.where(depth_frame < (left_hand_depth + 2), depth_frame, 0) 166 | # hand_filtered_depth_frame = np.where(depth_frame > (left_hand_depth - 2), depth_frame, 0) 167 | # # hand_filtered_depth_frame = np.where(hand_filtered_depth_frame > 0, 65535, 0) 168 | 
# print_frame=cv2.circle(print_frame,(right_x,right_y), 10,(255,0,0),5) 169 | # print_frame=cv2.circle(print_frame,(left_x,left_y), 10,(255,0,0),5) 170 | 171 | 172 | if print_frame != None: 173 | dpt = depth_frame 174 | cv2.imshow('Hand Filtered',print_frame) 175 | 176 | 177 | 178 | if cv2.waitKey(1) & 0xFF == ord('q'): 179 | break 180 | 181 | 182 | cv2.imshow('OG',tp) 183 | 184 | 185 | # --- Limit to 60 frames per second 186 | 187 | 188 | # Close our Kinect sensor, close the window and quit. 189 | self._kinect.close() 190 | 191 | 192 | 193 | HandGestureObject = HandGestureObjectClass(); 194 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/old/main_ash.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | 5 | from matplotlib import pyplot as plt 6 | 7 | import scipy 8 | import numpy as np 9 | import cv2 10 | 11 | class HandGestureObjectClass(object): 12 | 13 | 14 | def __init__(self): 15 | 16 | # Kinect runtime object, we want only depth and body depth_frames 17 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth|PyKinectV2.FrameSourceTypes_Color) 18 | 19 | def max_hist_depth(self, frame): 20 | #print 'FRAME_MAX = ' + str(frame.max()) 21 | binaries = int(frame.max()) 22 | if binaries <= 0: 23 | return 0 24 | histogram, bins = np.histogram(frame, bins = binaries) 25 | histogram = histogram.tolist(); bins = bins.tolist(); 26 | histogram[0 : 1] = [0, 0] 27 | max_hist = bins[histogram.index( max(histogram) )] 28 | return max_hist 29 | 30 | def map_depth_to_color(self,depth_x,depth_y): 31 | color_x = depth_x * 1920 / 512 32 | color_y = depth_y * 1080 / 424 33 | 34 | 35 | return 36 | def run(self): 37 | print ':IN_RUN:Pulling Frames' 38 | previous_depth_frame = None 39 | cx=0 40 | cy=0 41 | 42 | 43 | while(True): 44 | #Main event loop 45 | if self._kinect.has_new_depth_frame() or self._kinect.has_new_color_frame(): 46 | 47 | depth_frame = self._kinect.get_last_depth_frame() 48 | 49 | depth_frame = depth_frame.reshape(424,512) 50 | #depth_frame = depth_frame.resize((424*2,512*2)) 51 | 52 | color_frame = self._kinect.get_last_color_frame() 53 | #print 'color' 54 | color_frame = color_frame.reshape(1080,1920,4) 55 | #color_frame = np.resize(color_frame,(1080,1920,4)) 56 | #color_frame = color_frame.resize((1080/2,1920/2,4)) 57 | 58 | if previous_depth_frame != None and not np.array_equal(depth_frame,previous_depth_frame): 59 | 60 | # Foreground Detection 61 | depth_frame_foregnd = cv2.subtract(depth_frame,previous_depth_frame) 62 | depth_frame_denoised = np.where(depth_frame_foregnd>=100,depth_frame_foregnd,0) 63 | depth_frame_denoised = cv2.medianBlur(depth_frame_denoised,5) 64 | 65 | # Denoising by erosion 66 | kernel = np.ones((5,5),np.uint8) 67 | depth_frame_denoised = cv2.erode(depth_frame_denoised,kernel,iterations=1) 68 | depth_frame_denoised = cv2.dilate(depth_frame_denoised,kernel,iterations=1) 69 | 70 | # Depth depth_frame XOR Denoised depth_frame 71 | depth_frame_xored = np.where(depth_frame_denoised != 0, previous_depth_frame, 0) 72 | 73 | # Depth of the closest object 74 | hand_depth = self.max_hist_depth(depth_frame_xored) 75 | # print "Hand Depth: " + str(hand_depth) 76 | hand_filtered_depth_frame = np.where(depth_frame> (hand_depth + 20),0 , depth_frame) 77 | hand_filtered_depth_frame = np.where(hand_filtered_depth_frame < 
(hand_depth - 20), 0 , hand_filtered_depth_frame) 78 | 79 | 80 | im = np.array(hand_filtered_depth_frame * 255, dtype = np.uint8) 81 | 82 | ret,thresh = cv2.threshold(im,100,255,cv2.THRESH_BINARY) 83 | image,contours,hierarchy = cv2.findContours(thresh, 1, 2) 84 | #print type(contours) 85 | if contours: 86 | 87 | cnt = contours[0] 88 | M = cv2.moments(cnt) 89 | # print M 90 | if M['m00'] != 0: 91 | # print ':' 92 | 93 | cx = int(M['m10']/M['m00']) 94 | # print cx 95 | cy = int(M['m01']/M['m00']) 96 | # print cy 97 | 98 | 99 | thresh = cv2.circle(thresh,(cx,cy), 10,(255,0,0),1) 100 | 101 | #Printing depth_frame 102 | hand_filtered_depth_frame=depth_frame 103 | hand_filtered_depth_frame *= 32 104 | cv2.imshow('Kinect',hand_filtered_depth_frame) 105 | cv2.imshow('COLOR',color_frame) 106 | 107 | previous_depth_frame = depth_frame 108 | 109 | if cv2.waitKey(1) & 0xFF == ord('q'): 110 | break 111 | 112 | # Close our Kinect sensor, close the window and quit. 113 | self._kinect.close() 114 | 115 | 116 | HandGestureObject = HandGestureObjectClass(); 117 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/old/main_ash.txt: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | 5 | from matplotlib import pyplot as plt 6 | 7 | import scipy 8 | import numpy as np 9 | import cv2 10 | 11 | class HandGestureObjectClass(object): 12 | 13 | def __init__(self): 14 | 15 | # Kinect runtime object, we want only depth and body frames 16 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth) 17 | 18 | def max_hist_depth(self, frame): 19 | #print 'FRAME_MAX = ' + str(frame.max()) 20 | binaries = int(frame.max()) 21 | if binaries <= 0: 22 | return 0 23 | histogram, bins = numpy.histogram(frame, bins = binaries) 24 | histogram = histogram.tolist(); bins = bins.tolist(); 25 | histogram[0 : 1] = [0, 0] 26 | max_hist = bins[ histogram.index( max(histogram) ) ] 27 | return max_hist 28 | 29 | def run(self): 30 | previous_frame = None 31 | 32 | while(True): 33 | #Main event loop 34 | if self._kinect.has_new_depth_frame(): 35 | frame = self._kinect.get_last_depth_frame() 36 | frame = frame.reshape(424,512) 37 | if previous_frame != None: 38 | frame_foregnd = cv2.subtract(frame,previous_frame) 39 | frame_denoised = np.where(frame_foregnd>=200,frame_foregnd,0) 40 | kernel = np.ones((5,5),np.uint8) 41 | frame_denoised = cv2.erode(frame_denoised,kernel,iterations=1) 42 | #frame_denoised = cv2.dilate(frame_denoised,kernel,iterations=1) 43 | frame_xored = np.where(frame_denoised != 0, frame, 0) 44 | print_frame = frame_xored << 8 45 | cv2.imshow('Kinect',print_frame) 46 | previous_frame=frame 47 | if cv2.waitKey(1) & 0xFF == ord('q'): 48 | break 49 | # Close our Kinect sensor, close the window and quit. 
50 | self._kinect.close() 51 | 52 | HandGestureObject = HandGestureObjectClass(); 53 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/old/research.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import scipy 6 | import numpy as np 7 | import cv2 8 | 9 | class HandGestureObjectClass(object): 10 | def __init__(self): 11 | 12 | # Kinect runtime object, we want only color and body frames 13 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 14 | 15 | # here we will store skeleton data 16 | self._bodies = None 17 | 18 | 19 | def neighbourhood(self, array, radius, seed): 20 | 21 | 22 | neighbour = np.array(array) 23 | neighbour *= 0 24 | 25 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8) 26 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 27 | return temp 28 | 29 | def merge(self, array_big, array_small, seed ): 30 | [a,b] = np.shape(array_small) 31 | array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small 32 | return array_big 33 | 34 | def max_hist_depth(self, frame): 35 | #print 'FRAME_MAX = ' + str(frame.max()) 36 | binaries = int(frame.max()) 37 | if binaries <= 0: 38 | return 0 39 | histogram, bins = np.histogram(frame, bins = binaries) 40 | histogram = histogram.tolist(); bins = bins.tolist(); 41 | histogram[0 : 1] = [0, 0] 42 | max_hist = bins[histogram.index( max(histogram) )] 43 | return max_hist 44 | 45 | def max_area_contour(self, contours): 46 | max_area = 0 47 | ci = 0 48 | for i in range(len(contours)): 49 | cnt=contours[i] 50 | area = cv2.contourArea(cnt) 51 | if(area>max_area): 52 | max_area=area 53 | ci=i 54 | return contours[ci] 55 | 56 | def min_area_contour(self, contours): 57 | min_area = 0 58 | ci = 0 59 | for i in range(len(contours)): 60 | cnt=contours[i] 61 | area = cv2.contourArea(cnt) 62 | if(area 5: 159 | gesture = 2 160 | print 'Pointer' 161 | else: 162 | print 'Hand Closed' 163 | gesture = 1 164 | else: 165 | print 'Hand Open' 166 | gesture = 0 167 | 168 | 169 | # k = cv2.isContourConvex(cnt) 170 | # if k: 171 | # print 'convex' 172 | # # else: 173 | # print 'concave' 174 | 175 | right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered,right_hand),depth_frame) 176 | 177 | ret,right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 178 | print_frame += right_hand_filtered_depth_frame 179 | 180 | 181 | if left_hand_filtered != None: 182 | 183 | 184 | left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand),depth_frame) 185 | ret,left_hand_filtered_depth_frame = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 186 | print_frame += left_hand_filtered_depth_frame 187 | 188 | print_frame = np.array(print_frame, dtype = np.uint8) 189 | print_frame = cv2.cvtColor(print_frame, cv2.COLOR_GRAY2RGB) 190 | 191 | font = cv2.FONT_HERSHEY_SIMPLEX 192 | cv2.putText(print_frame, 'Gesture:',(50,320), font, 0.5, (150,150,150),1, cv2.LINE_AA) 193 | if gesture == 0: 194 | cv2.putText(print_frame, 'Hand Open',(50,350), font, 0.5, (200,0,0),1, cv2.LINE_AA) 195 | else: 196 | 
cv2.putText(print_frame, 'Hand Closed',(50,350), font, 0.5, (0,200,0),1, cv2.LINE_AA) 197 | 198 | if print_frame != None: 199 | dpt = depth_frame 200 | cv2.imshow('Hand Filtered',print_frame) 201 | 202 | 203 | 204 | if cv2.waitKey(1) & 0xFF == ord('q'): 205 | break 206 | 207 | 208 | # --- Limit to 60 frames per second 209 | 210 | 211 | # Close our Kinect sensor, close the window and quit. 212 | self._kinect.close() 213 | 214 | 215 | 216 | HandGestureObject = HandGestureObjectClass(); 217 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/old/tp: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import scipy 6 | import numpy as np 7 | import cv2 8 | import math 9 | class HandGestureObjectClass(object): 10 | def __init__(self): 11 | 12 | # Kinect runtime object, we want only color and body frames 13 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 14 | 15 | # here we will store skeleton data 16 | self._bodies = None 17 | 18 | 19 | def neighbourhood(self, array, radius, seed): 20 | 21 | 22 | neighbour = np.array(array) 23 | neighbour *= 0 24 | 25 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8) 26 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 27 | return temp 28 | 29 | def merge(self, array_big, array_small, seed ): 30 | [a,b] = np.shape(array_small) 31 | array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small 32 | return array_big 33 | 34 | def max_hist_depth(self, frame): 35 | #print 'FRAME_MAX = ' + str(frame.max()) 36 | binaries = int(frame.max()) 37 | if binaries <= 0: 38 | return 0 39 | histogram, bins = np.histogram(frame, bins = binaries) 40 | histogram = histogram.tolist(); bins = bins.tolist(); 41 | histogram[0 : 1] = [0, 0] 42 | max_hist = bins[histogram.index( max(histogram) )] 43 | return max_hist 44 | 45 | def max_area_contour(self, contours): 46 | max_area = 0 47 | ci = 0 48 | for i in range(len(contours)): 49 | cnt=contours[i] 50 | area = cv2.contourArea(cnt) 51 | if(area>max_area): 52 | max_area=area 53 | ci=i 54 | return contours[ci] 55 | 56 | def run(self): 57 | print_frame=None 58 | 59 | # -------- Main Program Loop ----------- 60 | while (True): 61 | # --- Main event loop 62 | print '0:in_main' 63 | 64 | if self._kinect.has_new_body_frame(): 65 | self._bodies = self._kinect.get_last_body_frame() 66 | 67 | if self._kinect.has_new_depth_frame(): 68 | depth_frame = self._kinect.get_last_depth_frame() 69 | 70 | depth_frame = np.array(depth_frame/16, dtype= np.uint8) 71 | depth_frame = depth_frame.reshape(424,512) 72 | cv2.imshow('depth_frame',depth_frame) 73 | 74 | i = 0 75 | if self._bodies is not None: 76 | # print "Bodies"+str(self._bodies) 77 | #first detected body taken 78 | 79 | body = self._bodies.bodies[i] 80 | if not body.is_tracked: 81 | i = i + 1 82 | print i 83 | continue 84 | 85 | joints = body.joints 86 | 87 | # convert joint coordinates to color space 88 | joint_points = self._kinect.body_joints_to_depth_space(joints) 89 | 90 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 91 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 92 | left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 93 | 
left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 94 | 95 | right_x = right_x if right_x < 424 else 423 96 | right_y = right_y if right_y < 512 else 511 97 | left_x = left_x if left_x < 424 else 423 98 | left_y = left_y if left_y < 512 else 511 99 | 100 | right_hand_depth = depth_frame[right_x,right_y] 101 | left_hand_depth = depth_frame[left_x,left_y] 102 | print 'ld:' + str(left_hand_depth)+'\trd:' + str(right_hand_depth) 103 | 104 | right_hand = [right_x,right_y] 105 | left_hand = [left_x,left_y] 106 | 107 | #print type(c) 108 | 109 | # radius_right =int( math.sqrt((int(joint_points[PyKinectV2.JointType_WristRight].x)-int(joint_points[PyKinectV2.JointType_HandTipRight].x))**2+(int(joint_points[PyKinectV2.JointType_WristRight].y)-int(joint_points[PyKinectV2.JointType_HandTipRight].y))**2))+1 110 | # radius_left =int( math.sqrt((int(joint_points[PyKinectV2.JointType_WristLeft].x)-int(joint_points[PyKinectV2.JointType_HandTipLeft].x))**2+(int(joint_points[PyKinectV2.JointType_WristLeft].y)-int(joint_points[PyKinectV2.JointType_HandTipLeft].y))**2))+1 111 | # print d 112 | 113 | radius_right = 50 114 | radius_left = 50 115 | 116 | if depth_frame != None: 117 | right_hand_filtered = self.neighbourhood(depth_frame,radius_right,right_hand) 118 | left_hand_filtered = self.neighbourhood(depth_frame,radius_left,left_hand) 119 | 120 | neighbour = np.array(depth_frame) 121 | neighbour *= 0 122 | 123 | print_frame = np.zeros(np.shape(depth_frame)) 124 | 125 | 126 | 127 | if right_hand_filtered != None: 128 | right = np.array(right_hand_filtered) 129 | # cv2.imwrite('pointer.png',right_hand_filtered) 130 | img1,contours1, hierarchy1 = cv2.findContours(right,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE) 131 | cnt = self.max_area_contour(contours1) 132 | x,y,w,h = cv2.boundingRect(cnt) 133 | 134 | right = np.array(self.neighbourhood(depth_frame,max(w,h),right_hand)) 135 | img1,contours1, hierarchy1 = cv2.findContours(right,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE) 136 | cnt = self.max_area_contour(contours1) 137 | 138 | hull = cv2.convexHull(cnt) 139 | # print hull 140 | # defects = cv2.convexityDefects(cnt,hull) 141 | # print defects.shape[0] 142 | drawing = np.zeros(right_hand_filtered.shape,np.uint8) 143 | drawing = cv2.drawContours(drawing,[cnt],0,150,1) 144 | drawing = cv2.drawContours(drawing,[hull],0,200,1) 145 | cv2.imshow('contours1',drawing) 146 | 147 | # img2 = cv2.imread('right_hand_filtered.png',0) 148 | # # img3 = cv2.imread('pointer.png',0) 149 | # # ret, thresh2 = cv2.threshold(img2, 127, 255,0) 150 | # im2,contours2,hierarchy2 = cv2.findContours(img2,2,1) 151 | # cnt2 = self.max_area_contour(contours2) 152 | 153 | # ret = cv2.matchShapes(cnt,cnt2,1,0.0) 154 | # # print ret 155 | # gesture = -1 156 | # if ret <= 0.1: 157 | # print 'Hand Closed' 158 | # gesture = 1 159 | # else: 160 | # print 'Hand Open' 161 | # gesture = 0 162 | 163 | 164 | # k = cv2.isContourConvex(cnt) 165 | # if k: 166 | # print 'convex' 167 | # # else: 168 | # print 'concave' 169 | 170 | right_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, right_hand_filtered,right_hand),depth_frame) 171 | 172 | ret,right_hand_filtered_depth_frame = cv2.threshold(right_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 173 | print_frame += right_hand_filtered_depth_frame 174 | 175 | 176 | if left_hand_filtered != None: 177 | 178 | 179 | left_hand_filtered_depth_frame = cv2.bitwise_and(self.merge(neighbour, left_hand_filtered, left_hand),depth_frame) 180 | ret,left_hand_filtered_depth_frame = 
cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 181 | print_frame += left_hand_filtered_depth_frame 182 | 183 | print_frame = np.array(print_frame, dtype = np.uint8) 184 | # print_frame = cv2.cvtColor(print_frame, cv2.COLOR_GRAY2RGB) 185 | 186 | # font = cv2.FONT_HERSHEY_SIMPLEX 187 | # cv2.putText(print_frame, 'Gesture:',(50,320), font, 0.5, (150,150,150),1, cv2.LINE_AA) 188 | # if gesture == 0: 189 | # cv2.putText(print_frame, 'Hand Open',(50,350), font, 0.5, (200,0,0),1, cv2.LINE_AA) 190 | # else: 191 | # cv2.putText(print_frame, 'Hand Closed',(50,350), font, 0.5, (0,200,0),1, cv2.LINE_AA) 192 | 193 | if print_frame != None: 194 | dpt = depth_frame 195 | cv2.imshow('Hand Filtered',print_frame) 196 | 197 | 198 | 199 | if cv2.waitKey(1) & 0xFF == ord('q'): 200 | break 201 | 202 | 203 | # --- Limit to 60 frames per second 204 | 205 | 206 | # Close our Kinect sensor, close the window and quit. 207 | self._kinect.close() 208 | 209 | 210 | 211 | HandGestureObject = HandGestureObjectClass(); 212 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/Ash/topViewHand.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/Ash/topViewHand.jpg -------------------------------------------------------------------------------- /Codes/Finals/clean_hand_filtered'.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import scipy 6 | import numpy as np 7 | import cv2 8 | from os import system as cmd 9 | 10 | class HandGestureObjectClass(object): 11 | def __init__(self): 12 | 13 | # Kinect runtime object, we want only color and body frames 14 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 15 | 16 | # here we will store skeleton data 17 | self._bodies = None 18 | 19 | def get_hand_coordinates(self, joint_points): 20 | 21 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 22 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 23 | left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 24 | left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 25 | 26 | right_x = right_x if right_x < 424 else 423 27 | right_y = right_y if right_y < 512 else 511 28 | left_x = left_x if left_x < 424 else 423 29 | left_y = left_y if left_y < 512 else 511 30 | 31 | right_hand = [right_x,right_y] 32 | left_hand = [left_x,left_y] 33 | return [right_hand,left_hand] 34 | 35 | def neighbourhood(self, array, radius, seed): 36 | neighbour = np.array(array) 37 | neighbour *= 0 38 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint8) 39 | ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 40 | return temp 41 | 42 | def merge(self, array_big, array_small, seed ): 43 | [a,b] = np.shape(array_small) 44 | array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small 45 | return array_big 46 | 47 | def max_hist_depth(self, frame): 48 | #print 'FRAME_MAX = ' + str(frame.max()) 49 | binaries = int(frame.max()) 50 | if binaries <= 0: 51 | return 0 52 | histogram, bins = np.histogram(frame, bins = binaries) 53 | histogram = histogram.tolist(); bins = 
bins.tolist(); 54 | histogram[0 : 1] = [0, 0] 55 | max_hist = bins[histogram.index( max(histogram) )] 56 | return max_hist 57 | 58 | def max_area_contour(self, contours): 59 | max_area = 0 60 | ci = 0 61 | for i in range(len(contours)): 62 | cnt=contours[i] 63 | area = cv2.contourArea(cnt) 64 | if(area>max_area): 65 | max_area=area 66 | ci=i 67 | return contours[ci] 68 | 69 | def min_area_contour(self, contours): 70 | min_area = 0 71 | ci = 0 72 | for i in range(len(contours)): 73 | cnt=contours[i] 74 | area = cv2.contourArea(cnt) 75 | if(area= 0x03000000: 12 | import _thread as thread 13 | else: 14 | import thread 15 | 16 | # colors for drawing different bodies 17 | SKELETON_COLORS = [pygame.color.THECOLORS["red"], 18 | pygame.color.THECOLORS["blue"], 19 | pygame.color.THECOLORS["green"], 20 | pygame.color.THECOLORS["orange"], 21 | pygame.color.THECOLORS["purple"], 22 | pygame.color.THECOLORS["yellow"], 23 | pygame.color.THECOLORS["violet"]] 24 | 25 | 26 | class BodyGameRuntime(object): 27 | def __init__(self): 28 | pygame.init() 29 | 30 | # Used to manage how fast the screen updates 31 | self._clock = pygame.time.Clock() 32 | 33 | # Set the width and height of the screen [width, height] 34 | self._infoObject = pygame.display.Info() 35 | self._screen = pygame.display.set_mode((self._infoObject.current_w >> 1, self._infoObject.current_h >> 1), 36 | pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 8 ) 37 | 38 | pygame.display.set_caption("Kinect for Windows v2 Body Game") 39 | 40 | # Loop until the user clicks the close button. 41 | self._done = False 42 | 43 | # Used to manage how fast the screen updates 44 | self._clock = pygame.time.Clock() 45 | 46 | # Kinect runtime object, we want only color and body frames 47 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 48 | 49 | # back buffer surface for getting Kinect color frames, 8 bit color, width and height equal to the Kinect color frame size 50 | self._frame_surface = pygame.Surface((self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), 0, 8 ) 51 | 52 | # here we will store skeleton data 53 | self._bodies = None 54 | 55 | 56 | def draw_body_bone(self, joints, jointPoints, color, joint0, joint1): 57 | joint0State = joints[joint0].TrackingState; 58 | joint1State = joints[joint1].TrackingState; 59 | 60 | # both joints are not tracked 61 | if (joint0State == PyKinectV2.TrackingState_NotTracked) or (joint1State == PyKinectV2.TrackingState_NotTracked): 62 | return 63 | 64 | # both joints are not *really* tracked 65 | if (joint0State == PyKinectV2.TrackingState_Inferred) and (joint1State == PyKinectV2.TrackingState_Inferred): 66 | return 67 | 68 | # ok, at least one is good 69 | start = (jointPoints[joint0].x, jointPoints[joint0].y) 70 | end = (jointPoints[joint1].x, jointPoints[joint1].y) 71 | 72 | try: 73 | pygame.draw.line(self._frame_surface, color, start, end, 8) 74 | except: # need to catch it due to possible invalid positions (with inf) 75 | pass 76 | 77 | def draw_body(self, joints, jointPoints, color): 78 | # Torso 79 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_Head, PyKinectV2.JointType_Neck); 80 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_Neck, PyKinectV2.JointType_SpineShoulder); 81 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_SpineMid); 82 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineMid, 
PyKinectV2.JointType_SpineBase); 83 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderRight); 84 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderLeft); 85 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipRight); 86 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipLeft); 87 | 88 | # Right Arm 89 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ShoulderRight, PyKinectV2.JointType_ElbowRight); 90 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ElbowRight, PyKinectV2.JointType_WristRight); 91 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_HandRight); 92 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HandRight, PyKinectV2.JointType_HandTipRight); 93 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_ThumbRight); 94 | 95 | # Left Arm 96 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ShoulderLeft, PyKinectV2.JointType_ElbowLeft); 97 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ElbowLeft, PyKinectV2.JointType_WristLeft); 98 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_HandLeft); 99 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HandLeft, PyKinectV2.JointType_HandTipLeft); 100 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_ThumbLeft); 101 | 102 | # Right Leg 103 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HipRight, PyKinectV2.JointType_KneeRight); 104 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_KneeRight, PyKinectV2.JointType_AnkleRight); 105 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_AnkleRight, PyKinectV2.JointType_FootRight); 106 | 107 | # Left Leg 108 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HipLeft, PyKinectV2.JointType_KneeLeft); 109 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_KneeLeft, PyKinectV2.JointType_AnkleLeft); 110 | self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_AnkleLeft, PyKinectV2.JointType_FootLeft); 111 | 112 | 113 | def draw_depth_frame(self, frame, target_surface): 114 | target_surface.lock() 115 | address = self._kinect.surface_as_array(target_surface.get_buffer()) 116 | ctypes.memmove(address, frame.ctypes.data, frame.size) 117 | del address 118 | target_surface.unlock() 119 | 120 | def run(self): 121 | # -------- Main Program Loop ----------- 122 | while not self._done: 123 | # --- Main event loop 124 | for event in pygame.event.get(): # User did something 125 | if event.type == pygame.QUIT: # If user clicked close 126 | self._done = True # Flag that we are done so we exit this loop 127 | 128 | elif event.type == pygame.VIDEORESIZE: # window resized 129 | self._screen = pygame.display.set_mode(event.dict['size'], 130 | pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 8 ) 131 | 132 | # --- Game logic should go here 133 | 134 | # --- Getting frames and drawing 135 | # --- Woohoo! We've got a color frame! 
Let's fill out back buffer surface with frame's data 136 | if self._kinect.has_new_depth_frame(): 137 | frame = self._kinect.get_last_depth_frame() 138 | frame = np.array(frame, dtype= np.uint8) 139 | frame *= 1 140 | # new = cv2.cvtColor(frame,cv2.COLOR_GRAY2RGB) 141 | self.draw_depth_frame(frame, self._frame_surface) 142 | frame = None 143 | 144 | # --- Cool! We have a body frame, so can get skeletons 145 | if self._kinect.has_new_body_frame(): 146 | self._bodies = self._kinect.get_last_body_frame() 147 | 148 | # --- draw skeletons to _frame_surface 149 | if self._bodies is not None: 150 | for i in range(0, self._kinect.max_body_count): 151 | body = self._bodies.bodies[i] 152 | if not body.is_tracked: 153 | continue 154 | 155 | joints = body.joints 156 | # convert joint coordinates to color space 157 | joint_points = self._kinect.body_joints_to_depth_space(joints) 158 | self.draw_body(joints, joint_points, SKELETON_COLORS[i]) 159 | 160 | # --- copy back buffer surface pixels to the screen, resize it if needed and keep aspect ratio 161 | # --- (screen size may be different from Kinect's color frame size) 162 | h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width() 163 | target_height = int(h_to_w * self._screen.get_width()) 164 | surface_to_draw = pygame.transform.scale(self._frame_surface, (self._screen.get_width(), target_height)); 165 | self._screen.blit(surface_to_draw, (0,0)) 166 | surface_to_draw = None 167 | pygame.display.update() 168 | 169 | # --- Go ahead and update the screen with what we've drawn. 170 | pygame.display.flip() 171 | 172 | # --- Limit to 60 frames per second 173 | self._clock.tick(60) 174 | 175 | # Close our Kinect sensor, close the window and quit. 176 | self._kinect.close() 177 | pygame.quit() 178 | 179 | 180 | __main__ = "Kinect v2 Body Game" 181 | game = BodyGameRuntime(); 182 | game.run(); 183 | 184 | -------------------------------------------------------------------------------- /Codes/feature: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/feature -------------------------------------------------------------------------------- /Codes/feature_closed_hand: -------------------------------------------------------------------------------- 1 | [3, 83] 2 | [3, 97] 3 | [3, 80] 4 | [3, 41] 5 | [3, 49] 6 | [3, 42] 7 | [2, 56] 8 | [3, 55] 9 | [2, 53] 10 | [2, 93] 11 | [3, 42] 12 | [2, 108] 13 | [4, 101] 14 | [3, 123] 15 | [3, 97] 16 | [3, 110] 17 | [3, 83] 18 | [3, 80] 19 | [3, 79] 20 | [3, 96] 21 | [3, 87] 22 | [2, 66] 23 | [2, 57] 24 | [3, 65] 25 | [2, 68] 26 | [3, 73] 27 | [3, 58] 28 | [3, 75] 29 | [3, 65] 30 | [5, 66] 31 | [3, 60] 32 | [3, 62] 33 | [3, 56] 34 | [2, 25] 35 | [2, 35] 36 | [2, 40] 37 | [3, 38] 38 | [3, 51] 39 | [2, 42] 40 | [2, 87] 41 | [2, 105] 42 | [3, 136] 43 | [2, 172] 44 | [2, 97] 45 | [3, 38] 46 | [3, 47] 47 | [3, 40] 48 | [3, 38] 49 | [2, 128] 50 | [3, 113] -------------------------------------------------------------------------------- /Codes/feature_open_hand: -------------------------------------------------------------------------------- 1 | [7, 246] 2 | [4, 113] 3 | [6, 244] 4 | [6, 248] 5 | [6, 245] 6 | [5, 234] 7 | [5, 253] 8 | [7, 253] 9 | [6, 251] 10 | [5, 255] 11 | [6, 272] 12 | [6, 271] 13 | [6, 277] 14 | [7, 269] 15 | [7, 276] 16 | [5, 274] 17 | [6, 290] 18 | [5, 277] 19 | [7, 290] 20 | [6, 288] 21 | [5, 285] 22 | [4, 292] 23 | [4, 304] 24 | [5, 296] 25 | [5, 307] 26 | [5, 
324] 27 | [7, 336] 28 | [6, 344] 29 | [6, 349] 30 | [4, 375] 31 | [5, 376] 32 | [6, 402] 33 | [6, 406] 34 | [5, 412] 35 | [5, 407] 36 | [5, 418] 37 | [5, 432] 38 | [6, 393] 39 | [5, 372] 40 | [5, 375] 41 | [6, 371] 42 | [6, 375] 43 | [5, 359] 44 | [5, 393] 45 | [6, 424] 46 | [6, 405] 47 | [5, 432] 48 | [5, 431] 49 | [6, 439] 50 | [5, 444] -------------------------------------------------------------------------------- /Codes/hand_body.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Codes/hand_body.jpg -------------------------------------------------------------------------------- /Codes/kinect_body_abhi.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | from scipy import ndimage 6 | import numpy as np 7 | import cv2 8 | from os import system as cmd 9 | import math 10 | 11 | class HandGestureObjectClass(object): 12 | def __init__(self): 13 | 14 | # Kinect runtime object, we want only color and body frames 15 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 16 | 17 | # here we will store skeleton data 18 | self._bodies = None 19 | 20 | def get_hand_coordinates(self, joint_points): 21 | 22 | right_x=int(joint_points[PyKinectV2.JointType_HandRight].x) 23 | right_y=int(joint_points[PyKinectV2.JointType_HandRight].y) 24 | left_x=int(joint_points[PyKinectV2.JointType_HandLeft].x) 25 | left_y=int(joint_points[PyKinectV2.JointType_HandLeft].y) 26 | 27 | right_x = right_x if right_x < 424 else 423 28 | right_y = right_y if right_y < 512 else 511 29 | left_x = left_x if left_x < 424 else 423 30 | left_y = left_y if left_y < 512 else 511 31 | 32 | right_hand = [right_x,right_y] 33 | left_hand = [left_x,left_y] 34 | return [right_hand,left_hand] 35 | 36 | def get_wrist_coordinates(self, joint_points): 37 | 38 | right_x=int(joint_points[PyKinectV2.JointType_WristRight].x) 39 | right_y=int(joint_points[PyKinectV2.JointType_WristRight].y) 40 | left_x=int(joint_points[PyKinectV2.JointType_WristLeft].x) 41 | left_y=int(joint_points[PyKinectV2.JointType_WristLeft].y) 42 | 43 | right_x = right_x if right_x < 424 else 423 44 | right_y = right_y if right_y < 512 else 511 45 | left_x = left_x if left_x < 424 else 423 46 | left_y = left_y if left_y < 512 else 511 47 | 48 | right_wrist = [right_x,right_y] 49 | left_wrist = [left_x,left_y] 50 | return [right_wrist,left_wrist] 51 | 52 | def neighbourhood(self, array, radius, seed): 53 | neighbour = np.array(array) 54 | neighbour *= 0 55 | temp = np.array(array[seed[1]-radius:seed[1]+radius, seed[0]-radius:seed[0]+radius], dtype = np.uint16) 56 | # cv2.imshow('hand',temp) 57 | # ret,temp = cv2.threshold(temp,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 58 | return temp 59 | 60 | def merge(self, array_big, array_small, seed ): 61 | [a,b] = np.shape(array_small) 62 | array_big[seed[1]-b/2:seed[1]+b/2, seed[0]-a/2:seed[0]+a/2] = array_small 63 | return array_big 64 | 65 | def max_hist_depth(self, frame): 66 | #print 'FRAME_MAX = ' + str(frame.max()) 67 | binaries = int(frame.max()) 68 | if binaries <= 0: 69 | return 0 70 | histogram, bins = np.histogram(frame, bins = binaries) 71 | histogram = histogram.tolist(); bins = bins.tolist(); 72 | histogram[0 : 1] = [0, 0] 73 | max_hist 
= bins[histogram.index( max(histogram) )] 74 | return max_hist 75 | 76 | def max_area_contour(self, contours): 77 | max_area = 0 78 | ci = 0 79 | for i in range(len(contours)): 80 | cnt=contours[i] 81 | area = cv2.contourArea(cnt) 82 | if(area>max_area): 83 | max_area=area 84 | ci=i 85 | return ci 86 | 87 | def min_area_contour(self, contours): 88 | min_area = 0 89 | ci = 0 90 | for i in range(len(contours)): 91 | cnt=contours[i] 92 | area = cv2.contourArea(cnt) 93 | if(area right_hand_depth + 1200] = 0 172 | # right_hand_filtered[right_hand_filtered < right_hand_depth - 1200] = 0 173 | 174 | 175 | right_hand_filtered_depth_frame = self.merge(neighbour, right_hand_filtered,right_hand) 176 | neighbour = right_hand_filtered_depth_frame 177 | 178 | if left_hand_filtered != None: 179 | left_hand_depth = left_hand_filtered[d,d] 180 | left_hand_filtered[left_hand_filtered > left_hand_depth + 1200] = 0 181 | # left_hand_filtered[left_hand_filtered < left_hand_depth - 1200] = 0 182 | 183 | left_hand_filtered_depth_frame = self.merge(neighbour, left_hand_filtered,left_hand) 184 | # ret,left_hand_filtered_depth_frame = cv2.threshold(left_hand_filtered_depth_frame,0,255,cv2.THRESH_OTSU) 185 | 186 | hand_filtered = left_hand_filtered_depth_frame 187 | hand_filtered_8 = np.array(hand_filtered/255, dtype = np.uint8) 188 | # hand_filtered += right_hand_filtered_depth_frame 189 | 190 | cv2.imshow('final',hand_filtered) 191 | cv2.imshow('8-bit', hand_filtered_8) 192 | 193 | 194 | right = np.array(right_hand_filtered/255, dtype = np.uint8) 195 | ret, right = cv2.threshold(right,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 196 | 197 | img1,contours1, hierarchy1 = cv2.findContours(right,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE) 198 | cnt = contours1[self.max_area_contour(contours1)] 199 | hull = cv2.convexHull(cnt,returnPoints = False) 200 | defects = cv2.convexityDefects(cnt,hull) 201 | drawing = np.zeros(right_hand_filtered.shape,np.uint8) 202 | drawing = cv2.cvtColor(drawing,cv2.COLOR_GRAY2RGB) 203 | for i in range(defects.shape[0]): 204 | s,e,f,d = defects[i,0] 205 | start = tuple(cnt[s][0]) 206 | end = tuple(cnt[e][0]) 207 | far = tuple(cnt[f][0]) 208 | cv2.line(drawing,start,end,[0,255,0],2) 209 | cv2.circle(drawing,far,5,[0,0,255],-1) 210 | drawing = cv2.drawContours(drawing,[cnt],-1,150,1) 211 | cv2.imshow('contours1',drawing) 212 | 213 | 214 | 215 | if cv2.waitKey(1) & 0xFF == ord('q'): 216 | break 217 | 218 | # Close our Kinect sensor, close the window and quit. 
219 | self._kinect.close() 220 | 221 | 222 | 223 | HandGestureObject = HandGestureObjectClass(); 224 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Codes/nn.pkl: -------------------------------------------------------------------------------- 1 | ccopy_reg 2 | _reconstructor 3 | p0 4 | (csknn.mlp 5 | Classifier 6 | p1 7 | c__builtin__ 8 | object 9 | p2 10 | Ntp3 11 | Rp4 12 | (dp5 13 | S'loss_type' 14 | p6 15 | NsS'dropout_rate' 16 | p7 17 | NsS'verbose' 18 | p8 19 | NsS'valid_set' 20 | p9 21 | NsS'regularize' 22 | p10 23 | NsS'learning_rate' 24 | p11 25 | F0.5 26 | sS'batch_size' 27 | p12 28 | I1 29 | sS'valid_size' 30 | p13 31 | F0.0 32 | sS'debug' 33 | p14 34 | I00 35 | sS'learning_momentum' 36 | p15 37 | F0.9 38 | sS'learning_rule' 39 | p16 40 | Vsgd 41 | p17 42 | sS'unit_counts' 43 | p18 44 | (lp19 45 | I2 46 | aI2 47 | aI2 48 | asS'weight_decay' 49 | p20 50 | NsS'layers' 51 | p21 52 | (lp22 53 | g0 54 | (csknn.nn 55 | Layer 56 | p23 57 | g2 58 | Ntp24 59 | Rp25 60 | (dp26 61 | S'name' 62 | p27 63 | Vhidden0 64 | p28 65 | sS'frozen' 66 | p29 67 | I00 68 | sS'dropout' 69 | p30 70 | NsS'pieces' 71 | p31 72 | NsS'units' 73 | p32 74 | I2 75 | sg20 76 | NsS'type' 77 | p33 78 | S'Sigmoid' 79 | p34 80 | sbag0 81 | (g23 82 | g2 83 | Ntp35 84 | Rp36 85 | (dp37 86 | g27 87 | Voutput 88 | p38 89 | sg29 90 | I00 91 | sg30 92 | Nsg31 93 | Nsg32 94 | I2 95 | sg20 96 | Nsg33 97 | S'Softmax' 98 | p39 99 | sbasS'n_iter' 100 | p40 101 | I5 102 | sS'f_stable' 103 | p41 104 | F0.001 105 | sS'n_stable' 106 | p42 107 | I10 108 | sS'callback' 109 | p43 110 | NsS'random_state' 111 | p44 112 | NsS'weights' 113 | p45 114 | (lp46 115 | (cnumpy.core.multiarray 116 | _reconstruct 117 | p47 118 | (cnumpy 119 | ndarray 120 | p48 121 | (I0 122 | tp49 123 | S'b' 124 | p50 125 | tp51 126 | Rp52 127 | (I1 128 | (I2 129 | I2 130 | tp53 131 | cnumpy 132 | dtype 133 | p54 134 | (S'f8' 135 | p55 136 | I0 137 | I1 138 | tp56 139 | Rp57 140 | (I3 141 | S'<' 142 | p58 143 | NNNI-1 144 | I-1 145 | I0 146 | tp59 147 | bI00 148 | S'\x9a#\xf9]\x9c\x1c\x05@j\xfb\x02D\xa3\x18\x07\xc0;\x024}E\x81\x0b@N{q\xf7\xb4N\x10\xc0' 149 | p60 150 | tp61 151 | bg47 152 | (g48 153 | (I0 154 | tp62 155 | g50 156 | tp63 157 | Rp64 158 | (I1 159 | (I2 160 | tp65 161 | g57 162 | I00 163 | S'\x18\xa7d\xdf\xb7q\x04\xc0@\xa2\xbd/z%\x07@' 164 | p66 165 | tp67 166 | btp68 167 | a(g47 168 | (g48 169 | (I0 170 | tp69 171 | g50 172 | tp70 173 | Rp71 174 | (I1 175 | (I2 176 | I2 177 | tp72 178 | g57 179 | I00 180 | S'?H\x97r\xa6\x85\x0e\xc0\xc0J+\xca\xf9E\t@C\x14\x06\x92\xc4,\x0f@\xafu\x96p[\xac\x10\xc0' 181 | p73 182 | tp74 183 | bg47 184 | (g48 185 | (I0 186 | tp75 187 | g50 188 | tp76 189 | Rp77 190 | (I1 191 | (I2 192 | tp78 193 | g57 194 | I00 195 | S'\x8e\x84LBG\x86\xc4?m\x84LBG\x86\xc4\xbf' 196 | p79 197 | tp80 198 | btp81 199 | asS'label_binarizers' 200 | p82 201 | (lp83 202 | g0 203 | (csklearn.preprocessing.label 204 | LabelBinarizer 205 | p84 206 | g2 207 | Ntp85 208 | Rp86 209 | (dp87 210 | S'neg_label' 211 | p88 212 | I0 213 | sS'sparse_input_' 214 | p89 215 | I00 216 | sS'sparse_output' 217 | p90 218 | I00 219 | sS'classes_' 220 | p91 221 | g47 222 | (g48 223 | (I0 224 | tp92 225 | g50 226 | tp93 227 | Rp94 228 | (I1 229 | (I2 230 | tp95 231 | g54 232 | (S'i4' 233 | p96 234 | I0 235 | I1 236 | tp97 237 | Rp98 238 | (I3 239 | S'<' 240 | p99 241 | NNNI-1 242 | I-1 243 | I0 244 | tp100 245 | bI00 246 | S'\x00\x00\x00\x00\x01\x00\x00\x00' 247 | p101 248 | tp102 249 | bsS'y_type_' 250 | 
p103 251 | Vmulticlass 252 | p104 253 | sS'pos_label' 254 | p105 255 | I1 256 | sbasb. -------------------------------------------------------------------------------- /Codes/test.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | from scipy import ndimage 6 | import numpy as np 7 | import cv2 8 | from os import system as cmd 9 | import math 10 | 11 | while(True): 12 | img = cv2.imread('hand_body.jpg',0) 13 | img = np.array(img, dtype = np.uint8) 14 | cv2.imshow('oring',img) 15 | val = img[40,40]+3 16 | 17 | img[img>val] /=3 18 | 19 | 20 | cv2.imshow('orig2',img) 21 | ret, img = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) 22 | cv2.imshow('thresh', img) 23 | # im2, contours, hierarchy = cv2.findContours(img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) 24 | 25 | # cv2.drawContours(img, contours, 3, 255, 3) 26 | # cv2.imshow('cont',img) 27 | 28 | if cv2.waitKey(1) & 0xFF == ord('q'): 29 | break -------------------------------------------------------------------------------- /Codes/test_ANN.py: -------------------------------------------------------------------------------- 1 | # Import datasets, classifiers and performance metrics 2 | from sknn.mlp import Classifier, Layer 3 | import numpy as np 4 | import logging 5 | import pickle 6 | logging.basicConfig() 7 | 8 | # The digits dataset 9 | opt = [] 10 | f = open("feature_open_hand", "r+") 11 | ipt_open = f.read() 12 | f.close() 13 | ipt_open = ipt_open.split("\n") 14 | 15 | for i in range(0,len(ipt_open)-1): 16 | ipt_open[i] = ipt_open[i].strip("[]").split(",") 17 | # print ipt_open 18 | ipt_open[i][0] = float(ipt_open[i][0])/7 19 | ipt_open[i][1] = float(ipt_open[i][1])/500 20 | opt.append(1) 21 | 22 | f = open("feature_closed_hand", "r+") 23 | ipt_closed = f.read() 24 | f.close() 25 | ipt_closed = ipt_closed.split("\n") 26 | 27 | for i in range(0,len(ipt_closed)-1): 28 | ipt_closed[i] = ipt_closed[i].strip("[]").split(",") 29 | ipt_closed[i][0] = float(ipt_closed[i][0])/7 30 | ipt_closed[i][1] = float(ipt_closed[i][1])/500 31 | opt.append(0) 32 | 33 | ipt = ipt_open[:-1]+ipt_closed[:-1] 34 | 35 | # ipt = [[2/7,30/400],[6/7,400/400]] 36 | # opt = [0,1] 37 | ipt = np.asarray(ipt) 38 | opt = np.asarray(opt) 39 | 40 | print ":"+str(len(ipt)) 41 | print len(opt) 42 | print opt 43 | print ipt 44 | 45 | nn = Classifier( 46 | layers=[ 47 | Layer("Sigmoid", units=2), 48 | # Layer("Softmax",units=5), 49 | Layer("Softmax")], 50 | learning_rate=0.5, 51 | n_iter=5) 52 | 53 | # nn.set_parameters([([[-3.75906463, 1.26411728],[-5.44439202, 0.44432455]], [ 2.63582797, -0.23474542]), 54 | # ([[ 4.32310838, -5.46097277],[-1.114463 , 1.37638111]], [-2.13190273, 2.13190273])]) 55 | nn.fit(ipt,opt) 56 | 57 | a = np.asarray([[2/7,30/400],[4/7,30/400],[6/7,400/400],[4/7,400/400]]) 58 | # a = np.asarray([[2,30],[4,30],[6,400],[4,400]]) 59 | # a =a.reshape(2,-1) 60 | 61 | 62 | # params = nn.get_parameters() 63 | 64 | # print "::NEW::" 65 | 66 | # tnn = Classifier( 67 | # layers=[ 68 | # Layer("Sigmoid", units=2), 69 | # # Layer("Softmax",units=5), 70 | # Layer("Softmax")], 71 | # learning_rate=0.5, 72 | # n_iter=5) 73 | 74 | # tnn.set_parameters(params) 75 | 76 | op = nn.predict(a) 77 | pickle.dump(nn, open('nn.pkl','wb')) -------------------------------------------------------------------------------- /Documentation/MATLAB_approach: 
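Note on /Codes/test_ANN.py above: it ends by pickling the trained network to nn.pkl (the same file dumped above). A minimal loading-and-prediction sketch follows; it assumes the sknn package and the Python environment that wrote nn.pkl are available, and it reuses the /7 and /500 feature scaling applied in test_ANN.py before training. The sample values are illustrative only.

# Minimal sketch: reuse the classifier pickled by test_ANN.py.
# Assumes sknn is installed and the pickle is readable in this environment.
import pickle
import numpy as np

with open('nn.pkl', 'rb') as f:
    nn = pickle.load(f)

# test_ANN.py divides the first feature by 7 and the second by 500,
# so incoming samples must be scaled the same way.
sample = np.asarray([[5 / 7.0, 300 / 500.0]])
print(nn.predict(sample))   # 1 ~ open hand, 0 ~ closed hand (labels used above)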
-------------------------------------------------------------------------------- 1 | # MATLAB APPROACH # 2 | -SUBTRACT 3 | -DENOISE(EROSION) 4 | -XOR WITH IMAGE 5 | -HISTOGRAM 6 | -MAX VALUE=DEPTH OF HAND 7 | -FILL REGION WITH TOLERANCE 8 | -------------------------------------------------------------------------------- /Documentation/gestures: -------------------------------------------------------------------------------- 1 | ::static gestures:: 2 | 1]hand open 3 | 2]hand closed 4 | 3]peace 5 | 4]thumbs up 6 | 7 | ::single hand motion:: 8 | 1]grab 9 | 2]pinch to zoom 10 | 3]rotate 11 | 4]wave to shuffle 12 | 5]drag and drop 13 | 6]tap select 14 | 15 | ::double hand motion:: 16 | 1]double hand zoom 17 | 2] 18 | 19 | http://www.morethantechnical.com/2010/12/28/hand-gesture-recognition-via-model-fitting-in-energy-minimization-wopencv/ -------------------------------------------------------------------------------- /Drivers/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Drivers/.DS_Store -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.1/PyKinectRuntime.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Drivers/pykinect2_v1.0.1/PyKinectRuntime.pyc -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.1/PyKinectV2.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Drivers/pykinect2_v1.0.1/PyKinectV2.pyc -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Drivers/pykinect2_v1.0.1/__init__.py -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.1/__init__.pyc: -------------------------------------------------------------------------------- 1 | 03f3 0d0a 4342 2f56 6300 0000 0000 0000 2 | 0001 0000 0040 0000 0073 0400 0000 6400 3 | 0053 2801 0000 004e 2800 0000 0028 0000 4 | 0000 2800 0000 0028 0000 0000 7333 0000 5 | 0043 3a5c 416e 6163 6f6e 6461 5c4c 6962 6 | 5c73 6974 652d 7061 636b 6167 6573 5c70 7 | 796b 696e 6563 7432 5c5f 5f69 6e69 745f 8 | 5f2e 7079 7408 0000 003c 6d6f 6475 6c65 9 | 3e01 0000 0073 0000 0000 10 | 11 | 0 -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.2/PyKinectRuntime.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Drivers/pykinect2_v1.0.2/PyKinectRuntime.pyc -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.2/PyKinectV2.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Drivers/pykinect2_v1.0.2/PyKinectV2.pyc -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.2/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Drivers/pykinect2_v1.0.2/__init__.py -------------------------------------------------------------------------------- /Drivers/pykinect2_v1.0.2/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Drivers/pykinect2_v1.0.2/__init__.pyc -------------------------------------------------------------------------------- /Images/Test.p: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | from scipy import ndimage 6 | from skimage.morphology import skeletonize,medial_axis 7 | import numpy as np 8 | import cv2 9 | from os import system as cmd 10 | import math 11 | import time 12 | 13 | while(1): 14 | im = cv2.imread("openhand.png") 15 | imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) 16 | ret,thresh = cv2.threshold(imgray,127,255,0) 17 | img1,contours1, hierarchy1 = cv2.findContours(thresh,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE) 18 | 19 | drawing = np.zeros(thresh.shape,np.uint8) 20 | 21 | # cv2.drawContours(drawing, contours1, 0, 255, 1) 22 | # cv2.imshow("int",drawing) 23 | 24 | N = 5 25 | parts = np.array_split(contours1[0],N) 26 | for i in range(N-1): 27 | cnt = [parts[i]] 28 | print cnt 29 | cv2.drawContours(drawing, cnt, -1, 255, 1) 30 | cv2.imshow("int",drawing) 31 | # time.sleep(1) 32 | 33 | if cv2.waitKey(1) & 0xFF == ord('q'): 34 | break -------------------------------------------------------------------------------- /Images/hand.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Images/hand.bmp -------------------------------------------------------------------------------- /Images/hand.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Images/hand.jpg -------------------------------------------------------------------------------- /Images/hand1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Images/hand1.jpg -------------------------------------------------------------------------------- /Images/hand2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Images/hand2.png -------------------------------------------------------------------------------- /Images/openhand.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Images/openhand.png -------------------------------------------------------------------------------- /Images/pointer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Images/pointer.png 
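Editor's note: the Documentation/MATLAB_approach notes earlier in this dump describe the hand-segmentation pipeline only as terse steps (subtract, denoise by erosion, XOR with the image, histogram, peak value = depth of hand, fill region with tolerance). The sketch below is one possible reading of those steps in Python/OpenCV, the repository's language; the names segment_hand_by_depth, background, noise_threshold and depth_tolerance are assumptions made for this illustration and do not appear in the repository code.

import numpy as np
import cv2

def segment_hand_by_depth(depth_frame, background, noise_threshold=10, depth_tolerance=8):
    # Illustrative sketch of the MATLAB_approach steps; not taken from the repo code.
    # SUBTRACT: difference between the current depth frame and a stored background frame
    diff = np.abs(depth_frame.astype(np.int32) - background.astype(np.int32))

    # DENOISE (EROSION): build a foreground mask and erode away small speckle
    mask = (diff > noise_threshold).astype(np.uint8) * 255
    mask = cv2.erode(mask, np.ones((3, 3), np.uint8), iterations=2)

    # "XOR WITH IMAGE": re-apply the cleaned mask to the original depth image
    foreground = np.where(mask > 0, depth_frame, 0)

    # HISTOGRAM -> MAX VALUE = DEPTH OF HAND: the most frequent non-zero depth
    values = foreground[foreground > 0]
    if values.size == 0:
        return None
    hist, bins = np.histogram(values, bins=max(int(values.max()), 1))
    hand_depth = bins[int(np.argmax(hist))]

    # FILL REGION WITH TOLERANCE: keep only pixels within a band around that depth
    hand_mask = np.logical_and(foreground > hand_depth - depth_tolerance,
                               foreground < hand_depth + depth_tolerance)
    return hand_mask.astype(np.uint8) * 255

A similar "band around a reference depth" idea appears in Codes/test.py, which reads one reference pixel, adds a small offset, and suppresses everything deeper than it before thresholding.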
-------------------------------------------------------------------------------- /Images/right_hand_filtered.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Images/right_hand_filtered.png -------------------------------------------------------------------------------- /Papers/Hand Tracking Using Detection.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Papers/Hand Tracking Using Detection.pdf -------------------------------------------------------------------------------- /Papers/PSO_Kinect Hand Tracking.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Papers/PSO_Kinect Hand Tracking.pdf -------------------------------------------------------------------------------- /Papers/Shape based hand recog.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Papers/Shape based hand recog.pdf -------------------------------------------------------------------------------- /Papers/Vision based hand pose extimation Review.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Papers/Vision based hand pose extimation Review.pdf -------------------------------------------------------------------------------- /Papers/cvpr14_handtracking.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Papers/cvpr14_handtracking.pdf -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PyKubed 2 | This is our attempt at improving gesture recognition on the Microsoft Kinect. We have built pretty much everything from scratch. 3 | We have even modified the Drivers to enable fast mapping between the color and depth frames; the default mapping grew slower over time because of RAM issues. 4 | 5 | ### Things To-Do ### 6 | - Skeleton map [Done] 7 | - Hand recognition and segmentation [Done] 8 | ![Segmented Hand](https://github.com/abhirajD/PyKubed/blob/master/Images/hand1.jpg) 9 | 10 | 11 | ![Hand_Body Interface](https://github.com/abhirajD/PyKubed/blob/master/Codes/Abhi/hand_body.jpg) 12 | ![Filtered Hand](https://github.com/abhirajD/PyKubed/blob/master/Images/openhand.png) 13 | - Feature Extraction [Done] 14 | - Gesture classification [Done] 15 | - Augmented reality engine [Done] 16 | - Modularize everything (API) 17 | - Finish everything up to gesture classification by 1-2-2016 [Kinda Done] 18 | 19 | ### What is this repository for? ### 20 | 21 | * Quick summary 22 | Get more gestures supported on the Kinect (this is a generic approach, so it could be used with any depth streamer), and get the gesture classifier to perform better. 23 | * Version: 2.0.0 24 | 25 | ### How do I get set up? ### 26 | 27 | * Dependencies: PyKinect2, NumPy, OpenCV 28 | * Configuration: 29 | It's simple: clone the repository, cd into the directory, and run the relevant main_*.py with python from cmd/terminal.
(Do SSPY :P) 30 | 31 | ### Contribution guidelines ### 32 | We appreciate contributions in the following forms: 33 | 34 | - Writing tests 35 | - Code review 36 | 37 | If you want to become one of us, you are always welcome. Contact us by whatever means you find comfortable. 38 | 39 | ### Who do I talk to? ### 40 | * Feel free to contact any of the Admins (A, A or A) on their corresponding email IDs. -------------------------------------------------------------------------------- /Test Data/data_dump: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Test Data/data_dump -------------------------------------------------------------------------------- /Test/centroid.py: -------------------------------------------------------------------------------- 1 | 2 | import cv2 3 | import numpy as np 4 | 5 | 6 | img=cv2.imread('hand1.jpg',0) # load the segmented hand image as grayscale 7 | cv2.imshow('image',img) 8 | cv2.waitKey(0) 9 | 10 | ret2,thresh = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) # Otsu binarization of the hand 11 | kernel = np.ones((5,5),np.uint8) 12 | thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel) # close small holes in the binary mask 13 | cv2.imshow('threshold',thresh) 14 | cv2.waitKey(0) 15 | 16 | image,contours,hierarchy = cv2.findContours(thresh, 1, 2) 17 | print len(contours) 18 | thresh = cv2.drawContours(thresh, contours, -1, (0,255,0), 10) 19 | cv2.imshow('centroid',thresh) 20 | cv2.waitKey(0) 21 | # #print type(contours) 22 | # if contours: 23 | 24 | # cnt = contours[1] 25 | # convex_Hull=cv2.convexHull(cnt) 26 | # convexity_Defects = cv2.convexityDefects(cnt,convex_Hull) 27 | # M = cv2.moments(cnt) 28 | # print M 29 | # if M['m00'] != 0: 30 | # print ':' 31 | 32 | # cx = int(M['m10']/M['m00']) 33 | # print cx 34 | # cy = int(M['m01']/M['m00']) 35 | # print cy 36 | # frame = cv2.circle(img,(cx,cy), 10,(255,0,0),5) 37 | # cv2.imshow('centroid',frame) 38 | # cv2.waitKey(0) -------------------------------------------------------------------------------- /Test/hand.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abhirajD/PyKubed/81a8fda9207e6dd0a2df98e074c53f8368d9af5f/Test/hand.bmp -------------------------------------------------------------------------------- /Test/test_blob.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | from matplotlib import pyplot as plt 5 | import math 6 | import scipy 7 | import numpy as np 8 | import cv2 9 | import pygame 10 | 11 | 12 | class HandGestureObjectClass(object): 13 | def __init__(self): 14 | 15 | # Kinect runtime object, we want only depth and body frames 16 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body) 17 | 18 | 19 | # here we will store skeleton data 20 | self._bodies = None 21 | 22 | 23 | 24 | 25 | def run(self): 26 | print_frame=None 27 | 28 | print ':IN_RUN:Pulling Frames' 29 | 30 | 31 | 32 | # -------- Main Program Loop ----------- 33 | while (True): 34 | # --- Main event loop 35 | 36 | if self._kinect.has_new_depth_frame() or self._kinect.has_new_body_frame(): 37 | print ':IN_RUN:depth_frame received' 38 | 39 | depth_frame = self._kinect.get_last_depth_frame() 40 | print_frame = 32*depth_frame.reshape(424,512) 41 | 42 | 43 | self._bodies = self._kinect.get_last_body_frame() 44 | 45 | # --- draw skeletons to
_frame_surface 46 | if self._bodies is not None: 47 | print ':IN_RUN:body received' 48 | for i in range(0, self._kinect.max_body_count): 49 | body = self._bodies.bodies[i] 50 | if not body.is_tracked: 51 | continue 52 | 53 | joints = body.joints 54 | # convert joint coordinates to color space 55 | joint_points = self._kinect.body_joints_to_depth_space(joints) 56 | print ':' 57 | rx=joint_points[PyKinectV2.JointType_HandRight].x 58 | ry=joint_points[PyKinectV2.JointType_HandRight].y 59 | lx=joint_points[PyKinectV2.JointType_HandLeft].x 60 | ly=joint_points[PyKinectV2.JointType_HandLeft].y 61 | rx=math.floor(rx) 62 | ry=math.floor(ry) 63 | lx=math.floor(lx) 64 | ly=math.floor(ly) 65 | print_frame=cv2.circle(print_frame,(int(rx),int(ry)), 10,(255,0,0),5) 66 | print_frame=cv2.circle(print_frame,(int(lx),int(ly)), 10,(255,0,0),5) 67 | ############# 68 | # Read image 69 | im = cv2.imread("hand.bmp", cv2.IMREAD_GRAYSCALE) 70 | #im=print_frame 71 | # Setup SimpleBlobDetector parameters. 72 | params = cv2.SimpleBlobDetector_Params() 73 | # Change thresholds 74 | params.minThreshold = 10 75 | params.maxThreshold = 200 76 | # Filter by Area. 77 | #params.filterByArea = True 78 | #params.minArea = 1500 79 | # Filter by Circularity 80 | #params.filterByCircularity = True 81 | #params.minCircularity = 0.1 82 | # Filter by Convexity 83 | #params.filterByConvexity = True 84 | #params.minConvexity = 0.87 85 | # Filter by Inertia 86 | #params.filterByInertia = True 87 | #params.minInertiaRatio = 0.01 88 | # Create a detector with the parameters 89 | ver = (cv2.__version__).split('.') 90 | if int(ver[0]) < 3 : 91 | detector = cv2.SimpleBlobDetector(params) 92 | else : 93 | detector = cv2.SimpleBlobDetector_create(params) 94 | # Detect blobs. 95 | keypoints = detector.detect(im) 96 | # Draw detected blobs as red circles. 97 | # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures 98 | # the size of the circle corresponds to the size of blob 99 | im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) 100 | # Show blobs 101 | cv2.imshow("Keypoints", im_with_keypoints) 102 | cv2.waitKey(0) 103 | 104 | 105 | if print_frame != None: 106 | 107 | cv2.imshow('Depthimage',print_frame) 108 | 109 | if cv2.waitKey(1) & 0xFF == ord('q'): 110 | break 111 | 112 | 113 | # --- Limit to 60 frames per second 114 | 115 | 116 | # Close our Kinect sensor, close the window and quit. 
117 | self._kinect.close() 118 | 119 | 120 | 121 | HandGestureObject = HandGestureObjectClass(); 122 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Test/test_infrared.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | 5 | from matplotlib import pyplot as plt 6 | # import SimpleCV.ImageClass as scv # unused import, commented out so the script runs without SimpleCV 7 | 8 | import scipy 9 | import numpy as np 10 | import cv2 11 | 12 | class Kinect_infrared(object): 13 | 14 | def __init__(self): 15 | 16 | # Kinect runtime object, we want only infrared and depth frames 17 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Infrared|PyKinectV2.FrameSourceTypes_Depth) 18 | 19 | def max_hist_depth(self, frame): 20 | #print 'FRAME_MAX = ' + str(frame.max()) 21 | binaries = int(frame.max()) 22 | if binaries <= 0: 23 | return 0 24 | histogram, bins = np.histogram(frame, bins = binaries) 25 | histogram = histogram.tolist(); bins = bins.tolist(); 26 | histogram[0 : 2] = [0, 0] # zero the lowest bins so empty/zero depths do not dominate the histogram 27 | max_hist = bins[histogram.index( max(histogram) )] 28 | return max_hist 29 | 30 | def run(self): 31 | 32 | 33 | print ':IN_RUN:Pulling Frames' 34 | 35 | while(True): 36 | #Main event loop 37 | if self._kinect.has_new_infrared_frame() or self._kinect.has_new_depth_frame(): 38 | 39 | iframe = self._kinect.get_last_infrared_frame() 40 | iframe *= 1 # no-op scale factor; raise it to brighten the infrared preview 41 | iframe = iframe.reshape(424,512) 42 | cv2.imshow('Infrared',iframe) 43 | 44 | dframe = self._kinect.get_last_depth_frame() 45 | dframe = dframe.reshape(424,512) 46 | dframe = np.array(dframe/16, dtype = np.uint8) 47 | gaussian_thresh = cv2.adaptiveThreshold(dframe, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV,11,2) 48 | blur = cv2.GaussianBlur(dframe,(5,5),0) 49 | ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) 50 | # dframe = cv2.findContours(dframe, mode = cv2.RETR_EXTERNAL, method = cv2.CHAIN_APPROX_NONE) 51 | cv2.imshow('Gaussian_thresh', gaussian_thresh) 52 | cv2.imshow('Gaussian_blur', blur) 53 | cv2.imshow('Gaussian_otsu', th3) 54 | 55 | 56 | 57 | if cv2.waitKey(1) & 0xFF == ord('q'): 58 | break 59 | 60 | # Close our Kinect sensor, close the window and quit.
61 | self._kinect.close() 62 | 63 | 64 | HandGestureObject = Kinect_infrared(); 65 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Test/test_longExposureInfrared.py: -------------------------------------------------------------------------------- 1 | from pykinect2 import PyKinectV2 2 | from pykinect2.PyKinectV2 import * 3 | from pykinect2 import PyKinectRuntime 4 | 5 | from matplotlib import pyplot as plt 6 | 7 | import scipy 8 | import numpy as np 9 | import cv2 10 | 11 | class Kinect_LongExposureInfrared(object): 12 | 13 | def __init__(self): 14 | 15 | # Kinect runtime object, we want only depth and body frames 16 | self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_LongExposureInfrared) 17 | 18 | def run(self): 19 | print ':IN_RUN:Pulling Frames' 20 | 21 | while(True): 22 | #Main event loop 23 | if self._kinect.has_new_long_exposure_infrared_frame(): 24 | 25 | frame = self._kinect.get_last_long_exposure_infrared_frame() 26 | # frame = np.array(frame ,dtype = np.uint8) 27 | frame = frame.reshape(424,512) 28 | cv2.imshow('Long_Exposure_Infrared',frame) 29 | 30 | if cv2.waitKey(1) & 0xFF == ord('q'): 31 | break 32 | 33 | # Close our Kinect sensor, close the window and quit. 34 | self._kinect.close() 35 | 36 | 37 | HandGestureObject = Kinect_LongExposureInfrared(); 38 | HandGestureObject.run(); -------------------------------------------------------------------------------- /Test/testiter.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | a = np.arange(25).reshape(5,5) 4 | d=0 5 | c=[2,3] 6 | print a 7 | it = np.nditer(a, flags=['multi_index'],op_flags=['readwrite']) 8 | while not it.finished: 9 | p=it.multi_index 10 | 11 | 12 | if (p[0]>c[0]+d or p[0]c[1]+d or p[1]" % (it[0], it.multi_index), 16 | it.iternext() 17 | print a --------------------------------------------------------------------------------
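Editor's note: Codes/test_ANN.py above trains a two-input sknn.mlp Classifier from the feature_open_hand and feature_closed_hand files, scaling the first raw feature by 7 and the second by 500, labelling open hands as 1 and closed hands as 0, and pickling the fitted model to nn.pkl. The sketch below shows how such a pickle could be loaded and queried at runtime; the helper names load_hand_classifier and classify_hand and the example feature values are assumptions for illustration, not code from the repository.

import pickle
import numpy as np

def load_hand_classifier(model_path='nn.pkl'):
    # nn.pkl is the pickled sknn.mlp.Classifier written by Codes/test_ANN.py;
    # sknn (scikit-neuralnetwork) must be importable for unpickling to succeed.
    with open(model_path, 'rb') as f:
        return pickle.load(f)

def classify_hand(nn, raw_defects, raw_measure):
    # test_ANN.py divides the first raw feature by 7 and the second by 500 before
    # training, so the same scaling is applied here before predicting.
    features = np.asarray([[raw_defects / 7.0, raw_measure / 500.0]])
    label = nn.predict(features)   # 1 was trained as "open hand", 0 as "closed hand"
    return int(np.ravel(label)[0])

# Hypothetical usage:
# nn = load_hand_classifier()
# print(classify_hand(nn, 5, 320))   # e.g. 5 convexity defects and a second measurement of 320

Loading the pickle once and reusing the classifier, as above, avoids re-reading nn.pkl on every frame of the main loop.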