├── Hand gesture
│   ├── gesture.py
│   └── readme.txt
├── README.md
└── blob_webcam
    ├── readme.txt
    └── skindetectorhk.py

/Hand gesture/gesture.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import math

# Capture frames from the default webcam
cap = cv2.VideoCapture(0)

while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break

    # Region of interest in which the hand is expected
    cv2.rectangle(img, (300, 300), (100, 100), (0, 255, 0), 2)
    crop_img = img[100:300, 100:300]

    # Grayscale, blur, then threshold (Otsu picks the threshold automatically)
    grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(grey, (35, 35), 0)
    _, thresh1 = cv2.threshold(blurred, 127, 255,
                               cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    cv2.imshow('Thresholded', thresh1)

    # Find the largest contour and assume it is the hand
    # (OpenCV 2.x signature; OpenCV 3.x returns image, contours, hierarchy here)
    contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_NONE)
    if not contours:
        continue
    cnt = max(contours, key=cv2.contourArea)

    # Bounding box and convex hull of the hand contour
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
    hull = cv2.convexHull(cnt)
    drawing = np.zeros(crop_img.shape, np.uint8)
    cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 2)
    cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 2)

    # Convexity defects are the valleys between extended fingers
    hull = cv2.convexHull(cnt, returnPoints=False)
    defects = cv2.convexityDefects(cnt, hull)
    count_defects = 0

    if defects is not None:
        for i in range(defects.shape[0]):
            s, e, f, d = defects[i, 0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])
            # Sides of the triangle formed by (start, end, far)
            a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
            b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
            c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
            # Angle at the defect point via the cosine rule, in degrees
            angle = math.degrees(math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)))
            # Defects sharper than 90 degrees lie between two extended fingers
            if angle <= 90:
                count_defects += 1
                cv2.circle(crop_img, far, 1, [0, 0, 255], -1)
            cv2.line(crop_img, start, end, [0, 255, 0], 2)

    # Show a message depending on the number of defects (fingers - 1)
    if count_defects == 1:
        cv2.putText(img, "I'M VIKRANT SHARMA", (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
    elif count_defects == 2:
        text = "THIS IS A BASIC HAND GESTURE RECOGNISER!!"
        cv2.putText(img, text, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    elif count_defects == 3:
        cv2.putText(img, "This is FOUR (:", (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
    elif count_defects == 4:
        cv2.putText(img, "HARE KRSNA", (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
    else:
        cv2.putText(img, "GOOD AFTERNOON TEACHERS", (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)

    cv2.imshow('Gesture', img)
    all_img = np.hstack((drawing, crop_img))
    cv2.imshow('Contours', all_img)

    # Exit on ESC
    k = cv2.waitKey(10)
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/Hand gesture/readme.txt:
--------------------------------------------------------------------------------
# Hand-Gesture-Recognition-Python-OPENCV

This Python script recognises hand gestures by thresholding a region of interest, finding the hand contour, and analysing the convex hull and convexity defects of the palm, using the OpenCV computer vision library.
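
The finger count comes from the convexity defects of the hand contour: every valley between two extended fingers shows up as a defect with a sharp inner angle. The sketch below is a simplified version of that step from gesture.py (`cnt` stands for a hand contour that has already been found; the variable names are illustrative):

    import math
    import cv2

    hull = cv2.convexHull(cnt, returnPoints=False)
    defects = cv2.convexityDefects(cnt, hull)
    fingers_minus_one = 0
    if defects is not None:
        for i in range(defects.shape[0]):
            s, e, f, _ = defects[i, 0]
            start, end, far = tuple(cnt[s][0]), tuple(cnt[e][0]), tuple(cnt[f][0])
            # side lengths of the triangle (start, end, far), then the cosine rule
            a = math.hypot(end[0] - start[0], end[1] - start[1])
            b = math.hypot(far[0] - start[0], far[1] - start[1])
            c = math.hypot(end[0] - far[0], end[1] - far[1])
            angle = math.degrees(math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)))
            # a defect sharper than 90 degrees is taken as the gap between two fingers
            if angle <= 90:
                fingers_minus_one += 1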

Built using OpenCV 2.4.12 on Python 2.7.10

Works on OpenCV 2.4.x and Python 2.x

# Image
![IMAGE ALT TEXT](https://i.ytimg.com/vi/bh9_uOdz-bU/maxresdefault.jpg)

Run:

python gesture.py

# See Also

A Python script for skin detection lives in the blob_webcam module.

Run:

python skindetectorhk.py
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Hand-Gesture-Recognition-Python-OPENCV

This Python script recognises hand gestures by thresholding a region of interest, finding the hand contour, and analysing the convex hull and convexity defects of the palm, using the OpenCV computer vision library.

Built using OpenCV 2.4.12 on Python 2.7.10
Works on OpenCV 2.4.x and Python 2.x

# Image
![IMAGE ALT TEXT](https://i.ytimg.com/vi/bh9_uOdz-bU/maxresdefault.jpg)

Run:
python gesture.py

# See Also
A Python script for skin detection lives in the blob_webcam module.

Run:
python skindetectorhk.py
--------------------------------------------------------------------------------
/blob_webcam/readme.txt:
--------------------------------------------------------------------------------
Python script that detects skin in a live webcam feed (or a video file) using an HSV colour range.

Run:

python skindetectorhk.py

For more info:
http://www.pyimagesearch.com/2014/08/18/skin-detection-step-step-example-using-python-opencv/
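
How it works, in short: each frame is converted to HSV, pixels inside a fixed "skin" range are kept as a mask, and the mask is cleaned up with an elliptical erode/dilate before being applied back to the frame. A simplified sketch of that core step (the HSV bounds are the ones used in skindetectorhk.py; `frame` stands for a BGR image grabbed from the webcam):

    import cv2
    import numpy as np

    # HSV bounds treated as "skin" (same values as in skindetectorhk.py)
    lower = np.array([0, 48, 80], dtype="uint8")
    upper = np.array([20, 255, 255], dtype="uint8")

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)

    # remove speckle noise and close small holes in the mask
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    mask = cv2.erode(mask, kernel, iterations=2)
    mask = cv2.dilate(mask, kernel, iterations=2)

    skin = cv2.bitwise_and(frame, frame, mask=mask)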
--------------------------------------------------------------------------------
/blob_webcam/skindetectorhk.py:
--------------------------------------------------------------------------------
# import the necessary packages
# from pyimagesearch import imutils
import numpy as np
import argparse
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
args = vars(ap.parse_args())

# define the upper and lower boundaries of the HSV pixel
# intensities to be considered 'skin'
lower = np.array([0, 48, 80], dtype="uint8")
upper = np.array([20, 255, 255], dtype="uint8")

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)

# otherwise, load the video
else:
    camera = cv2.VideoCapture(args["video"])

# keep looping over the frames in the video
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a
    # frame, then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # resize the frame, convert it to the HSV color space,
    # and determine the HSV pixel intensities that fall into
    # the specified upper and lower boundaries
    # frame = imutils.resize(frame, width = 400)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    skinMask = cv2.inRange(converted, lower, upper)

    # apply a series of erosions and dilations to the mask
    # using an elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    skinMask = cv2.erode(skinMask, kernel, iterations=2)
    skinMask = cv2.dilate(skinMask, kernel, iterations=2)

    # blur the mask to help remove noise, then apply the
    # mask to the frame
    skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
    skin = cv2.bitwise_and(frame, frame, mask=skinMask)

    # show the skin in the image along with the mask
    cv2.imshow("images", np.hstack([frame, skin]))

    # if the 'q' key is pressed, stop the loop
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------