├── requirements.txt
├── README.md
├── movement-v1.py
└── movement-v2.py
/requirements.txt:
--------------------------------------------------------------------------------
opencv-python
keyboard
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Real-time Face Movement Tracking
90 lines of code that convert your face movements into keyboard commands.

# Description
This is a basic face-movement tracker that converts face movements into keyboard commands (**UP - DOWN - LEFT - RIGHT**). A Haar cascade detects the face, and the centre of the detected face box serves as a nose reference point. There are two versions: v1 uses a fixed reference boundary, which does not work as well as expected because you have to return to the same position after every movement. To fix this, v2 uses the position change relative to the previous position, which is more dynamic and easier to control; there is no need to re-centre after each move.
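For reference, this is the detection step both versions share, condensed from `detect_nose` below (the helper name `nose_point` is just for this sketch):

```
import cv2

# Haar cascade face detector bundled with OpenCV
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

def nose_point(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in faceCascade.detectMultiScale(gray, 1.1, 8):
        # the centre of the face box doubles as the nose position
        return (x + w // 2, y + h // 2)
    return None
```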

## movement-v1.py
In version 1 I use a fixed reference boundary. When the nose reference moves outside the boundary, I compute the direction of the movement and convert it into a keyboard command using the keyboard library. The rule is sketched below.
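A minimal sketch of that rule (the actual implementation is `keyboard_events` in `movement-v1.py`):

```
def direction_v1(nose, boundary):
    (x1, y1), (x2, y2) = boundary  # fixed box drawn by draw_controller
    x, y = nose
    if x < x1: return "left"
    if x > x2: return "right"
    if y < y1: return "up"
    if y > y2: return "down"
    return None  # still inside the box, so no key press
```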

## movement-v2.py
In version 2 I use the change in the reference position relative to the previous position within a short time window, compute the direction vector from that displacement, and convert the dominant direction into a keyboard command. The rule is sketched below.
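A minimal sketch of the displacement rule (the actual implementation is `get_movement` in `movement-v2.py`; the 50-pixel threshold matches `thres_diff` there):

```
def direction_v2(nose, prev_nose, thres=50):
    dx = nose[0] - prev_nose[0]
    dy = nose[1] - prev_nose[1]
    if max(abs(dx), abs(dy)) < thres:  # ignore small jitter
        return None
    if abs(dx) > abs(dy):              # the dominant axis wins
        return "right" if dx > 0 else "left"
    return "down" if dy > 0 else "up"
```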

## Dependencies
These are the dependencies for running this application; use pip to install them.
* **opencv-python**
* **keyboard**

```
$ pip install -r requirements.txt
```

## How to use
1. Download or clone this repository.
2. Extract it to some location.
3. Run **```movement-v1.py```** (fixed boundary) or **```movement-v2.py```** (dynamic movement).
   NOTE: If you get a `(-215) Assertion failed` error on line 81, see https://github.com/surya-veer/movement-tracking/issues/4#issuecomment-664018021 and the camera check after this list.
4. Open any online game such as Subway Surfers or Temple Run.
5. Start moving your head to play; the script presses UP, DOWN, LEFT, or RIGHT based on your movements.
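The `(-215) Assertion failed` error usually means `cv2.cvtColor` received an empty frame because the webcam did not open. A quick way to probe for a working camera index (an illustrative snippet, not part of this repo):

```
import cv2

# try a few device indices until one returns frames
for index in range(4):
    cap = cv2.VideoCapture(index)
    ok, _ = cap.read()
    cap.release()
    if ok:
        print("working camera index:", index)
        break
```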

# Fun with face movements
Open any browser game that uses UP-DOWN-LEFT-RIGHT controls, such as the following; you can find many more by searching on Google.
1. Subway Surfers: https://www.kiloo.com/subway-surfers/
2. Temple Run: https://m.plonga.com/adventure/Temple-Run-2-Online-Tablet

### You can do a lot more with small code changes.

### **SUPPORT OPEN SOURCE**

--------------------------------------------------------------------------------
/movement-v1.py:
--------------------------------------------------------------------------------
import cv2
import keyboard

color = {"blue":(255,0,0), "red":(0,0,255), "green":(0,255,0), "white":(255,255,255)}

# Method to detect nose
def detect_nose(img, faceCascade):

    # convert image to gray-scale
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # detecting features in gray-scale image, returns coordinates, width and height of features
    features = faceCascade.detectMultiScale(gray_img, 1.1, 8)
    nose_cords = []
    # mark the nose reference: the centre of the detected face box
    for (x, y, w, h) in features:
        # cv2.rectangle(img, (x,y), (x+w, y+h), color['green'], 2)  # uncomment to see the face boundary
        cv2.circle(img, ((2*x+w)//2, (2*y+h)//2), 10, color['green'], 2)
        nose_cords = ((2*x+w)//2, (2*y+h)//2)
    return img, nose_cords

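# Draw the circular controller boundary and return its bounding-box corners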
def draw_controller(img, cords):
    size = 40
    x1 = cords[0] - size
    y1 = cords[1] - size
    x2 = cords[0] + size
    y2 = cords[1] + size
    cv2.circle(img, cords, size, color['blue'], 2)
    return [(x1, y1), (x2, y2)]

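# Map the nose position against the controller boundary and press the matching arrow key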
def keyboard_events(nose_cords, cords, cmd):
    try:
        [(x1, y1), (x2, y2)] = cords
        xc, yc = nose_cords
    except Exception as e:
        # no nose detected in this frame: keep the previous state
        print(e)
        return img, cmd
    if xc < x1:
        cmd = "left"
    elif xc > x2:
        cmd = "right"
    elif yc < y1:
        cmd = "up"
    elif yc > y2:
        cmd = "down"
    if cmd:
        print("Detected movement: ", cmd, "\n")
        keyboard.press_and_release(cmd)
    return img, cmd
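
# Re-arm the key press once the nose has moved back inside the controller boundary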
def reset_press_flag(nose_cords, cords, cmd):
    try:
        [(x1, y1), (x2, y2)] = cords
        xc, yc = nose_cords
    except:
        return True, cmd
    if x1 < xc < x2 and y1 < yc < y2:
        # nose is back inside the boundary: clear the command and allow the next press
        return True, ""
    return False, cmd

# Loading classifiers
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Capturing real time video stream. 0 is the default webcam; try 1, 2, ... for external cameras
video_capture = cv2.VideoCapture(0)

# get vcap property
width = video_capture.get(3)   # frame width, float
height = video_capture.get(4)  # frame height, float

press_flag = True
cmd = ""

while True:
    # Reading image from video stream
    _, img = video_capture.read()
    img = cv2.flip(img, 1)

    # detect nose and draw the fixed controller boundary at the frame centre
    img, nose_cords = detect_nose(img, faceCascade)
    controller_cords = draw_controller(img, (int(width) // 2, int(height) // 2))
    cv2.putText(img, cmd, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, color['red'], 1, cv2.LINE_AA)

    # press a key once per excursion outside the boundary, then wait for a reset
    if press_flag:
        img, cmd = keyboard_events(nose_cords, controller_cords, cmd)
        if cmd:
            press_flag = False
    else:
        press_flag, cmd = reset_press_flag(nose_cords, controller_cords, cmd)

    # Writing processed image in a new window
    cv2.imshow("face detection", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# releasing web-cam
video_capture.release()
# Destroying output window
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/movement-v2.py:
--------------------------------------------------------------------------------
import cv2
import keyboard
import time

color = {"blue":(255,0,0), "red":(0,0,255), "green":(0,255,0), "white":(255,255,255)}

# Method to detect nose
def detect_nose(img, faceCascade):

    # convert image to gray-scale
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # detecting features in gray-scale image, returns coordinates, width and height of features
    features = faceCascade.detectMultiScale(gray_img, 1.1, 8)
    nose_cords = []
    # mark the nose reference: the centre of the detected face box
    for (x, y, w, h) in features:
        cv2.circle(img, ((2*x+w)//2, (2*y+h)//2), 10, color['green'], 2)
        nose_cords = ((2*x+w)//2, (2*y+h)//2)
    return img, nose_cords

# Get the movement direction from the nose displacement since the previous frame
def get_movement(nose_cords, prev_cords, last_time_update, cmd):
    # need both a current and a previous nose position
    if not (len(nose_cords) > 0 and len(prev_cords) > 0):
        return last_time_update, cmd
    xc, yc = nose_cords
    tc = time.time()
    ox, oy, to = prev_cords
    diffx = xc - ox
    diffy = yc - oy

    # thresholds: minimum displacement (pixels) and maximum elapsed time (seconds) for a deliberate move
    thres_diff = 50
    thres_diff_t = 1

    # debounce: allow at most one key press every 0.4 seconds
    if last_time_update + 0.4 > tc:
        return last_time_update, cmd

    # a deliberate move: large displacement within a short time window
    if (abs(diffx) > thres_diff or abs(diffy) > thres_diff) and abs(tc - to) < thres_diff_t:
        # the dominant axis decides the direction
        if abs(diffx) > abs(diffy):
            if diffx > 0:
                cmd = "right"
            else:
                cmd = "left"
        else:
            if diffy > 0:
                cmd = "down"
            else:
                cmd = "up"
        print("Movement detected: ", cmd, "\n")
        keyboard.press_and_release(cmd)
        last_time_update = time.time()
    return last_time_update, cmd

# Loading classifiers
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Capturing real time video stream. 0 is the default webcam; try 1, 2, ... for external cameras
video_capture = cv2.VideoCapture(0)

# get vcap property
width = video_capture.get(3)   # frame width, float
height = video_capture.get(4)  # frame height, float

cmd = ""
# seed the previous nose position; it is refreshed every frame
prev_cords = (0, 0, time.time())
last_time_update = time.time()

while True:
    # Reading image from video stream
    _, img = video_capture.read()
    img = cv2.flip(img, 1)

    # detect nose and draw
    img, nose_cords = detect_nose(img, faceCascade)
    cv2.putText(img, cmd, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, color['red'], 1, cv2.LINE_AA)

    last_time_update, cmd = get_movement(nose_cords, prev_cords, last_time_update, cmd)

    # remember the current nose position (falls back to (0,0) when no face is found)
    x, y = (0, 0) if len(nose_cords) == 0 else nose_cords
    prev_cords = (x, y, time.time())

    # Writing processed image in a new window
    cv2.imshow("face detection", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# releasing web-cam
video_capture.release()
# Destroying output window
cv2.destroyAllWindows()
--------------------------------------------------------------------------------