├── hand_landmarks.png
├── requirements.txt
├── README.md
├── directkeys.py
├── main.py
└── temple_run.py

/hand_landmarks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Adityapratapsingh28/Game_Automation/HEAD/hand_landmarks.png

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
pandas
numpy
mediapipe
opencv-python
pyautogui
matplotlib
flask
flask-cors

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Game_Automation

### Cloning
1. Run the following command to clone the repository:
```
git clone https://github.com/Adityapratapsingh28/Game_Automation
```
2. Move into the project directory:
```
cd "Game_Automation"
```

### Install the required dependencies:
```
pip install -r requirements.txt
```
### Run the main file:
```
python main.py
```

## Landmarks
![image](https://github.com/user-attachments/assets/f0e1ada5-a44c-4ce6-9b15-4ef83e5c96f0)

## Demo

![image](https://github.com/user-attachments/assets/2b1f0894-4a1c-4980-9654-d27a8807635d)

![image](https://github.com/user-attachments/assets/8471ebae-2e27-4c8c-9d4b-cf0fd6dbc382)

--------------------------------------------------------------------------------
/directkeys.py:
--------------------------------------------------------------------------------
import ctypes
import time

SendInput = ctypes.windll.user32.SendInput

right_pressed = 0x4D
left_pressed = 0x4B
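# 0x4B and 0x4D are keyboard scan codes (keypad left/right arrows), not
# virtual-key codes: PressKey/ReleaseKey below submit them with the
# KEYEVENTF_SCANCODE flag (0x0008), the low-level event type that
# DirectInput-based games read more reliably than ordinary synthetic
# key events.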
# C struct redefinitions for the Win32 SendInput API
PUL = ctypes.POINTER(ctypes.c_ulong)

class KeyBdInput(ctypes.Structure):
    _fields_ = [("wVk", ctypes.c_ushort),
                ("wScan", ctypes.c_ushort),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", PUL)]

class HardwareInput(ctypes.Structure):
    _fields_ = [("uMsg", ctypes.c_ulong),
                ("wParamL", ctypes.c_short),
                ("wParamH", ctypes.c_ushort)]

class MouseInput(ctypes.Structure):
    _fields_ = [("dx", ctypes.c_long),
                ("dy", ctypes.c_long),
                ("mouseData", ctypes.c_ulong),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", PUL)]

class Input_I(ctypes.Union):
    _fields_ = [("ki", KeyBdInput),
                ("mi", MouseInput),
                ("hi", HardwareInput)]

class Input(ctypes.Structure):
    _fields_ = [("type", ctypes.c_ulong),
                ("ii", Input_I)]

def PressKey(hexKeyCode):
    # KEYEVENTF_SCANCODE (0x0008): wScan carries a hardware scan code
    extra = ctypes.c_ulong(0)
    ii_ = Input_I()
    ii_.ki = KeyBdInput(0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra))
    x = Input(ctypes.c_ulong(1), ii_)
    ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))

def ReleaseKey(hexKeyCode):
    # adding KEYEVENTF_KEYUP (0x0002) turns the event into a key release
    extra = ctypes.c_ulong(0)
    ii_ = Input_I()
    ii_.ki = KeyBdInput(0, hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra))
    x = Input(ctypes.c_ulong(1), ii_)
    ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))

if __name__ == '__main__':
    # quick self-test: tap the W key (scan code 0x11) once per second
    while True:
        PressKey(0x11)
        time.sleep(1)
        ReleaseKey(0x11)
        time.sleep(1)

--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import cv2
import mediapipe as mp
import time
from directkeys import right_pressed, left_pressed
from directkeys import PressKey, ReleaseKey

brake_key_pressed = left_pressed
accelerator_key_pressed = right_pressed

time.sleep(2.0)
current_key_pressed = set()

mp_draw = mp.solutions.drawing_utils
mp_hand = mp.solutions.hands

# landmark indices of the five fingertips (thumb, index, middle, ring, pinky)
tipIds = [4, 8, 12, 16, 20]

video = cv2.VideoCapture(0)
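# Gesture-to-key mapping used below: an open palm (all five fingers extended)
# holds the accelerator key, a closed fist (no fingers extended) holds the
# brake key. Held keys are tracked in current_key_pressed and released once
# the gesture disappears.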
with mp_hand.Hands(min_detection_confidence=0.5,
                   min_tracking_confidence=0.5) as hands:
    while True:
        keyPressed = False
        brake_pressed = False
        accelerator_pressed = False
        key_count = 0
        key_pressed = 0
        ret, image = video.read()
        if not ret:
            break
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image.flags.writeable = False
        results = hands.process(image)
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        lmList = []
        if results.multi_hand_landmarks:
            for hand_landmark in results.multi_hand_landmarks:
                # only the first detected hand drives the controls
                myHands = results.multi_hand_landmarks[0]
                for id, lm in enumerate(myHands.landmark):
                    h, w, c = image.shape
                    cx, cy = int(lm.x * w), int(lm.y * h)
                    lmList.append([id, cx, cy])
                mp_draw.draw_landmarks(image, hand_landmark, mp_hand.HAND_CONNECTIONS)
        fingers = []
        if len(lmList) != 0:
            # thumb: compare x coordinates, since it extends sideways
            if lmList[tipIds[0]][1] > lmList[tipIds[0] - 1][1]:
                fingers.append(1)
            else:
                fingers.append(0)
            # other fingers: tip above the joint two landmarks below it
            for id in range(1, 5):
                if lmList[tipIds[id]][2] < lmList[tipIds[id] - 2][2]:
                    fingers.append(1)
                else:
                    fingers.append(0)
            total = fingers.count(1)
            if total == 0:
                cv2.rectangle(image, (20, 300), (270, 425), (0, 255, 0), cv2.FILLED)
                cv2.putText(image, "BRAKE", (45, 375), cv2.FONT_HERSHEY_SIMPLEX,
                            2, (255, 0, 0), 5)
                PressKey(brake_key_pressed)
                brake_pressed = True
                current_key_pressed.add(brake_key_pressed)
                key_pressed = brake_key_pressed
                keyPressed = True
                key_count = key_count + 1
            elif total == 5:
                cv2.rectangle(image, (20, 300), (270, 425), (0, 255, 0), cv2.FILLED)
                cv2.putText(image, " GAS", (45, 375), cv2.FONT_HERSHEY_SIMPLEX,
                            2, (255, 0, 0), 5)
                PressKey(accelerator_key_pressed)
                key_pressed = accelerator_key_pressed
                accelerator_pressed = True
                keyPressed = True
                current_key_pressed.add(accelerator_key_pressed)
                key_count = key_count + 1
        if not keyPressed and len(current_key_pressed) != 0:
            # no gesture this frame: release everything still held
            for key in current_key_pressed:
                ReleaseKey(key)
            current_key_pressed = set()
        elif key_count == 1 and len(current_key_pressed) == 2:
            # gesture switched: release the key that is no longer wanted,
            # but keep tracking the one still held
            for key in current_key_pressed:
                if key_pressed != key:
                    ReleaseKey(key)
            current_key_pressed = {key_pressed}

        cv2.imshow("Frame", image)
        k = cv2.waitKey(1)
        if k == ord('q'):
            break

video.release()
cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/temple_run.py:
--------------------------------------------------------------------------------
import pyautogui
import math
import cv2
import numpy as np
import time
import mediapipe as mp

mp_pose = mp.solutions.pose
pose = mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.3, model_complexity=2)
mp_drawing = mp.solutions.drawing_utils

def detectPose(image, pose, blankImage=False):

    output_image = image.copy()

    if blankImage:
        blank_image = np.zeros((720, 1920, 3), np.uint8)
        output_image = blank_image

    imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    results = pose.process(imageRGB)

    height, width, _ = image.shape

    landmarks = []

    if results.pose_landmarks:
        mp_drawing.draw_landmarks(image=output_image, landmark_list=results.pose_landmarks,
                                  connections=mp_pose.POSE_CONNECTIONS)

        for landmark in results.pose_landmarks.landmark:
            landmarks.append((int(landmark.x * width), int(landmark.y * height),
                              (landmark.z * width)))
    return output_image, landmarks, results


def calculateAngle(landmark1, landmark2, landmark3):
    # angle at landmark2 between the segments towards landmark1 and landmark3
    x1, y1, _ = landmark1
    x2, y2, _ = landmark2
    x3, y3, _ = landmark3
    angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 - y2, x1 - x2))
    if angle < 0:
        angle += 360
    return angle
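# Worked example: with landmark1 = (0, 0, 0), landmark2 = (0, 100, 0) and
# landmark3 = (100, 100, 0), the expression evaluates to
# degrees(atan2(0, 100) - atan2(-100, 0)) = 0 - (-90) = 90, i.e. a right
# angle at landmark2. Negative results are wrapped into [0, 360).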
def classifyPose(landmarks, output_image):

    label = 'Unknown Pose'
    color = (0, 0, 255)

    # joint angles used by the classification rules below
    left_elbow_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value],
                                      landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value],
                                      landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value])

    right_elbow_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value],
                                       landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value],
                                       landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value])

    left_shoulder_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value],
                                         landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value],
                                         landmarks[mp_pose.PoseLandmark.LEFT_HIP.value])

    right_shoulder_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value],
                                          landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value],
                                          landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value])

    left_knee_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_HIP.value],
                                     landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value],
                                     landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value])

    right_knee_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value],
                                      landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value],
                                      landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value])

    # both arms straight...
    if left_elbow_angle > 165 and left_elbow_angle < 195 and right_elbow_angle > 165 and right_elbow_angle < 195:

        # ...and raised to shoulder level
        if left_shoulder_angle > 80 and left_shoulder_angle < 110 and right_shoulder_angle > 80 and right_shoulder_angle < 110:

            # one leg straight and the other bent: Warrior II
            if left_knee_angle > 165 and left_knee_angle < 195 or right_knee_angle > 165 and right_knee_angle < 195:

                if left_knee_angle > 90 and left_knee_angle < 120 or right_knee_angle > 90 and right_knee_angle < 120:

                    label = 'Warrior II Pose'

            # both legs straight: T pose
            if left_knee_angle > 160 and left_knee_angle < 195 and right_knee_angle > 160 and right_knee_angle < 195:

                label = 'T Pose'

    # one leg straight and the other folded in: Tree pose
    if left_knee_angle > 165 and left_knee_angle < 195 or right_knee_angle > 165 and right_knee_angle < 195:

        if left_knee_angle > 315 and left_knee_angle < 335 or right_knee_angle > 25 and right_knee_angle < 45:

            label = 'Tree Pose'

    if label != 'Unknown Pose':
        color = (0, 255, 0)

    cv2.putText(output_image, label, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, color, 2)

    return output_image, label


def checkHandsJoined(img, results, draw=False):
    height, width, _ = img.shape

    output_img = img.copy()

    left_wrist_landmark = (results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_WRIST].x * width,
                           results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_WRIST].y * height)
    right_wrist_landmark = (results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_WRIST].x * width,
                            results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_WRIST].y * height)

    # Euclidean distance between the two wrists, in pixels
    distance = int(math.hypot(left_wrist_landmark[0] - right_wrist_landmark[0],
                              left_wrist_landmark[1] - right_wrist_landmark[1]))

    if distance < 130:
        hand_status = 'Hands Joined'
        color = (0, 255, 0)
    else:
        hand_status = 'Hands Not Joined'
        color = (0, 0, 255)

    if draw:
        cv2.putText(output_img, hand_status, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, color, 3)
        cv2.putText(output_img, f'Distance: {distance}', (10, 70), cv2.FONT_HERSHEY_PLAIN, 2, color, 3)

    return output_img, hand_status


def checkLeftRight(img, results, draw=False):

    horizontal_position = None

    height, width, c = img.shape

    output_image = img.copy()

    # the frame is mirrored with cv2.flip in the main loop, so the user's
    # right shoulder appears on the left side of the image and vice versa
    left_x = int(results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER].x * width)
    right_x = int(results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER].x * width)

    if (right_x <= width // 2 and left_x <= width // 2):
        horizontal_position = 'Left'
    elif (right_x >= width // 2 and left_x >= width // 2):
        horizontal_position = 'Right'
    elif (right_x >= width // 2 and left_x <= width // 2):
        horizontal_position = 'Center'

    if draw:
        cv2.putText(output_image, horizontal_position, (5, height - 10), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 3)
        cv2.line(output_image, (width // 2, 0), (width // 2, height), (255, 255, 255), 2)

    return output_image, horizontal_position
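# Control scheme implemented in the __main__ block below: join both hands for
# ten consecutive frames to start the game (a later join presses space), step
# left or right of the centre line to change lanes, raise the shoulders above
# the calibrated midline MID_Y to jump, and drop below it to crouch.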
def checkJumpCrouch(img, results, MID_Y=250, draw=False):

    height, width, _ = img.shape

    output_image = img.copy()

    left_y = int(results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER].y * height)
    right_y = int(results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER].y * height)

    actual_mid_y = abs(right_y + left_y) // 2

    # shoulder midline well above the calibrated baseline means a jump,
    # well below it a crouch, anything in between counts as standing
    lower_bound = MID_Y - 15
    upper_bound = MID_Y + 100

    if actual_mid_y < lower_bound:
        posture = 'Jumping'
    elif actual_mid_y > upper_bound:
        posture = 'Crouching'
    else:
        posture = 'Standing'

    if draw:
        cv2.putText(output_image, posture, (5, height - 50), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 3)
        cv2.line(output_image, (0, MID_Y), (width, MID_Y), (255, 255, 255), 2)

    return output_image, posture


if __name__ == '__main__':

    pose_video = mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.5, model_complexity=1)

    cap = cv2.VideoCapture(0)
    cap.set(3, 640)   # capture width
    cap.set(4, 360)   # capture height
    pTime = 0

    game_started = False
    x_pos_index = 1
    y_pos_index = 1
    MID_Y = None
    counter = 0
    num_of_frames = 10

    while True:
        success, img = cap.read()
        if not success:
            break
        img = cv2.flip(img, 1)
        h, w, _ = img.shape
        img, landmarks, results = detectPose(img, pose_video)
        if landmarks:
            if game_started:
                img, horizontal_position = checkLeftRight(img, results, draw=True)
                if (horizontal_position == 'Left' and x_pos_index != 0) or (horizontal_position == 'Center' and x_pos_index == 2):
                    pyautogui.press('left')
                    x_pos_index -= 1
                elif (horizontal_position == 'Right' and x_pos_index != 2) or (horizontal_position == 'Center' and x_pos_index == 0):
                    pyautogui.press('right')
                    x_pos_index += 1
            else:
                cv2.putText(img, 'JOIN BOTH HANDS TO START THE GAME.', (5, h - 10), cv2.FONT_HERSHEY_PLAIN,
                            2, (0, 255, 0), 3)

            if checkHandsJoined(img, results)[1] == 'Hands Joined':
                counter += 1
                if counter == num_of_frames:
                    if not game_started:
                        game_started = True
                        # calibrate the standing shoulder midline for jump/crouch detection
                        left_y = int(results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER].y * h)
                        right_y = int(results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER].y * h)
                        MID_Y = abs(right_y + left_y) // 2
                        # click the game window to focus and start it
                        # (coordinates assume a particular screen layout)
                        pyautogui.click(x=1300, y=800, button='left')
                    else:
                        pyautogui.press('space')
                    counter = 0
            else:
                counter = 0

            if MID_Y:
                img, posture = checkJumpCrouch(img, results, MID_Y, draw=True)
                if posture == 'Jumping' and y_pos_index == 1:
                    pyautogui.press('up')
                    y_pos_index += 1
                elif posture == 'Crouching' and y_pos_index == 1:
                    pyautogui.press('down')
                    y_pos_index -= 1
                elif posture == 'Standing' and y_pos_index != 1:
                    y_pos_index = 1
                    print(posture)
        else:
            counter = 0

        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)

        cv2.imshow('Game', img)
        k = cv2.waitKey(1) & 0xFF
        if k == 27 or k == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
--------------------------------------------------------------------------------
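Note: directkeys.py drives games through the Win32 SendInput API
(ctypes.windll.user32), so main.py only runs on Windows; temple_run.py sends
its keystrokes through pyautogui instead. temple_run.py has its own
__main__ entry point, so it can be launched directly in place of main.py:
```
python temple_run.py
```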