├── README.md
├── PoseEstimationModule.py
└── My AI Trainer.py

/README.md:
--------------------------------------------------------------------------------
This project uses computer vision to analyze workouts in real time and give personalized feedback. Key features include pose estimation, interactive visuals, and exercise recognition and counting.

Demo link: https://www.linkedin.com/feed/update/urn:li:activity:7163440662342819840/

--------------------------------------------------------------------------------
/PoseEstimationModule.py:
--------------------------------------------------------------------------------
import cv2
import mediapipe as mp
import time
import math


class poseDetector():
    """Thin wrapper around MediaPipe Pose: landmark detection plus distance/angle helpers."""

    def __init__(self):
        self.mpPose = mp.solutions.pose
        self.pose = self.mpPose.Pose(static_image_mode=False,
                                     model_complexity=0,
                                     smooth_landmarks=True,
                                     enable_segmentation=False,
                                     smooth_segmentation=True,
                                     min_detection_confidence=0.5,
                                     min_tracking_confidence=0.5)
        self.mpDraw = mp.solutions.drawing_utils

    def findPose(self, img, draw=True):
        # MediaPipe expects RGB input, while OpenCV captures frames in BGR.
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.pose.process(imgRGB)
        if self.results.pose_landmarks and draw:
            self.mpDraw.draw_landmarks(img, self.results.pose_landmarks,
                                       self.mpPose.POSE_CONNECTIONS)
        return img

    def getPosition(self, img, draw=True):
        # Convert normalized landmark coordinates to pixel coordinates as [id, x, y].
        self.lmList = []
        if self.results.pose_landmarks:
            h, w, c = img.shape
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                cx, cy = int(lm.x * w), int(lm.y * h)
                self.lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (0, 255, 0), cv2.FILLED)
        return self.lmList

    def findDistance(self, p1, p2, img=None, color=(255, 0, 255), scale=5):
        # Euclidean distance between two (x, y) points, optionally drawn on img.
        x1, y1 = p1
        x2, y2 = p2
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
        length = math.hypot(x2 - x1, y2 - y1)
        info = (x1, y1, x2, y2, cx, cy)

        if img is not None:
            cv2.circle(img, (x1, y1), 10, color, cv2.FILLED)
            cv2.circle(img, (x2, y2), 10, color, cv2.FILLED)
            cv2.line(img, (x1, y1), (x2, y2), color, max(1, scale // 3))
            cv2.circle(img, (cx, cy), 10, color, cv2.FILLED)

        return length, info, img

    def findAngle(self, img, p1, p2, p3, draw=True):
        # Angle (in degrees, normalized to 0-360) at landmark p2, formed by p1-p2-p3.
        x1, y1 = self.lmList[p1][1:]
        x2, y2 = self.lmList[p2][1:]
        x3, y3 = self.lmList[p3][1:]

        angle = math.degrees(math.atan2(y3 - y2, x3 - x2) -
                             math.atan2(y1 - y2, x1 - x2))
        if angle < 0:
            angle += 360
        if draw:
            cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 3)
            cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 3)
            for x, y in ((x1, y1), (x2, y2), (x3, y3)):
                cv2.circle(img, (x, y), 5, (255, 0, 0), cv2.FILLED)
                cv2.circle(img, (x, y), 5, (255, 0, 0), 2)
            cv2.putText(img, str(int(angle)), (x2 - 20, y2 + 50),
                        cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)
        return angle


def main():
    # Quick self-test: run pose detection on the webcam feed and show the FPS.
    cap = cv2.VideoCapture(0)
    pTime = 0
    detector = poseDetector()
    while True:
        success, img = cap.read()
        if not success:
            break
        img = cv2.resize(img, (900, 600))
        detector.findPose(img)
        lmList = detector.getPosition(img)
        print(lmList)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (70, 50),
                    cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == '__main__':
    main()

--------------------------------------------------------------------------------
/My AI Trainer.py:
--------------------------------------------------------------------------------
import cv2
import PoseEstimationModule as pem
import time
import mediapipe as mp
import os
import numpy as np

# Load the background/overlay images that are swapped in for each exercise.
path = "New Images"
Images = os.listdir(path)
print(Images)
List = []
for i in Images:
    im = cv2.imread(path + '/' + i)
    List.append(im)

cap = cv2.VideoCapture(0)
bg_img = List[0]
detector = pem.poseDetector()

# Rep counters per exercise, plus cooldown counters to avoid double counting.
jump_cnt, pushups_cnt, twister_cnt, jj_cnt, sq_cnt, dumb_cnt = 0, 0, 0, 0, 0, 0
delay_counter = 0
delay_counter_2 = 0
bar, per = 700, 0

while True:
    success, img = cap.read()
    if not success:
        break
    # img2 is the mirrored camera view embedded in the HUD; img is the full canvas.
    img2 = cv2.resize(img, (1000, 700))
    img2 = cv2.flip(img2, 1)
    img = cv2.resize(img, (1500, 800))
    img2 = detector.findPose(img2, draw=False)
    lmList = detector.getPosition(img2, draw=False)
    print(lmList)
    if lmList:
        # Elbow angle (shoulder-elbow-wrist); side names below refer to the mirrored view.
        angle = detector.findAngle(img2, 11, 13, 15, draw=False)
        left_hand, right_hand = lmList[18][1:], lmList[17][1:]
        left_leg, right_leg = lmList[28][1:], lmList[27][1:]
        left_hip, right_hip = lmList[24][1:], lmList[23][1:]
        left_leg_knee, right_leg_knee = lmList[26][1:], lmList[25][1:]
        left_hand_knee, right_hand_knee = lmList[14][1:], lmList[13][1:]
        # Distances measured for additional exercises; not used by the current logic.
        crunch_dist1, _, _ = detector.findDistance(left_hand_knee, right_leg_knee)
        crunch_dist2, _, _ = detector.findDistance(right_hand_knee, left_leg_knee)
        cross_dist1, _, _ = detector.findDistance(left_hand, right_leg)
        cross_dist2, _, _ = detector.findDistance(right_hand, right_leg)
        hand_dist, _, _ = detector.findDistance(left_hand, right_hand)
        eye = lmList[1][1:]
        if delay_counter == 0:
            if eye[1] < 70:
                # Head near the top of the frame: count a jump.
                bg_img = List[2]
                per = 100
                bar = 200
                jump_cnt += 1
            elif left_hand[0] > right_hip[0] and left_hand[1] < right_hip[1]:
                # Hand crosses above the opposite hip: count a twister.
                per = 100
                bar = 200
                twister_cnt += 1
                bg_img = List[4]
            elif left_hand[0] < 300 and right_hand[0] > 650:
                # Both hands spread wide: count a jumping jack.
                per = 100
                bar = 200
                jj_cnt += 1
                bg_img = List[3]
        if delay_counter_2 == 0:
            # Map the elbow angle to a progress percentage and bar height.
            per = np.interp(angle, (240, 320), (0, 100))
            bar = np.interp(angle, (240, 320), (700, 200))
            if eye[1] > 400:
                # Head low in the frame: treat the movement as a push-up.
                per = np.interp(angle, (200, 250), (0, 100))
                bar = np.interp(angle, (200, 250), (700, 200))
                if angle > 250:
                    pushups_cnt += 1
                    bg_img = List[5]
            elif eye[1] < 400 and abs(left_hip[1] - left_leg_knee[1]) <= 60:
                # Hip close to knee height: count a squat.
                dist = -abs(left_hip[1] - left_leg_knee[1])
                per = np.interp(dist, (-100, -60), (0, 100))
                bar = np.interp(dist, (-100, -60), (700, 200))
                sq_cnt += 1
                bg_img = List[6]
            elif angle >= 320:
                # Arm fully curled: count a dumbbell rep.
                dumb_cnt += 1
                bg_img = List[1]
        # Advance the cooldowns; counting is re-enabled only when they wrap back to zero.
        delay_counter += 1
        delay_counter_2 += 1
        if delay_counter == 9:
            delay_counter = 0
        if delay_counter_2 == 15:
            delay_counter_2 = 0
    # Compose the HUD: background image, mirrored camera view, progress bar, counters.
    bg_img = cv2.resize(bg_img, (1500, 800))
    img[:800, :1500] = bg_img
    img[100:800, 250:1250] = img2
    cv2.rectangle(img, (1100, 200), (1200, 700), (0, 255, 0), 4)
    cv2.rectangle(img, (1100, int(bar)), (1200, 700), (0, 255, 255), cv2.FILLED)
    cv2.putText(img, str(int(per)) + "%", (1100, 170), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
    cv2.putText(img, str(jj_cnt), (30, 730), cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 0), 5)
    cv2.putText(img, str(jump_cnt), (30, 480), cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 0), 5)
    cv2.putText(img, str(dumb_cnt), (30, 260), cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 0), 5)
    cv2.putText(img, str(twister_cnt), (1360, 260), cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 0), 5)
    cv2.putText(img, str(pushups_cnt), (1360, 480), cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 0), 5)
    cv2.putText(img, str(sq_cnt), (1360, 730), cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 0), 5)
    cv2.imshow("My AI Trainer", img)
    cv2.waitKey(1)
--------------------------------------------------------------------------------
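
Usage note (a minimal sketch, not one of the repository files above): the rep-counting pattern in My AI Trainer.py can be reduced to the poseDetector helpers alone. The example below maps the elbow angle from findAngle to a 0-100% progress value with np.interp and counts a curl each time the movement completes a full down-up cycle; the angle thresholds (210 and 310 degrees) are illustrative assumptions and would need tuning for a given camera setup.

# Hypothetical standalone example: a bicep-curl counter built on PoseEstimationModule.
# The angle thresholds below are assumptions for illustration, not values from the repo.
import cv2
import numpy as np
import PoseEstimationModule as pem

cap = cv2.VideoCapture(0)
detector = pem.poseDetector()
count, direction = 0, 0  # direction: 0 = curling up, 1 = lowering down

while True:
    success, img = cap.read()
    if not success:
        break
    img = detector.findPose(img, draw=False)
    lmList = detector.getPosition(img, draw=False)
    if lmList:
        # Elbow angle at landmark 13, between shoulder (11) and wrist (15).
        angle = detector.findAngle(img, 11, 13, 15)
        # Map the angle range to a completion percentage (assumed thresholds).
        per = np.interp(angle, (210, 310), (0, 100))
        # Count half a rep at each end of the movement.
        if per == 100 and direction == 0:
            count += 0.5
            direction = 1
        if per == 0 and direction == 1:
            count += 0.5
            direction = 0
    cv2.putText(img, "Reps: " + str(int(count)), (40, 70),
                cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
    cv2.imshow("Curl Counter", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()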