├── .DS_Store ├── Offside_detection.py ├── README.md ├── images ├── dnt1.png ├── dnt2.png ├── dnt3.png ├── dnt4.png ├── point1.png ├── point2.png ├── point3.png ├── point4.png ├── pt1.png ├── pt2.png ├── pt3.png └── result.png ├── soccer_half_field.jpeg ├── track_utils.py └── vid8.mp4 /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/.DS_Store -------------------------------------------------------------------------------- /Offside_detection.py: -------------------------------------------------------------------------------- 1 | # USAGE 2 | # To detect offside in a pre-recorded video, type the following in terminal: 3 | # python Offside_detection.py -v 'name of video file' 4 | # 5 | # To detect offside from live camera feed: 6 | # python Offside_detection.py 7 | # 8 | # while running the program, press 'i' to input and 'q' to quit 9 | 10 | import cv2 11 | import numpy as np 12 | import track_utils 13 | from collections import deque 14 | import math 15 | 16 | frame = None 17 | orig_frame = None 18 | roi_hist_A, roi_hist_B = None, None 19 | roi = None 20 | 21 | team = None 22 | 23 | 24 | teamA = np.array([]) 25 | teamB = np.array([]) 26 | teamB_new = np.array([]) 27 | teamA_new = np.array([]) 28 | pts = [] 29 | 30 | minDist = 0 31 | prevTeam = None 32 | prevPasser = -1 33 | 34 | M = None 35 | op = None 36 | limits = None 37 | ball_center = None 38 | 39 | kernel = np.array([[0, 0, 1, 1, 0, 0], 40 | [0, 0, 1, 1, 0, 0], 41 | [1, 1, 1, 1, 1, 1], 42 | [1, 1, 1, 1, 1, 1], 43 | [0, 0, 1, 1, 0, 0], 44 | [0, 0, 1, 1, 0, 0]], dtype=np.uint8) 45 | 46 | prevgrad = 0 47 | passes = 0 48 | 49 | vel, prev_vel = 0, 0 50 | 51 | pts_ball = deque() 52 | 53 | 54 | def trackBall(): 55 | 56 | global grad, prevgrad, passes, ball_center, pts_ball, frame, vel, prev_vel, prevPasser, prevTeam, minDist 57 | pts_ball.appendleft(ball_center) 58 | 59 | if 
len(pts_ball) > 2: 60 | if len(pts_ball) > 20: 61 | 62 | for i in xrange(1, 20): 63 | cv2.line(frame, pts_ball[i - 1], pts_ball[i], (0, 0, 255), 2, cv2.LINE_AA) 64 | else: 65 | for i in xrange(1, len(pts_ball)): 66 | cv2.line(frame, pts_ball[i - 1], pts_ball[i], (0, 0, 255), 2, cv2.LINE_AA) 67 | 68 | 69 | l = len(pts_ball) 70 | if l >= 10: 71 | grad = np.arctan2((pts_ball[9][1] - pts_ball[0][1]), (pts_ball[9][0] - pts_ball[0][0])) 72 | grad = grad * (180.0 / np.pi) 73 | grad %= 360 74 | 75 | vel = math.sqrt((pts_ball[9][1] - pts_ball[0][1]) ** 2 + (pts_ball[9][0] - pts_ball[0][0]) ** 2) / 10 76 | if (math.fabs(grad - prevgrad) >= 20): 77 | # or math.fabs(vel-prev_vel) >= 7: 78 | # detectPlayers() 79 | # print("a " + str(len(teamA)) + " b " + str(len(teamB))) 80 | if len(teamA) != 0 and len(teamB) != 0: 81 | getCoordinates() 82 | detectPasser() 83 | 84 | # print(passerIndex) 85 | 86 | if ((prevTeam != team) or (passerIndex != prevPasser)) and minDist < 10000: 87 | # print(minDist) 88 | # print(str(team) + str(passerIndex)) 89 | if (team == 'A'): 90 | 91 | detectOffside() 92 | else: 93 | print('Not offside') 94 | 95 | passes += 1 96 | #print('Ball Passed ' + str(passes)) 97 | prevPasser = passerIndex 98 | prevTeam = team 99 | 100 | 101 | prevgrad = grad 102 | prev_vel = vel 103 | 104 | 105 | 106 | def detectPlayers(): 107 | global frame, roi_hist_A, roi_hist_B, teamA, teamB 108 | teamA = [] 109 | teamB = [] 110 | 111 | hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) 112 | cnt_thresh = 180 113 | if roi_hist_A is not None: 114 | backProjA = cv2.calcBackProject([hsv], [0, 1], roi_hist_A, [0, 180, 0, 256], 1) 115 | maskA = track_utils.applyMorphTransforms2(backProjA) 116 | #cv2.imshow('mask a', maskA) 117 | 118 | cnts = cv2.findContours(maskA.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2] 119 | 120 | if len(cnts) > 0: 121 | c = sorted(cnts, key=cv2.contourArea, reverse=True) 122 | for i in range(len(c)): 123 | if cv2.contourArea(c[i]) < cnt_thresh: 124 | break 125 
| 126 | x, y, w, h = cv2.boundingRect(c[i]) 127 | h += 5 128 | y -= 5 129 | if h < 0.8 * w: 130 | continue 131 | elif h / float(w) > 3: 132 | continue 133 | 134 | cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2) 135 | M = cv2.moments(c[i]) 136 | center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) 137 | foot = (center[0], int(center[1] + h * 1.5)) 138 | teamA.append(foot) 139 | cv2.circle(frame, foot, 5, (0, 0, 255), -1) 140 | if roi_hist_B is not None: 141 | backProjB = cv2.calcBackProject([hsv], [0, 1], roi_hist_B, [0, 180, 0, 256], 1) 142 | maskB = track_utils.applyMorphTransforms2(backProjB) 143 | #cv2.imshow('mask b', maskB) 144 | 145 | cnts = cv2.findContours(maskB.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2] 146 | 147 | if len(cnts) > 0: 148 | c = sorted(cnts, key=cv2.contourArea, reverse=True) 149 | for i in range(len(c)): 150 | if cv2.contourArea(c[i]) < cnt_thresh: 151 | break 152 | x, y, w, h = cv2.boundingRect(c[i]) 153 | h += 5 154 | y -= 5 155 | if h < 0.9 * w: 156 | continue 157 | elif h / float(w) > 3: 158 | continue 159 | 160 | cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) 161 | M = cv2.moments(c[i]) 162 | center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) 163 | 164 | foot = (center[0], int(center[1] + h * 1.2)) 165 | 166 | teamB.append(foot) 167 | cv2.circle(frame, foot, 5, (0, 0, 255), -1) 168 | 169 | 170 | def selectPoints(event, x, y, flag, param): 171 | global pts, frame, orig_frame 172 | 173 | if event == cv2.EVENT_LBUTTONUP: 174 | if len(pts) < 8: 175 | pts.append([x, y]) 176 | cv2.circle(frame, (x, y), 5, (0, 0, 255), -1) 177 | else: 178 | print('You have already selected 4 points') 179 | 180 | 181 | def getBoundaryPoints(): 182 | global frame, pts 183 | end_pts = [] 184 | cv2.namedWindow('input field') 185 | cv2.setMouseCallback('input field', selectPoints) 186 | while True: 187 | cv2.imshow('input field', frame) 188 | key = cv2.waitKey(1) & 0xFF 189 | if len(pts) >= 8: 190 | pts = 
np.array(pts, dtype=np.float32) 191 | pts[:, 1] *= (-1) 192 | for i in range(0, 5, 2): 193 | m1 = (pts[i + 1][1] - pts[i][1]) / (pts[i + 1][0] - pts[i][0]) 194 | m2 = (pts[i + 3][1] - pts[i + 2][1]) / (pts[i + 3][0] - pts[i + 2][0]) 195 | A = np.array([[m1, -1], [m2, -1]]) 196 | A_inv = np.linalg.inv(A) 197 | B = np.array([pts[i][1] - m1 * pts[i][0], pts[i + 2][1] - m2 * pts[i + 2][0]]) 198 | B *= (-1) 199 | p = np.dot(A_inv, B) 200 | end_pts.append(np.int16(p)) 201 | m1 = (pts[7][1] - pts[6][1]) / (pts[7][0] - pts[6][0]) 202 | m2 = (pts[1][1] - pts[0][1]) / (pts[1][0] - pts[0][0]) 203 | A = np.array([[m1, -1], [m2, -1]]) 204 | A_inv = np.linalg.inv(A) 205 | B = np.array([pts[6][1] - m1 * pts[6][0], pts[0][1] - m2 * pts[0][0]]) 206 | B *= (-1) 207 | p = np.dot(A_inv, B) 208 | end_pts.append(np.int16(p)) 209 | end_pts = np.array(end_pts) 210 | end_pts[:, 1] *= (-1) 211 | break 212 | elif key == ord("q"): 213 | break 214 | cv2.destroyWindow('input field') 215 | return end_pts 216 | 217 | 218 | def getCoordinates(): 219 | 220 | global M, teamA, teamB, op, teamB_new, teamA_new, ball_new, ball_center 221 | teamB_new = np.array([]) 222 | teamA_new = np.array([]) 223 | op = orig_op.copy() 224 | if ball_center is not None: 225 | new = np.dot(M, [ball_center[0], ball_center[1], 1]) 226 | ball_new = [new[0] / new[2], new[1] / new[2]] 227 | op = cv2.circle(op, (int(ball_new[0]), int(ball_new[1])), 3, (255, 0, 0), -1) 228 | 229 | if len(teamB) > 0: 230 | for i in range(len(teamB)): 231 | new_pt = np.dot(M, [teamB[i][0], teamB[i][1], 1]) 232 | teamB_new = np.append(teamB_new, [new_pt[0] / new_pt[2], new_pt[1] / new_pt[2]]) 233 | 234 | teamB_new = np.int16(teamB_new).reshape(-1, 2) 235 | for i in range(len(teamB)): 236 | op = cv2.circle(op, (teamB_new[i][0], teamB_new[i][1]), 5, (0, 255, 0), -1) 237 | 238 | if len(teamA) > 0: 239 | for i in range(len(teamA)): 240 | new_pt = np.dot(M, [teamA[i][0], teamA[i][1], 1]) 241 | teamA_new = np.append(teamA_new, [new_pt[0] / new_pt[2], 
new_pt[1] / new_pt[2]]) 242 | 243 | teamA_new = np.int16(teamA_new).reshape(-1, 2) 244 | for i in range(len(teamA)): 245 | op = cv2.circle(op, (teamA_new[i][0], teamA_new[i][1]), 5, (0, 0, 255), -1) 246 | 247 | 248 | def drawOffsideLine(): 249 | global M, teamB_new, op, frame 250 | if len(teamB_new) > 0: 251 | M_inv = np.linalg.inv(M) 252 | last_def = np.argmin(teamB_new[:,0]) 253 | p1 = np.dot(M_inv, [teamB_new[last_def][0], 0, 1]) 254 | p2 = np.dot(M_inv, [teamB_new[last_def][0], op.shape[0] - 1, 1]) 255 | 256 | pts = [(int(p1[0] / p1[2]), int(p1[1] / p1[2])), (int(p2[0] / p2[2]), int(p2[1] / p2[2]))] 257 | frame = cv2.line(frame, pts[0], pts[1], (255, 0, 0), 2) 258 | 259 | 260 | def closest_node(node, nodes): 261 | nodes = np.asarray(nodes) 262 | node = np.array([node[0], node[1]]) 263 | # print(nodes) 264 | # print(node) 265 | dist_2 = np.sum((nodes - node) ** 2, axis=1) 266 | 267 | return np.argmin(dist_2) 268 | 269 | 270 | def detectPasser(): 271 | global ball_new, teamA, teamB, passerIndex, team, minDist 272 | teamA_min_ind = closest_node(ball_new, teamA_new) 273 | teamB_min_ind = closest_node(ball_new, teamB_new) 274 | # print(np.asarray(teamA[teamA_min_ind])) 275 | # print(np.asarray(ball_center)) 276 | teamA_min = np.sum(([np.asarray(teamA_new[teamA_min_ind])] - np.asarray(ball_new)) ** 2, axis=1) 277 | teamB_min = np.sum(([np.asarray(teamB_new[teamB_min_ind])] - np.asarray(ball_new)) ** 2, axis=1) 278 | minDist = min(teamB_min, teamA_min) 279 | if (teamA_min < teamB_min): 280 | # print("Ball passed by TeamA player") 281 | 282 | passerIndex = teamA_min_ind 283 | # print(passerIndex) 284 | team = 'A' 285 | else: 286 | # print("Ball passed by TeamB player") 287 | passerIndex = teamB_min_ind 288 | team = 'B' 289 | 290 | 291 | def detectOffside(): 292 | global teamA_new, teamB_new, passerIndex 293 | if len(teamB_new) > 0: 294 | if len(teamA_new) > 0: 295 | # teamA_new.sort() 296 | teamB_new.sort() 297 | # print(teamA_new) 298 | if (teamB_new[0][0] > 
teamA_new[passerIndex][0]): 299 | # if (teamB[0][0] > teamA[passerIndex][0]): 300 | # print(passerIndex) 301 | # Assuming no goalie 302 | print('Offside') 303 | else: 304 | print('Not Offside') 305 | else: 306 | print('Not Offside') 307 | else: 308 | print('Not Offside') 309 | 310 | 311 | 312 | if __name__ == '__main__': 313 | args = track_utils.getArguements() 314 | 315 | if not args.get("video", False): 316 | camera = cv2.VideoCapture(0) 317 | else: 318 | camera = cv2.VideoCapture(args["video"]) 319 | 320 | orig_op = cv2.imread('soccer_half_field.jpeg') 321 | op = orig_op.copy() 322 | fgbg = cv2.createBackgroundSubtractorMOG2(history=20, detectShadows=False) 323 | flag = False 324 | 325 | while True: 326 | (grabbed, frame) = camera.read() 327 | 328 | if args.get("video") and not grabbed: 329 | break 330 | 331 | frame = track_utils.resize(frame, width=400) 332 | 333 | orig_frame = frame.copy() 334 | 335 | frame2 = track_utils.removeBG(orig_frame.copy(), fgbg) 336 | 337 | detectPlayers() 338 | 339 | if roi is not None: 340 | ball_center, cnt = track_utils.detectBallThresh(frame2, limits) 341 | if cnt is not None: 342 | (x, y), radius = cv2.minEnclosingCircle(cnt) 343 | cv2.circle(frame, (int(x), int(y)), int(radius), (255, 255, 0), 2) 344 | cv2.circle(frame, ball_center, 2, (0, 0, 255), -1) 345 | trackBall() 346 | 347 | if M is not None: 348 | src = np.int32(src) 349 | 350 | for i in range(4): 351 | frame = cv2.circle(frame.copy(), (src[i][0], src[i][1]), 3, (255, 0, 255), -1) 352 | 353 | cv2.polylines(frame, np.int32([src]), True, (255, 0, 0), 2, cv2.LINE_AA) 354 | 355 | getCoordinates() 356 | 357 | drawOffsideLine() 358 | 359 | cv2.imshow('camera view', frame) 360 | cv2.imshow('top view', op) 361 | 362 | if flag: 363 | t = 1 364 | else: 365 | t = 100 366 | 367 | key = cv2.waitKey(t) & 0xFF 368 | 369 | if key == ord("q"): 370 | break 371 | elif key == ord('i') and (roi_hist_A is None or roi_hist_B is None): 372 | flag = True 373 | roi_hist_A, roi_hist_B = 
track_utils.getHist(frame) 374 | 375 | roi = track_utils.getROIvid(orig_frame, 'input ball') 376 | if roi is not None: 377 | limits = track_utils.getLimits(roi) 378 | 379 | src = getBoundaryPoints() 380 | src = np.float32(src) 381 | dst = np.float32([[0, 0], [0, op.shape[0]], [op.shape[1], op.shape[0]], [op.shape[1], 0]]) 382 | M = cv2.getPerspectiveTransform(src, dst) 383 | 384 | camera.release() 385 | cv2.destroyAllWindows() 386 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Offside Detection System for Football 2 | 3 | ### Team Tech Titans 4 | #### Team Members:- 5 | 6 |
Parthasarathi Khirwadkar		 	16D070001
  7 | Sriram Yenamandra				16D070017
  8 | Rahul Chanduka					160070003
  9 | Ishank Juneja					16D070012
10 | ### Abstract 11 | 12 | Here, we present our attempt towards the creation of a mechanism by which the problem of incorrect offside decisions made by referees in football can be addressed. The offside detection problem is simplified by classifying the attacking and defending teams based on the half in which the forward ball is played. Further the assumption that team members wear the same coloured jerseys works towards simplifying the problem. 13 | The implementation involves two separate modules that track the ball and the players respectively. The successful integration of the modules leads to the desired goal of offside detection. 14 | The model works well with numerous sample situations of a single defender against two attacking players. 15 | 16 | ### Motivation 17 | Being avid football followers we commonly notice unfair decisions being called due to an error of judgment on the part of the referees which is a limitation which cannot be overcome in the present system. The off-side rule is by-far the rule that is the most abused. Ideating and creating a system which can handle the rule in question in a fair manner will go a long way in the game of football. 18 | 19 | 20 | 21 | ### Usage of Program: 22 | * To detect offside in a pre-recorded video, go to the folder in which code and video are stored and type the following in terminal: 23 | 24 | ```javascript 25 | $ python Offside_detection.py -v 'path/name of video file' 26 | ``` 27 | * To detect offside from live camera feed: 28 | ```javascript 29 | $ python Offside_detection.py 30 | ``` 31 | * While running the program, press ***'i'*** to input and ***'q'*** to quit 32 | 33 | * Input patch of jersey of any player of team A by clicking and dragging with the mouse and making sure that background does not get included. The size of patch doesn’t matter much though it is better to have a bigger patch. 
34 | 35 | ![Image](/images/point1.png) 36 | 37 | * Input patch of jersey of any player of team B in similar manner. 38 | 39 | ![Image](/images/point2.png) 40 | 41 | * Input patch of ball. 42 | 43 | ![Image](/images/point3.png) 44 | 45 | * Input two points along each side of the field in the exact order as shown 46 | i.e. Top edge, Left edge, Bottom edge then Right edge. 47 | 48 | ![Image](/images/point4.png) 49 | 50 | ### Approach to problem: 51 | * The problem was broken down into a **ball tracking module** and a **player tracking module** then combining the two to detect Offside. 52 | * The **ball tracking module** would take care of detecting the ball, tracking it and detecting whether a ball pass has occurred. 53 | * The **player tracking module** would detect the players of each team, attacking and defending, and get an approximate location of the foot of the players. 54 | * Finally the two would be integrated into one program. The ball pass is detected only when it is passed from one player to different player. 55 | * If the player of the attacking team receiving the pass(when he receives the pass) is behind the last player of the defending team then offside is called. 56 | * The offside region is shown by a line passing through the position of the last defender. 57 | * Note that offside is NOT called if the attacking player is behind the offside line but doesn’t receive the ball. 58 | 59 | ### Coding Technicality 60 | We used Python 2.7 and OpenCV 3.0 and did the coding on PyCharm IDE. The installation procedure was made easy by using Anaconda Python. It is recommended to create virtual environment using Anaconda and install OpenCV on it rather than using the in-built Python(in case of MacOS and Linux). 61 | We chose python as it is easy to code in and it was fast enough for our application. 62 | ### Initial Learning: 63 | We initially started learning OpenCV and Python by writing basic codes and studying examples given in OpenCV library and on internet. 
We found the following links to be very helpful along with the documentation and stackoverflow : 64 | * [http://www.pyimagesearch.com/](http://www.pyimagesearch.com/) 65 | * [http://docs.opencv.org/trunk/d6/d00/tutorial_py_root.html](http://docs.opencv.org/trunk/d6/d00/tutorial_py_root.html) 66 | * [https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_tutorials.html](https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_tutorials.html) 67 | 68 | ### Ball Tracking: 69 | * We tried multiple techniques for ball detection, namely colour thresholding, histogram backprojection, Hough circle transform and dlib library’s correlation tracker. 70 | * Out of these, colour thresholding seemed to work the best. It's simple and efficient (Occam’s Razor :P ). Although it will give noise if the colour of the ball matches the background, and thus we used it along with background subtraction. 71 | * Histogram backprojection is another good technique, and using a 2D histogram of hue and value gives us some robustness in terms of lighting conditions. 72 | * Hough circle transform gave too much noise. 73 | * Dlib library’s correlation tracker is great but it failed at tracking a small and fast-moving object like a football. It also slows down immensely if the number of objects to be tracked exceeds 3. 74 | #### Detection and Tracking of Ball on the basis of colour using Thresholding 75 | * Get a sample patch of the ball as input 76 | * Convert from the default RGB space to the HSV colour space and get the range of Hue, Saturation and Value 77 | * Apply background subtraction to get a mask. 78 | * Apply the mask to the frame. 79 | 80 | ![Image](/images/dnt1.png) ![Image](/images/dnt2.png) 81 | 82 | 83 | * Threshold the frames of the video to get the contour of the ball, which would be the contour with the largest area. 84 | 85 | ![Image](/images/dnt3.png) 86 | * Generate a minimum enclosing circle and get the center of the contour to detect the ball. 
87 | * For tracking, do this for every frame and store the points of the previous 10-20 frames 88 | * The pass is detected by a change in trajectory, which in turn is detected by a large enough change in direction of the vector joining the position of the center 10 frames before to the current position. This is done so as to eliminate false detection due to noise in the position of the center of the ball. 89 | 90 | ![Image](/images/dnt4.png) 91 | ### Player Tracking: 92 | * We first thought of using dlib library’s correlation tracker but we realised that it became very slow if we tracked more than 3 players. Hence it could not be used for a real-time application. 93 | * We even tried the HOG descriptor with SVM, which is a general method of detecting humans using machine learning and comes pre-trained in OpenCV, but it gave very disappointing results and was very slow. 94 | * In this case we used Histogram Backprojection to detect players from the colour of their jersey. We take a patch of jersey as input and calculate its histogram with axes hue and saturation and normalize it. 95 | 96 | ![Image](/images/pt1.png) 97 | * Then we used the calcBackProject function of OpenCV to generate a grayscale image where the magnitude of a pixel value is proportional to its similarity to the patch given as input. 98 | 99 | ![Image](/images/pt2.png) 100 | * We then threshold this image and apply erosion and dilation to remove noise, and we obtain contours of the jerseys of the players. These are separated from the noise on the basis of contour area and the fact that the height of the minimum enclosing rectangle is greater than the width. 101 | 102 | ![Image](/images/pt3.png) 103 | * The location of the player’s feet is approximated to be 1.5 times the height of the contour below the center of the contour 104 | ### Generating the top view 105 | * The user first inputs the endpoints of the field. 
In our case, the endpoints were not visible, hence we take 2 points along each sideline and solve the equations of the lines to get the 4 intersection points. 106 | * Using the getPerspectiveTransform function of OpenCV we obtain the homography matrix which maps source points to destination points 107 | * The top view location of the feet of the players and the ball is obtained by the following formula: x' = Hx 108 | * Where H is the homography matrix (3x3 matrix given by the getPerspectiveTransform function), x = [x1 y1 z1]T where (x1, y1) is the location of the foot in the camera view, and x' = [x'1 y'1 z'1]T 109 | * The top view coordinates are (x'1/z'1 , y'1/z'1 ) 110 | * The offside line is drawn by inverse mapping the endpoints of the line in the top view 111 | 112 | ### Offside Detection 113 | * At every pass detected, we check its validity by checking whether it was passed between different players. 114 | * The list index of the previous passer is stored and checked to see whether the pass was received by a different player 115 | * The player in possession of the ball is identified by measuring the distance between the ball’s center and the location of the feet of the players. This can give a false positive if the ball is bouncing and the nearest player is detected as the passer. 116 | * To overcome this, a change in trajectory was identified as a pass, and offside is checked only if the distance was less than some maximum (100 in our case) 117 | * Players of the defending team are sorted based on the x-coordinate of their location in the top view and the offside line is drawn. 118 | * If the ball is passed between players of the defending team (in this case team B) then offside is not detected 119 | * If the ball is passed between players of the attacking team (team A) then we check whether the x-coordinate of the receiving player is less than that of the last player of team B. If that’s True then Offside is called. 
120 | 121 | ### Result: 122 | ![Image](/images/result.png) 123 | ### Future Prospects of Improvement 124 | * The player’s and ball’s location can be detected with much greater accuracy by combining data from multiple cameras. 125 | * In our application, there is still the issue of occlusion i.e. when one player blocks the view of another player. This can be resolved by using multiple viewpoints. 126 | * Using multiple cameras to cover entire field. 127 | * Using better algorithms to detect players and ball as which does not depend on colour of jersey. 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | -------------------------------------------------------------------------------- /images/dnt1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/dnt1.png -------------------------------------------------------------------------------- /images/dnt2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/dnt2.png -------------------------------------------------------------------------------- /images/dnt3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/dnt3.png -------------------------------------------------------------------------------- /images/dnt4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/dnt4.png -------------------------------------------------------------------------------- /images/point1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/point1.png -------------------------------------------------------------------------------- /images/point2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/point2.png -------------------------------------------------------------------------------- /images/point3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/point3.png -------------------------------------------------------------------------------- /images/point4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/point4.png -------------------------------------------------------------------------------- /images/pt1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/pt1.png -------------------------------------------------------------------------------- /images/pt2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/pt2.png -------------------------------------------------------------------------------- /images/pt3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/pt3.png -------------------------------------------------------------------------------- /images/result.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/images/result.png -------------------------------------------------------------------------------- /soccer_half_field.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/soccer_half_field.jpeg -------------------------------------------------------------------------------- /track_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import argparse 3 | import cv2 4 | import math 5 | from collections import deque 6 | 7 | img = None 8 | orig = None 9 | roi = None 10 | roi2, roi2_init = None,None 11 | 12 | kernel = np.array([[0, 0, 1, 1, 0, 0], 13 | [0, 1, 1, 1, 1, 0], 14 | [1, 1, 1, 1, 1, 1], 15 | [1, 1, 1, 1, 1, 1], 16 | [0, 1, 1, 1, 1, 0], 17 | [0, 0, 1, 1, 0, 0]],dtype=np.uint8) 18 | 19 | ix,iy = 0,0 20 | draw = False 21 | rad_thresh = 15 22 | 23 | def getArguements(): 24 | ap = argparse.ArgumentParser() 25 | ap.add_argument("-v", "--video", 26 | help="path to the (optional) video file") 27 | args = vars(ap.parse_args()) 28 | return args 29 | 30 | def resize(img,width=400.0): 31 | r = float(width) / img.shape[0] 32 | dim = (int(img.shape[1] * r), int(width)) 33 | img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA) 34 | return img 35 | 36 | def selectROI(event, x, y, flag, param): 37 | global img, ix, iy, draw, orig, roi 38 | if event == cv2.EVENT_LBUTTONDOWN: 39 | ix = x 40 | iy = y 41 | draw = True 42 | 43 | elif event == cv2.EVENT_MOUSEMOVE: 44 | if draw: 45 | img = cv2.rectangle(orig.copy(), (ix, iy), (x, y), (255, 0, 0), 2) 46 | 47 | elif event == cv2.EVENT_LBUTTONUP: 48 | if draw: 49 | x1 = max(x, ix) 50 | y1 = max(y, iy) 51 | ix = min(x, ix) 52 | iy = min(y, iy) 53 | roi = orig[iy:y1, 
ix:x1] 54 | draw = False 55 | 56 | def getROIvid(frame, winName = 'input'): 57 | global img, orig, roi 58 | roi = None 59 | img = frame.copy() 60 | orig = frame.copy() 61 | cv2.namedWindow(winName) 62 | cv2.setMouseCallback(winName, selectROI) 63 | while True: 64 | cv2.imshow(winName, img) 65 | if roi is not None: 66 | cv2.destroyWindow(winName) 67 | return roi 68 | 69 | k = cv2.waitKey(1) & 0xFF 70 | if k == ord('q'): 71 | cv2.destroyWindow(winName) 72 | break 73 | 74 | return roi 75 | 76 | def getROIext(image,winName = 'input'): 77 | global img, orig, roi2, roi2_init 78 | img = image.copy() 79 | orig = image.copy() 80 | cv2.namedWindow(winName) 81 | cv2.setMouseCallback(winName, selectROI) 82 | while True: 83 | cv2.imshow(winName, img) 84 | if roi is not None: 85 | cv2.destroyWindow(winName) 86 | return roi 87 | 88 | k = cv2.waitKey(1) & 0xFF 89 | if k == ord('q'): 90 | cv2.destroyWindow(winName) 91 | break 92 | 93 | return roi 94 | 95 | 96 | def getLimits(roi): 97 | limits = None 98 | roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) 99 | h, s, v = cv2.split(roi) 100 | limits = [(int(np.amax(h)), int(np.amax(s)), 255), (int(np.amin(h)), int(np.amin(s)), int(np.amin(v)))] 101 | return limits 102 | 103 | def applyMorphTransforms(mask): 104 | global kernel 105 | lower = 100 106 | upper = 255 107 | 108 | #mask = cv2.inRange(mask, lower, upper) 109 | mask = cv2.GaussianBlur(mask, (11, 11), 5) 110 | mask = cv2.inRange(mask, lower, upper) 111 | mask = cv2.dilate(mask, kernel) 112 | mask = cv2.erode(mask, np.ones((5, 5))) 113 | 114 | return mask 115 | 116 | def applyMorphTransforms2(backProj): 117 | global kernel 118 | lower = 50 119 | upper = 255 120 | mask = cv2.inRange(backProj, lower, upper) 121 | mask = cv2.dilate(mask, kernel) 122 | mask = cv2.erode(mask, np.ones((3, 3))) 123 | mask = cv2.GaussianBlur(mask, (11, 11), 5) 124 | mask = cv2.inRange(mask, lower, upper) 125 | return mask 126 | 127 | 128 | 129 | def detectBallThresh(frame,limits): 130 | global rad_thresh 131 | 
upper = limits[0] 132 | lower = limits[1] 133 | center = None 134 | 135 | hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) 136 | 137 | mask = cv2.inRange(hsv, lower, upper) 138 | mask = applyMorphTransforms(mask) 139 | cv2.imshow('mask', mask) 140 | 141 | cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, 142 | cv2.CHAIN_APPROX_SIMPLE)[-2] 143 | cnts = sorted(cnts, key=cv2.contourArea, reverse=True) 144 | flag = False 145 | i=0 146 | if len(cnts) > 0: 147 | for i in range(len(cnts)): 148 | (_, radius) = cv2.minEnclosingCircle(cnts[i]) 149 | if radius < rad_thresh and radius > 5: 150 | flag = True 151 | break 152 | if not flag: 153 | return None, None 154 | 155 | M = cv2.moments(cnts[i]) 156 | center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) 157 | return center, cnts[i] 158 | else: 159 | return None, None 160 | 161 | def detectBallHB(frame, roi): 162 | global rad_thresh 163 | roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) # convert to HSV colour space 164 | 165 | roiHist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256]) 166 | cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX) 167 | 168 | hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) 169 | backProj = cv2.calcBackProject([hsv], [0, 1], roiHist, [0, 180, 0, 256], 1) 170 | mask = cv2.inRange(backProj, 50, 255) 171 | mask = cv2.erode(mask, np.ones((5, 5))) 172 | mask = cv2.dilate(mask, np.ones((5, 5))) 173 | 174 | cv2.imshow('mask',mask) 175 | 176 | cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, 177 | cv2.CHAIN_APPROX_SIMPLE)[-2] 178 | cnts = sorted(cnts, key=cv2.contourArea, reverse=True) 179 | i = 0 180 | if len(cnts) > 0: 181 | for i in range(len(cnts)): 182 | (_, radius) = cv2.minEnclosingCircle(cnts[i]) 183 | if radius < rad_thresh and radius>5: 184 | break 185 | M = cv2.moments(cnts[i]) 186 | center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) 187 | return center, cnts[i] 188 | else: 189 | return None, None 190 | 191 | def kalmanFilter(meas): 192 | pred = 
np.array([],dtype=np.int) 193 | #mp = np.asarray(meas,np.float32).reshape(-1,2,1) # measurement 194 | tp = np.zeros((2, 1), np.float32) # tracked / prediction 195 | 196 | kalman = cv2.KalmanFilter(4, 2) 197 | kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32) 198 | kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) 199 | kalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03 200 | kalman.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 0.00003 201 | for mp in meas: 202 | mp = np.asarray(mp,dtype=np.float32).reshape(2,1) 203 | kalman.correct(mp) 204 | tp = kalman.predict() 205 | np.append(pred,[int(tp[0]),int(tp[1])]) 206 | 207 | return pred 208 | 209 | def removeBG(frame, fgbg): 210 | bg_mask = fgbg.apply(frame) 211 | bg_mask = cv2.dilate(bg_mask, np.ones((5, 5))) 212 | frame = cv2.bitwise_and(frame, frame, mask=bg_mask) 213 | return frame 214 | 215 | def getHist(frame): 216 | roi_hist_A, roi_hist_B = None, None 217 | 218 | if roi_hist_A is None: 219 | roi = getROIvid(frame,'input team A') 220 | roi = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV) 221 | roi_hist_A = cv2.calcHist([roi],[0,1],None,[180,256],[0,180,0,256]) 222 | roi_hist_A = cv2.normalize(roi_hist_A, roi_hist_A, 0, 255, cv2.NORM_MINMAX) 223 | 224 | if roi_hist_B is None: 225 | roi = getROIvid(frame, 'input team B') 226 | roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) 227 | roi_hist_B = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256]) 228 | roi_hist_B = cv2.normalize(roi_hist_B, roi_hist_B, 0, 255, cv2.NORM_MINMAX) 229 | 230 | return roi_hist_A, roi_hist_B -------------------------------------------------------------------------------- /vid8.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kparth98/ITSP-Project/b7df6f429024a3f6d4b689b60388c38127c933d3/vid8.mp4 
--------------------------------------------------------------------------------