├── video1.avi ├── FrameNo0.png ├── Harsh Minor project Presentation Final.pdf ├── README.md └── main.py /video1.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/harshagarwal10/abandoned-object-detection/HEAD/video1.avi -------------------------------------------------------------------------------- /FrameNo0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/harshagarwal10/abandoned-object-detection/HEAD/FrameNo0.png -------------------------------------------------------------------------------- /Harsh Minor project Presentation Final.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/harshagarwal10/abandoned-object-detection/HEAD/Harsh Minor project Presentation Final.pdf -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Abandoned-Object-Detection 2 | 3 | Work to be done: 4 | 1. We process the live feed of a CCTV camera with image processing. 5 | 2. If a person abandons a piece of luggage, the camera catches the activity. 6 | 3. These frames are detected and processed with edge detection, using OpenCV. 7 | 4. If the bag is left untouched for some period of time, the analyser decides and raises an alarm to the authorities. 8 | 9 | Steps to be done: 10 | 1. The input video is divided into frames. 11 | 2. The first frame is converted from RGB to greyscale. 12 | 3. The video is converted from RGB to greyscale. 13 | 4. The frame difference between the first frame and the video is taken. 14 | 5. Canny edge detection is applied. 15 | 6. A timer is started in case of a detection. 16 | 7. If the abandoned object is not moved for a specified time, it is displayed on the screen. 
"""Abandoned-object detection on a CCTV video feed.

Pipeline: diff each video frame against a reference first frame,
Canny-edge the difference, close the edges into blobs, and track
contour centroids across frames.  A centroid that stays put for
long enough is flagged as a possible abandoned object.
"""

import numpy as np
import cv2
from collections import Counter, defaultdict

# Reference background frame (first frame of the video, assumed empty scene).
firstframe_path = r'/Users/Harsh/Desktop/Frames/FrameNo0.png'

firstframe = cv2.imread(firstframe_path)
firstframe_gray = cv2.cvtColor(firstframe, cv2.COLOR_BGR2GRAY)
# Heavy blur suppresses sensor noise so the frame difference highlights
# real scene changes rather than pixel flicker.
firstframe_blur = cv2.GaussianBlur(firstframe_gray, (21, 21), 0)

# ---------------------------------
# Pre-create resizable display windows.
# ---------------------------------
cv2.namedWindow('CannyEdgeDet', cv2.WINDOW_NORMAL)
cv2.namedWindow('Abandoned Object Detection', cv2.WINDOW_NORMAL)
cv2.namedWindow('Morph_Close', cv2.WINDOW_NORMAL)

# Location of the video under analysis.
file_path = r'/Users/Harsh/Desktop/video.avi'

cap = cv2.VideoCapture(file_path)

# A centroid must appear in this many consecutive frames before it is
# considered stationary.  Raising it shortens the suspect list.
consecutiveframe = 20

track_temp = []    # running log of every [centroid-sum, frameno] seen
track_master = []  # [centroid-sum, frameno] history for the retained window of frames
track_temp2 = []   # scratch list used while trimming track_master

top_contour_dict = defaultdict(int)   # centroid-sum -> count of "stationary" observations
obj_detected_dict = defaultdict(int)  # centroid-sum -> last frame number it was flagged

frameno = 0
while cap.isOpened():
    ret, frame = cap.read()
    # BUG FIX: test `ret` BEFORE touching `frame`.  At end of stream
    # `frame` is None and the original imshow-first order crashed.
    if not ret:
        break
    cv2.imshow('main', frame)

    frameno += 1

    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame_blur = cv2.GaussianBlur(frame_gray, (21, 21), 0)

    # BUG FIX: diff the blurred greyscale images.  The original computed
    # the blurred frames but then diffed the raw colour frames, so the
    # noise-suppression blur had no effect at all.
    frame_diff = cv2.absdiff(firstframe_blur, frame_blur)

    # Canny edge detection: gradients between the thresholds 10 and 200
    # are kept as edges.
    edged = cv2.Canny(frame_diff, 10, 200)
    cv2.imshow('CannyEdgeDet', edged)

    # Morphological close joins nearby edges into solid blobs; a bigger
    # kernel (e.g. 10x10) would merge/erase more aggressively.
    kernel2 = np.ones((5, 5), np.uint8)
    thresh2 = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel2, iterations=2)
    # BUG FIX: window title matched to the namedWindow call above
    # ('Morph_Close'); the original's 'Morph_CLOSE'/'Morph_Close'
    # mismatch opened a second, unsized window.
    cv2.imshow('Morph_Close', thresh2)

    # BUG FIX: OpenCV 3 returns (image, contours, hierarchy) while
    # OpenCV 4 returns (contours, hierarchy); indexing [-2] gets the
    # contour list under either API.
    cnts = cv2.findContours(thresh2.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]

    mycnts = []  # contours passing the size filter, reset every frame
    for c in cnts:
        # Centroid from image moments; m00 == 0 means a degenerate
        # contour with no area, which has no centroid.
        M = cv2.moments(c)
        if M['m00'] == 0:
            continue
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        # ----------------------------------------------------------------
        # Contour criteria: drop blobs that are too small (noise) or too
        # large (scene-wide change such as lighting).
        # ----------------------------------------------------------------
        area = cv2.contourArea(c)
        if area < 200 or area > 20000:
            continue
        mycnts.append(c)

        # Bounding box, used later if this centroid is flagged.
        (x, y, w, h) = cv2.boundingRect(c)

        # The centroid is tracked by the single scalar cx+cy.
        # NOTE(review): distinct points with equal coordinate sums alias
        # to one track; tracking the (cx, cy) pair would be more robust.
        sumcxcy = cx + cy
        track_temp.append([sumcxcy, frameno])
        track_master.append([sumcxcy, frameno])

        # ----------------------------------------------------------------
        # Keep a sliding window of the last `consecutiveframe` frames in
        # track_master: once more unique frame numbers are present, drop
        # every entry belonging to the oldest frame.  A centroid value
        # that occurs in all retained frames is likely a stationary
        # object rather than a passer-by.
        # ----------------------------------------------------------------
        countuniqueframe = set(j for i, j in track_master)
        if len(countuniqueframe) > consecutiveframe:
            minframeno = min(j for i, j in track_master)
            track_master = [[i, j] for i, j in track_master if j != minframeno]

        # Count occurrences of each centroid-sum in the retained window;
        # one seen in every frame of the window earns a "stationary" tick.
        countcxcy = Counter(i for i, j in track_master)
        for value, count in countcxcy.items():
            if count >= consecutiveframe:
                top_contour_dict[value] += 1

        # Flag the object once it has accumulated enough stationary ticks.
        if sumcxcy in top_contour_dict and top_contour_dict[sumcxcy] > 100:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3)
            cv2.putText(frame, 'CheckObject', (cx, cy),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            print('Detected : ', sumcxcy, frameno, obj_detected_dict)

            # Record the last frame each object was detected, so stale
            # entries can be purged after the object is removed.
            obj_detected_dict[sumcxcy] = frameno

        # Purge objects not re-detected for 200 frames — they have
        # probably been removed from the scene.
        # BUG FIX: iterate over a snapshot; popping from the dict while
        # iterating it live raises RuntimeError in Python 3.
        for key, lastframe in list(obj_detected_dict.items()):
            if frameno - lastframe > 200:
                print('PopBefore', key, lastframe, frameno, obj_detected_dict)
                print('PopBefore : top_contour :', top_contour_dict)
                obj_detected_dict.pop(key)

                # Reset the stationary count so the stale centroid is not
                # re-flagged from leftover history.
                top_contour_dict[key] = 0
                # BUG FIX: print the saved `lastframe`; reading
                # obj_detected_dict[key] after the pop silently
                # re-inserted the key with value 0 (defaultdict).
                print('PopAfter', key, lastframe, frameno, obj_detected_dict)
                print('PopAfter : top_contour :', top_contour_dict)

    cv2.imshow('Abandoned Object Detection', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()