├── snapshotsound.ogg
├── README.md
├── motionDetectionCV3.py
├── soundDetection.py
├── MotionDetector.py
└── MotionDetectorContours.py

/snapshotsound.ogg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/richard512/python-security-camera/HEAD/snapshotsound.ogg
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
Python Security Camera
=======================

Python/OpenCV scripts that detect motion on a webcam and can record it to a file.

## The simple way ##

[![https://www.youtube.com/watch?v=-RUu3EcielI](https://img.youtube.com/vi/-RUu3EcielI/0.jpg)](https://www.youtube.com/watch?v=-RUu3EcielI)

Compare consecutive video frames and count how many pixels changed.

## The smart way ##

[![https://www.youtube.com/watch?v=sRIdyfh3054](https://img.youtube.com/vi/sRIdyfh3054/0.jpg)](https://www.youtube.com/watch?v=sRIdyfh3054)

Detect moving objects and calculate their surface area.

## More info ##

* usually takes about 5 seconds to start up
* initRecorder: you can switch the output video format here
* somethingHasMoved: pixel comparison
* processImage: the fun stuff!
--------------------------------------------------------------------------------
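The "simple way" above boils down to subtracting two frames and counting how many pixels changed. A minimal sketch of that test, assuming cv2 and NumPy are available; the function name and both thresholds are illustrative, not taken from the scripts below:

# Minimal sketch of the pixel-comparison idea ("the simple way").
# frame_has_motion, pixel_delta and changed_ratio are illustrative names/values.
import cv2
import numpy as np

def frame_has_motion(prev_gray, gray, pixel_delta=25, changed_ratio=0.02):
    # Per-pixel absolute difference between two grayscale frames
    diff = cv2.absdiff(prev_gray, gray)
    # Fraction of pixels whose brightness changed by more than pixel_delta
    changed = np.count_nonzero(diff > pixel_delta)
    return changed / diff.size > changed_ratio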
/motionDetectionCV3.py:
--------------------------------------------------------------------------------
# coding=utf-8
"""
Webcam motion detection
"""
import cv2
import numpy as np
import pygame

THRESHOLD = 40
camera = cv2.VideoCapture(0)

es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 4))
kernel = np.ones((5, 5), np.uint8)
background = None

# Write test video
fps = 20  # camera.get(cv2.CAP_PROP_FPS)
pygame.mixer.init()
cameraSound = pygame.mixer.Sound("snapshotsound.ogg")
size = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
videoWriter = cv2.VideoWriter('basic_motion_detection.avi',
                              cv2.VideoWriter_fourcc('D', 'I', 'V', 'X'),
                              fps, size)

while True:
    ret, frame = camera.read()
    # Use the first frame as the background
    if background is None:
        background = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        background = cv2.GaussianBlur(background, (21, 21), 0)
        continue

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray_frame = cv2.GaussianBlur(gray_frame, (21, 21), 0)

    # Difference between the current frame and the background
    # print(background.shape, gray_frame.shape)
    diff = cv2.absdiff(background, gray_frame)
    diff = cv2.threshold(diff, THRESHOLD, 255, cv2.THRESH_BINARY)[1]
    diff = cv2.dilate(diff, es, iterations=2)
    # Calculate the outlines of the moving objects in the image
    image, cnts, hierarchy = cv2.findContours(diff.copy(),
                                              cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    print("Detecting " + str(len(cnts)) + " Moving Objects")
    if len(cnts) > 0:
        cameraSound.play()

    for c in cnts:
        if cv2.contourArea(c) < 1500:
            continue
        # Calculate the bounding box and draw it on the frame
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("contours", frame)
    videoWriter.write(frame)
    cv2.imshow("diff", diff)
    # cv2.imwrite('diff.jpg', diff)
    if cv2.waitKey(int(1000 / 12)) & 0xff == ord('q'):
        break

cv2.destroyAllWindows()
camera.release()
--------------------------------------------------------------------------------
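Note on portability: the three-value unpacking of cv2.findContours above matches OpenCV 3.x, while OpenCV 2.4 and 4.x return only (contours, hierarchy). A version-agnostic sketch, reusing the `diff` image from the script above, is to keep just the last two return values:

# Version-agnostic contour unpacking (sketch): OpenCV 3.x returns
# (image, contours, hierarchy); OpenCV 2.4 and 4.x return (contours, hierarchy).
result = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts, hierarchy = result[-2:]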
/soundDetection.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
import pyaudio
from numpy import zeros, linspace, short, fromstring, hstack, transpose, log
from scipy import fft
import time
import ctypes

# Volume sensitivity: 0.05: extremely sensitive, may give false alarms
#                     0.1:  probably the ideal volume
#                     1:    poorly sensitive, will only go off for relatively loud sounds
SENSITIVITY = 1.0
# Alarm frequency (Hz) to detect (use Audacity to record a wave and then do Analyze -> Plot Spectrum)
TONE = 1593
# Bandwidth for detection (i.e., detect frequencies within this margin of error of the TONE)
BANDWIDTH = 30
# How many 46ms blips before we declare a beep? (Take the beep length in ms, divide by 46ms, subtract a bit)
beeplength = 2
# How many beeps before we declare an alarm?
alarmlength = 2
# How many false 46ms blips before we declare the alarm is not ringing
resetlength = 10
# How many reset counts until we clear an active alarm?
clearlength = 30
# Enable blip, beep, and reset debug output
debug = False
# Show the most intense frequency detected (useful for configuration)
frequencyoutput = False


# Set up the audio sampler
NUM_SAMPLES = 2048
SAMPLING_RATE = 44100
pa = pyaudio.PyAudio()
_stream = pa.open(format=pyaudio.paInt16,
                  channels=1, rate=SAMPLING_RATE,
                  input=True,
                  frames_per_buffer=NUM_SAMPLES)

def addToLog(appendText):
    print(appendText)
    with open("log.txt", "a") as myfile:
        myfile.write("\n" + appendText)

# ctypes.windll.user32.LockWorkStation()
addToLog("Listening for " + str(alarmlength) + " beeps of " + str(beeplength * 46) + "ms at " + str(TONE) + "Hz")

blipcount = 0
beepcount = 0
resetcount = 0
clearcount = 0
alarm = False

while True:
    while _stream.get_read_available() < NUM_SAMPLES:
        time.sleep(0.01)
    audio_data = fromstring(_stream.read(
        _stream.get_read_available()), dtype=short)[-NUM_SAMPLES:]
    # Each data point is a signed 16-bit number, so we can normalize by dividing by 32 * 1024 = 32768
    normalized_data = audio_data / 32768.0
    intensity = abs(fft(normalized_data))[:int(NUM_SAMPLES / 2)]
    frequencies = linspace(0.0, float(SAMPLING_RATE) / 2, num=NUM_SAMPLES // 2)  # num must be an int
    if frequencyoutput:
        which = intensity[1:].argmax() + 1
        # Use quadratic interpolation around the max
        if which != len(intensity) - 1:
            y0, y1, y2 = log(intensity[which - 1:which + 2:])
            x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
            # Find the frequency and output it
            thefreq = (which + x1) * SAMPLING_RATE / NUM_SAMPLES
        else:
            thefreq = which * SAMPLING_RATE / NUM_SAMPLES
        if debug: print("\t\t\t\tfreq=", thefreq)
    if max(intensity[(frequencies < TONE + BANDWIDTH) & (frequencies > TONE - BANDWIDTH)]) > max(intensity[(frequencies < TONE - 1000) & (frequencies > TONE - 2000)]) + SENSITIVITY:
        blipcount += 1
        resetcount = 0
        if debug: print("\t\tBlip", blipcount)
        if (blipcount >= beeplength):
            blipcount = 0
            resetcount = 0
            beepcount += 1
            if debug: print("\tBeep", beepcount)
            if (beepcount >= alarmlength):
                if not alarm:
                    datetime = time.strftime('%Y-%m-%d %H:%M:%S')
                    humantime = time.strftime('%I:%M:%S %p %Z')
                    addToLog("Alarm triggered at " + datetime + " (" + humantime + ")")
                    clearcount = 0
                    alarm = True
                    if debug: print("Alarm!")
                beepcount = 0
    else:
        blipcount = 0
        resetcount += 1
        if debug: print("\t\t\treset", resetcount)
        if (resetcount >= resetlength):
            resetcount = 0
            beepcount = 0
            if alarm:
                clearcount += 1
                if debug: print("\t\tclear", clearcount)
                if clearcount >= clearlength:
                    clearcount = 0
                    addToLog("Listening...")
                    alarm = False
            else:
                if debug: print("No alarm")
    time.sleep(0.01)
--------------------------------------------------------------------------------
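The detection test in the loop above compares the strongest FFT bin inside TONE ± BANDWIDTH against the strongest bin in a reference band 1-2 kHz below the tone; the tone is considered present when it exceeds the reference by SENSITIVITY. A standalone sketch of that comparison, assuming `intensity` and `frequencies` are NumPy arrays shaped as in the script; the function name is illustrative:

# Sketch of the band comparison used above: the peak near the tone must beat
# the peak in a reference band (1-2 kHz below the tone) by a sensitivity margin.
def tone_present(intensity, frequencies, tone=1593, bandwidth=30, sensitivity=1.0):
    in_band = intensity[(frequencies > tone - bandwidth) & (frequencies < tone + bandwidth)]
    reference = intensity[(frequencies > tone - 2000) & (frequencies < tone - 1000)]
    return in_band.max() > reference.max() + sensitivity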
/MotionDetector.py:
--------------------------------------------------------------------------------
import cv2.cv as cv
from datetime import datetime
import pygame
import time

class MotionDetectorInstantaneous():

    def onChange(self, val):  # Callback when the user changes the detection threshold
        self.threshold = val

    def __init__(self, threshold=8, doRecord=True, showWindows=True):
        pygame.mixer.init()
        self.cameraSound = pygame.mixer.Sound("snapshotsound.ogg")
        self.writer = None
        self.font = None
        self.doRecord = doRecord  # Whether or not to record the moving object
        self.show = showWindows  # Whether or not to show the 2 windows
        self.frame = None

        self.capture = cv.CaptureFromCAM(0)
        self.frame = cv.QueryFrame(self.capture)  # Take a frame to init the recorder
        if doRecord:
            self.initRecorder()

        self.frame1gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)  # Gray frame at t-1
        cv.CvtColor(self.frame, self.frame1gray, cv.CV_RGB2GRAY)

        # Will hold the thresholded result
        self.res = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)

        self.frame2gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)  # Gray frame at t

        self.width = self.frame.width
        self.height = self.frame.height
        self.nb_pixels = self.width * self.height
        self.threshold = threshold
        self.isRecording = False
        self.trigger_time = 0  # Holds the timestamp of the last detection

        if showWindows:
            cv.NamedWindow("Image")
            cv.CreateTrackbar("Detection threshold: ", "Image", self.threshold, 100, self.onChange)

    def initRecorder(self):  # Create the recorder
        codec = cv.CV_FOURCC('D', 'I', 'V', 'X')  # ('W', 'M', 'V', '2')
        self.writer = cv.CreateVideoWriter(datetime.now().strftime("%b-%d_%H_%M_%S") + ".avi", codec, 20, cv.GetSize(self.frame), 1)
        # FPS set to 20 because it seems to be the fps of my cam, but it should be adjusted to your needs
        self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8)  # Creates a font

    def run(self):
        started = time.time()
        while True:

            curframe = cv.QueryFrame(self.capture)
            instant = time.time()  # Get the timestamp of the frame

            self.processImage(curframe)  # Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant  # Update the trigger_time
                    if instant > started + 5:  # Wait 5 seconds after the webcam starts, for luminosity adjustment etc.
                        print datetime.now().strftime("%b %d, %H:%M:%S"), "Something is moving !"
                        self.cameraSound.play()
                        cv.SaveImage('snapshot.png', curframe)
                        if self.doRecord:  # Set isRecording=True only if we record a video
                            self.isRecording = True
            else:
                if instant >= self.trigger_time + 10:  # Record during 10 seconds
                    print datetime.now().strftime("%b %d, %H:%M:%S"), "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(curframe, datetime.now().strftime("%b %d, %H:%M:%S"), (25, 30), self.font, 0)  # Put the date on the frame
                    cv.WriteFrame(self.writer, curframe)  # Write the frame

            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)

            cv.Copy(self.frame2gray, self.frame1gray)
            c = cv.WaitKey(1) % 0x100
            if c == 27 or c == 10:  # Break if the user presses 'Esc' or 'Enter'
                break

    def processImage(self, frame):
        cv.CvtColor(frame, self.frame2gray, cv.CV_RGB2GRAY)

        # AbsDiff to get the difference between the two frames
        cv.AbsDiff(self.frame1gray, self.frame2gray, self.res)

        # Remove the noise and do the threshold
        cv.Smooth(self.res, self.res, cv.CV_BLUR, 5, 5)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_OPEN)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_CLOSE)
        cv.Threshold(self.res, self.res, 10, 255, cv.CV_THRESH_BINARY_INV)

    def somethingHasMoved(self):
        nb = 0  # Will hold the number of black pixels

        for x in range(self.height):  # Iterate over the whole image
            for y in range(self.width):
                if self.res[x, y] == 0.0:  # If the pixel is black, count it
                    nb += 1
        avg = (nb * 100.0) / self.nb_pixels  # Percentage of black pixels in the image

        if avg > self.threshold:  # If over the threshold, trigger the alarm
            return True
        else:
            return False

if __name__ == "__main__":
    detect = MotionDetectorInstantaneous(doRecord=True)
    detect.run()
--------------------------------------------------------------------------------
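somethingHasMoved above counts black pixels one at a time in pure Python, which is slow at webcam resolutions. With a NumPy array (as returned by the modern cv2 API) the same percentage test can be vectorized; a minimal sketch, with illustrative names:

import numpy as np

def something_has_moved(res, threshold_percent):
    # res is the inverted binary difference image (moving pixels are 0);
    # threshold_percent plays the role of self.threshold above.
    black_percent = 100.0 * np.count_nonzero(res == 0) / res.size
    return black_percent > threshold_percent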
/MotionDetectorContours.py:
--------------------------------------------------------------------------------
import cv2.cv as cv
from datetime import datetime
import time

class MotionDetectorAdaptative():

    def onChange(self, val):  # Callback when the user changes the detection threshold
        self.threshold = val

    def __init__(self, threshold=7, doRecord=True, showWindows=True):
        self.writer = None
        self.font = None
        self.doRecord = doRecord  # Whether or not to record the moving object
        self.show = showWindows  # Whether or not to show the 2 windows
        self.frame = None

        self.capture = cv.CaptureFromCAM(0)
        self.frame = cv.QueryFrame(self.capture)  # Take a frame to init the recorder
        if doRecord:
            self.initRecorder()

        self.gray_frame = cv.CreateImage(cv.GetSize(self.frame), cv.IPL_DEPTH_8U, 1)
        self.average_frame = cv.CreateImage(cv.GetSize(self.frame), cv.IPL_DEPTH_32F, 3)
        self.absdiff_frame = None
        self.previous_frame = None

        self.surface = self.frame.width * self.frame.height
        self.currentsurface = 0
        self.currentcontours = None
        self.threshold = threshold
        self.isRecording = False
        self.trigger_time = 0  # Holds the timestamp of the last detection

        if showWindows:
            cv.NamedWindow("Image")
            cv.CreateTrackbar("Detection threshold: ", "Image", self.threshold, 100, self.onChange)

    def initRecorder(self):  # Create the recorder
        codec = cv.CV_FOURCC('M', 'J', 'P', 'G')
        self.writer = cv.CreateVideoWriter(datetime.now().strftime("%b-%d_%H_%M_%S") + ".wmv", codec, 5, cv.GetSize(self.frame), 1)
        # FPS set to 5 because it seems to be the fps of my cam, but it should be adjusted to your needs
        self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8)  # Creates a font

    def run(self):
        started = time.time()
        while True:

            currentframe = cv.QueryFrame(self.capture)
            instant = time.time()  # Get the timestamp of the frame

            self.processImage(currentframe)  # Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant  # Update the trigger_time
                    if instant > started + 10:  # Wait 10 seconds after the webcam starts, for luminosity adjustment etc.
                        print "Something is moving !"
                        if self.doRecord:  # Set isRecording=True only if we record a video
                            self.isRecording = True
                cv.DrawContours(currentframe, self.currentcontours, (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
            else:
                if instant >= self.trigger_time + 10:  # Record during 10 seconds
                    print "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(currentframe, datetime.now().strftime("%b %d, %H:%M:%S"), (25, 30), self.font, 0)  # Put the date on the frame
                    cv.WriteFrame(self.writer, currentframe)  # Write the frame

            if self.show:
                cv.ShowImage("Image", currentframe)

            c = cv.WaitKey(1) % 0x100
            if c == 27 or c == 10:  # Break if the user presses 'Esc' or 'Enter'
                break

    def processImage(self, curframe):
        cv.Smooth(curframe, curframe)  # Remove false positives

        if not self.absdiff_frame:  # The first time, put values in difference, temp and moving_average
            self.absdiff_frame = cv.CloneImage(curframe)
            self.previous_frame = cv.CloneImage(curframe)
            cv.Convert(curframe, self.average_frame)  # Convert because RunningAvg works on 32F images
        else:
            cv.RunningAvg(curframe, self.average_frame, 0.05)  # Compute the average

        cv.Convert(self.average_frame, self.previous_frame)  # Convert back to an 8U frame

        cv.AbsDiff(curframe, self.previous_frame, self.absdiff_frame)  # moving_average - curframe

        cv.CvtColor(self.absdiff_frame, self.gray_frame, cv.CV_RGB2GRAY)  # Convert to gray, otherwise we can't threshold
        cv.Threshold(self.gray_frame, self.gray_frame, 50, 255, cv.CV_THRESH_BINARY)

        cv.Dilate(self.gray_frame, self.gray_frame, None, 15)  # To get object blobs
        cv.Erode(self.gray_frame, self.gray_frame, None, 10)


    def somethingHasMoved(self):

        # Find contours
        storage = cv.CreateMemStorage(0)
        contours = cv.FindContours(self.gray_frame, storage, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)

        self.currentcontours = contours  # Save the contours

        while contours:  # For all contours, compute the area
            self.currentsurface += cv.ContourArea(contours)
            contours = contours.h_next()

        avg = (self.currentsurface * 100) / self.surface  # Contour area as a percentage of the total frame size
        self.currentsurface = 0  # Reset the current surface to 0

        if avg > self.threshold:
            return True
        else:
            return False


if __name__ == "__main__":
    detect = MotionDetectorAdaptative(doRecord=True)
    detect.run()
--------------------------------------------------------------------------------
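MotionDetector.py and MotionDetectorContours.py target the legacy cv2.cv bindings, which no longer ship with current OpenCV releases. Under the modern cv2 API, the surface-area test from somethingHasMoved might look roughly like the sketch below, assuming `mask` is the thresholded and dilated/eroded motion mask; the function name is illustrative:

import cv2

def moving_area_percent(mask):
    # Sum the area of the external contours in a binary motion mask and return
    # it as a percentage of the frame surface, mirroring somethingHasMoved above.
    contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    total = sum(cv2.contourArea(c) for c in contours)
    return 100.0 * total / (mask.shape[0] * mask.shape[1])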