├── Classifier └── faces.xml ├── README.md ├── conf.json ├── pi_surveillance.py └── pyimagesearch ├── __init__.py ├── __init__.pyc ├── tempimage.py └── tempimage.pyc /README.md: -------------------------------------------------------------------------------- 1 | # Human Detection System using Raspberry Pi 2 | 3 | ## Functionality 4 | Activates a relay on detecting motion. 5 | 6 | You may need the following components to get the expected results 7 |
8 | ## Hardware Components 9 | * A Raspberry Pi 2 or 3 Model B ([Raspberry Pi 3 Model B](https://www.raspberrypi.org/products/raspberry-pi-3-model-b/) has been used in my case) 10 | * A compatible camera module ([CAMERA MODULE V2](https://www.raspberrypi.org/products/camera-module-v2/)) 11 | * A power adapter with 2.0A - 2.5A ([RASPBERRY PI UNIVERSAL POWER SUPPLY](https://www.raspberrypi.org/products/universal-power-supply/)) 12 | * A micro SD card (16 GB - 32 GB recommended) 13 | 14 | 15 | ## Software Requirements 16 | * Any compatible Raspbian OS can be used. 17 | * Update the package lists `sudo apt-get update` 18 | * Upgrade the installed packages `sudo apt-get upgrade` 19 | * Update the Raspberry Pi firmware `sudo rpi-update` 20 | * Install OpenCV `sudo apt-get install libopencv-dev` 21 | * Install Python 22 | * imutils ```pip install imutils``` 23 | * RPi.GPIO ```pip install RPi.GPIO``` 24 | 25 | 26 | If you need to update OpenCV to the latest version, install the following dependencies 27 | ``` 28 | sudo apt-get install build-essential checkinstall cmake pkg-config yasm 29 | sudo apt-get install libtiff4-dev libjpeg-dev libjasper-dev 30 | sudo apt-get install libavcodec-dev libavformat-dev libswscale-dev libdc1394-22-dev libxine-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev libv4l-dev 31 | sudo apt-get install python-dev python-numpy 32 | sudo apt-get install libtbb-dev 33 | sudo apt-get install libqt4-dev libgtk2.0-dev 34 | ``` 35 | 36 | ## Usage 37 | ``` 38 | python pi_surveillance.py --conf conf.json 39 | ``` 40 | -------------------------------------------------------------------------------- /conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "show_video": true, 3 | "min_upload_seconds": 3.0, 4 | "min_motion_frames": 8, 5 | "camera_warmup_time": 2.5, 6 | "delta_thresh": 5, 7 | "resolution": [640, 480], 8 | "fps": 16, 9 | "min_area": 5000 10 | } 11 | 
-------------------------------------------------------------------------------- /pi_surveillance.py: -------------------------------------------------------------------------------- 1 | # USAGE 2 | # python pi_surveillance.py --conf conf.json 3 | 4 | # import the necessary packages 5 | from __future__ import print_function 6 | from picamera.array import PiRGBArray 7 | from picamera import PiCamera 8 | import argparse 9 | import warnings 10 | import datetime 11 | import imutils 12 | import json 13 | import time 14 | import cv2 15 | import RPi.GPIO as GPIO 16 | from imutils.object_detection import non_max_suppression 17 | import numpy as np 18 | 19 | # construct the argument parser and parse the arguments 20 | ap = argparse.ArgumentParser() 21 | ap.add_argument("-c", "--conf", required=True, 22 | help="path to the JSON configuration file") 23 | args = vars(ap.parse_args()) 24 | 25 | # filter warnings, load the configuration 26 | warnings.filterwarnings("ignore") 27 | conf = json.load(open(args["conf"])) 28 | client = None 29 | 30 | # initialize the camera and grab a reference to the raw camera capture 31 | camera = PiCamera() 32 | camera.resolution = tuple(conf["resolution"]) 33 | camera.framerate = conf["fps"] 34 | rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"])) 35 | 36 | # initialize the HOG descriptor/person detector 37 | hog = cv2.HOGDescriptor() 38 | hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector()) 39 | 40 | #Setup Pins 41 | pin = 18 42 | GPIO.setmode(GPIO.BCM) 43 | GPIO.setup(pin, GPIO.OUT, initial=0) 44 | 45 | # allow the camera to warmup, then initialize the average frame, last 46 | # uploaded timestamp, and frame motion counter 47 | print("[INFO] warming up...") 48 | time.sleep(conf["camera_warmup_time"]) 49 | avg = None 50 | lastUploaded = datetime.datetime.now() 51 | motionCounter = 0 52 | 53 | # capture frames from the camera 54 | for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True): 55 | # 
grab the raw NumPy array representing the image and initialize 56 | # the timestamp and occupied/unoccupied text 57 | frame = f.array 58 | timestamp = datetime.datetime.now() 59 | text = "Unoccupied" 60 | 61 | # resize the frame 62 | frame = imutils.resize(frame, width=500) 63 | orig = frame.copy() 64 | 65 | # detect people in the image 66 | (rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4), 67 | padding=(8, 8), scale=1.05) 68 | 69 | # convert image to grayscale and blur it 70 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 71 | gray = cv2.GaussianBlur(gray, (21, 21), 0) 72 | 73 | # if the average frame is None, initialize it 74 | if avg is None: 75 | print("[INFO] starting background model...") 76 | avg = gray.copy().astype("float") 77 | rawCapture.truncate(0) 78 | continue 79 | 80 | # accumulate the weighted average between the current frame and 81 | # previous frames, then compute the difference between the current 82 | # frame and running average 83 | cv2.accumulateWeighted(gray, avg, 0.5) 84 | frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg)) 85 | 86 | # threshold the delta image, dilate the thresholded image to fill 87 | # in holes, then find contours on thresholded image 88 | thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255, 89 | cv2.THRESH_BINARY)[1] 90 | thresh = cv2.dilate(thresh, None, iterations=2) 91 | cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, 92 | cv2.CHAIN_APPROX_SIMPLE) 93 | cnts = cnts[0] if imutils.is_cv2() else cnts[1] 94 | 95 | # loop over the contours 96 | for c in cnts: 97 | # if the contour is too small, ignore it 98 | if cv2.contourArea(c) < conf["min_area"]: 99 | continue 100 | 101 | # draw the original bounding boxes 102 | for (x, y, w, h) in rects: 103 | cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2) 104 | 105 | # apply non-maxima suppression to the bounding boxes using a 106 | # fairly large overlap threshold to try to maintain overlapping 107 | # boxes that are still people 
108 | rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects]) 109 | pick = non_max_suppression(rects, probs=None, overlapThresh=0.65) 110 | 111 | # draw the final bounding boxes 112 | for (xA, yA, xB, yB) in pick: 113 | cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2) 114 | 115 | text = "Occupied" 116 | 117 | # draw the text and timestamp on the frame 118 | ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p") 119 | cv2.putText(frame, "Room Status: {}".format(text), (10, 20), 120 | cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) 121 | cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 122 | 0.35, (0, 0, 255), 1) 123 | 124 | # check to see if the room is occupied 125 | if text == "Occupied": 126 | 127 | #Activate pin 128 | GPIO.output(pin, GPIO.HIGH) 129 | 130 | # check to see if enough time has passed between uploads 131 | if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]: 132 | # increment the motion counter 133 | motionCounter += 1 134 | 135 | # check to see if the number of frames with consistent motion is 136 | # high enough 137 | if motionCounter >= conf["min_motion_frames"]: 138 | 139 | # update the last uploaded timestamp and reset the motion 140 | # counter 141 | lastUploaded = timestamp 142 | motionCounter = 0 143 | 144 | # otherwise, the room is not occupied 145 | else: 146 | motionCounter = 0 147 | 148 | #Deactivate Pin 149 | GPIO.output(pin, GPIO.LOW) 150 | 151 | 152 | # check to see if the frames should be displayed to screen 153 | if conf["show_video"]: 154 | # display the security feed 155 | cv2.imshow("Security Feed", frame) 156 | key = cv2.waitKey(1) & 0xFF 157 | 158 | # if the `q` key is pressed, break from the lop 159 | if key == ord("q"): 160 | break 161 | 162 | # clear the stream in preparation for the next frame 163 | rawCapture.truncate(0) 164 | 165 | GPIO.cleanup() 166 | 167 | -------------------------------------------------------------------------------- /pyimagesearch/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/OmalPerera/Human-detection-system-with-raspberry-Pi/2d31011793110de6dc7685609dcbe172fffca807/pyimagesearch/__init__.py -------------------------------------------------------------------------------- /pyimagesearch/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OmalPerera/Human-detection-system-with-raspberry-Pi/2d31011793110de6dc7685609dcbe172fffca807/pyimagesearch/__init__.pyc -------------------------------------------------------------------------------- /pyimagesearch/tempimage.py: -------------------------------------------------------------------------------- 1 | # import the necessary packages 2 | import uuid 3 | import os 4 | 5 | class TempImage: 6 | def __init__(self, basePath="./", ext=".jpg"): 7 | # construct the file path 8 | self.path = "{base_path}/{rand}{ext}".format(base_path=basePath, 9 | rand=str(uuid.uuid4()), ext=ext) 10 | 11 | def cleanup(self): 12 | # remove the file 13 | os.remove(self.path) -------------------------------------------------------------------------------- /pyimagesearch/tempimage.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OmalPerera/Human-detection-system-with-raspberry-Pi/2d31011793110de6dc7685609dcbe172fffca807/pyimagesearch/tempimage.pyc --------------------------------------------------------------------------------