├── Python
├── openWebCam.py
├── eyeDetection.py
├── faceDetection.py
├── faceEyeDetection.py
└── motionDetection.py
├── LICENSE
├── Jupyter Notebook
├── Open Camera.ipynb
├── eyeDetection.ipynb
├── FaceDetection.ipynb
├── faceEyeDetection.ipynb
└── MotionDetection.ipynb
└── README.md
/Python/openWebCam.py:
--------------------------------------------------------------------------------
"""Open the default webcam and stream frames until the user presses 'q'."""
import cv2

# Create video object (device 0 = the default camera)
video = cv2.VideoCapture(0)

while True:
    # Capture each frame of the video; ret is False when no frame is available
    ret, frame = video.read()
    if not ret:
        # Camera disconnected or failed to deliver a frame — stop cleanly
        # instead of crashing on a None frame below.
        break

    # Create the color's channels
    # grayscale_channel = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # BGR2GRAY, not BGR2RGB, gives grayscale
    colorfull_channel = frame

    # Open the cam
    # cv2.imshow('WebCam', grayscale_channel)  # grayscale video (256 levels)
    cv2.imshow('Webcam', colorfull_channel)  # colored video ([256, 256, 256] levels)

    # Press 'q' (quit) key to close the cam
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Turn off the cam and close all cv2's windows
video.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 CSFelix
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Python/eyeDetection.py:
--------------------------------------------------------------------------------
"""Detect eyes in the webcam stream, draw boxes around them and record the video."""
import cv2
import os

# Load and create the eye Haar-cascade classifier shipped with OpenCV
eyePath = os.path.join(os.path.dirname(cv2.__file__), "data", "haarcascade_eye.xml")
eyeCascade = cv2.CascadeClassifier(eyePath)

# 'video' reads from the camera, 'out' writes the annotated recording.
# An explicit fourcc is portable; -1 only pops up a codec dialog on Windows
# and fails silently elsewhere.
video = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('eyeDetection.mp4', fourcc, 20.0, (640, 480))

while True:
    # Grab a frame; stop cleanly when the camera yields nothing
    ret, frames = video.read()
    if not ret:
        break

    # Haar cascades work on grayscale input
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)

    # Eyes Detection: draw a cyan rectangle around every detected eye
    for (x, y, w, h) in eyeCascade.detectMultiScale(gray):
        cv2.rectangle(frames, (x, y), (x + w, y + h), (255, 255, 0), 2)

    # Save and Show the Video.  VideoWriter silently drops frames whose size
    # differs from the one declared at construction, so resize before writing.
    out.write(cv2.resize(frames, (640, 480)))
    cv2.imshow('WebCam', frames)

    # press 'q' to break
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# turn down the video and finish CV2 execution
video.release()
out.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/Python/faceDetection.py:
--------------------------------------------------------------------------------
"""Detect faces in the webcam stream, draw boxes around them and record the video."""
import cv2
import os

# Load and create the frontal-face Haar-cascade classifier shipped with OpenCV
facePath = os.path.join(os.path.dirname(cv2.__file__), "data", "haarcascade_frontalface_default.xml")
faceCascade = cv2.CascadeClassifier(facePath)

# 'video' reads from the camera, 'out' writes the annotated recording.
# An explicit fourcc is portable; -1 only pops up a codec dialog on Windows
# and fails silently elsewhere.
video = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('faceDetection.mp4', fourcc, 20.0, (640, 480))

while True:
    # Grab a frame; stop cleanly when the camera yields nothing
    ret, frames = video.read()
    if not ret:
        break

    # Haar cascades work on grayscale input
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)

    # Face Detection: draw a cyan rectangle around every detected face
    for (x, y, w, h) in faceCascade.detectMultiScale(gray):
        cv2.rectangle(frames, (x, y), (x + w, y + h), (255, 255, 0), 2)

    # Save and Show the Video.  VideoWriter silently drops frames whose size
    # differs from the one declared at construction, so resize before writing.
    out.write(cv2.resize(frames, (640, 480)))
    cv2.imshow('WebCam', frames)

    # press 'q' to break
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# turn down the video and finish CV2 execution
video.release()
out.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/Python/faceEyeDetection.py:
--------------------------------------------------------------------------------
"""Detect faces and eyes in the webcam stream, draw boxes and record the video."""
import cv2
import os

# Load and create faceCascade and eyeCascade classifiers shipped with OpenCV
facePath = os.path.join(os.path.dirname(cv2.__file__), "data", "haarcascade_frontalface_default.xml")
faceCascade = cv2.CascadeClassifier(facePath)

eyePath = os.path.join(os.path.dirname(cv2.__file__), "data", "haarcascade_eye.xml")
eyeCascade = cv2.CascadeClassifier(eyePath)

# An explicit fourcc is portable; -1 only pops up a codec dialog on Windows
# and fails silently elsewhere.
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('faceEyeDetection.mp4', fourcc, 20.0, (640, 480))
video = cv2.VideoCapture(0)

while True:
    # Grab a frame; stop cleanly when the camera yields nothing
    ret, frames = video.read()
    if not ret:
        break

    # Haar cascades work on grayscale input
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)

    # Face Detection (cyan rectangles)
    for (x, y, w, h) in faceCascade.detectMultiScale(gray):
        cv2.rectangle(frames, (x, y), (x + w, y + h), (255, 255, 0), 2)

    # Eye Detection (orange rectangles)
    for (x, y, w, h) in eyeCascade.detectMultiScale(gray):
        cv2.rectangle(frames, (x, y), (x + w, y + h), (0, 165, 255), 2)

    # Save and Show the Video.  VideoWriter silently drops frames whose size
    # differs from the one declared at construction, so resize before writing.
    out.write(cv2.resize(frames, (640, 480)))
    cv2.imshow('WebCam', frames)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video.release()
out.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/Jupyter Notebook/Open Camera.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
     7 |     "# How to Open Camera Using OpenCV"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import cv2"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": 2,
22 | "metadata": {},
23 | "outputs": [],
24 | "source": [
25 | "# Create video object\n",
26 | "video = cv2.VideoCapture(0)\n",
27 | "\n",
28 | "while True:\n",
29 | " try:\n",
30 | " # Capture each frame of the video\n",
31 | " ret, frame = video.read()\n",
32 | "\n",
33 | " # Create the color's channels\n",
34 | " #grayscale_channel = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
35 | " colorfull_channel = frame\n",
36 | "\n",
37 | " # Open the cam\n",
38 | " #cv2.imshow('WebCam', grayscale_channel) # grayscale video (256 levels)\n",
39 | " cv2.imshow('Webcam', colorfull_channel) # colored video ([256, 256, 256] levels)\n",
40 | "\n",
41 | " # Press 'q' (quit '-') key to close the cam\n",
42 | " if cv2.waitKey(1) & 0xFF == ord('q'): break\n",
43 | " \n",
44 | " except Exception as e:\n",
45 | " print(str(e))\n",
46 | " break\n",
47 | "\n",
48 | "# Turn off the cam and close all cv2's windows\n",
49 | "video.release()\n",
50 | "cv2.destroyAllWindows()"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {},
57 | "outputs": [],
58 | "source": []
59 | }
60 | ],
61 | "metadata": {
62 | "kernelspec": {
63 | "display_name": "Python 3",
64 | "language": "python",
65 | "name": "python3"
66 | },
67 | "language_info": {
68 | "codemirror_mode": {
69 | "name": "ipython",
70 | "version": 3
71 | },
72 | "file_extension": ".py",
73 | "mimetype": "text/x-python",
74 | "name": "python",
75 | "nbconvert_exporter": "python",
76 | "pygments_lexer": "ipython3",
77 | "version": "3.7.3"
78 | }
79 | },
80 | "nbformat": 4,
81 | "nbformat_minor": 2
82 | }
83 |
--------------------------------------------------------------------------------
/Jupyter Notebook/eyeDetection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
     7 |     "# Eye Detection"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 12,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import cv2\n",
17 | "import os"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 22,
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "# Load and create faceCascade Classifier\n",
27 | "eyePath = os.path.dirname(cv2.__file__)+\"/data/haarcascade_eye.xml\"\n",
28 | "eyeCascade = cv2.CascadeClassifier(eyePath)\n",
29 | "\n",
30 | "\n",
31 | "video = cv2.VideoCapture(0)\n",
32 | "out = cv2.VideoWriter('eyeDetection.mp4', -1, 20.0, (640,480))\n",
33 | "\n",
34 | "while True:\n",
35 | " try:\n",
36 | " ret, frames = video.read()\n",
37 | " gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)\n",
38 | " \n",
39 | " # Eye Detection\n",
40 | " eyes = eyeCascade.detectMultiScale(gray)\n",
41 | " for (x, y, w, h) in eyes: cv2.rectangle(frames, (x, y), (x + w, y + h), (0, 165, 255), 2)\n",
42 | " \n",
43 | " # Save and Show the Video\n",
44 | " out.write(frames)\n",
45 | " cv2.imshow('WebCam', frames)\n",
46 | " \n",
47 | " if cv2.waitKey(1) & 0xFF == ord('q'): break\n",
48 | " \n",
49 | " except Exception as e:\n",
50 | " print(str(e))\n",
51 | " break\n",
52 | "\n",
53 | "video.release()\n",
54 | "out.release()\n",
55 | "cv2.destroyAllWindows()"
56 | ]
57 | }
58 | ],
59 | "metadata": {
60 | "kernelspec": {
61 | "display_name": "Python 3",
62 | "language": "python",
63 | "name": "python3"
64 | },
65 | "language_info": {
66 | "codemirror_mode": {
67 | "name": "ipython",
68 | "version": 3
69 | },
70 | "file_extension": ".py",
71 | "mimetype": "text/x-python",
72 | "name": "python",
73 | "nbconvert_exporter": "python",
74 | "pygments_lexer": "ipython3",
75 | "version": "3.7.3"
76 | }
77 | },
78 | "nbformat": 4,
79 | "nbformat_minor": 2
80 | }
81 |
--------------------------------------------------------------------------------
/Jupyter Notebook/FaceDetection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
     7 |     "# Face Detection"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import cv2\n",
17 | "import os"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 4,
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "# Load and create faceCascade Classifier\n",
27 | "facePath = os.path.dirname(cv2.__file__)+\"/data/haarcascade_frontalface_default.xml\"\n",
28 | "faceCascade = cv2.CascadeClassifier(facePath)\n",
29 | "\n",
30 | "video = cv2.VideoCapture(0)\n",
31 | "out = cv2.VideoWriter('faceDetection.mp4', -1, 20.0, (640,480))\n",
32 | "\n",
33 | "while True:\n",
34 | " try:\n",
35 | " ret, frames = video.read()\n",
36 | " gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)\n",
37 | " \n",
38 | " # Face Detection\n",
39 | " faces = faceCascade.detectMultiScale(gray)\n",
40 | " for (x, y, w, h) in faces: cv2.rectangle(frames, (x, y), (x + w, y + h), (255, 255, 0), 2)\n",
41 | " \n",
42 | " # Save and Show the Video\n",
43 | " out.write(frames)\n",
44 | " cv2.imshow('WebCam', frames)\n",
45 | " \n",
46 | " if cv2.waitKey(1) & 0xFF == ord('q'): break\n",
47 | " \n",
48 | " except Exception as e:\n",
49 | " print(str(e))\n",
50 | " break\n",
51 | "\n",
52 | "video.release()\n",
53 | "out.release()\n",
54 | "cv2.destroyAllWindows()"
55 | ]
56 | }
57 | ],
58 | "metadata": {
59 | "kernelspec": {
60 | "display_name": "Python 3",
61 | "language": "python",
62 | "name": "python3"
63 | },
64 | "language_info": {
65 | "codemirror_mode": {
66 | "name": "ipython",
67 | "version": 3
68 | },
69 | "file_extension": ".py",
70 | "mimetype": "text/x-python",
71 | "name": "python",
72 | "nbconvert_exporter": "python",
73 | "pygments_lexer": "ipython3",
74 | "version": "3.7.3"
75 | }
76 | },
77 | "nbformat": 4,
78 | "nbformat_minor": 2
79 | }
80 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # 🌟 Face, Eye and Motion Detection 🌟
2 |
3 | ----
4 | ### `❓ What's it? ❓`
5 |
     6 | * Face, Eye and Motion Detection is a set of scripts that detect people's faces and eyes and objects' motions using Python and the OpenCV package.
7 |
8 | * All videos are saved in codes' directory after the program ends. To end any program, press 'q' key.
9 |
10 | * Try out, change the code, publish, add in projects, and much more!!
11 |
12 | ----
13 | ### `⚒️ Tools ⚒️`
14 |
15 | * Camera (notebook, cellphone, webcams...);
16 | * Python 3.7 version or later;
17 | * OpenCV and Pandas packages;
18 | * Jupyter Notebook 7.2 version or later.
19 |
20 | ----
21 | ### `⚙️ Run ⚙️`
22 |
23 | > **Installing Packages**
24 |
    25 | * Open the computer's terminal and type these commands:
26 |
27 | ```
28 | pip install pandas
29 | pip install opencv-python
30 | ```
31 |
32 | > **Running with Python**
33 |
34 | * Download the source code;
35 | * Open cmd and go to *Python* folder;
    36 | * Choose a code file;
    37 | * Type:
38 |
39 | ```
40 | python FILE_NAME.py
41 | ```
42 |
43 | > **Running with Jupyter Notebook**
44 |
45 | * Download the source code;
46 | * Open cmd and go to *Jupyter Notebook* folder;
    47 | * Type:
48 |
49 | ```
50 | jupyter notebook
51 | ```
52 |
    53 | * Choose a code and run it!!
54 |
55 | ----
56 | ### `📝 Notes 📝`
57 |
    58 | * **Eye Detection:** detects people's eyes and saves the video in mp4 format;
    59 | * **Face Detection:** detects people's faces and saves the video in mp4 format;
    60 | * **Face Eye Detection:** detects people's faces and eyes and saves the video in mp4 format;
61 | * **Motion Detection:** detect objects' motions and save the motions' logs in csv file;
62 | * **Open WebCam:** open webcam and save the video in mp4 format.
63 |
64 | ----
65 | ### `🎁 Bonus 🎁`
66 |
    67 | * If you show pictures on your cellphone/computer screen or a printed portrait, the AI will detect them too!!
68 |
    69 | * My next mission is to build **Recognition** on top of Detection using **MongoDB, Python, OpenCV, Java and HTML/CSS/JS**. Coming soon...
70 |
71 | ----
72 | ### `📫 Reach Me 📫`
73 |
74 | > **Email:** **[csfelix08@gmail.com](mailto:csfelix08@gmail.com?)**
75 |
76 | > **Linkedin:** **[linkedin.com/in/csfelix/](https://www.linkedin.com/in/csfelix/)**
77 |
78 | > **Instagram:** **[instagram.com/c0deplus/](https://www.instagram.com/c0deplus/)**
79 |
80 | ----
81 |
82 | > **Portfolio:** **[CSFelix.io](https://csfelix.github.io/)**
83 |
--------------------------------------------------------------------------------
/Jupyter Notebook/faceEyeDetection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
     7 |     "# Face and Eye Detection"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import cv2\n",
17 | "import os"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 4,
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "# Load and create faceCascade Classifier\n",
27 | "facePath = os.path.dirname(cv2.__file__)+\"/data/haarcascade_frontalface_default.xml\"\n",
28 | "faceCascade = cv2.CascadeClassifier(facePath)\n",
29 | "\n",
30 | "eyePath = os.path.dirname(cv2.__file__)+\"/data/haarcascade_eye.xml\"\n",
31 | "eyeCascade = cv2.CascadeClassifier(eyePath)\n",
32 | "\n",
33 | "out = cv2.VideoWriter('faceEyeDetection.mp4', -1, 20.0, (640,480))\n",
34 | "video = cv2.VideoCapture(0)\n",
35 | "\n",
36 | "while True:\n",
37 | " try:\n",
38 | " ret, frames = video.read()\n",
39 | " gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)\n",
40 | " \n",
41 | " # Face Detection\n",
42 | " faces = faceCascade.detectMultiScale(gray)\n",
43 | " for (x, y, w, h) in faces: cv2.rectangle(frames, (x, y), (x + w, y + h), (255, 255, 0), 2)\n",
44 | " \n",
45 | " # Eye Detection\n",
46 | " eyes = eyeCascade.detectMultiScale(gray)\n",
47 | " for (x, y, w, h) in eyes: cv2.rectangle(frames, (x, y), (x + w, y + h), (0, 165, 255), 2)\n",
48 | " \n",
49 | " # Save and Show the Video\n",
50 | " out.write(frames)\n",
51 | " cv2.imshow('WebCam', frames)\n",
52 | " \n",
53 | " if cv2.waitKey(1) & 0xFF == ord('q'): break\n",
54 | " \n",
55 | " except Exception as e:\n",
56 | " print(str(e))\n",
57 | " break\n",
58 | "\n",
59 | "video.release()\n",
60 | "out.release()\n",
61 | "cv2.destroyAllWindows()"
62 | ]
63 | }
64 | ],
65 | "metadata": {
66 | "kernelspec": {
67 | "display_name": "Python 3",
68 | "language": "python",
69 | "name": "python3"
70 | },
71 | "language_info": {
72 | "codemirror_mode": {
73 | "name": "ipython",
74 | "version": 3
75 | },
76 | "file_extension": ".py",
77 | "mimetype": "text/x-python",
78 | "name": "python",
79 | "nbconvert_exporter": "python",
80 | "pygments_lexer": "ipython3",
81 | "version": "3.7.3"
82 | }
83 | },
84 | "nbformat": 4,
85 | "nbformat_minor": 2
86 | }
87 |
--------------------------------------------------------------------------------
/Python/motionDetection.py:
--------------------------------------------------------------------------------
"""Detect motion from the webcam and log each motion's start/end time to a CSV file."""
import cv2
import pandas
from datetime import datetime

static_back = None            # reference background frame (grayscale, blurred)
motion_list = [None, None]    # last two motion flags, used to spot 0->1 / 1->0 transitions
time_list = []                # alternating start/end timestamps of each motion

video = cv2.VideoCapture(0)

while True:
    # Start the Video and reset the per-frame motion flag #
    ret, frames = video.read()
    if not ret:
        # Camera disconnected or failed to deliver a frame
        break
    motion = 0

    ######################################################################################################

    # Color Scales: grayscale + Gaussian blur reduces pixel noise before differencing #
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # This piece of code is executed just once at the start:
    # the very first frame becomes the static background reference
    if static_back is None:
        static_back = gray
        continue

    differencial_frame = cv2.absdiff(static_back, gray)
    threshold_frame = cv2.threshold(differencial_frame, 30, 255, cv2.THRESH_BINARY)[1]
    threshold_frame = cv2.dilate(threshold_frame, None, iterations=2)

    ######################################################################################################

    # Find motions and their contours.  [-2] picks the contour list in both
    # OpenCV 3 (3-tuple return) and OpenCV 4 (2-tuple return). #
    cnts = cv2.findContours(threshold_frame.copy(),
                            cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

    for contour in cnts:
        # Small contours are treated as noise, not motion
        if cv2.contourArea(contour) < 10000:
            continue
        motion = 1
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frames, (x, y), (x + w, y + h), (0, 255, 255), 3)

    ######################################################################################################

    # Keep only the last two motion flags and timestamp the transitions
    motion_list.append(motion)
    motion_list = motion_list[-2:]

    if motion_list[-1] == 1 and motion_list[-2] == 0:
        time_list.append(datetime.now())  # start motion's time
    if motion_list[-1] == 0 and motion_list[-2] == 1:
        time_list.append(datetime.now())  # end motion's time

    ######################################################################################################

    # Show the Video in four windows #
    cv2.imshow('GrayScale Frame', gray)
    cv2.imshow('Difference Frame', differencial_frame)
    cv2.imshow('Threshold Frame', threshold_frame)
    cv2.imshow('ColorFull Frame', frames)  # detected motions' rectangles are shown here

    ######################################################################################################

    # Closing the Windows: press 'q' key #
    if cv2.waitKey(1) & 0xFF == ord('q'):
        if motion == 1:
            time_list.append(datetime.now())  # close the motion still in progress
        break

# Pair up (start, end) timestamps and save them as a csv file.
# Building the DataFrame once avoids DataFrame.append (removed in pandas 2.0),
# and stopping at len - 1 guards against an unmatched trailing start time.
records = [{"Start Motion": time_list[i], "End Motion": time_list[i + 1]}
           for i in range(0, len(time_list) - 1, 2)]
df = pandas.DataFrame(records, columns=['Start Motion', 'End Motion'])
df.to_csv("Time_of_Motions.csv")

# Turn off cam and close cv2's windows
video.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/Jupyter Notebook/MotionDetection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
     7 |     "# Motion Detection"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 4,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import cv2\n",
17 | "import time\n",
18 | "import pandas\n",
19 | "from datetime import datetime"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 5,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "static_back = None # static back to color\n",
29 | "motion_list = [None, None]\n",
30 | "time_list = []\n",
31 | "df = df = pandas.DataFrame(columns = ['Start Motion', 'End Motion']) # this dataframe will turn into a csv file later"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 6,
37 | "metadata": {
38 | "scrolled": true
39 | },
40 | "outputs": [],
41 | "source": [
42 | "video = cv2.VideoCapture(0)\n",
43 | "\n",
44 | "while True:\n",
45 | " # Start the Video and motion's counter #\n",
46 | " ret, frames = video.read()\n",
47 | " motion = 0\n",
48 | " \n",
49 | " ######################################################################################################\n",
50 | " \n",
51 | " # Color Scales #\n",
52 | " gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)\n",
53 | " gray = cv2.GaussianBlur(gray, (21, 21), 0)\n",
54 | " \n",
55 | " # This peace of code is executed just once at the start\n",
56 | " # where the static back is assign with gray scale color\n",
57 | " if static_back is None: \n",
58 | " static_back = gray\n",
59 | " continue\n",
60 | " \n",
61 | " differencial_frame = cv2.absdiff(static_back, gray)\n",
62 | " threshold_frame = cv2.threshold(differencial_frame, 30, 255, cv2.THRESH_BINARY)[1]\n",
63 | " threshold_frame = cv2.dilate(threshold_frame, None, iterations = 2)\n",
64 | " \n",
65 | " ######################################################################################################\n",
66 | " \n",
67 | " # Find motions and its contours #\n",
68 | " cnts, _ = cv2.findContours(threshold_frame.copy(),\n",
69 | " cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
70 | " \n",
71 | " for contour in cnts:\n",
72 | " if cv2.contourArea(contour) < 10000: continue\n",
73 | " else: \n",
74 | " motion = 1\n",
75 | " \n",
76 | " (x, y, w, h) = cv2.boundingRect(contour)\n",
77 | " cv2.rectangle(frames, (x, y), (x + w, y + h), (0, 255, 255), 3)\n",
78 | " \n",
79 | " ######################################################################################################\n",
80 | " \n",
81 | " # Append list of motions for each new motion detected\n",
82 | " # and set the start\\end time\n",
83 | " motion_list.append(motion)\n",
84 | " motion_list = motion_list[-2:]\n",
85 | " \n",
86 | " if motion_list[-1] == 1 and motion_list[-2] == 0: time_list.append(datetime.now()) # start motion's time\n",
87 | " if motion_list[-1] == 0 and motion_list[-2] == 1: time_list.append(datetime.now()) # end motion's time\n",
88 | " \n",
89 | " ######################################################################################################\n",
90 | " \n",
91 | " # Show the Video in four windows #\n",
92 | " cv2.imshow('GrayScale Frame', gray)\n",
93 | " cv2.imshow('Difference Frame', differencial_frame)\n",
94 | " cv2.imshow('Threshold Frame', threshold_frame)\n",
95 | " cv2.imshow('ColorFull Frame', frames) # detected motions' rectangles are shown here\n",
96 | " \n",
97 | " ######################################################################################################\n",
98 | " \n",
99 | " # Closing the Windows: press 'q' key #\n",
100 | " if cv2.waitKey(1) & 0xFF == ord('q'): \n",
101 | " if motion == 1: time_list.append(datetime.now()) # catch the last movement\n",
102 | " break\n",
103 | " \n",
104 | "# Put list of motions into the dataframe\n",
105 | "# and save it as csv file\n",
106 | "for i in range(0, len(time_list), 2):\n",
107 | " df = df.append({\"Start Motion\": time_list[i], \"End Motion\": time_list[i + 1]}, ignore_index = True)\n",
108 | " \n",
109 | "df.to_csv(\"Time_of_Motions.csv\") \n",
110 | "\n",
111 | "# Turn of cam and Close cv2's windows\n",
112 | "video.release()\n",
113 | "cv2.destroyAllWindows()"
114 | ]
115 | }
116 | ],
117 | "metadata": {
118 | "kernelspec": {
119 | "display_name": "Python 3",
120 | "language": "python",
121 | "name": "python3"
122 | },
123 | "language_info": {
124 | "codemirror_mode": {
125 | "name": "ipython",
126 | "version": 3
127 | },
128 | "file_extension": ".py",
129 | "mimetype": "text/x-python",
130 | "name": "python",
131 | "nbconvert_exporter": "python",
132 | "pygments_lexer": "ipython3",
133 | "version": "3.7.3"
134 | }
135 | },
136 | "nbformat": 4,
137 | "nbformat_minor": 2
138 | }
139 |
--------------------------------------------------------------------------------