├── LICENSE.md ├── README.md ├── docs └── OpenCV │ └── Python │ ├── BuildingOpenCV_Python.md │ └── InstallOpenCV_Python.md └── tutorials └── opencv └── Python ├── README.md ├── sample_01_info ├── .ipynb_checkpoints │ └── ocv_info-checkpoint.ipynb ├── README.md ├── ocv_info.ipynb └── ocv_info.py ├── sample_02_build_info ├── .ipynb_checkpoints │ └── ocv_build_info-checkpoint.ipynb ├── README.md ├── ocv_build_info.ipynb └── ocv_build_info.py ├── sample_03_image_test ├── .ipynb_checkpoints │ └── ocv_image_test-checkpoint.ipynb ├── README.md ├── intel-hq.jpg ├── matplotlib_converted.png ├── matplotlib_unconverted.png ├── ocv_image_test.ipynb └── ocv_image_test.py ├── sample_04_video_test ├── .ipynb_checkpoints │ └── ocv_video_test-checkpoint.ipynb ├── README.md ├── ocv_video_test.ipynb └── ocv_video_test.py ├── sample_05_opencl ├── .ipynb_checkpoints │ └── ocv_ocl_info-checkpoint.ipynb ├── README.md ├── ocv_ocl_info.ipynb └── ocv_ocl_info.py ├── sample_06_video_capture ├── .ipynb_checkpoints │ └── ocv_vid_cap-checkpoint.ipynb ├── README.md ├── ocv_vid_cap.ipynb └── ocv_vid_cap.py ├── sample_07_hw_info ├── .ipynb_checkpoints │ └── ocv_hw_info-checkpoint.ipynb ├── README.md ├── ocv_hw_info.ipynb └── ocv_hw_info.py ├── sample_08_DOG_image ├── .ipynb_checkpoints │ └── ocv_dog_img-checkpoint.ipynb ├── README.md ├── dog_fg.png ├── dog_gray.png ├── inv_mask.png ├── ocv_dog_img.ipynb └── ocv_dog_img.py ├── sample_09_DOG_video ├── .ipynb_checkpoints │ └── ocv_dog_vid-checkpoint.ipynb ├── README.md ├── ocv_dog_vid.ipynb └── ocv_dog_vid.py ├── sample_10_face_eye_detection_still ├── .ipynb_checkpoints │ └── ocv_face_img-checkpoint.ipynb ├── Intel_Board_of_Directors.jpg ├── README.md ├── bmw-group-intel-mobileye-3.jpg ├── brian-krzanich_2.jpg ├── haarcascade_eye.xml ├── haarcascade_frontalface_default.xml ├── intel-and-nervana.jpg ├── ocv_face_img.ipynb └── ocv_face_img.py ├── sample_11_real-time_face_detection_and_tracking ├── .ipynb_checkpoints │ └── ocv_face_vid-checkpoint.ipynb 
├── README.md ├── ocv_face_vid.ipynb └── ocv_face_vid.py └── sample_12_real-time_people_counter ├── .ipynb_checkpoints └── ocv_face_cnt_vid-checkpoint.ipynb ├── README.md ├── ocv_face_cnt_vid.ipynb └── ocv_face_cnt_vid.py /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 V. Rao Research Associates 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DISCONTINUATION OF PROJECT # 2 | This project will no longer be maintained by Intel. 3 | Intel has ceased development and contributions including, but not limited to, maintenance, bug fixes, new releases, or updates, to this project. 4 | Intel no longer accepts patches to this project. 
5 | # Video Analytics Code Samples using OpenCV 6 | 7 | This repository contains Python\* code samples which use the Open Source Computer Vision (OpenCV) Library. These code samples are a good starting point for developers, across a wide range of markets, who wish to develop more robust computer vision and analytic solutions. 8 | 9 | The code samples are mainly in two categories: Diagnostics and Application 10 | 11 | Diagnostic Samples: 12 | 13 | Sample 01 - Version information and environment variables 14 | Sample 02 - OpenCV build information 15 | Sample 03 - Basic image test - overlay text 16 | Sample 04 - Basic video test - stream and overlay text 17 | Sample 05 - Checks for OpenCL™ availability 18 | Sample 07 - Checks for hardware extension support 19 | 20 | Application Samples: 21 | 22 | Sample 06 - Video stream and capture image 23 | Sample 08 - Watermarking still image 24 | Sample 09 - Watermarking display stream 25 | Sample 10 - Still image face and eye detection 26 | Sample 11 - Real-time video face detection and tracking 27 | Sample 12 - Real-time people counter 28 | 29 | The twelve computer vision code samples in this repository have been optimized using Intel® Integrated Performance Primitives (Intel® IPP) and the Intel® Math Kernel Library (Intel® MKL). By following these code samples with the optimized libraries, you will be able to see performance improvements over the basic installation of OpenCV. 
30 | 31 | ## Supported Intel® hardware platforms 32 | 33 | **Tested:** 34 | 35 | Intel® NUC NUC6i7KYK with Intel® Core™ i7 (codename Skylake) 36 | 37 | ## Development Environments 38 | 39 | **C++** 40 | 41 | Microsoft Windows® 10 + Microsoft Visual Studio* 2015 42 | 43 | **Python\*** 44 | 45 | Ubuntu* 16.04.2 + Anaconda + Intel® Distribution for Python* 46 | Microsoft Windows 10 + Anaconda + Intel® Distribution for Python* 47 | 48 | ## Prerequisites 49 | 50 | + [Latest Integrated Windows 10 Drivers](https://downloadcenter.intel.com/) 51 | + [Intel® SDK for OpenCL™ applications for Windows\* (\*Registration Required)](https://software.intel.com/en-us/intel-opencl) 52 | + [Intel® Media SDK (\*Registration Required)](https://software.intel.com/en-us/media-sdk) 53 | + [OpenCV v3.2.0](http://opencv.org/releases.html) 54 | + [Intel® Distribution for Python\* (\*Registration Required)](https://software.seek.intel.com/python-distribution) 55 | + [Intel® Integrated Performance Primitives (\*Registration Required)](https://software.intel.com/en-us/intel-ipp) 56 | + [Intel® Math Kernel Libraries (\*Registration Required)](https://registrationcenter.intel.com/en/forms/?productid=2558&licensetype=2) 57 | + [Intel® Threading Building Blocks](https://www.threadingbuildingblocks.org/) 58 | 59 | ## Resources 60 | 61 | **Drivers:** 62 | 63 | + [Manually Install an Intel® Graphics Driver in Windows® 10 and Windows 8\*/8.1\*](http://www.intel.com/content/www/us/en/support/graphics-drivers/000005629.html) 64 | 65 | **Intel® SDK for OpenCL™ Applications:** 66 | 67 | + [Intel® SDK for OpenCL™ Applications](https://software.intel.com/en-us/intel-opencl) 68 | + [Getting Started with Intel® SDK for OpenCL™ Applications](https://software.intel.com/en-us/articles/getting-started-with-opencl-code-builder) 69 | + [Developer Guide for Intel® SDK for OpenCL™ Applications](https://software.intel.com/en-us/code-builder-user-manual) 70 | 71 | 72 | **Intel® Media SDK:** 73 | + [Intel® Media 
SDK](https://software.intel.com/en-us/media-sdk/download) 74 | + [Intel® Media SDK Getting Started Guide](https://software.intel.com/en-us/articles/intel-media-client-getting-started-guide) 75 | + [Intel® Media SDK Developer’s Guide](https://software.intel.com/sites/default/files/managed/09/02/Intel_Media_Developers_Guide.pdf) 76 | 77 | **OpenCV:** 78 | 79 | > _**Important:**_ OpenCV v3.2.0 release can use vendor-provided OpenVX\* and LAPACK/BLAS including Intel® Math Kernel Libraries for acceleration. Do not refer to outdated Intel® INDE documentation but directly refer to OpenCV documentation within the OpenCV v3.2.0 release. 80 | 81 | + [Pre-Built OpenCV v3.2.0 for Windows](https://sourceforge.net/projects/opencvlibrary/files/opencv-win/3.2.0/opencv-3.2.0-vc14.exe/download) 82 | + [OpenCV v3.2.0 Source Code](https://github.com/opencv/opencv/archive/3.2.0.zip) 83 | + [OpenCV v3.2.0 Documentation](http://docs.opencv.org/3.2.0/) 84 | -------------------------------------------------------------------------------- /docs/OpenCV/Python/BuildingOpenCV_Python.md: -------------------------------------------------------------------------------- 1 | # Under Construction! 2 | ##################### 3 | # Building OpenCV for Python 4 | 5 | ## On Windows 6 | 7 | OpenCV ships with pre-built Windows binaries. Refer to InstallOpenCV_Python document. 8 | 9 | ## On Ubuntu Linux 10 | ### Using Ubuntu 16.04.2 is highly recommended due to updated drivers 11 | 12 | ### Steps to Build OpenCV 13 | 14 | 1. Update apt package lists from repositories 15 | 16 | ``` 17 | sudo apt-get update 18 | ``` 19 | 20 | 2. Upgrade all packages 21 | 22 | ``` 23 | sudo apt-get upgrade 24 | ``` 25 | 26 | 3. Install Tools such as cmake; cmake-gui; build-essential; pkg-config and git 27 | 28 | ``` 29 | sudo apt-get install build-essential cmake cmake-gui pkg-config git 30 | ``` 31 | 32 | 4. 
Install Intel Performance Libraries 33 | 34 | 35 | (https://software.intel.com/en-us/articles/using-intel-distribution-for-python-with-anaconda) 36 | 37 | (conda install mkl-devel) 38 | -------------------------------------------------------------------------------- /docs/OpenCV/Python/InstallOpenCV_Python.md: -------------------------------------------------------------------------------- 1 | # Installing OpenCV for Python 2 | 3 | ## On Windows 4 | 5 | ### Using Windows 10 is highly recommended due to updated drivers 6 | Recommended Build: Windows 10 Anniversary Edition or Creators Update. 7 | 8 | ### Install Anaconda for Windows 9 | 10 | Download and install Anaconda for Windows by following this installation document (https://docs.continuum.io/anaconda/install-windows) 11 | 12 | It is recommended that you install Python 2.7.x as pre-built OpenCV 3.2.0 ships with 2.7.x compatible packages. Either 32-bit or 64-bit versions are OK depending on your OS and hardware. 13 | 14 | ### Install Intel® Python Distribution using Anaconda 15 | 16 | Install the Intel® Python Distribution and Intel® Performance libraries in a virtual environment by following this installation document (https://software.intel.com/en-us/articles/using-intel-distribution-for-python-with-anaconda) 17 | 18 | It is recommended that you install the full distribution ***intelpython2_full*** for Python 2. 19 | 20 | #### Verify that Numpy and Matplotlib are installed 21 | 22 | OpenCV uses Numpy arrays to manipulate image and video data and is a mandatory requirement. Matplotlib is used in a number of tutorials found in the OpenCV package. Installing the full distribution of Intel® Python would install optimized versions of both packages. 23 | 24 | 1. Numpy Version 25 | 26 | ``` 27 | (idp) $/>python -c "import numpy; print (numpy.__version__)" 28 | 1.12.1 29 | ``` 30 | 31 | 2. 
Matplotlib 32 | 33 | ``` 34 | (idp) $/>python -c "import matplotlib; print (matplotlib.__version__)" 35 | 2.0.0 36 | ``` 37 | ### OpenCV Installation Steps using pre-built library 38 | 39 | 1. Download the pre-built OpenCV release for Windows from the OpenCV repository on Github. Ensure that you get Release tag 3.2.0 or higher from 40 | (https://github.com/opencv/opencv/releases/latest) 41 | 42 | For example, download **_opencv-3.2.0-vc14.exe_** for Release 3.2.0 43 | 44 | 2. Extract the archive to a short path such as C:\OpenCV 45 | 46 | 3. Go to opencv/build/python/2.7 folder and then into x64 or x86 depending on your Windows installation. 47 | 48 | 5. Copy **_cv2.pyd_** to the **_site-packages_** folder inside your Intel Python Distribution environment. 49 | 50 | 6. Add a System Environment variable **OPENCV_DIR** that points to the location of the OpenCV folder in Step 2. For example, it should point to **C:\OpenCV** if you followed the recommendation above. 51 | 52 | The procedure to add a System Environment Variable can be found in this document. (https://msdn.microsoft.com/en-us/library/bb726962.aspx) 53 | 54 | 7. Add a System Environment variable **OPENCV_VER** with the version number of the installed OpenCV without the decimal point separators. 55 | 56 | For example, the value of **OPENCV_VER** for OpenCV 3.2.0 would be **320**. 57 | 58 | 8. Add a System Environment variable **FFMPEG_BIN** that points to the location of the FFMPEG dlls included in OpenCV appropriate for your architecture. For both 32-bit and 64-bit it should point to **C:\OpenCV\opencv\build\bin**. This allows the use of FFMPEG to encode and decode media for OpenCV. 59 | 60 | ## On Ubuntu Linux 61 | ### Using Ubuntu 16.04.2 is highly recommended due to updated drivers 62 | 63 | OpenCV is not available pre-built for Linux distributions. You will need to build OpenCV on Ubuntu and then install the Python bindings. Please refer to Building OpenCV for Python. 
(TBD) 64 | 65 | ## OpenCV Verification 66 | 67 | ``` 68 | (idp) $/>python -c "import cv2; print (cv2.__version__)" 69 | 3.2.0 70 | ``` 71 | This indicates that OpenCV 3.2.0 is working properly with Python. 72 | 73 | ### Sanity Tests 74 | 75 | Included in the code samples are instructions to run some basic sanity tests to ensure that your environment is properly functioning. 76 | 77 | 1. [Build Information - Sample_01](https://github.com/vraoresearch/Intel-Digital-Signage-Reference/blob/master/tutorials/opencv/Python/sample_01/ocv_info.py) 78 | 2. [Image Test - Sample_03](https://github.com/vraoresearch/Intel-Digital-Signage-Reference/blob/master/tutorials/opencv/Python/sample_03/ocv_image_test.py) 79 | 3. [Video Capture Test - Sample_04](https://github.com/vraoresearch/Intel-Digital-Signage-Reference/blob/master/tutorials/opencv/Python/sample_04/ocv_video_test.py) 80 | 81 | ### OpenCV Information 82 | 83 | To learn more about the compiler flags used to build OpenCV: 84 | 85 | [Build Information - Sample_02](https://github.com/vraoresearch/Intel-Digital-Signage-Reference/blob/master/tutorials/opencv/Python/sample_02/ocv_build_info.py) 86 | 87 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/README.md: -------------------------------------------------------------------------------- 1 | # Python Code Samples for Video Analytics with OpenCV 2 | 3 | Modern retail settings incorporate computer vision technologies for video analytics as well as attribution purposes. These tutorial samples are intended for developers of computer vision and analytics and use the widely used Open-Source Computer Vision (OpenCV) library for computer vision. 
4 | 5 | To learn more about the use of Computer Vision and Video Analytics refer to the following article: 6 | 7 | [Introduction to Developing and Optimizing Display Technology](https://software.intel.com/en-us/articles/introduction-to-developing-and-optimizing-display-technology) 8 | 9 | To learn how to install OpenCV for these tutorial samples, refer to the following document: 10 | 11 | [Installing OpenCV for Python\*](../../../docs/OpenCV/Python/InstallOpenCV_Python.md) 12 | 13 | ## Code Samples 14 | The code samples are mainly in two categories: Diagnostics and Application 15 | 16 | Diagnostic Samples: 17 | 18 | Sample 01 - Version information and environment variables 19 | Sample 02 - OpenCV build information 20 | Sample 03 - Basic image test - overlay text 21 | Sample 04 - Basic video test - stream and overlay text 22 | Sample 05 - Checks for OpenCL™ availability 23 | Sample 07 - Checks for hardware extension support 24 | 25 | Application Samples: 26 | 27 | Sample 06 - Video stream and capture image 28 | Sample 08 - Watermarking still image 29 | Sample 09 - Watermarking display stream 30 | Sample 10 - Still image face and eye detection 31 | Sample 11 - Real-time video face detection and tracking 32 | Sample 12 - Real-time people counter 33 | 34 | ### Usage 35 | All the code samples have an individual README on how to use the sample as well as an Interactive Jupyter Notebook\* based tutorial. In addition, the code samples are extensively commented. 36 | 37 | ### OpenCV Information 38 | 39 | [Sample 01](sample_01_info/ocv_info.py) is a simple diagnostic program that queries the development environment and ensures that all the prerequisites have been met and displays the version information. It also checks to see if Environment variables have been set and displays the path for diagnostics if necessary. 
40 | 41 | ### OpenCV Build Information 42 | 43 | [Sample 02](sample_02_build_info/ocv_build_info.py) is a simple diagnostic program that displays the detailed OpenCV build information. 44 | 45 | ### OpenCV Image Test 46 | 47 | [Sample 03](sample_03_image_test/ocv_image_test.py) is a sanity test that uses OpenCV to display an Image file. This test serves to ensure that OpenCV installation is working and validates the development environment. It also shows how to overlay text on an image. 48 | 49 | ### OpenCV Video Test 50 | 51 | [Sample 04](sample_04_video_test/ocv_video_test.py) is a sanity test that uses OpenCV to connect to a WebCam and display the video stream. This test serves to ensure that OpenCV WebCam installation is working and further validates the development environment. It also shows how to overlay text on video streams. 52 | 53 | ### OpenCV with OpenCL™ 54 | 55 | [Sample 05](sample_05_opencl/ocv_ocl_info.py) is a simple diagnostic program that determines whether OpenCL™ is available for use within OpenCL, Enables OpenCL, checks whether it has been enabled and then disables it. The program then checks if OpenCL has been disabled and exits. 56 | 57 | > _**Note:**_ OpenCV v3.2.0 pre-built binary release can use OpenCL if it has been properly installed. The Intel optimized OpenCL drivers are installed as part of the integrated Intel® Graphics Driver. OpenCL is currently supported better in C++ rather than on Python where you can only display status and enable or disable use of an OpenCL resource. 58 | 59 | ### OpenCV Video Capture 60 | 61 | [Sample 06](sample_06_video_capture/ocv_vid_cap.py) is a simple program that uses OpenCV to connect to a WebCam in order to capture and save an image. This example is the basic first step for most video analytics programs. The video output of the WebCam is displayed and when the user inputs a keystroke, the frame is captured and written to an image file. 
62 | 63 | ### OpenCV Hardware Info 64 | 65 | [Sample 07](sample_07_hw_info/ocv_hw_info.py) is a simple diagnostic program that determines how many logical CPU's are available and then queries the hardware to check whether MMX™ technology and Intel® Streaming SIMD Extensions (Intel® SSE), Intel® Advanced Vector Extensions etc. are supported . 66 | 67 | > _**Note:**_ The OpenCV function cv2.checkHardwareSupport(feature) returns true if the host hardware supports the specified feature. When users call setUseOptimized(False), all the subsequent calls to cv2.checkHardwareSupport() will return false until cv2.setUseOptimized(True) is called. This way users can dynamically switch on and off the optimized code in OpenCV. 68 | 69 | ### OpenCV DOG Image 70 | 71 | [Sample 08](sample_08_DOG_image/ocv_dog_img.py) is a program that overlays a **Digital On-Screen Graphic (DOG)** onto a still image. DOG is a form of digital watermarking routinely used on broadcast TV to show the TV channel logo. 72 | 73 | ### OpenCV DOG Video 74 | 75 | [Sample 09](sample_09_DOG_video/ocv_dog_vid.py) is a program that overlays a **Digital On-Screen Graphic (DOG)** on the display stream. This program uses the same principles as used for the previous example. 76 | 77 | ### OpenCV Face and Eyes Detection - Still Image 78 | 79 | [Sample 10](sample_10_face_eye_detection_still/ocv_face_img.py) is a basic Face and Eye Detection program that uses OpenCV to analyze an image and detect human faces and eyes. The detected areas or Regions of Interest (ROI) are demarcated with rectangles. The program uses the OpenCV built-in pre-trained Haar feature-based cascade classifiers in order to perform this task. 80 | 81 | ### OpenCV Real-Time Video Face Detection and Tracking 82 | 83 | [Sample 11](sample_11_real-time_face_detection_and_tracking/ocv_face_vid.py) is a basic Face and Eye Detection program that uses OpenCV to analyze real-time video and detect human faces and eyes. 
The detected areas or Regions of Interest (ROI) are demarcated with rectangles. The program uses the OpenCV built-in pre-trained Haar feature-based cascade classifiers in order to perform this task. 84 | 85 | ### OpenCV Real-Time Video People Counter using Face Detection 86 | 87 | [Sample 12](sample_12_real-time_people_counter/ocv_face_cnt_vid.py) is a basic People Counter using the previous Face and Eye Detection program that uses OpenCV to analyze real-time video and detect human faces and eyes. In addition to detecting Faces and Eyes, the program also returns the number of faces detected to the console. 88 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_01_info/.ipynb_checkpoints/ocv_info-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample_01: ocv_info\n", 8 | "\n", 9 | "[Sample 01](ocv_info.py) is a simple diagnostic program that queries the development environment and ensures that all the prerequisites have been met and displays the version information. It also checks to see if Environment variables have been set and displays the path for diagnostics if necessary.\n", 10 | "\n", 11 | ">*Note:* If you are using pre-built OpenCV, you need to use Python 2.7.x to run the samples. If you have built OpenCV from source, then you can use either Python 3.x or Python 2.x.\n", 12 | "\n", 13 | "First the Standard Python Shebang" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 1, 19 | "metadata": { 20 | "collapsed": true 21 | }, 22 | "outputs": [], 23 | "source": [ 24 | "#!/usr/bin/env python2" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "Next import print_function for Python 2/3 comptibility. 
This allows use of print like a function in Python 2.x" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 2, 37 | "metadata": { 38 | "collapsed": true 39 | }, 40 | "outputs": [], 41 | "source": [ 42 | "from __future__ import print_function" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": {}, 48 | "source": [ 49 | "OpenCV uses Numpy arrays to manipulate image and video data and is a mandatory requirement. So import numpy module first." 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 3, 55 | "metadata": { 56 | "collapsed": true 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "import numpy as np" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "Now print the version. In the script this is done at the end in the Program Outputs block" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": 4, 73 | "metadata": { 74 | "collapsed": false 75 | }, 76 | "outputs": [ 77 | { 78 | "name": "stdout", 79 | "output_type": "stream", 80 | "text": [ 81 | "Numpy Version: 1.12.1\n" 82 | ] 83 | } 84 | ], 85 | "source": [ 86 | "print('Numpy Version:', np.__version__)" 87 | ] 88 | }, 89 | { 90 | "cell_type": "markdown", 91 | "metadata": {}, 92 | "source": [ 93 | "Next import the OpenCV python module" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 5, 99 | "metadata": { 100 | "collapsed": false 101 | }, 102 | "outputs": [], 103 | "source": [ 104 | "import cv2" 105 | ] 106 | }, 107 | { 108 | "cell_type": "markdown", 109 | "metadata": {}, 110 | "source": [ 111 | "Now print the version. 
In the script this is done at the end in the Program Outputs block" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": 6, 117 | "metadata": { 118 | "collapsed": false 119 | }, 120 | "outputs": [ 121 | { 122 | "name": "stdout", 123 | "output_type": "stream", 124 | "text": [ 125 | "OpenCV Version: 3.2.0\n" 126 | ] 127 | } 128 | ], 129 | "source": [ 130 | "print('OpenCV Version:', cv2.__version__)" 131 | ] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "Import the other required python modules for this script. Matplotlib is used in a number of tutorials found in the OpenCV package and OS and sys are needed to test whether the OpenCV environment variables are properly setup." 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": 7, 143 | "metadata": { 144 | "collapsed": true 145 | }, 146 | "outputs": [], 147 | "source": [ 148 | "import matplotlib as mpl\n", 149 | "import os\n", 150 | "import sys" 151 | ] 152 | }, 153 | { 154 | "cell_type": "markdown", 155 | "metadata": {}, 156 | "source": [ 157 | "Now print the versions here. In the script this is done at the end in the Program Outputs block" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": 8, 163 | "metadata": { 164 | "collapsed": false 165 | }, 166 | "outputs": [ 167 | { 168 | "name": "stdout", 169 | "output_type": "stream", 170 | "text": [ 171 | "Matplotlib Version: 2.0.1\n", 172 | "2.7.13 |Anaconda custom (32-bit)| (default, Dec 19 2016, 13:36:02) [MSC v.1500 32 bit (Intel)]\n" 173 | ] 174 | } 175 | ], 176 | "source": [ 177 | "print('Matplotlib Version:', mpl.__version__)\n", 178 | "print(sys.version)" 179 | ] 180 | }, 181 | { 182 | "cell_type": "markdown", 183 | "metadata": {}, 184 | "source": [ 185 | "Now we check to see if the OpenCV environment variables have been properly set. We need to do this in a safe way to prevent the script from crashing in case no variable has been set. 
So use standard python exception handling..." 186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": 9, 191 | "metadata": { 192 | "collapsed": true 193 | }, 194 | "outputs": [], 195 | "source": [ 196 | "try:\n", 197 | " pyth_path = os.environ['PYTHONPATH'].split(os.pathsep)\n", 198 | "except KeyError:\n", 199 | " pyth_path = []" 200 | ] 201 | }, 202 | { 203 | "cell_type": "markdown", 204 | "metadata": {}, 205 | "source": [ 206 | "Now print the environment variable. In the script this is done at the end in the Program Outputs block." 207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": 10, 212 | "metadata": { 213 | "collapsed": false 214 | }, 215 | "outputs": [ 216 | { 217 | "name": "stdout", 218 | "output_type": "stream", 219 | "text": [ 220 | "Python Environment Variable - PYTHONPATH: ['C:\\\\Users\\\\vinay\\\\Anaconda2\\\\Library\\\\bin']\n" 221 | ] 222 | } 223 | ], 224 | "source": [ 225 | "print('Python Environment Variable - PYTHONPATH:', pyth_path)" 226 | ] 227 | }, 228 | { 229 | "cell_type": "markdown", 230 | "metadata": {}, 231 | "source": [ 232 | "Next check the OpenCV environemnt variables" 233 | ] 234 | }, 235 | { 236 | "cell_type": "code", 237 | "execution_count": 18, 238 | "metadata": { 239 | "collapsed": false 240 | }, 241 | "outputs": [], 242 | "source": [ 243 | "try:\n", 244 | " ocv2_path = os.environ['OPENCV_DIR']\n", 245 | "except KeyError:\n", 246 | " ocv2_path = []\n", 247 | " \n", 248 | "try:\n", 249 | " ocv2_vers = os.environ['OPENCV_VER']\n", 250 | "except KeyError:\n", 251 | " ocv2_path = []" 252 | ] 253 | }, 254 | { 255 | "cell_type": "markdown", 256 | "metadata": {}, 257 | "source": [ 258 | "Now print the environment variable. 
In the script this is done at the end in the Program Outputs block" 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "execution_count": 19, 264 | "metadata": { 265 | "collapsed": false 266 | }, 267 | "outputs": [ 268 | { 269 | "name": "stdout", 270 | "output_type": "stream", 271 | "text": [ 272 | "OpenCV Environment Variable - OPENCV_DIR: C:\\opencv_pre\\\n", 273 | "OpenCV Environment Variable - OPENCV_VER: 320\n" 274 | ] 275 | } 276 | ], 277 | "source": [ 278 | "print('OpenCV Environment Variable - OPENCV_DIR:', ocv2_path)\n", 279 | "print('OpenCV Environment Variable - OPENCV_VER:', ocv2_vers)" 280 | ] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | "metadata": {}, 285 | "source": [ 286 | "Finally check the FFMPEG environment variable" 287 | ] 288 | }, 289 | { 290 | "cell_type": "code", 291 | "execution_count": 20, 292 | "metadata": { 293 | "collapsed": true 294 | }, 295 | "outputs": [], 296 | "source": [ 297 | "try:\n", 298 | " ffmp_path = os.environ['FFMPEG_BIN']\n", 299 | "except KeyError:\n", 300 | " ffmp_path = []" 301 | ] 302 | }, 303 | { 304 | "cell_type": "markdown", 305 | "metadata": {}, 306 | "source": [ 307 | "Now print the environment variable. In the script this is done at the end in the Program Outputs block" 308 | ] 309 | }, 310 | { 311 | "cell_type": "code", 312 | "execution_count": 21, 313 | "metadata": { 314 | "collapsed": false 315 | }, 316 | "outputs": [ 317 | { 318 | "name": "stdout", 319 | "output_type": "stream", 320 | "text": [ 321 | "FFMPEG Environment Variable - FFMPEG_BIN: C:\\opencv_pre\\build\\bin\n" 322 | ] 323 | } 324 | ], 325 | "source": [ 326 | "print('FFMPEG Environment Variable - FFMPEG_BIN:', ffmp_path)" 327 | ] 328 | }, 329 | { 330 | "cell_type": "markdown", 331 | "metadata": {}, 332 | "source": [ 333 | "If you did not see any errors, you are to be congratulated on setting up your OpenCV environment correctly." 
334 | ] 335 | }, 336 | { 337 | "cell_type": "markdown", 338 | "metadata": {}, 339 | "source": [ 340 | "### Congratulations!" 341 | ] 342 | } 343 | ], 344 | "metadata": { 345 | "anaconda-cloud": {}, 346 | "kernelspec": { 347 | "display_name": "Python 2", 348 | "language": "python", 349 | "name": "python2" 350 | }, 351 | "language_info": { 352 | "codemirror_mode": { 353 | "name": "ipython", 354 | "version": 2 355 | }, 356 | "file_extension": ".py", 357 | "mimetype": "text/x-python", 358 | "name": "python", 359 | "nbconvert_exporter": "python", 360 | "pygments_lexer": "ipython2", 361 | "version": "2.7.13" 362 | } 363 | }, 364 | "nbformat": 4, 365 | "nbformat_minor": 1 366 | } 367 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_01_info/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample_01: ocv_info 2 | 3 | [Sample 01](ocv_info.py) is a simple diagnostic program that queries the development environment and ensures that all the prerequisites have been met and displays the version information. It also checks to see if Environment variables have been set and displays the path for diagnostics if necessary. 4 | 5 | >Note: If you are using pre-built OpenCV, you need to use Python\* 2.7.x to run the samples. If you have built OpenCV from source, then you can use either Python 3.x or Python 2.x. 6 | 7 | ## Usage: 8 | Launch the interactive tutorial by typing the following command in your comand window: 9 | 10 | ``` 11 | jupyter notebook ./ocv_info.ipynb 12 | ``` 13 | OR 14 | 15 | You may run the script using the command: 16 | 17 | ``` 18 | python ./ocv_info.py 19 | ``` 20 | ## Code Walkthrough: 21 | 22 | First the Standard Python Shebang 23 | 24 | ``` 25 | #!/usr/bin/env python2 26 | ``` 27 | Next import print_function for Python 2/3 comptibility. 
This allows use of print like a function in Python 2.x 28 | 29 | ``` 30 | from __future__ import print_function 31 | ``` 32 | 33 | OpenCV uses Numpy arrays to manipulate image and video data and is a mandatory requirement. So import numpy module first. 34 | 35 | ``` 36 | import numpy as np 37 | ``` 38 | 39 | Now print the version. In the script this is done at the end in the Program Outputs block 40 | 41 | ``` 42 | print('Numpy Version:', np.__version__) 43 | ``` 44 | **Numpy Version: 1.12.1** 45 | 46 | Next import the OpenCV python module 47 | 48 | ``` 49 | import cv2 50 | ``` 51 | 52 | Now print the version. In the script this is done at the end in the Program Outputs block 53 | 54 | ``` 55 | print('OpenCV Version:', cv2.__version__) 56 | ``` 57 | **OpenCV Version: 3.2.0** 58 | 59 | Import the other required python modules for this script. Matplotlib is used in a number of tutorials found in the OpenCV package and OS and sys are needed to test whether the OpenCV environment variables are properly setup. 60 | 61 | ``` 62 | import matplotlib as mpl 63 | import os 64 | import sys 65 | ``` 66 | 67 | Now print the versions here. In the script this is done at the end in the Program Outputs block 68 | 69 | ``` 70 | print('Matplotlib Version:', mpl.__version__) 71 | print(sys.version) 72 | ``` 73 | **Matplotlib Version: 2.0.12.7.13 |Anaconda custom (32-bit)| (default, Dec 19 2016, 13:36:02) [MSC v.1500 32 bit (Intel)]** 74 | 75 | Now we check to see if the OpenCV environment variables have been properly set. We need to do this in a safe way to prevent the script from crashing in case no variable has been set. So use standard python exception handling... 76 | 77 | ``` 78 | try: 79 | pyth_path = os.environ['PYTHONPATH'].split(os.pathsep) 80 | except KeyError: 81 | pyth_path = [] 82 | ``` 83 | 84 | Now print the environment variable. In the script this is done at the end in the Program Outputs block. 
85 | 86 | ``` 87 | print('Python Environment Variable - PYTHONPATH:', pyth_path) 88 | ``` 89 | **Python Environment Variable - PYTHONPATH: ['C:\\Users\\vinay\\Anaconda2\\Library\\bin']** 90 | 91 | Next check the OpenCV environemnt variables 92 | 93 | ``` 94 | try: 95 | ocv2_path = os.environ['OPENCV_DIR'] 96 | except KeyError: 97 | ocv2_path = [] 98 | 99 | try: 100 | ocv2_vers = os.environ['OPENCV_VER'] 101 | except KeyError: 102 | ocv2_path = [] 103 | ``` 104 | 105 | Now print the environment variable. In the script this is done at the end in the Program Outputs block 106 | 107 | ``` 108 | print('OpenCV Environment Variable - OPENCV_DIR:', ocv2_path) 109 | print('OpenCV Environment Variable - OPENCV_VER:', ocv2_vers) 110 | OpenCV Environment Variable - OPENCV_DIR: C:\opencv_pre\ 111 | OpenCV Environment Variable - OPENCV_VER: 320 112 | ``` 113 | 114 | Finally check the FFMPEG environment variable 115 | 116 | ``` 117 | try: 118 | ffmp_path = os.environ['FFMPEG_BIN'] 119 | except KeyError: 120 | ffmp_path = [] 121 | ``` 122 | 123 | Now print the environment variable. In the script this is done at the end in the Program Outputs block 124 | 125 | ``` 126 | print('FFMPEG Environment Variable - FFMPEG_BIN:', ffmp_path) 127 | ``` 128 | 129 | **FFMPEG Environment Variable - FFMPEG_BIN: C:\opencv_pre\build\bin** 130 | 131 | If you did not see any errors, you are to be congratulated on setting up your OpenCV environment correctly. 
132 | 133 | **Congratulations!** 134 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_01_info/ocv_info.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample_01: ocv_info\n", 8 | "\n", 9 | "[Sample 01](ocv_info.py) is a simple diagnostic program that queries the development environment and ensures that all the prerequisites have been met and displays the version information. It also checks to see if Environment variables have been set and displays the path for diagnostics if necessary.\n", 10 | "\n", 11 | ">*Note:* If you are using pre-built OpenCV, you need to use Python 2.7.x to run the samples. If you have built OpenCV from source, then you can use either Python 3.x or Python 2.x.\n", 12 | "\n", 13 | "First the Standard Python Shebang" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 1, 19 | "metadata": { 20 | "collapsed": true 21 | }, 22 | "outputs": [], 23 | "source": [ 24 | "#!/usr/bin/env python2" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "Next import print_function for Python 2/3 comptibility. This allows use of print like a function in Python 2.x" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 2, 37 | "metadata": { 38 | "collapsed": true 39 | }, 40 | "outputs": [], 41 | "source": [ 42 | "from __future__ import print_function" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": {}, 48 | "source": [ 49 | "OpenCV uses Numpy arrays to manipulate image and video data and is a mandatory requirement. So import numpy module first." 
50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 3, 55 | "metadata": { 56 | "collapsed": true 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "import numpy as np" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "Now print the version. In the script this is done at the end in the Program Outputs block" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": 4, 73 | "metadata": { 74 | "collapsed": false 75 | }, 76 | "outputs": [ 77 | { 78 | "name": "stdout", 79 | "output_type": "stream", 80 | "text": [ 81 | "Numpy Version: 1.12.1\n" 82 | ] 83 | } 84 | ], 85 | "source": [ 86 | "print('Numpy Version:', np.__version__)" 87 | ] 88 | }, 89 | { 90 | "cell_type": "markdown", 91 | "metadata": {}, 92 | "source": [ 93 | "Next import the OpenCV python module" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 5, 99 | "metadata": { 100 | "collapsed": false 101 | }, 102 | "outputs": [], 103 | "source": [ 104 | "import cv2" 105 | ] 106 | }, 107 | { 108 | "cell_type": "markdown", 109 | "metadata": {}, 110 | "source": [ 111 | "Now print the version. In the script this is done at the end in the Program Outputs block" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": 6, 117 | "metadata": { 118 | "collapsed": false 119 | }, 120 | "outputs": [ 121 | { 122 | "name": "stdout", 123 | "output_type": "stream", 124 | "text": [ 125 | "OpenCV Version: 3.2.0\n" 126 | ] 127 | } 128 | ], 129 | "source": [ 130 | "print('OpenCV Version:', cv2.__version__)" 131 | ] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "Import the other required python modules for this script. Matplotlib is used in a number of tutorials found in the OpenCV package and OS and sys are needed to test whether the OpenCV environment variables are properly setup." 
138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": 7, 143 | "metadata": { 144 | "collapsed": true 145 | }, 146 | "outputs": [], 147 | "source": [ 148 | "import matplotlib as mpl\n", 149 | "import os\n", 150 | "import sys" 151 | ] 152 | }, 153 | { 154 | "cell_type": "markdown", 155 | "metadata": {}, 156 | "source": [ 157 | "Now print the versions here. In the script this is done at the end in the Program Outputs block" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": 8, 163 | "metadata": { 164 | "collapsed": false 165 | }, 166 | "outputs": [ 167 | { 168 | "name": "stdout", 169 | "output_type": "stream", 170 | "text": [ 171 | "Matplotlib Version: 2.0.1\n", 172 | "2.7.13 |Anaconda custom (32-bit)| (default, Dec 19 2016, 13:36:02) [MSC v.1500 32 bit (Intel)]\n" 173 | ] 174 | } 175 | ], 176 | "source": [ 177 | "print('Matplotlib Version:', mpl.__version__)\n", 178 | "print(sys.version)" 179 | ] 180 | }, 181 | { 182 | "cell_type": "markdown", 183 | "metadata": {}, 184 | "source": [ 185 | "Now we check to see if the OpenCV environment variables have been properly set. We need to do this in a safe way to prevent the script from crashing in case no variable has been set. So use standard python exception handling..." 186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": 9, 191 | "metadata": { 192 | "collapsed": true 193 | }, 194 | "outputs": [], 195 | "source": [ 196 | "try:\n", 197 | " pyth_path = os.environ['PYTHONPATH'].split(os.pathsep)\n", 198 | "except KeyError:\n", 199 | " pyth_path = []" 200 | ] 201 | }, 202 | { 203 | "cell_type": "markdown", 204 | "metadata": {}, 205 | "source": [ 206 | "Now print the environment variable. In the script this is done at the end in the Program Outputs block." 
207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": 10, 212 | "metadata": { 213 | "collapsed": false 214 | }, 215 | "outputs": [ 216 | { 217 | "name": "stdout", 218 | "output_type": "stream", 219 | "text": [ 220 | "Python Environment Variable - PYTHONPATH: ['C:\\\\Users\\\\vinay\\\\Anaconda2\\\\Library\\\\bin']\n" 221 | ] 222 | } 223 | ], 224 | "source": [ 225 | "print('Python Environment Variable - PYTHONPATH:', pyth_path)" 226 | ] 227 | }, 228 | { 229 | "cell_type": "markdown", 230 | "metadata": {}, 231 | "source": [ 232 | "Next check the OpenCV environemnt variables" 233 | ] 234 | }, 235 | { 236 | "cell_type": "code", 237 | "execution_count": 18, 238 | "metadata": { 239 | "collapsed": false 240 | }, 241 | "outputs": [], 242 | "source": [ 243 | "try:\n", 244 | " ocv2_path = os.environ['OPENCV_DIR']\n", 245 | "except KeyError:\n", 246 | " ocv2_path = []\n", 247 | " \n", 248 | "try:\n", 249 | " ocv2_vers = os.environ['OPENCV_VER']\n", 250 | "except KeyError:\n", 251 | " ocv2_path = []" 252 | ] 253 | }, 254 | { 255 | "cell_type": "markdown", 256 | "metadata": {}, 257 | "source": [ 258 | "Now print the environment variable. 
In the script this is done at the end in the Program Outputs block" 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "execution_count": 19, 264 | "metadata": { 265 | "collapsed": false 266 | }, 267 | "outputs": [ 268 | { 269 | "name": "stdout", 270 | "output_type": "stream", 271 | "text": [ 272 | "OpenCV Environment Variable - OPENCV_DIR: C:\\opencv_pre\\\n", 273 | "OpenCV Environment Variable - OPENCV_VER: 320\n" 274 | ] 275 | } 276 | ], 277 | "source": [ 278 | "print('OpenCV Environment Variable - OPENCV_DIR:', ocv2_path)\n", 279 | "print('OpenCV Environment Variable - OPENCV_VER:', ocv2_vers)" 280 | ] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | "metadata": {}, 285 | "source": [ 286 | "Finally check the FFMPEG environment variable" 287 | ] 288 | }, 289 | { 290 | "cell_type": "code", 291 | "execution_count": 20, 292 | "metadata": { 293 | "collapsed": true 294 | }, 295 | "outputs": [], 296 | "source": [ 297 | "try:\n", 298 | " ffmp_path = os.environ['FFMPEG_BIN']\n", 299 | "except KeyError:\n", 300 | " ffmp_path = []" 301 | ] 302 | }, 303 | { 304 | "cell_type": "markdown", 305 | "metadata": {}, 306 | "source": [ 307 | "Now print the environment variable. In the script this is done at the end in the Program Outputs block" 308 | ] 309 | }, 310 | { 311 | "cell_type": "code", 312 | "execution_count": 21, 313 | "metadata": { 314 | "collapsed": false 315 | }, 316 | "outputs": [ 317 | { 318 | "name": "stdout", 319 | "output_type": "stream", 320 | "text": [ 321 | "FFMPEG Environment Variable - FFMPEG_BIN: C:\\opencv_pre\\build\\bin\n" 322 | ] 323 | } 324 | ], 325 | "source": [ 326 | "print('FFMPEG Environment Variable - FFMPEG_BIN:', ffmp_path)" 327 | ] 328 | }, 329 | { 330 | "cell_type": "markdown", 331 | "metadata": {}, 332 | "source": [ 333 | "If you did not see any errors, you are to be congratulated on setting up your OpenCV environment correctly." 
334 | ] 335 | }, 336 | { 337 | "cell_type": "markdown", 338 | "metadata": {}, 339 | "source": [ 340 | "### Congratulations!" 341 | ] 342 | } 343 | ], 344 | "metadata": { 345 | "anaconda-cloud": {}, 346 | "kernelspec": { 347 | "display_name": "Python 2", 348 | "language": "python", 349 | "name": "python2" 350 | }, 351 | "language_info": { 352 | "codemirror_mode": { 353 | "name": "ipython", 354 | "version": 2 355 | }, 356 | "file_extension": ".py", 357 | "mimetype": "text/x-python", 358 | "name": "python", 359 | "nbconvert_exporter": "python", 360 | "pygments_lexer": "ipython2", 361 | "version": "2.7.13" 362 | } 363 | }, 364 | "nbformat": 4, 365 | "nbformat_minor": 1 366 | } 367 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_01_info/ocv_info.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import Python modules 8 | import numpy as np 9 | import cv2 10 | import matplotlib as mpl 11 | import os 12 | import sys 13 | 14 | # Safe way to check if Environmental Variable is present 15 | try: 16 | pyth_path = os.environ['PYTHONPATH'].split(os.pathsep) 17 | except KeyError: 18 | pyth_path = [] 19 | 20 | try: 21 | ocv2_path = os.environ['OPENCV_DIR'] 22 | except KeyError: 23 | ocv2_path = [] 24 | 25 | try: 26 | ffmp_path = os.environ['FFMPEG_BIN'] 27 | except KeyError: 28 | ffmp_path = [] 29 | 30 | # Program Outputs 31 | print() 32 | print('OpenCV Environment Information Sample') 33 | print() 34 | print('Please ensure that the following Environment Variables are set correctly before proceeding!') 35 | print() 36 | print('Python Environment Variable - PYTHONPATH:', pyth_path) 37 | print('OpenCV Environment Variable - OPENCV_DIR:', ocv2_path) 38 | print('FFMPEG Environment Variable - FFMPEG_BIN:', ffmp_path) 39 
| print() 40 | print('Version Information:') 41 | print() 42 | print(sys.version) 43 | print('OpenCV Version:', cv2.__version__) 44 | print('Numpy Version:', np.__version__) 45 | print('Matplotlib Version:', mpl.__version__) 46 | print() -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_02_build_info/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample 2: ocv_build_info 2 | 3 | [Sample 02](sample_01/ocv_build_info.py) is a simple diagnostic program that displays the detailed OpenCV build information. 4 | 5 | ## Usage: 6 | Launch the interactive tutorial by typing the following command in your comand window: 7 | 8 | ``` 9 | jupyter notebook ./ocv_build_info.ipynb 10 | ``` 11 | OR 12 | 13 | You may run the script using the command: 14 | 15 | ``` 16 | python ./ocv_build_info.py 17 | ``` 18 | ## Code Walkthrough: 19 | 20 | First the standard Python\* shebang 21 | 22 | ``` 23 | #!/usr/bin/env python2 24 | ``` 25 | Next import print_function for Python 2/3 compatibility. This allows use of print like a function in Python 2.x 26 | 27 | ``` 28 | from __future__ import print_function 29 | ``` 30 | Import the OpenCV Python module 31 | 32 | ``` 33 | import cv2 34 | ``` 35 | Now obtain and print OpenCV Build Configuration. 
The function getBuildInformation() returns the full configuration time cmake output 36 | 37 | ``` 38 | try: 39 | buildinfo = cv2.getBuildInformation() 40 | print(buildinfo) 41 | 42 | except cv2.error as e: 43 | print('Error:') 44 | ``` 45 | 46 | **Program Output:** 47 | 48 | ``` 49 | General configuration for OpenCV 3.2.0 ===================================== 50 | Version control: 3.2.0 51 | 52 | Platform: 53 | Timestamp: 2016-12-23T14:50:36Z 54 | Host: Windows 10.0.14393 AMD64 55 | CMake: 3.7.0 56 | CMake generator: Visual Studio 14 2015 57 | CMake build tool: C:/Program Files (x86)/MSBuild/14.0/bin/MSBuild.exe 58 | MSVC: 1900 59 | 60 | C/C++: 61 | Built as dynamic libs?: NO 62 | C++ Compiler: C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/cl.exe (ver 19.0.24215.1) 63 | C++ flags (Release): /DWIN32 /D_WINDOWS /W4 /GR /EHa /D _CRT_SECURE_NO_DEPRECATE /D _CRT_NONSTDC_NO_DEPRECATE /D _SCL_SECURE_NO_WARNINGS /Gy /bigobj /arch:SSE2 /Oi /fp:fast /wd4251 /wd4324 /wd4275 /wd4589 /MP8 /MT /O2 /Ob2 /DNDEBUG /Zi 64 | C++ flags (Debug): /DWIN32 /D_WINDOWS /W4 /GR /EHa /D _CRT_SECURE_NO_DEPRECATE /D _CRT_NONSTDC_NO_DEPRECATE /D _SCL_SECURE_NO_WARNINGS /Gy /bigobj /arch:SSE2 /Oi /fp:fast /wd4251 /wd4324 /wd4275 /wd4589 /MP8 /D_DEBUG /MTd /Zi /Ob0 /Od /RTC1 65 | C Compiler: C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/cl.exe 66 | C flags (Release): /DWIN32 /D_WINDOWS /W3 /D _CRT_SECURE_NO_DEPRECATE /D _CRT_NONSTDC_NO_DEPRECATE /D _SCL_SECURE_NO_WARNINGS /Gy /bigobj /arch:SSE2 /Oi /fp:fast /MP8 /MT /O2 /Ob2 /DNDEBUG /Zi 67 | C flags (Debug): /DWIN32 /D_WINDOWS /W3 /D _CRT_SECURE_NO_DEPRECATE /D _CRT_NONSTDC_NO_DEPRECATE /D _SCL_SECURE_NO_WARNINGS /Gy /bigobj /arch:SSE2 /Oi /fp:fast /MP8 /D_DEBUG /MTd /Zi /Ob0 /Od /RTC1 68 | Linker flags (Release): /machine:X86 /NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:msvcrt.lib /NODEFAULTLIB:msvcrtd.lib /INCREMENTAL:NO /debug /NODEFAULTLIB:libcmtd.lib 69 | Linker flags (Debug): /machine:X86 /NODEFAULTLIB:atlthunk.lib 
/NODEFAULTLIB:msvcrt.lib /NODEFAULTLIB:msvcrtd.lib /debug /INCREMENTAL /NODEFAULTLIB:libcmt.lib 70 | ccache: NO 71 | Precompiled headers: YES 72 | Extra dependencies: comctl32 gdi32 ole32 setupapi ws2_32 vfw32 73 | 3rdparty dependencies: zlib libjpeg libwebp libpng libtiff libjasper IlmImf ippicv 74 | 75 | OpenCV modules: 76 | To be built: core flann imgproc ml photo video imgcodecs shape videoio highgui objdetect superres features2d calib3d java stitching videostab python2 77 | Disabled: python3 world 78 | Disabled by dependency: - 79 | Unavailable: cudaarithm cudabgsegm cudacodec cudafeatures2d cudafilters cudaimgproc cudalegacy cudaobjdetect cudaoptflow cudastereo cudawarping cudev ts viz 80 | 81 | Windows RT support: NO 82 | 83 | GUI: 84 | QT: NO 85 | Win32 UI: YES 86 | OpenGL support: NO 87 | VTK support: NO 88 | 89 | Media I/O: 90 | ZLib: build (ver 1.2.8) 91 | JPEG: build (ver 90) 92 | WEBP: build (ver 0.3.1) 93 | PNG: build (ver 1.6.24) 94 | TIFF: build (ver 42 - 4.0.2) 95 | JPEG 2000: build (ver 1.900.1) 96 | OpenEXR: build (ver 1.7.1) 97 | GDAL: NO 98 | GDCM: NO 99 | 100 | Video I/O: 101 | Video for Windows: YES 102 | DC1394 1.x: NO 103 | DC1394 2.x: NO 104 | FFMPEG: YES (prebuilt binaries) 105 | avcodec: YES (ver 57.64.101) 106 | avformat: YES (ver 57.56.100) 107 | avutil: YES (ver 55.34.100) 108 | swscale: YES (ver 4.2.100) 109 | avresample: YES (ver 3.1.0) 110 | GStreamer: NO 111 | OpenNI: NO 112 | OpenNI PrimeSensor Modules: NO 113 | OpenNI2: NO 114 | PvAPI: NO 115 | GigEVisionSDK: NO 116 | DirectShow: YES 117 | Media Foundation: NO 118 | XIMEA: NO 119 | Intel PerC: NO 120 | 121 | Parallel framework: Concurrency 122 | 123 | Other third-party libraries: 124 | Use IPP: 9.0.1 [9.0.1] 125 | at: C:/build/master_winpack-bindings-win32-vc14-static/build/3rdparty/ippicv/ippicv_win 126 | Use IPP Async: NO 127 | Use Lapack: NO 128 | Use Eigen: NO 129 | Use Cuda: NO 130 | Use OpenCL: YES 131 | Use OpenVX: NO 132 | Use custom HAL: NO 133 | 134 | OpenCL: 135 | 
Include path: C:/build/master_winpack-bindings-win32-vc14-static/opencv/3rdparty/include/opencl/1.2 136 | Use AMDFFT: NO 137 | Use AMDBLAS: NO 138 | 139 | Python 2: 140 | Interpreter: C:/utils/soft/python27-x86/python.exe (ver 2.7.12) 141 | Libraries: C:/utils/soft/python27-x86/Libs/python27.lib (ver 2.7.12) 142 | numpy: C:/utils/soft/python27-x86/lib/site-packages/numpy/core/include (ver 1.11.2) 143 | packages path: C:/utils/soft/python27-x86/Lib/site-packages 144 | 145 | Python 3: 146 | Interpreter: C:/utils/soft/python35-x86/python.exe (ver 3.5.2) 147 | 148 | Python (for build): C:/utils/soft/python27-x86/python.exe 149 | 150 | Java: 151 | ant: C:/utils/soft/apache-ant-1.9.7/bin/ant.bat (ver 1.9.7) 152 | JNI: C:/Program Files (x86)/Java/jdk1.8.0_112/include C:/Program Files (x86)/Java/jdk1.8.0_112/include/win32 C:/Program Files (x86)/Java/jdk1.8.0_112/include 153 | Java wrappers: YES 154 | Java tests: NO 155 | 156 | Matlab: Matlab not found or implicitly disabled 157 | 158 | Tests and samples: 159 | Tests: NO 160 | Performance tests: NO 161 | C/C++ Examples: NO 162 | 163 | Install path: C:/build/master_winpack-bindings-win32-vc14-static/install 164 | 165 | cvconfig.h is in: C:/build/master_winpack-bindings-win32-vc14-static/build 166 | ----------------------------------------------------------------- 167 | ``` 168 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_02_build_info/ocv_build_info.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import OpenCV Python module 8 | import cv2 9 | 10 | # This function returns the full configuration time cmake output 11 | try: 12 | buildinfo = cv2.getBuildInformation() 13 | print(buildinfo) 14 | 15 | except cv2.error as e: 16 | print('Error:') 17 | 18 | 19 | 
-------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_03_image_test/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample 3: ocv_image_test 2 | [Sample 03](sample_03/ocv_image_test.py) is a sanity test that uses OpenCV to display an Image file. This test serves to ensure that OpenCV installation is working and validates the development environment. It also shows how to overlay text on an image. 3 | 4 | ## Usage: 5 | 6 | Launch the interactive tutorial by typing the following command in your comand window: 7 | 8 | ``` 9 | jupyter notebook ./ocv_image_test.ipynb 10 | ``` 11 | OR 12 | 13 | You may run the script using the command: 14 | 15 | ``` 16 | python ./ocv_image_test.py 17 | ``` 18 | ## Code Walkthrough: 19 | 20 | First the Standard Python\* Shebang 21 | 22 | ``` 23 | #!/usr/bin/env python2 24 | ``` 25 | Next import print_function for Python 2/3 comptibility. This allows use of print like a function in Python 2.x 26 | ``` 27 | from __future__ import print_function 28 | ``` 29 | 30 | Import Numpy and OpenCV Python modules 31 | ``` 32 | import numpy as np 33 | import cv2 34 | ``` 35 | 36 | Load an image from file using the OpenCV cv2.imread() function. 37 | ``` 38 | cv2.imread(filename[, flags]) -> retval 39 | ``` 40 | _**Parameters:**_ 41 | 42 | **filename – Name and path of file to be loaded.** 43 | 44 | **flags:** 45 | 46 | Flags specify how the image should be read: 47 | ``` 48 | 1 = cv2.IMREAD_COLOR - The Default flag, loads a color image. Any transparency data of image will be ignored. 49 | 0 = cv2.IMREAD_GRAYSCALE : Loads image in grayscale mode 50 | -1 = cv2.IMREAD_UNCHANGED : Loads image without any modifications including alpha channel**_ 51 | ``` 52 | Here we use the function to open a JPG file called intel-hq.jpg present in the sample_03 folder. 
53 | ``` 54 | img = cv2.imread('intel-hq.jpg',1) 55 | ``` 56 | _The color information for images loaded by OpenCV are in reverse order (BGR) and are stored as a numpy array._ 57 | 58 | >Note: The following section between the lines, is only available in the interactive tutorial and not in the script. 59 | ---------------------------------------------------------------------------------------------------------------------------------------- 60 | To see how img is stored, see it's type. It is a Numpy ndarray. 61 | 62 | ``` 63 | print(type(img)) 64 | ``` 65 | 66 | **** 67 | 68 | Now print the array 69 | 70 | ``` 71 | print(img) 72 | [[[147 95 54] 73 | [140 90 44] 74 | [156 108 60] 75 | ..., 76 | [165 132 93] 77 | [161 127 91] 78 | [153 119 83]] 79 | 80 | [[142 93 45] 81 | [142 94 46] 82 | [139 96 47] 83 | ..., 84 | [160 126 90] 85 | [156 122 86] 86 | [148 114 78]] 87 | 88 | [[136 91 40] 89 | [136 96 44] 90 | [116 80 34] 91 | ..., 92 | [153 119 83] 93 | [148 113 79] 94 | [141 106 72]] 95 | 96 | ..., 97 | [[180 177 192] 98 | [178 175 190] 99 | [177 174 189] 100 | ..., 101 | [199 198 214] 102 | [200 199 215] 103 | [199 197 216]] 104 | 105 | [[179 176 191] 106 | [180 177 192] 107 | [179 176 191] 108 | ..., 109 | [197 196 212] 110 | [198 196 215] 111 | [200 198 217]] 112 | 113 | [[178 175 190] 114 | [180 177 192] 115 | [181 178 193] 116 | ..., 117 | [194 192 211] 118 | [195 193 212] 119 | [199 197 216]]] 120 | ``` 121 | You can also display this image inline with the interactive tutorial using matplotlib which processes images in RGB format to see that OpenCV loads images in BGR format 122 | 123 | ``` 124 | from matplotlib import pyplot as plt 125 | plt.imshow(img) 126 | plt.show() 127 | ``` 128 | This produces the image shown below: 129 | 130 | ![Image displayed in BGR format](https://github.com/vraoresearch/Intel-Digital-Signage-Reference/blob/master/tutorials/opencv/Python/sample_03/matplotlib_unconverted.png) 131 | 132 | After conversion to RGB, the image properly 
displays. Here we use an OpenCV cv2.cvtColor() function to convert colors... 133 | 134 | ``` 135 | img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 136 | plt.imshow(img2) 137 | plt.show() 138 | ``` 139 | This produces the converted image shown below: 140 | 141 | ![Image displayed in BGR format](https://github.com/vraoresearch/Intel-Digital-Signage-Reference/blob/master/tutorials/opencv/Python/sample_03/matplotlib_converted.png) 142 | 143 | >Note: From this point on, we return to the script ... 144 | ---------------------------------------------------------------------------------------------------------------------------------------- 145 | 146 | Next we overlay text on top of the image using the OpenCV cv2.putText() function. 147 | ``` 148 | font = cv2.FONT_HERSHEY_SIMPLEX 149 | cv2.putText(img, "OpenCV Version:",(10,100), font, 2,(255,255,255),2,cv2.LINE_AA) 150 | cv2.putText(img, cv2.__version__,(525,100), font, 2,(255,255,255),2,cv2.LINE_AA) 151 | ``` 152 | Now we create a GUI window to display the loaded image. We do this using the **cv2.namedWindow()** function. It is not necessary to use this function to display the image but using this function allows for better control over GUI windows. 153 | 154 | >Note: Sometimes Jupyter Notebook\* based interactive tutorials may not open a GUI window when run locally. In such cases, use the function **cv2.startWindowThread()** as shown below, before calling namedWindow function. 155 | 156 | ``` 157 | cv2.startWindowThread() 158 | cv2.namedWindow('image', cv2.WINDOW_AUTOSIZE) 159 | ``` 160 | An empty window will open until an image is displayed. So lets create a GUI window to display the image 161 | 162 | ``` 163 | cv2.namedWindow('Sample_03: Displaying an Image', cv2.WINDOW_AUTOSIZE) 164 | ``` 165 | 166 | Now we display the previously loaded image with the overlay text inside this window using the OpenCV cv2.imshow() function. 
167 | Note: Since we are using an OpenCV function to display the image, there is no need for conversion from BGR to RGB. The image will not load until the event handler is called. 168 | 169 | ``` 170 | cv2.imshow('Sample_03: Displaying an Image',img) 171 | ``` 172 | 173 | _The cv2.imshow() function should be followed by the **cv2.waitKey()** which displays the image for the specified number of milliseconds. This function is also an event handler and can be bound to the keyboard as seen in our example. 174 | 175 | ``` 176 | # Exit on any keystroke 177 | if cv2.waitKey(0): 178 | print('Exiting ...') 179 | Exiting ... 180 | ``` 181 | 182 | Now we perform a cleanup and release all resources used using the function cv2.destroyAllWindows() 183 | 184 | ``` 185 | # Release resources used 186 | cv2.destroyAllWindows() 187 | ``` 188 | Putting it all together with Exception handling: 189 | ``` 190 | try: 191 | # Load the image from file using OpenCV. The 1 means with color info 192 | example = cv2.imread('intel-hq.jpg',1) 193 | # Overlay text on the image using OpenCV 194 | font = cv2.FONT_HERSHEY_SIMPLEX 195 | cv2.putText(example, "OpenCV Version:",(10,100), font, 2,(255,255,255),2,cv2.LINE_AA) 196 | cv2.putText(example, cv2.__version__,(525,100), font, 2,(255,255,255),2,cv2.LINE_AA) 197 | 198 | # Create a GUI window to display the image 199 | cv2.namedWindow('Image display', cv2.WINDOW_AUTOSIZE) 200 | 201 | # Display the Image 202 | cv2.imshow('Image display',example) 203 | # Exit on any keystroke 204 | if cv2.waitKey(0): 205 | print('Exiting ...') 206 | 207 | # Release resources used 208 | cv2.destroyAllWindows() 209 | 210 | except cv2.error as e: 211 | print('Error:') 212 | ``` 213 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_03_image_test/intel-hq.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel-iot-devkit/Video-Analytics-OpenCV/7a89c3d4c8bd105a48ab701deda656343e770f4e/tutorials/opencv/Python/sample_03_image_test/intel-hq.jpg -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_03_image_test/matplotlib_converted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/Video-Analytics-OpenCV/7a89c3d4c8bd105a48ab701deda656343e770f4e/tutorials/opencv/Python/sample_03_image_test/matplotlib_converted.png -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_03_image_test/matplotlib_unconverted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/Video-Analytics-OpenCV/7a89c3d4c8bd105a48ab701deda656343e770f4e/tutorials/opencv/Python/sample_03_image_test/matplotlib_unconverted.png -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_03_image_test/ocv_image_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import OpenCV and other needed Python modules 8 | import numpy as np 9 | import cv2 10 | 11 | try: 12 | # Load the image from file using OpenCV. 
The 1 means with color info 13 | img = cv2.imread('intel-hq.jpg',1) 14 | # Overlay text on the image using OpenCV 15 | font = cv2.FONT_HERSHEY_SIMPLEX 16 | cv2.putText(img, "OpenCV Version:",(10,100), font, 2,(255,255,255),2,cv2.LINE_AA) 17 | cv2.putText(img, cv2.__version__,(525,100), font, 2,(255,255,255),2,cv2.LINE_AA) 18 | 19 | # Create a GUI window to display the image 20 | cv2.namedWindow('image', cv2.WINDOW_AUTOSIZE) 21 | 22 | # Display the Image 23 | cv2.imshow('image',img) 24 | # Exit on any keystroke 25 | if cv2.waitKey(0): 26 | print('Exiting ...') 27 | 28 | # Release resources used 29 | cv2.destroyAllWindows() 30 | 31 | except cv2.error as e: 32 | print('Error:') 33 | 34 | 35 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_04_video_test/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample 4: ocv_video_test 2 | [Sample 04](ocv_video_test.py) is a sanity test that uses OpenCV to connect to a WebCam and display the video stream. This test serves to ensure that OpenCV WebCam installation is working and further validates the development environment. It also shows how to overlay text on video streams. 3 | 4 | ## Usage: 5 | Launch the interactive tutorial by typing the following command in your comand window: 6 | 7 | ``` 8 | jupyter notebook ./ocv_video_test.ipynb 9 | ``` 10 | OR 11 | 12 | You may run the script using the command: 13 | 14 | ``` 15 | python ./ocv_video_test.py 16 | ``` 17 | ## Code Walkthrough: 18 | We start by performing the basic initializations 19 | 20 | ``` 21 | #!/usr/bin/env python2 22 | 23 | # Python 2/3 compatibility 24 | from __future__ import print_function 25 | # Allows use of print like a function in Python 2.x 26 | 27 | # Import OpenCV and Numpy modules 28 | import numpy as np 29 | import cv2 30 | ``` 31 | First we need to initialize a Video Web Camera for capturing video with OpenCV. 
We do this transparently by using an OpenCV API 32 | **cv2.VideoCapture()** 33 | 34 | **cv2.VideoCapture(Parameters)** 35 | 36 | **Parameters:** 37 | 38 | **filename – Name and path of file to be loaded.** 39 | 40 | **device_id - Id of the opened video capturing device (i.e. a camera index)** 41 | 42 | >Note about Device Id: The default camera is 0 (usually built-in).The second camera would be 1 and so on 43 | 44 | >On the Intel® NUC which has no camera, the default Id of "0" should work. On a Laptop, you may need to try "0" or "1" if you have two cameras for front and back. 45 | 46 | ``` 47 | webcam = cv2.VideoCapture(0) 48 | ``` 49 | **cv2.videoCapture()** method has many calls and **isOpened()** returns **(True)** if the device is opened sucessfully 50 | 51 | So we can check if Camera was initialized correctly 52 | 53 | ``` 54 | success = webcam.isOpened() 55 | if success == False: 56 | print('Error: Camera could not be opened') 57 | else: 58 | print('Success: Grabbed the camera') 59 | Success: Grabbed the camera 60 | ``` 61 | 62 | Next we use the read() function from cv2.VideoCapure to read a video frame while this is (True) 63 | 64 | To Read each frame in video stream: 65 | 66 | ``` 67 | ret, frame = webcam.read() 68 | ``` 69 | 70 | Once the frame is read, it is usually converted to grayscale before performing further operations. This avoids having to process color information in real-time. For this we use the same **cv2.cvtColor()** method from our previous example with just a different color space conversion code. 
71 | 72 | ``` 73 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 74 | ``` 75 | 76 | Overlay Text on the video frame with Exit instructions 77 | 78 | ``` 79 | font = cv2.FONT_HERSHEY_SIMPLEX 80 | cv2.putText(gray, "Type q to Quit:",(50,50), font, 1,(0,0,0),2,cv2.LINE_AA) 81 | ``` 82 | Now display the captured frame with overlay text in a GUI window 83 | ``` 84 | cv2.namedWindow('Output', cv2.WINDOW_AUTOSIZE) 85 | cv2.imshow('Output',gray) 86 | ``` 87 | 88 | Next comes the event handler where we wait for the q key and then release the devices and resources used 89 | ``` 90 | if cv2.waitKey(1) & 0xFF == ord('q'): 91 | print('Exiting ...') 92 | ``` 93 | >Note: Since the interactive tutorial mode is not well suited for handling video, the While(True) loop has been omitted and so you will only see a still image. But you can see this working for video in the consolidated example and script. 94 | Next we release the devices and all resources used. 95 | ``` 96 | webcam.release() 97 | cv2.destroyAllWindows() 98 | ``` 99 | 100 | >Note: Ensure that the camera was released in the previous step. The camera light should go off. If not restart the kernel before continuing to the next step. 
101 | 102 | Now putting it all together with exception handling: 103 | 104 | ``` 105 | #!/usr/bin/env python2 106 | 107 | # Python 2/3 compatibility 108 | from __future__ import print_function 109 | # Allows use of print like a function in Python 2.x 110 | 111 | # Import OpenCV and Numpy modules 112 | import numpy as np 113 | import cv2 114 | try: 115 | webcam = cv2.VideoCapture(0) 116 | # Check if Camera initialized correctly 117 | success = webcam.isOpened() 118 | if success == False: 119 | print('Error: Camera could not be opened') 120 | else: 121 | print('Success: Grabbed the camera') 122 | 123 | 124 | while(True): 125 | # Read each frame in video stream 126 | ret, frame = webcam.read() 127 | # Perform operations on the frame here 128 | # For example convert to Grayscale 129 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 130 | # Overlay Text on the video frame with Exit instructions 131 | font = cv2.FONT_HERSHEY_SIMPLEX 132 | cv2.putText(gray, "Type q to Quit:",(50,50), font, 1,(0,0,0),2,cv2.LINE_AA) 133 | # Display the resulting frame 134 | cv2.imshow('frame',gray) 135 | # Wait for exit key "q" to quit 136 | if cv2.waitKey(1) & 0xFF == ord('q'): 137 | print('Exiting ...') 138 | break 139 | # Release all resources used 140 | webcam.release() 141 | cv2.destroyAllWindows() 142 | 143 | except cv2.error as e: 144 | print('Please correct OpenCV Error') 145 | Success: Grabbed the camera 146 | ``` 147 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_04_video_test/ocv_video_test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 4: ocv_video_test\n", 8 | "\n", 9 | "[Sample 04](ocv_video_test.py) is a sanity test that uses OpenCV to connect to a WebCam and display the video stream. 
This test serves to ensure that OpenCV WebCam installation is working and further validates the development environment. It also shows how to overlay text on video streams.\n", 10 | "\n", 11 | "We start by performing the basic initializations" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 14, 17 | "metadata": { 18 | "collapsed": true 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "#!/usr/bin/env python2\n", 23 | "\n", 24 | "# Python 2/3 compatibility\n", 25 | "from __future__ import print_function\n", 26 | "# Allows use of print like a function in Python 2.x\n", 27 | "\n", 28 | "# Import OpenCV and Numpy modules\n", 29 | "import numpy as np\n", 30 | "import cv2" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "First we need to initialize a Video Web Camera for capturing video with OpenCV. We do this transparently by using an OpenCV API [cv2.VideoCapture()](http://docs.opencv.org/3.0-last-rst/modules/videoio/doc/reading_and_writing_video.html#cv2.VideoCapture)\n", 38 | "\n", 39 | "```\n", 40 | " cv2.VideoCapture(Parameters)\n", 41 | "```\n", 42 | "\n", 43 | " Parameters:\n", 44 | "\n", 45 | " filename – Name and path of file to be loaded.\n", 46 | " device_id - Id of the opened video capturing device (i.e. a camera index).\n", 47 | "\n", 48 | " Device Id:\n", 49 | " \n", 50 | " The default camera is 0 (usually built-in).The second camera would be 1 and so on\n", 51 | "\n", 52 | ">Note: On the Nuc which has no camera, the default Id of \"0\" should work. On a Laptop, you may need to try \"0\" or \"1\" if you have two cameras for front and back." 
53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 15, 58 | "metadata": { 59 | "collapsed": true 60 | }, 61 | "outputs": [], 62 | "source": [ 63 | "webcam = cv2.VideoCapture(0)" 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "cv2.VideoCapture() method has many calls and isOpened() returns (True) if the device is opened successfully" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 16, 76 | "metadata": { 77 | "collapsed": false 78 | }, 79 | "outputs": [ 80 | { 81 | "name": "stdout", 82 | "output_type": "stream", 83 | "text": [ 84 | "Success: Grabbed the camera\n" 85 | ] 86 | } 87 | ], 88 | "source": [ 89 | "# Check if Camera was initialized correctly\n", 90 | "success = webcam.isOpened()\n", 91 | "if success == False:\n", 92 | " print('Error: Camera could not be opened')\n", 93 | "else:\n", 94 | " print('Success: Grabbed the camera')" 95 | ] 96 | }, 97 | { 98 | "cell_type": "markdown", 99 | "metadata": {}, 100 | "source": [ 101 | "Next we use the read() function from cv2.VideoCapture to read a video frame while this is (True)" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 17, 107 | "metadata": { 108 | "collapsed": false 109 | }, 110 | "outputs": [], 111 | "source": [ 112 | "# Read each frame in video stream\n", 113 | "ret, frame = webcam.read() " 114 | ] 115 | }, 116 | { 117 | "cell_type": "markdown", 118 | "metadata": {}, 119 | "source": [ 120 | "Once the frame is read, it is usually converted to grayscale before performing further operations. This avoids having to process color information in real-time. For this we use the same cv2.cvtColor() method from our previous example with just a different color space conversion code." 
121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": 18, 126 | "metadata": { 127 | "collapsed": true 128 | }, 129 | "outputs": [], 130 | "source": [ 131 | "gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)" 132 | ] 133 | }, 134 | { 135 | "cell_type": "markdown", 136 | "metadata": {}, 137 | "source": [ 138 | "Overlay Text on the video frame with Exit instructions" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": 19, 144 | "metadata": { 145 | "collapsed": false 146 | }, 147 | "outputs": [ 148 | { 149 | "data": { 150 | "text/plain": [ 151 | "array([[ 7, 7, 6, ..., 158, 157, 159],\n", 152 | " [ 6, 6, 5, ..., 157, 158, 158],\n", 153 | " [ 10, 6, 7, ..., 158, 158, 158],\n", 154 | " ..., \n", 155 | " [ 7, 5, 5, ..., 145, 145, 147],\n", 156 | " [ 6, 6, 9, ..., 143, 144, 146],\n", 157 | " [ 8, 9, 7, ..., 139, 144, 145]], dtype=uint8)" 158 | ] 159 | }, 160 | "execution_count": 19, 161 | "metadata": {}, 162 | "output_type": "execute_result" 163 | } 164 | ], 165 | "source": [ 166 | "font = cv2.FONT_HERSHEY_SIMPLEX\n", 167 | "cv2.putText(gray, \"Type q to Quit:\",(50,50), font, 1,(0,0,0),2,cv2.LINE_AA)" 168 | ] 169 | }, 170 | { 171 | "cell_type": "markdown", 172 | "metadata": {}, 173 | "source": [ 174 | "Now display the captured frame with overlay text in a GUI window" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": 20, 180 | "metadata": { 181 | "collapsed": true 182 | }, 183 | "outputs": [], 184 | "source": [ 185 | "cv2.namedWindow('Output', cv2.WINDOW_AUTOSIZE)\n", 186 | "cv2.imshow('Output',gray)" 187 | ] 188 | }, 189 | { 190 | "cell_type": "markdown", 191 | "metadata": {}, 192 | "source": [ 193 | "Next comes the event handler where we wait for the q key and then release the devices and resources used" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": 21, 199 | "metadata": { 200 | "collapsed": false 201 | }, 202 | "outputs": [], 203 | "source": [ 204 | "# Wait for exit 
key \"q\" to quit\n", 205 | "if cv2.waitKey(1) & 0xFF == ord('q'):\n", 206 | " print('Exiting ...')" 207 | ] 208 | }, 209 | { 210 | "cell_type": "markdown", 211 | "metadata": {}, 212 | "source": [ 213 | ">Note: Since the interactive tutorial mode is not well suited for handling video, the While(True) loop has been omited and so you will only see a still image. But you can see this working for video in the consolidated example and script.\n", 214 | "\n", 215 | "Next we release the devices and all resources used." 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": 22, 221 | "metadata": { 222 | "collapsed": true 223 | }, 224 | "outputs": [], 225 | "source": [ 226 | "webcam.release()\n", 227 | "cv2.destroyAllWindows()" 228 | ] 229 | }, 230 | { 231 | "cell_type": "markdown", 232 | "metadata": {}, 233 | "source": [ 234 | ">Note: Ensure that the camera was released in the previous step. The camera light should go off. If not restart the kernel before continuing to the next step.\n", 235 | "\n", 236 | "Now putting it all together with exception handling:" 237 | ] 238 | }, 239 | { 240 | "cell_type": "code", 241 | "execution_count": 26, 242 | "metadata": { 243 | "collapsed": false 244 | }, 245 | "outputs": [ 246 | { 247 | "name": "stdout", 248 | "output_type": "stream", 249 | "text": [ 250 | "Success: Grabbed the camera\n", 251 | "Exiting ...\n" 252 | ] 253 | } 254 | ], 255 | "source": [ 256 | "#!/usr/bin/env python2\n", 257 | "\n", 258 | "# Python 2/3 compatibility\n", 259 | "from __future__ import print_function\n", 260 | "# Allows use of print like a function in Python 2.x\n", 261 | "\n", 262 | "# Import OpenCV and Numpy modules\n", 263 | "import numpy as np\n", 264 | "import cv2\n", 265 | "try:\n", 266 | " webcam = cv2.VideoCapture(0)\n", 267 | " # Check if Camera initialized correctly\n", 268 | " success = webcam.isOpened()\n", 269 | " if success == False:\n", 270 | " print('Error: Camera could not be opened')\n", 271 | " else:\n", 272 | " 
print('Success: Grabbed the camera')\n", 273 | "\n", 274 | "\n", 275 | " while(True):\n", 276 | " # Read each frame in video stream\n", 277 | " ret, frame = webcam.read()\n", 278 | " # Perform operations on the frame here\n", 279 | " # For example convert to Grayscale \n", 280 | " gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n", 281 | " # Overlay Text on the video frame with Exit instructions\n", 282 | " font = cv2.FONT_HERSHEY_SIMPLEX\n", 283 | " cv2.putText(gray, \"Type q to Quit:\",(50,50), font, 1,(0,0,0),2,cv2.LINE_AA)\n", 284 | " # Display the resulting frame\n", 285 | " cv2.imshow('frame',gray)\n", 286 | " # Wait for exit key \"q\" to quit\n", 287 | " if cv2.waitKey(1) & 0xFF == ord('q'):\n", 288 | " print('Exiting ...')\n", 289 | " break\n", 290 | " # Release all resources used\n", 291 | " webcam.release()\n", 292 | " cv2.destroyAllWindows()\n", 293 | "\n", 294 | "except cv2.error as e:\n", 295 | " print('Please correct OpenCV Error')" 296 | ] 297 | } 298 | ], 299 | "metadata": { 300 | "kernelspec": { 301 | "display_name": "Python 2", 302 | "language": "python", 303 | "name": "python2" 304 | }, 305 | "language_info": { 306 | "codemirror_mode": { 307 | "name": "ipython", 308 | "version": 2 309 | }, 310 | "file_extension": ".py", 311 | "mimetype": "text/x-python", 312 | "name": "python", 313 | "nbconvert_exporter": "python", 314 | "pygments_lexer": "ipython2", 315 | "version": "2.7.13" 316 | } 317 | }, 318 | "nbformat": 4, 319 | "nbformat_minor": 2 320 | } 321 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_04_video_test/ocv_video_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import OpenCV and Numpy modules 8 | import numpy as np 9 | import cv2 10 | 11 | # Initialize Video Web 
Camera for capture. 12 | # The default camera is 0 (usually built-in) 13 | # The second camera would be 1 and so on 14 | 15 | try: 16 | webcam = cv2.VideoCapture(0) 17 | # Check if Camera initialized correctly 18 | success = webcam.isOpened() 19 | if success == False: 20 | print('Error: Camera could not be opened') 21 | 22 | 23 | while(True): 24 | # Read each frame in video stream 25 | ret, frame = webcam.read() 26 | # Perform operations on the frame here 27 | # For example convert to Grayscale 28 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 29 | # Overlay Text on the video frame with Exit instructions 30 | font = cv2.FONT_HERSHEY_SIMPLEX 31 | cv2.putText(gray, "Type q to Quit:",(50,50), font, 1,(0,0,0),2,cv2.LINE_AA) 32 | # Display the resulting frame 33 | cv2.imshow('frame',gray) 34 | # Wait for exit key "q" to quit 35 | if cv2.waitKey(1) & 0xFF == ord('q'): 36 | break 37 | # Release all resources used 38 | webcam.release() 39 | cv2.destroyAllWindows() 40 | 41 | except cv2.error as e: 42 | print('Please correct OpenCV Error') -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_05_opencl/.ipynb_checkpoints/ocv_ocl_info-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample_05: ocv_ocl_info\n", 8 | "\n", 9 | "[Sample 05](ocv_ocl_info.py) is a simple diagnostic program that determines whether OpenCL is available for use within OpenCV, Enables OpenCL, checks whether it has been enabled and then disables it. The program then checks if OpenCL has been disabled and exits.\n", 10 | "\n", 11 | "### What is OpenCL?\n", 12 | "OpenCL™ (Open Computing Language) is the open standard for parallel programming. Using OpenCL, one can use the GPU for parallel computing tasks other than just for graphics programming. 
One can also use DSP's, FPGA's and other types of processors using OpenCL.\n", 13 | "\n", 14 | "### How does OpenCV use OpenCL?\n", 15 | "In Computer Vision many algorithms can run on a GPU much more effectively than on a CPU: e.g. image processing, matrix arithmetic, computational photography, object detection etc. OpenCV 3.x is able to accelerate and optimize performance by using an architectural concept called Transparent API (T-API) to transparently speed up certain tasks if supported by the underlying hardware.\n", 16 | "\n", 17 | "## Sample Diagnostic Code\n", 18 | "\n", 19 | "Start with the usual initialization\n" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": { 26 | "collapsed": true 27 | }, 28 | "outputs": [], 29 | "source": [ 30 | "#!/usr/bin/env python2\n", 31 | "\n", 32 | "# Python 2/3 compatibility\n", 33 | "from __future__ import print_function\n", 34 | "# Allows use of print like a function in Python 2.x\n", 35 | "\n", 36 | "# Import the OpenCV2 module\n", 37 | "import cv2" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "metadata": {}, 43 | "source": [ 44 | "Using the OpenCV API cv2.ocl.haveOpenCL() returns True if OpenCL is supported. If it is supported, OpenCL can be enabled using cv2.ocl.setUseOpenCL(True) and disabled using cv2.ocl.setUseOpenCL(False). 
To check if OpenCL has been enabled or disabled, use cv2.ocl.useOpenCL() which will return True or False as the case may be.\n", 45 | "\n", 46 | ">Note: OpenCV Python module does not currently support enumeration of OpenCL devices.\n", 47 | "\n", 48 | "The enable OpenCL with exception handling and check whether it was enabled, run the following code:" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": { 55 | "collapsed": true 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "try:\n", 60 | " # Returns True if OpenCL is present\n", 61 | " ocl = cv2.ocl.haveOpenCL()\n", 62 | " # Prints whether OpenCL is present\n", 63 | " print(\"OpenCL Supported?: \", end='')\n", 64 | " print(ocl)\n", 65 | " print()\n", 66 | " # Enables use of OpenCL by OpenCV if present\n", 67 | " if ocl == True:\n", 68 | " print('Now enabling OpenCL support')\n", 69 | " cv2.ocl.setUseOpenCL(True)\n", 70 | " print(\"Has OpenCL been Enabled?: \", end='')\n", 71 | " print(cv2.ocl.useOpenCL())\n", 72 | "\n", 73 | "except cv2.error as e:\n", 74 | " print('Error:')" 75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "metadata": {}, 80 | "source": [ 81 | "The disable OpenCL with exception handling and check whether it has been disabled, run the following code:" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": null, 87 | "metadata": { 88 | "collapsed": true 89 | }, 90 | "outputs": [], 91 | "source": [ 92 | "try:\n", 93 | " ocl_en = cv2.ocl.useOpenCL()\n", 94 | " if ocl_en ==True:\n", 95 | " print('Now disabling OpenCL support')\n", 96 | " cv2.ocl.setUseOpenCL(False)\n", 97 | "\n", 98 | " print(\"Checking - Is OpenCL still Enabled?: \", end='')\n", 99 | " print(cv2.ocl.useOpenCL())\n", 100 | " print()\n", 101 | "\n", 102 | "except cv2.error as e:\n", 103 | " print('Error:')" 104 | ] 105 | } 106 | ], 107 | "metadata": { 108 | "anaconda-cloud": {}, 109 | "kernelspec": { 110 | "display_name": "Python [default]", 111 | "language": 
"python", 112 | "name": "python3" 113 | }, 114 | "language_info": { 115 | "codemirror_mode": { 116 | "name": "ipython", 117 | "version": 3 118 | }, 119 | "file_extension": ".py", 120 | "mimetype": "text/x-python", 121 | "name": "python", 122 | "nbconvert_exporter": "python", 123 | "pygments_lexer": "ipython3", 124 | "version": "3.5.2" 125 | } 126 | }, 127 | "nbformat": 4, 128 | "nbformat_minor": 1 129 | } 130 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_05_opencl/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample_05: ocv_ocl_info 2 | [Sample 05](ocv_ocl_info.py) is a simple diagnostic program that determines whether OpenCL™ is available for use within OpenCV, Enables OpenCL, checks whether it has been enabled and then disables it. The program then checks if OpenCL has been disabled and exits. 3 | 4 | ## What is OpenCL™? 5 | **OpenCL™ (Open Computing Language)** is the open standard for parallel programming. Using OpenCL, one can use the GPU for parallel computing tasks other than just for graphics programming. Once can also use DSP's, FPGA's and other types of processors using OpenCL. 6 | 7 | ## How does OpenCV use OpenCL™? 8 | In Computer Vision many algorithms can run on a GPU much more effectively than on a CPU: e.g. image processing, matrix arithmetic, computational photography, object detection etc. OpenCV 3.x is able to accelerate and optimize performaance by using an architectural concept called _Transparent API (T-API)_ to transparently speed up certain tasks if supported by the underlying hardware. 
9 | 10 | ## Usage: 11 | Launch the interactive tutorial by typing the following command in your command window: 12 | 13 | ``` 14 | jupyter notebook ./ocv_ocl_info.ipynb 15 | ``` 16 | OR 17 | 18 | You may run the script using the command: 19 | 20 | ``` 21 | python ./ocv_ocl_info.py 22 | ``` 23 | ## Code Walkthrough: 24 | 25 | Start with the usual initialization 26 | 27 | ``` 28 | #!/usr/bin/env python2 29 | 30 | # Python 2/3 compatibility 31 | from __future__ import print_function 32 | # Allows use of print like a function in Python 2.x 33 | 34 | # Import the OpenCV2 module 35 | import cv2 36 | ``` 37 | 38 | 39 | Using the OpenCV API **cv2.ocl.haveOpenCL()** returns **(True)** if OpenCL is supported. 40 | 41 | If it is supported, OpenCL can be enabled using **cv2.ocl.setUseOpenCL(True)** and disabled using **cv2.ocl.setUseOpenCL(False)**. 42 | 43 | To check if OpenCL has been enabled or disabled, use **cv2.ocl.useOpenCL()** which will return **(True)** or **(False)** as the case may be. 44 | 45 | >Note: OpenCV Python module does not currently support enumeration of OpenCL devices. 
46 | 47 | The enable OpenCL with exception handling and check whether it was enabled, run the following code: 48 | 49 | ``` 50 | try: 51 | # Returns True if OpenCL is present 52 | ocl = cv2.ocl.haveOpenCL() 53 | # Prints whether OpenCL is present 54 | print("OpenCL Supported?: ", end='') 55 | print(ocl) 56 | print() 57 | # Enables use of OpenCL by OpenCV if present 58 | if ocl == True: 59 | print('Now enabling OpenCL support') 60 | cv2.ocl.setUseOpenCL(True) 61 | print("Has OpenCL been Enabled?: ", end='') 62 | print(cv2.ocl.useOpenCL()) 63 | 64 | except cv2.error as e: 65 | print('Error:') 66 | ``` 67 | 68 | The disable OpenCL with exception handling and check whether it has been disabled, run the following code: 69 | 70 | ``` 71 | try: 72 | ocl_en = cv2.ocl.useOpenCL() 73 | if ocl_en ==True: 74 | print('Now disabling OpenCL support') 75 | cv2.ocl.setUseOpenCL(False) 76 | 77 | print("Checking - Is OpenCL still Enabled?: ", end='') 78 | print(cv2.ocl.useOpenCL()) 79 | print() 80 | 81 | except cv2.error as e: 82 | print('Error:') 83 | ``` 84 | 85 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_05_opencl/ocv_ocl_info.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample_05: ocv_ocl_info\n", 8 | "\n", 9 | "[Sample 05](ocv_ocl_info.py) is a simple diagnostic program that determines whether OpenCL is available for use within OpenCV, Enables OpenCL, checks whether it has been enabled and then disables it. The program then checks if OpenCL has been disabled and exits.\n", 10 | "\n", 11 | "### What is OpenCL?\n", 12 | "OpenCL™ (Open Computing Language) is the open standard for parallel programming. Using OpenCL, one can use the GPU for parallel computing tasks other than just for graphics programming. 
One can also use DSP's, FPGA's and other types of processors using OpenCL.\n", 13 | "\n", 14 | "### How does OpenCV use OpenCL?\n", 15 | "In Computer Vision many algorithms can run on a GPU much more effectively than on a CPU: e.g. image processing, matrix arithmetic, computational photography, object detection etc. OpenCV 3.x is able to accelerate and optimize performance by using an architectural concept called Transparent API (T-API) to transparently speed up certain tasks if supported by the underlying hardware.\n", 16 | "\n", 17 | "## Sample Diagnostic Code\n", 18 | "\n", 19 | "Start with the usual initialization\n" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": { 26 | "collapsed": true 27 | }, 28 | "outputs": [], 29 | "source": [ 30 | "#!/usr/bin/env python2\n", 31 | "\n", 32 | "# Python 2/3 compatibility\n", 33 | "from __future__ import print_function\n", 34 | "# Allows use of print like a function in Python 2.x\n", 35 | "\n", 36 | "# Import the OpenCV2 module\n", 37 | "import cv2" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "metadata": {}, 43 | "source": [ 44 | "Using the OpenCV API cv2.ocl.haveOpenCL() returns True if OpenCL is supported. If it is supported, OpenCL can be enabled using cv2.ocl.setUseOpenCL(True) and disabled using cv2.ocl.setUseOpenCL(False). 
To check if OpenCL has been enabled or disabled, use cv2.ocl.useOpenCL() which will return True or False as the case may be.\n", 45 | "\n", 46 | ">Note: OpenCV Python module does not currently support enumeration of OpenCL devices.\n", 47 | "\n", 48 | "The enable OpenCL with exception handling and check whether it was enabled, run the following code:" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": { 55 | "collapsed": true 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "try:\n", 60 | " # Returns True if OpenCL is present\n", 61 | " ocl = cv2.ocl.haveOpenCL()\n", 62 | " # Prints whether OpenCL is present\n", 63 | " print(\"OpenCL Supported?: \", end='')\n", 64 | " print(ocl)\n", 65 | " print()\n", 66 | " # Enables use of OpenCL by OpenCV if present\n", 67 | " if ocl == True:\n", 68 | " print('Now enabling OpenCL support')\n", 69 | " cv2.ocl.setUseOpenCL(True)\n", 70 | " print(\"Has OpenCL been Enabled?: \", end='')\n", 71 | " print(cv2.ocl.useOpenCL())\n", 72 | "\n", 73 | "except cv2.error as e:\n", 74 | " print('Error:')" 75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "metadata": {}, 80 | "source": [ 81 | "The disable OpenCL with exception handling and check whether it has been disabled, run the following code:" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": null, 87 | "metadata": { 88 | "collapsed": true 89 | }, 90 | "outputs": [], 91 | "source": [ 92 | "try:\n", 93 | " ocl_en = cv2.ocl.useOpenCL()\n", 94 | " if ocl_en ==True:\n", 95 | " print('Now disabling OpenCL support')\n", 96 | " cv2.ocl.setUseOpenCL(False)\n", 97 | "\n", 98 | " print(\"Checking - Is OpenCL still Enabled?: \", end='')\n", 99 | " print(cv2.ocl.useOpenCL())\n", 100 | " print()\n", 101 | "\n", 102 | "except cv2.error as e:\n", 103 | " print('Error:')" 104 | ] 105 | } 106 | ], 107 | "metadata": { 108 | "anaconda-cloud": {}, 109 | "kernelspec": { 110 | "display_name": "Python [default]", 111 | "language": 
"python", 112 | "name": "python3" 113 | }, 114 | "language_info": { 115 | "codemirror_mode": { 116 | "name": "ipython", 117 | "version": 3 118 | }, 119 | "file_extension": ".py", 120 | "mimetype": "text/x-python", 121 | "name": "python", 122 | "nbconvert_exporter": "python", 123 | "pygments_lexer": "ipython3", 124 | "version": "3.5.2" 125 | } 126 | }, 127 | "nbformat": 4, 128 | "nbformat_minor": 1 129 | } 130 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_05_opencl/ocv_ocl_info.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import the OpenCV2 module 8 | import cv2 9 | print() 10 | print('OpenCV - OpenCL Info sample') 11 | print() 12 | 13 | try: 14 | # Returns True if OpenCL is present 15 | ocl = cv2.ocl.haveOpenCL() 16 | # Prints whether OpenCL is present 17 | print("OpenCL Supported?: ", end='') 18 | print(ocl) 19 | print() 20 | # Enables use of OpenCL by OpenCV if present 21 | if ocl == True: 22 | print('Now enabling OpenCL support') 23 | cv2.ocl.setUseOpenCL(True) 24 | print("Has OpenCL been Enabled?: ", end='') 25 | print(cv2.ocl.useOpenCL()) 26 | 27 | except cv2.error as e: 28 | print('Error:') 29 | 30 | # OpenCV Python module does not currently support enumeration of OpenCL devices. 
31 | # Disabling OpenCL can be done as follows: 32 | 33 | try: 34 | ocl_en = cv2.ocl.useOpenCL() 35 | if ocl_en ==True: 36 | print('Now disabling OpenCL support') 37 | cv2.ocl.setUseOpenCL(False) 38 | 39 | print("Checking - Is OpenCL still Enabled?: ", end='') 40 | print(cv2.ocl.useOpenCL()) 41 | print() 42 | 43 | except cv2.error as e: 44 | print('Error:') -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_06_video_capture/.ipynb_checkpoints/ocv_vid_cap-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 6: ocv_vid_cap\n", 8 | "\n", 9 | "[Sample 06](ocv_vid_cap.py) is a simple program that uses OpenCV to connect to a WebCam in order to capture and save an image. This example is the basic first step for most video analytics programs. The video output of the WebCam is displayed and when the user inputs a keystroke, the frame is captured and written to an image file.\n", 10 | "\n", 11 | "Perform the usual initialization" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 1, 17 | "metadata": { 18 | "collapsed": false 19 | }, 20 | "outputs": [ 21 | { 22 | "name": "stdout", 23 | "output_type": "stream", 24 | "text": [ 25 | "OpenCV Version: 3.2.0\n", 26 | "Numpy Version: 1.12.1\n", 27 | "OpenCV Video Capture Sample\n", 28 | "Type c to Capture and q to Quit\n" 29 | ] 30 | } 31 | ], 32 | "source": [ 33 | "#!/usr/bin/env python\n", 34 | "\n", 35 | "# Python 2/3 compatibility\n", 36 | "from __future__ import print_function\n", 37 | "# Allows use of print like a function in Python 2.x\n", 38 | "\n", 39 | "# Import Numpy and OpenCV modules\n", 40 | "import numpy as np\n", 41 | "import cv2\n", 42 | "# Print Debug Info\n", 43 | "print('OpenCV Version:', cv2.__version__)\n", 44 | "print('Numpy Version:', np.__version__)\n", 45 | 
"print('OpenCV Video Capture Sample')\n", 46 | "print('Type c to Capture and q to Quit')" 47 | ] 48 | }, 49 | { 50 | "cell_type": "markdown", 51 | "metadata": {}, 52 | "source": [ 53 | "Next, open a named GUI window for displaying the webcam video in real-time. Initialize a counter to keep track of captures and initialize the webcam. These are the same steps taken in sample_04 to test the camera." 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": 6, 59 | "metadata": { 60 | "collapsed": false 61 | }, 62 | "outputs": [ 63 | { 64 | "name": "stdout", 65 | "output_type": "stream", 66 | "text": [ 67 | "Success: Grabbed the camera\n" 68 | ] 69 | } 70 | ], 71 | "source": [ 72 | "# Initialize GUI window to grab keystrokes when it has focus.\n", 73 | "cv2.namedWindow(\"Capture\")\n", 74 | "# Initialize Capture Counter\n", 75 | "cap_cnt = 0\n", 76 | "# Initialize Video Web Camera for capture. The default camera is 0 (usually built-in) \n", 77 | "# The second camera would be 1 and so on\n", 78 | "webcam = cv2.VideoCapture(0)\n", 79 | "# Check if Camera initialized correctly\n", 80 | "success = webcam.isOpened()\n", 81 | "if success == False:\n", 82 | " print('Error: Camera could not be opened')\n", 83 | "else:\n", 84 | " print('Success: Grabbed the camera')" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "Next we setup a loop that reads each frame and then displays it. We also setup an event handler that monitors the keyboard for the c and q keys to capture a framegrab or quit the program respectively. If the c key is pressed, we use the OpenCV API [cv2.imwrite()](http://docs.opencv.org/3.0-last-rst/modules/imgcodecs/doc/reading_and_writing_images.html#cv2.imwrite) to write the frame as an image file to disk. The filename is incremented with the counter we initialized before." 
92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 7, 97 | "metadata": { 98 | "collapsed": false 99 | }, 100 | "outputs": [ 101 | { 102 | "name": "stdout", 103 | "output_type": "stream", 104 | "text": [ 105 | "Quitting...\n" 106 | ] 107 | } 108 | ], 109 | "source": [ 110 | "while True:\n", 111 | " # Read each frame in video stream\n", 112 | " ret, frame = webcam.read()\n", 113 | " # Display each frame in video stream\n", 114 | " cv2.imshow(\"Capture\", frame)\n", 115 | " if not ret:\n", 116 | " break\n", 117 | "# Monitor keystrokes\n", 118 | " k = cv2.waitKey(1)\n", 119 | "\n", 120 | " if k & 0xFF == ord('q'):\n", 121 | " # q key pressed so quit\n", 122 | " print(\"Quitting...\")\n", 123 | " break\n", 124 | " elif k & 0xFF == ord('c'):\n", 125 | " # c key pressed so capture frame to image file\n", 126 | " cap_name = \"capture_{}.png\".format(cap_cnt)\n", 127 | " cv2.imwrite(cap_name, frame)\n", 128 | " print(\"Saving {}!\".format(cap_name))\n", 129 | " # Increment Capture Counter for next frame to capture\n", 130 | " cap_cnt += 1" 131 | ] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "Now release all devices and resources used before exiting." 
138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": 10, 143 | "metadata": { 144 | "collapsed": true 145 | }, 146 | "outputs": [], 147 | "source": [ 148 | "# Release all resources used\n", 149 | "webcam.release()\n", 150 | "cv2.destroyAllWindows()" 151 | ] 152 | } 153 | ], 154 | "metadata": { 155 | "anaconda-cloud": {}, 156 | "kernelspec": { 157 | "display_name": "Python 2", 158 | "language": "python", 159 | "name": "python2" 160 | }, 161 | "language_info": { 162 | "codemirror_mode": { 163 | "name": "ipython", 164 | "version": 2 165 | }, 166 | "file_extension": ".py", 167 | "mimetype": "text/x-python", 168 | "name": "python", 169 | "nbconvert_exporter": "python", 170 | "pygments_lexer": "ipython2", 171 | "version": "2.7.13" 172 | } 173 | }, 174 | "nbformat": 4, 175 | "nbformat_minor": 1 176 | } 177 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_06_video_capture/README.md: -------------------------------------------------------------------------------- 1 | 2 | # OpenCV Tutorial Sample 6: ocv_vid_cap 3 | [Sample 06](ocv_vid_cap.py) is a simple program that uses OpenCV to connect to a WebCam in order to capture and save an image. This example is the basic first step for most video analytics programs. The video output of the WebCam is displayed and when the user inputs a keystroke, the frame is captured and written to an image file. 4 | 5 | ## Usage: 6 | Launch the interactive tutorial by typing the following command in your comand window: 7 | 8 | ``` 9 | jupyter notebook ./ocv_vid_cap.ipynb 10 | ``` 11 | OR 12 | 13 | You may run the script using the command: 14 | 15 | ``` 16 | python ./ocv_vid_cap.py 17 | ``` 18 | ## Code Walkthrough: 19 | 20 | Perform the usual initializations and print some debug info. 
21 | 22 | ``` 23 | #!/usr/bin/env python 24 | 25 | # Python 2/3 compatibility 26 | from __future__ import print_function 27 | # Allows use of print like a function in Python 2.x 28 | 29 | # Import Numpy and OpenCV modules 30 | import numpy as np 31 | import cv2 32 | # Print Debug Info 33 | print('OpenCV Version:', cv2.__version__) 34 | print('Numpy Version:', np.__version__) 35 | print('OpenCV Video Capture Sample') 36 | print('Type c to Capture and q to Quit') 37 | ``` 38 | **Console Output:** 39 | ``` 40 | 41 | OpenCV Version: 3.2.0 42 | Numpy Version: 1.12.1 43 | OpenCV Video Capture Sample 44 | Type c to Capture and q to Quit 45 | ``` 46 | 47 | Next, open a named GUI window for displaying the webcam video in real-time. Initialize a counter to keep track of captures and initialize the webcam. These are the same steps taken in sample_04 to test the camera. 48 | 49 | ``` 50 | # Initialize GUI window to grab keystrokes when it has focus. 51 | cv2.namedWindow("Capture") 52 | # Initialize Capture Counter 53 | cap_cnt = 0 54 | # Initialize Video Web Camera for capture. The default camera is 0 (usually built-in) 55 | # The second camera would be 1 and so on 56 | webcam = cv2.VideoCapture(0) 57 | # Check if Camera initialized correctly 58 | success = webcam.isOpened() 59 | if success == False: 60 | print('Error: Camera could not be opened') 61 | else: 62 | print('Success: Grabbed the camera') 63 | ``` 64 | **Console output:** Assuming this was successful ... 65 | 66 | ``` 67 | Success: Grabbed the camera 68 | ``` 69 | 70 | Next we setup a loop that reads each frame and then displays it. We also setup an event handler that monitors the keyboard for the c and q keys to capture a framegrab or quit the program respectively. 71 | 72 | If the c key is pressed, we use the OpenCV API **cv2.imwrite()** to write the frame as an image file to disk. The filename is incremented with the counter we initialized before. 
73 | 74 | ``` 75 | while True: 76 | # Read each frame in video stream 77 | ret, frame = webcam.read() 78 | # Display each frame in video stream 79 | cv2.imshow("Capture", frame) 80 | if not ret: 81 | break 82 | # Monitor keystrokes 83 | k = cv2.waitKey(1) 84 | 85 | if k & 0xFF == ord('q'): 86 | # q key pressed so quit 87 | print("Quitting...") 88 | break 89 | elif k & 0xFF == ord('c'): 90 | # c key pressed so capture frame to image file 91 | cap_name = "capture_{}.png".format(cap_cnt) 92 | cv2.imwrite(cap_name, frame) 93 | print("Saving {}!".format(cap_name)) 94 | # Increment Capture Counter for next frame to capture 95 | cap_cnt += 1 96 | ``` 97 | 98 | Now release all devices and resources used before exiting. 99 | 100 | ``` 101 | webcam.release() 102 | cv2.destroyAllWindows() 103 | ``` 104 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_06_video_capture/ocv_vid_cap.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 6: ocv_vid_cap\n", 8 | "\n", 9 | "[Sample 06](ocv_vid_cap.py) is a simple program that uses OpenCV to connect to a WebCam in order to capture and save an image. This example is the basic first step for most video analytics programs. 
The video output of the WebCam is displayed and when the user inputs a keystroke, the frame is captured and written to an image file.\n", 10 | "\n", 11 | "Perform the usual initialization" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 1, 17 | "metadata": { 18 | "collapsed": false 19 | }, 20 | "outputs": [ 21 | { 22 | "name": "stdout", 23 | "output_type": "stream", 24 | "text": [ 25 | "OpenCV Version: 3.2.0\n", 26 | "Numpy Version: 1.12.1\n", 27 | "OpenCV Video Capture Sample\n", 28 | "Type c to Capture and q to Quit\n" 29 | ] 30 | } 31 | ], 32 | "source": [ 33 | "#!/usr/bin/env python\n", 34 | "\n", 35 | "# Python 2/3 compatibility\n", 36 | "from __future__ import print_function\n", 37 | "# Allows use of print like a function in Python 2.x\n", 38 | "\n", 39 | "# Import Numpy and OpenCV modules\n", 40 | "import numpy as np\n", 41 | "import cv2\n", 42 | "# Print Debug Info\n", 43 | "print('OpenCV Version:', cv2.__version__)\n", 44 | "print('Numpy Version:', np.__version__)\n", 45 | "print('OpenCV Video Capture Sample')\n", 46 | "print('Type c to Capture and q to Quit')" 47 | ] 48 | }, 49 | { 50 | "cell_type": "markdown", 51 | "metadata": {}, 52 | "source": [ 53 | "Next, open a named GUI window for displaying the webcam video in real-time. Initialize a counter to keep track of captures and initialize the webcam. These are the same steps taken in sample_04 to test the camera." 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": 6, 59 | "metadata": { 60 | "collapsed": false 61 | }, 62 | "outputs": [ 63 | { 64 | "name": "stdout", 65 | "output_type": "stream", 66 | "text": [ 67 | "Success: Grabbed the camera\n" 68 | ] 69 | } 70 | ], 71 | "source": [ 72 | "# Initialize GUI window to grab keystrokes when it has focus.\n", 73 | "cv2.namedWindow(\"Capture\")\n", 74 | "# Initialize Capture Counter\n", 75 | "cap_cnt = 0\n", 76 | "# Initialize Video Web Camera for capture. 
The default camera is 0 (usually built-in) \n", 77 | "# The second camera would be 1 and so on\n", 78 | "webcam = cv2.VideoCapture(0)\n", 79 | "# Check if Camera initialized correctly\n", 80 | "success = webcam.isOpened()\n", 81 | "if success == False:\n", 82 | " print('Error: Camera could not be opened')\n", 83 | "else:\n", 84 | " print('Success: Grabbed the camera')" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "Next we setup a loop that reads each frame and then displays it. We also setup an event handler that monitors the keyboard for the c and q keys to capture a framegrab or quit the program respectively. If the c key is pressed, we use the OpenCV API [cv2.imwrite()](http://docs.opencv.org/3.0-last-rst/modules/imgcodecs/doc/reading_and_writing_images.html#cv2.imwrite) to write the frame as an image file to disk. The filename is incremented with the counter we initialized before." 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 7, 97 | "metadata": { 98 | "collapsed": false 99 | }, 100 | "outputs": [ 101 | { 102 | "name": "stdout", 103 | "output_type": "stream", 104 | "text": [ 105 | "Quitting...\n" 106 | ] 107 | } 108 | ], 109 | "source": [ 110 | "while True:\n", 111 | " # Read each frame in video stream\n", 112 | " ret, frame = webcam.read()\n", 113 | " # Display each frame in video stream\n", 114 | " cv2.imshow(\"Capture\", frame)\n", 115 | " if not ret:\n", 116 | " break\n", 117 | "# Monitor keystrokes\n", 118 | " k = cv2.waitKey(1)\n", 119 | "\n", 120 | " if k & 0xFF == ord('q'):\n", 121 | " # q key pressed so quit\n", 122 | " print(\"Quitting...\")\n", 123 | " break\n", 124 | " elif k & 0xFF == ord('c'):\n", 125 | " # c key pressed so capture frame to image file\n", 126 | " cap_name = \"capture_{}.png\".format(cap_cnt)\n", 127 | " cv2.imwrite(cap_name, frame)\n", 128 | " print(\"Saving {}!\".format(cap_name))\n", 129 | " # Increment Capture Counter for next frame to capture\n", 130 
| " cap_cnt += 1" 131 | ] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "Now release all devices and resources used before exiting." 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": 10, 143 | "metadata": { 144 | "collapsed": true 145 | }, 146 | "outputs": [], 147 | "source": [ 148 | "# Release all resources used\n", 149 | "webcam.release()\n", 150 | "cv2.destroyAllWindows()" 151 | ] 152 | } 153 | ], 154 | "metadata": { 155 | "anaconda-cloud": {}, 156 | "kernelspec": { 157 | "display_name": "Python 2", 158 | "language": "python", 159 | "name": "python2" 160 | }, 161 | "language_info": { 162 | "codemirror_mode": { 163 | "name": "ipython", 164 | "version": 2 165 | }, 166 | "file_extension": ".py", 167 | "mimetype": "text/x-python", 168 | "name": "python", 169 | "nbconvert_exporter": "python", 170 | "pygments_lexer": "ipython2", 171 | "version": "2.7.13" 172 | } 173 | }, 174 | "nbformat": 4, 175 | "nbformat_minor": 1 176 | } 177 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_06_video_capture/ocv_vid_cap.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import Numpy and OpenCV modules 8 | import numpy as np 9 | import cv2 10 | # Print Debug Info 11 | print('OpenCV Version:', cv2.__version__) 12 | print('Numpy Version:', np.__version__) 13 | print('OpenCV Video Capture Sample') 14 | print('Type c to Capture and q to Quit') 15 | 16 | try: 17 | # Initialize GUI window to grab keystrokes when it has focus 18 | cv2.namedWindow("Capture") 19 | # Initialize Capture Counter 20 | cap_cnt = 0 21 | # Initialize Video Web Camera for capture. 
22 | # The default camera is 0 (usually built-in) 23 | # The second camera would be 1 and so on 24 | webcam = cv2.VideoCapture(0) 25 | # Check if Camera initialized correctly 26 | success = webcam.isOpened() 27 | if success == False: 28 | print('Error: Camera could not be opened') 29 | 30 | while True: 31 | # Read each frame in video stream 32 | ret, frame = webcam.read() 33 | # Display each frame in video stream 34 | cv2.imshow("Capture", frame) 35 | if not ret: 36 | break 37 | # Monitor keystrokes 38 | k = cv2.waitKey(1) 39 | 40 | if k & 0xFF == ord('q'): 41 | # q key pressed so quit 42 | print("Quitting...") 43 | break 44 | elif k & 0xFF == ord('c'): 45 | # c key pressed so capture frame to image file 46 | cap_name = "capture_{}.png".format(cap_cnt) 47 | cv2.imwrite(cap_name, frame) 48 | print("Saving {}!".format(cap_name)) 49 | # Increment Capture Counter for next frame to capture 50 | cap_cnt += 1 51 | 52 | # Release all resources used 53 | webcam.release() 54 | cv2.destroyAllWindows() 55 | 56 | except cv2.error as e: 57 | print('Please correct OpenCV Error') 58 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_07_hw_info/.ipynb_checkpoints/ocv_hw_info-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 7: ocv_hw_info\n", 8 | "[Sample 07](ocv_hw_info.py) is a simple diagnostic program that determines how many logical CPU's are available and then queries the hardware to check whether MMX and Streaming SIMD Extensions (SSE), Advanced Vector Extensions etc. are supported .\n", 9 | "\n", 10 | "> _**Note:**_ The OpenCV function cv2.checkHardwareSupport(feature) returns true if the host hardware supports the specified feature. 
When users call setUseOptimized(False), all the subsequent calls to cv2.checkHardwareSupport() will return false until cv2.setUseOptimized(True) is called. This way users can dynamically switch on and off the optimized code in OpenCV.\n", 11 | "\n", 12 | "Start with the usual initializations" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 1, 18 | "metadata": { 19 | "collapsed": true 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "#!/usr/bin/env python\n", 24 | "\n", 25 | "# Python 2/3 compatibility\n", 26 | "from __future__ import print_function\n", 27 | "# Allows use of print like a function in Python 2.x\n", 28 | "\n", 29 | "# Import the OpenCV2 module\n", 30 | "import cv2" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "Check the number of logical CPUs available" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 2, 43 | "metadata": { 44 | "collapsed": false 45 | }, 46 | "outputs": [ 47 | { 48 | "name": "stdout", 49 | "output_type": "stream", 50 | "text": [ 51 | "\n", 52 | "Number of logical CPU's Available: 8\n", 53 | "\n" 54 | ] 55 | } 56 | ], 57 | "source": [ 58 | "# Returns the number of logical CPUs available\n", 59 | "cpu = cv2.getNumberOfCPUs()\n", 60 | "print()\n", 61 | "print(\"Number of logical CPU's Available: \", end='')\n", 62 | "print(cpu)\n", 63 | "print()" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": 3, 69 | "metadata": { 70 | "collapsed": true 71 | }, 72 | "outputs": [], 73 | "source": [ 74 | "#Reset Hardware Optimization Flags\n", 75 | "cv2.setUseOptimized(True)" 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "metadata": {}, 81 | "source": [ 82 | "The functions below return true if the host hardware supports the specified feature.\n", 83 | "When user calls setUseOptimized(false), all the subsequent calls to checkHardwareSupport() will return false until setUseOptimized(true) is called. 
\n", 84 | "This way user can dynamically switch on and off the optimized code in OpenCV." 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": 4, 90 | "metadata": { 91 | "collapsed": true 92 | }, 93 | "outputs": [], 94 | "source": [ 95 | "# Assign ID to Features as this doesn't seem to be working currently for Python\n", 96 | "cv2.CPU_MMX = 1\n", 97 | "cv2.CPU_SSE = 2\n", 98 | "cv2.CPU_SSE2 = 3\n", 99 | "cv2.CPU_SSE3 = 4\n", 100 | "cv2.CPU_SSSE3 = 5\n", 101 | "cv2.CPU_SSE4_1 = 6\n", 102 | "cv2.CPU_SSE4_2 = 7\n", 103 | "cv2.CPU_POPCNT = 8\n", 104 | "cv2.CPU_AVX = 9" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 5, 110 | "metadata": { 111 | "collapsed": false 112 | }, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | "CPU MMX Capable?: True\n" 119 | ] 120 | } 121 | ], 122 | "source": [ 123 | "# Returns True if CPU is MMX capable\n", 124 | "mmx = cv2.checkHardwareSupport(cv2.CPU_MMX)\n", 125 | "print(\"CPU MMX Capable?: \", end='')\n", 126 | "print(mmx)" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": 6, 132 | "metadata": { 133 | "collapsed": false 134 | }, 135 | "outputs": [ 136 | { 137 | "name": "stdout", 138 | "output_type": "stream", 139 | "text": [ 140 | "CPU SSE Capable?: True\n" 141 | ] 142 | } 143 | ], 144 | "source": [ 145 | "# Returns True if CPU is SSE capable\n", 146 | "sse = cv2.checkHardwareSupport(cv2.CPU_SSE)\n", 147 | "print(\"CPU SSE Capable?: \", end='')\n", 148 | "print(sse)" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 7, 154 | "metadata": { 155 | "collapsed": false 156 | }, 157 | "outputs": [ 158 | { 159 | "name": "stdout", 160 | "output_type": "stream", 161 | "text": [ 162 | "CPU SSE 2 Capable?: True\n" 163 | ] 164 | } 165 | ], 166 | "source": [ 167 | "# Returns True if CPU is SSE2 capable\n", 168 | "sse2 = cv2.checkHardwareSupport(cv2.CPU_SSE2)\n", 169 | "print(\"CPU SSE 2 Capable?: 
\", end='')\n", 170 | "print(sse2)" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": 8, 176 | "metadata": { 177 | "collapsed": false 178 | }, 179 | "outputs": [ 180 | { 181 | "name": "stdout", 182 | "output_type": "stream", 183 | "text": [ 184 | "CPU SSE 3 Capable?: True\n" 185 | ] 186 | } 187 | ], 188 | "source": [ 189 | "# Returns True if CPU is SSE3 capable\n", 190 | "sse3 = cv2.checkHardwareSupport(cv2.CPU_SSE3)\n", 191 | "print(\"CPU SSE 3 Capable?: \", end='')\n", 192 | "print(sse3)" 193 | ] 194 | }, 195 | { 196 | "cell_type": "code", 197 | "execution_count": 9, 198 | "metadata": { 199 | "collapsed": false 200 | }, 201 | "outputs": [ 202 | { 203 | "name": "stdout", 204 | "output_type": "stream", 205 | "text": [ 206 | "CPU SSSE 3 Capable?: True\n" 207 | ] 208 | } 209 | ], 210 | "source": [ 211 | "# Returns True if CPU is SSSE3 capable\n", 212 | "ssse3 = cv2.checkHardwareSupport(cv2.CPU_SSSE3)\n", 213 | "print(\"CPU SSSE 3 Capable?: \", end='')\n", 214 | "print(ssse3)" 215 | ] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "execution_count": 10, 220 | "metadata": { 221 | "collapsed": false 222 | }, 223 | "outputs": [ 224 | { 225 | "name": "stdout", 226 | "output_type": "stream", 227 | "text": [ 228 | "CPU SSE 4.1 Capable?: True\n" 229 | ] 230 | } 231 | ], 232 | "source": [ 233 | "# Returns True if CPU is SSE4.1 capable\n", 234 | "sse4_1 = cv2.checkHardwareSupport(cv2.CPU_SSE4_1)\n", 235 | "print(\"CPU SSE 4.1 Capable?: \", end='')\n", 236 | "#print(cv2.getNumberOfCPUs())\n", 237 | "print(sse4_1)" 238 | ] 239 | }, 240 | { 241 | "cell_type": "code", 242 | "execution_count": 11, 243 | "metadata": { 244 | "collapsed": false 245 | }, 246 | "outputs": [ 247 | { 248 | "name": "stdout", 249 | "output_type": "stream", 250 | "text": [ 251 | "CPU SSE 4.2 Capable?: True\n" 252 | ] 253 | } 254 | ], 255 | "source": [ 256 | "# Returns True if CPU is SSE4.2 capable\n", 257 | "sse4_2 = cv2.checkHardwareSupport(cv2.CPU_SSE4_2)\n", 258 | 
"print(\"CPU SSE 4.2 Capable?: \", end='')\n", 259 | "print(sse4_2)" 260 | ] 261 | }, 262 | { 263 | "cell_type": "code", 264 | "execution_count": 12, 265 | "metadata": { 266 | "collapsed": false 267 | }, 268 | "outputs": [ 269 | { 270 | "name": "stdout", 271 | "output_type": "stream", 272 | "text": [ 273 | "CPU POPCNT Capable?: True\n" 274 | ] 275 | } 276 | ], 277 | "source": [ 278 | "# Returns True if CPU is POP capable\n", 279 | "popcnt = cv2.checkHardwareSupport(cv2.CPU_POPCNT)\n", 280 | "print(\"CPU POPCNT Capable?: \", end='')\n", 281 | "print(popcnt)" 282 | ] 283 | }, 284 | { 285 | "cell_type": "code", 286 | "execution_count": 13, 287 | "metadata": { 288 | "collapsed": false 289 | }, 290 | "outputs": [ 291 | { 292 | "name": "stdout", 293 | "output_type": "stream", 294 | "text": [ 295 | "CPU AVX Capable?: True\n" 296 | ] 297 | } 298 | ], 299 | "source": [ 300 | "# Returns True if CPU is AVX capable\n", 301 | "avx = cv2.checkHardwareSupport(cv2.CPU_AVX)\n", 302 | "print(\"CPU AVX Capable?: \", end='')\n", 303 | "print(avx)" 304 | ] 305 | } 306 | ], 307 | "metadata": { 308 | "anaconda-cloud": {}, 309 | "kernelspec": { 310 | "display_name": "Python 2", 311 | "language": "python", 312 | "name": "python2" 313 | }, 314 | "language_info": { 315 | "codemirror_mode": { 316 | "name": "ipython", 317 | "version": 2 318 | }, 319 | "file_extension": ".py", 320 | "mimetype": "text/x-python", 321 | "name": "python", 322 | "nbconvert_exporter": "python", 323 | "pygments_lexer": "ipython2", 324 | "version": "2.7.13" 325 | } 326 | }, 327 | "nbformat": 4, 328 | "nbformat_minor": 1 329 | } 330 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_07_hw_info/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample 7: ocv_hw_info 2 | [Sample 07](ocv_hw_info.py) is a simple diagnostic program that determines how many logical CPU's are available and then queries 
the hardware to check whether MMX™ technology and Intel® Streaming SIMD Extensions (Intel® SSE), Intel® Advanced Vector Extensions etc. are supported . 3 | 4 | >Note: The OpenCV function cv2.checkHardwareSupport(feature) returns true if the host hardware supports the specified feature. When users call setUseOptimized(False), all the subsequent calls to cv2.checkHardwareSupport() will return false until cv2.setUseOptimized(True) is called. This way users can dynamically switch on and off the optimized code in OpenCV. 5 | 6 | ## Usage: 7 | Launch the interactive tutorial by typing the following command in your comand window: 8 | 9 | ``` 10 | jupyter notebook ./ocv_hw_info.ipynb 11 | ``` 12 | OR 13 | 14 | You may run the script using the command: 15 | 16 | ``` 17 | python ./ocv_hw_info.py 18 | ``` 19 | ## Code Walkthrough: 20 | Start with the usual initializations 21 | 22 | ``` 23 | #!/usr/bin/env python 24 | 25 | # Python 2/3 compatibility 26 | from __future__ import print_function 27 | # Allows use of print like a function in Python 2.x 28 | 29 | # Import the OpenCV2 module 30 | import cv2 31 | ``` 32 | Check the number of logical CPUs available. This function returns the number of logical CPUs available 33 | 34 | ``` 35 | cpu = cv2.getNumberOfCPUs() 36 | print() 37 | print("Number of logical CPU's Available: ", end='') 38 | print(cpu) 39 | print() 40 | ``` 41 | **Console Output:** 42 | ``` 43 | Number of logical CPU's Available: 8 44 | ``` 45 | 46 | Now Reset Hardware Optimization Flags in case they were set by some other program. 47 | ``` 48 | cv2.setUseOptimized(True) 49 | ``` 50 | The functions below return true if the host hardware supports the specified feature. When user calls **setUseOptimized(False)**, all the subsequent calls to **checkHardwareSupport()** will return **(False)** until **setUseOptimized(True)** is called. This way user can dynamically switch on and off the optimized code in OpenCV. 
51 | 52 | ``` 53 | # Assign ID to Features as labels don't seem to be working currently for Python 54 | cv2.CPU_MMX = 1 55 | cv2.CPU_SSE = 2 56 | cv2.CPU_SSE2 = 3 57 | cv2.CPU_SSE3 = 4 58 | cv2.CPU_SSSE3 = 5 59 | cv2.CPU_SSE4_1 = 6 60 | cv2.CPU_SSE4_2 = 7 61 | cv2.CPU_POPCNT = 8 62 | cv2.CPU_AVX = 9 63 | ``` 64 | Now we can test and print support status for each hardware feature: 65 | 66 | Returns True if CPU is **MMX** capable 67 | ``` 68 | mmx = cv2.checkHardwareSupport(cv2.CPU_MMX) 69 | print("CPU MMX Capable?: ", end='') 70 | print(mmx) 71 | ``` 72 | **Console Output:** 73 | ``` 74 | CPU MMX Capable?: True 75 | ``` 76 | 77 | Returns True if CPU is **SSE** capable 78 | ``` 79 | sse = cv2.checkHardwareSupport(cv2.CPU_SSE) 80 | print("CPU SSE Capable?: ", end='') 81 | print(sse) 82 | ``` 83 | 84 | **Console Output:** 85 | ``` 86 | CPU SSE Capable?: True 87 | ``` 88 | 89 | Returns True if CPU is **SSE2** capable 90 | ``` 91 | sse2 = cv2.checkHardwareSupport(cv2.CPU_SSE2) 92 | print("CPU SSE 2 Capable?: ", end='') 93 | print(sse2) 94 | ``` 95 | **Console Output:** 96 | ``` 97 | CPU SSE 2 Capable?: True 98 | ``` 99 | 100 | Returns True if CPU is **SSE3** capable 101 | ``` 102 | sse3 = cv2.checkHardwareSupport(cv2.CPU_SSE3) 103 | print("CPU SSE 3 Capable?: ", end='') 104 | print(sse3) 105 | ``` 106 | **Console Output:** 107 | ``` 108 | CPU SSE 3 Capable?: True 109 | ``` 110 | 111 | Returns True if CPU is **SSSE3** capable 112 | ``` 113 | ssse3 = cv2.checkHardwareSupport(cv2.CPU_SSSE3) 114 | print("CPU SSSE 3 Capable?: ", end='') 115 | print(ssse3) 116 | ``` 117 | **Console Output:** 118 | ``` 119 | CPU SSSE 3 Capable?: True 120 | ``` 121 | 122 | Returns True if CPU is **SSE4.1** capable 123 | ``` 124 | sse4_1 = cv2.checkHardwareSupport(cv2.CPU_SSE4_1) 125 | print("CPU SSE 4.1 Capable?: ", end='') 126 | print(sse4_1) 127 | ``` 128 | **Console Output:** 129 | ``` 130 | CPU SSE 4.1 Capable?: True 131 | ``` 132 | 133 | Returns True if CPU is **SSE4.2** capable 134 | ``` 
135 | sse4_2 = cv2.checkHardwareSupport(cv2.CPU_SSE4_2) 136 | print("CPU SSE 4.2 Capable?: ", end='') 137 | print(sse4_2) 138 | ``` 139 | **Console Output:** 140 | ``` 141 | CPU SSE 4.2 Capable?: True 142 | ``` 143 | 144 | Returns True if CPU is **POP** capable 145 | ``` 146 | popcnt = cv2.checkHardwareSupport(cv2.CPU_POPCNT) 147 | print("CPU POPCNT Capable?: ", end='') 148 | print(popcnt) 149 | ``` 150 | **Console Output:** 151 | ``` 152 | CPU POPCNT Capable?: True 153 | ``` 154 | 155 | Returns True if CPU is **AVX** capable 156 | ``` 157 | avx = cv2.checkHardwareSupport(cv2.CPU_AVX) 158 | print("CPU AVX Capable?: ", end='') 159 | print(avx) 160 | ``` 161 | **Console Output:** 162 | ``` 163 | CPU AVX Capable?: True 164 | ``` 165 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_07_hw_info/ocv_hw_info.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 7: ocv_hw_info\n", 8 | "[Sample 07](ocv_hw_info.py) is a simple diagnostic program that determines how many logical CPU's are available and then queries the hardware to check whether MMX and Streaming SIMD Extensions (SSE), Advanced Vector Extensions etc. are supported .\n", 9 | "\n", 10 | "> _**Note:**_ The OpenCV function cv2.checkHardwareSupport(feature) returns true if the host hardware supports the specified feature. When users call setUseOptimized(False), all the subsequent calls to cv2.checkHardwareSupport() will return false until cv2.setUseOptimized(True) is called. 
This way users can dynamically switch on and off the optimized code in OpenCV.\n", 11 | "\n", 12 | "Start with the usual initializations" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 1, 18 | "metadata": { 19 | "collapsed": true 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "#!/usr/bin/env python\n", 24 | "\n", 25 | "# Python 2/3 compatibility\n", 26 | "from __future__ import print_function\n", 27 | "# Allows use of print like a function in Python 2.x\n", 28 | "\n", 29 | "# Import the OpenCV2 module\n", 30 | "import cv2" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "Check the number of logical CPUs available" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 2, 43 | "metadata": { 44 | "collapsed": false 45 | }, 46 | "outputs": [ 47 | { 48 | "name": "stdout", 49 | "output_type": "stream", 50 | "text": [ 51 | "\n", 52 | "Number of logical CPU's Available: 8\n", 53 | "\n" 54 | ] 55 | } 56 | ], 57 | "source": [ 58 | "# Returns the number of logical CPUs available\n", 59 | "cpu = cv2.getNumberOfCPUs()\n", 60 | "print()\n", 61 | "print(\"Number of logical CPU's Available: \", end='')\n", 62 | "print(cpu)\n", 63 | "print()" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": 3, 69 | "metadata": { 70 | "collapsed": true 71 | }, 72 | "outputs": [], 73 | "source": [ 74 | "#Reset Hardware Optimization Flags\n", 75 | "cv2.setUseOptimized(True)" 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "metadata": {}, 81 | "source": [ 82 | "The functions below return true if the host hardware supports the specified feature.\n", 83 | "When user calls setUseOptimized(false), all the subsequent calls to checkHardwareSupport() will return false until setUseOptimized(true) is called. \n", 84 | "This way user can dynamically switch on and off the optimized code in OpenCV." 
85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": 4, 90 | "metadata": { 91 | "collapsed": true 92 | }, 93 | "outputs": [], 94 | "source": [ 95 | "# Assign ID to Features as this doesn't seem to be working currently for Python\n", 96 | "cv2.CPU_MMX = 1\n", 97 | "cv2.CPU_SSE = 2\n", 98 | "cv2.CPU_SSE2 = 3\n", 99 | "cv2.CPU_SSE3 = 4\n", 100 | "cv2.CPU_SSSE3 = 5\n", 101 | "cv2.CPU_SSE4_1 = 6\n", 102 | "cv2.CPU_SSE4_2 = 7\n", 103 | "cv2.CPU_POPCNT = 8\n", 104 | "cv2.CPU_AVX = 9" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 5, 110 | "metadata": { 111 | "collapsed": false 112 | }, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | "CPU MMX Capable?: True\n" 119 | ] 120 | } 121 | ], 122 | "source": [ 123 | "# Returns True if CPU is MMX capable\n", 124 | "mmx = cv2.checkHardwareSupport(cv2.CPU_MMX)\n", 125 | "print(\"CPU MMX Capable?: \", end='')\n", 126 | "print(mmx)" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": 6, 132 | "metadata": { 133 | "collapsed": false 134 | }, 135 | "outputs": [ 136 | { 137 | "name": "stdout", 138 | "output_type": "stream", 139 | "text": [ 140 | "CPU SSE Capable?: True\n" 141 | ] 142 | } 143 | ], 144 | "source": [ 145 | "# Returns True if CPU is SSE capable\n", 146 | "sse = cv2.checkHardwareSupport(cv2.CPU_SSE)\n", 147 | "print(\"CPU SSE Capable?: \", end='')\n", 148 | "print(sse)" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 7, 154 | "metadata": { 155 | "collapsed": false 156 | }, 157 | "outputs": [ 158 | { 159 | "name": "stdout", 160 | "output_type": "stream", 161 | "text": [ 162 | "CPU SSE 2 Capable?: True\n" 163 | ] 164 | } 165 | ], 166 | "source": [ 167 | "# Returns True if CPU is SSE2 capable\n", 168 | "sse2 = cv2.checkHardwareSupport(cv2.CPU_SSE2)\n", 169 | "print(\"CPU SSE 2 Capable?: \", end='')\n", 170 | "print(sse2)" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 
175 | "execution_count": 8, 176 | "metadata": { 177 | "collapsed": false 178 | }, 179 | "outputs": [ 180 | { 181 | "name": "stdout", 182 | "output_type": "stream", 183 | "text": [ 184 | "CPU SSE 3 Capable?: True\n" 185 | ] 186 | } 187 | ], 188 | "source": [ 189 | "# Returns True if CPU is SSE3 capable\n", 190 | "sse3 = cv2.checkHardwareSupport(cv2.CPU_SSE3)\n", 191 | "print(\"CPU SSE 3 Capable?: \", end='')\n", 192 | "print(sse3)" 193 | ] 194 | }, 195 | { 196 | "cell_type": "code", 197 | "execution_count": 9, 198 | "metadata": { 199 | "collapsed": false 200 | }, 201 | "outputs": [ 202 | { 203 | "name": "stdout", 204 | "output_type": "stream", 205 | "text": [ 206 | "CPU SSSE 3 Capable?: True\n" 207 | ] 208 | } 209 | ], 210 | "source": [ 211 | "# Returns True if CPU is SSSE3 capable\n", 212 | "ssse3 = cv2.checkHardwareSupport(cv2.CPU_SSSE3)\n", 213 | "print(\"CPU SSSE 3 Capable?: \", end='')\n", 214 | "print(ssse3)" 215 | ] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "execution_count": 10, 220 | "metadata": { 221 | "collapsed": false 222 | }, 223 | "outputs": [ 224 | { 225 | "name": "stdout", 226 | "output_type": "stream", 227 | "text": [ 228 | "CPU SSE 4.1 Capable?: True\n" 229 | ] 230 | } 231 | ], 232 | "source": [ 233 | "# Returns True if CPU is SSE4.1 capable\n", 234 | "sse4_1 = cv2.checkHardwareSupport(cv2.CPU_SSE4_1)\n", 235 | "print(\"CPU SSE 4.1 Capable?: \", end='')\n", 236 | "#print(cv2.getNumberOfCPUs())\n", 237 | "print(sse4_1)" 238 | ] 239 | }, 240 | { 241 | "cell_type": "code", 242 | "execution_count": 11, 243 | "metadata": { 244 | "collapsed": false 245 | }, 246 | "outputs": [ 247 | { 248 | "name": "stdout", 249 | "output_type": "stream", 250 | "text": [ 251 | "CPU SSE 4.2 Capable?: True\n" 252 | ] 253 | } 254 | ], 255 | "source": [ 256 | "# Returns True if CPU is SSE4.2 capable\n", 257 | "sse4_2 = cv2.checkHardwareSupport(cv2.CPU_SSE4_2)\n", 258 | "print(\"CPU SSE 4.2 Capable?: \", end='')\n", 259 | "print(sse4_2)" 260 | ] 261 | }, 262 | { 263 | 
"cell_type": "code", 264 | "execution_count": 12, 265 | "metadata": { 266 | "collapsed": false 267 | }, 268 | "outputs": [ 269 | { 270 | "name": "stdout", 271 | "output_type": "stream", 272 | "text": [ 273 | "CPU POPCNT Capable?: True\n" 274 | ] 275 | } 276 | ], 277 | "source": [ 278 | "# Returns True if CPU is POP capable\n", 279 | "popcnt = cv2.checkHardwareSupport(cv2.CPU_POPCNT)\n", 280 | "print(\"CPU POPCNT Capable?: \", end='')\n", 281 | "print(popcnt)" 282 | ] 283 | }, 284 | { 285 | "cell_type": "code", 286 | "execution_count": 13, 287 | "metadata": { 288 | "collapsed": false 289 | }, 290 | "outputs": [ 291 | { 292 | "name": "stdout", 293 | "output_type": "stream", 294 | "text": [ 295 | "CPU AVX Capable?: True\n" 296 | ] 297 | } 298 | ], 299 | "source": [ 300 | "# Returns True if CPU is AVX capable\n", 301 | "avx = cv2.checkHardwareSupport(cv2.CPU_AVX)\n", 302 | "print(\"CPU AVX Capable?: \", end='')\n", 303 | "print(avx)" 304 | ] 305 | } 306 | ], 307 | "metadata": { 308 | "anaconda-cloud": {}, 309 | "kernelspec": { 310 | "display_name": "Python 2", 311 | "language": "python", 312 | "name": "python2" 313 | }, 314 | "language_info": { 315 | "codemirror_mode": { 316 | "name": "ipython", 317 | "version": 2 318 | }, 319 | "file_extension": ".py", 320 | "mimetype": "text/x-python", 321 | "name": "python", 322 | "nbconvert_exporter": "python", 323 | "pygments_lexer": "ipython2", 324 | "version": "2.7.13" 325 | } 326 | }, 327 | "nbformat": 4, 328 | "nbformat_minor": 1 329 | } 330 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_07_hw_info/ocv_hw_info.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import the OpenCV2 module 8 | import cv2 9 | 10 | try: 11 | # Returns the number of logical CPUs 
available 12 | cpu = cv2.getNumberOfCPUs() 13 | #Reset Hardware Optimization Flags 14 | cv2.setUseOptimized(True) 15 | # The functions below return true if the host hardware 16 | # supports the specified feature 17 | # When user calls setUseOptimized(false), 18 | # all the subsequent calls to checkHardwareSupport() will return false 19 | # until setUseOptimized(true) is called. 20 | # This way user can dynamically switch on and off the optimized code in OpenCV. 21 | 22 | # Assign ID to Features as this doesn't seem to be working currently 23 | cv2.CPU_MMX = 1 24 | cv2.CPU_SSE = 2 25 | cv2.CPU_SSE2 = 3 26 | cv2.CPU_SSE3 = 4 27 | cv2.CPU_SSSE3 = 5 28 | cv2.CPU_SSE4_1 = 6 29 | cv2.CPU_SSE4_2 = 7 30 | cv2.CPU_POPCNT = 8 31 | cv2.CPU_AVX = 9 32 | 33 | # Returns True if CPU is MMX capable 34 | mmx = cv2.checkHardwareSupport(cv2.CPU_MMX) 35 | # Returns True if CPU is SSE capable 36 | sse = cv2.checkHardwareSupport(cv2.CPU_SSE) 37 | # Returns True if CPU is SSE2 capable 38 | sse2 = cv2.checkHardwareSupport(cv2.CPU_SSE2) 39 | # Returns True if CPU is SSE3 capable 40 | sse3 = cv2.checkHardwareSupport(cv2.CPU_SSE3) 41 | # Returns True if CPU is SSSE3 capable 42 | ssse3 = cv2.checkHardwareSupport(cv2.CPU_SSSE3) 43 | # Returns True if CPU is SSE4.1 capable 44 | sse4_1 = cv2.checkHardwareSupport(cv2.CPU_SSE4_1) 45 | # Returns True if CPU is SSE4.2 capable 46 | sse4_2 = cv2.checkHardwareSupport(cv2.CPU_SSE4_2) 47 | # Returns True if CPU is POP capable 48 | popcnt = cv2.checkHardwareSupport(cv2.CPU_POPCNT) 49 | # Returns True if CPU is AVX capable 50 | avx = cv2.checkHardwareSupport(cv2.CPU_AVX) 51 | 52 | #Program Output 53 | print() 54 | print('OpenCV - HW Info sample') 55 | print() 56 | 57 | print("Number of logical CPU's Available: ", end='') 58 | #print(cv2.getNumberOfCPUs()) 59 | print(cpu) 60 | print() 61 | print("CPU MMX Capable?: ", end='') 62 | #print(cv2.getNumberOfCPUs()) 63 | print(mmx) 64 | print("CPU SSE Capable?: ", end='') 65 | #print(cv2.getNumberOfCPUs()) 66 | 
print(sse) 67 | print("CPU SSE 2 Capable?: ", end='') 68 | #print(cv2.getNumberOfCPUs()) 69 | print(sse2) 70 | print("CPU SSE 3 Capable?: ", end='') 71 | #print(cv2.getNumberOfCPUs()) 72 | print(sse3) 73 | print("CPU SSSE 3 Capable?: ", end='') 74 | #print(cv2.getNumberOfCPUs()) 75 | print(ssse3) 76 | print("CPU SSE 4.1 Capable?: ", end='') 77 | #print(cv2.getNumberOfCPUs()) 78 | print(sse4_1) 79 | print("CPU SSE 4.2 Capable?: ", end='') 80 | #print(cv2.getNumberOfCPUs()) 81 | print(sse4_2) 82 | print("CPU POPCNT Capable?: ", end='') 83 | #print(cv2.getNumberOfCPUs()) 84 | print(popcnt) 85 | print("CPU AVX Capable?: ", end='') 86 | #print(cv2.getNumberOfCPUs()) 87 | print(avx) 88 | print() 89 | 90 | except cv2.error as e: 91 | print('Error:') -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_08_DOG_image/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample 8: ocv_dog_img 2 | [Sample 08](sample_08/ocv_dog_img.py) is a program that overlays a **Digital On-Screen Graphic (DOG)** or logo onto a still image. DOG is a form of digital watermarking routinely used on broadcast TV to show the TV channel logo. It can also be used on digital signage to watermark content. 3 | In previous samples, we have seen how to overlay text on images and video. This sample shows how to overlay and image on another image. 4 | The logo image or DOG is usually a PNG file that is capable of preserving transparency information, in other words, the alpha channel. 5 | In the interactive tutorial, we will use matplotlib to display some of the intermediate results. 
6 | 7 | ## Update: 8 | Launch the interactive tutorial by typing the following command in your comand window: 9 | 10 | ``` 11 | jupyter notebook ./ocv_dog_img.ipynb 12 | ``` 13 | OR 14 | 15 | You may run the script using the command: 16 | 17 | ``` 18 | python ./ocv_dog_img.py 19 | ``` 20 | ## Code Walkthrough: 21 | First we start off with the usual initializations... 22 | 23 | ``` 24 | #!/usr/bin/env python 25 | 26 | # Python 2/3 compatibility 27 | from __future__ import print_function 28 | # Allows use of print like a function in Python 2.x 29 | 30 | # Import OpenCV and other needed Python modules 31 | import numpy as np 32 | import cv2 33 | ``` 34 | 35 | Next load the image to be watermarked. We will call this the source image. For illustrative purposes, we will display this image in a named window called "Source Image". Remember the window will remained grayed out until the event handler **cv2.waitkey()** is called. 36 | 37 | ``` 38 | # Load the source image 39 | img = cv2.imread('Intel_Wall.jpg') 40 | # Create a named window to show the source image 41 | cv2.namedWindow('Source Image', cv2.WINDOW_NORMAL) 42 | # Display the source image 43 | cv2.imshow('Source Image',img) 44 | ``` 45 | Next load the logo image with which the source image will be watermarked. A second named window called "Result Image" will help serve as a placeholder to handle intermediate outputs, resizing and the final image. 46 | ``` 47 | # Load the logo image 48 | dog = cv2.imread('Intel_Logo.png') 49 | # Create a named window to handle intermediate outputs and resizing 50 | cv2.namedWindow('Result Image', cv2.WINDOW_NORMAL) 51 | ``` 52 | The Logo image and source image are not te same size. So we need to first find the size of the logo. We do this using the numpy shape object. 53 | 54 | ``` 55 | # To put logo on top-left corner, create a Region of Interest (ROI) 56 | rows,cols,channels = dog.shape 57 | roi = img[0:rows, 0:cols ] 58 | # Print out the dimensions of the logo... 
59 | print(dog.shape) 60 | ``` 61 | **Console Output:** 62 | ``` 63 | (270, 270, 3) 64 | ``` 65 | Now convert the logo image to grayscale for faster processing... Only in the interactive tutorial, we will use matplotlib to display the result. 66 | 67 | ``` 68 | dog_gray = cv2.cvtColor(dog,cv2.COLOR_BGR2GRAY) 69 | ``` 70 | > Note: The next three lines of code below are only to display the intermediate result and not in the script 71 | 72 | ``` 73 | from matplotlib import pyplot as plt 74 | plt.imshow(dog_gray) 75 | plt.show() 76 | ``` 77 | ![Logo in Grayscale](https://github.com/vraoresearch/Intel-Digital-Signage-Reference/blob/master/tutorials/opencv/Python/sample_08/dog_gray.png) 78 | 79 | Next create a mask and inverse mask of the logo image ... 80 | ``` 81 | ret, mask = cv2.threshold(dog_gray, 10, 255, cv2.THRESH_BINARY) 82 | mask_inv = cv2.bitwise_not(mask) 83 | ``` 84 | > Note: The next two lines of code below are only to display the intermediate result and not in the script 85 | 86 | ``` 87 | plt.imshow(mask_inv) 88 | plt.show() 89 | ``` 90 | 91 | ![Logo inverse mask](https://github.com/vraoresearch/Intel-Digital-Signage-Reference/blob/master/tutorials/opencv/Python/sample_08/inv_mask.png) 92 | 93 | Now we blackout the logo within the ROI so that we can extract it from its background. 94 | ``` 95 | img_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) 96 | ``` 97 | Perform the extraction 98 | ``` 99 | dog_fg = cv2.bitwise_and(dog,dog,mask = mask) 100 | ``` 101 | >Note: The next two lines of code below are only to display the intermediate result and not in the script 102 | ``` 103 | plt.imshow(dog_fg) 104 | plt.show() 105 | ``` 106 | ![Logo outline foreground](https://github.com/vraoresearch/Intel-Digital-Signage-Reference/blob/master/tutorials/opencv/Python/sample_08/dog_fg.png) 107 | 108 | Now we add the logo to the source image. We can use the OpenCV **cv2.add()** function. 
109 | ``` 110 | dst = cv2.add(img_bg,dog_fg) 111 | img[0:rows, 0:cols ] = dst 112 | ``` 113 | Time to display the result 114 | ``` 115 | # Display the Result 116 | cv2.imshow('Result Image',img) 117 | # Wait until windows are dismissed 118 | cv2.waitKey(0) 119 | ``` 120 | Now release all resources used 121 | ``` 122 | cv2.destroyAllWindows() 123 | ``` 124 | In this example, we used some OpenCV image processing API's to extract the logo from its background. Using the alpha channel or transaprency of the PNG can also be exploited to produce the same effect. You can also reduce the opacity of the logo itself. 125 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_08_DOG_image/dog_fg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/Video-Analytics-OpenCV/7a89c3d4c8bd105a48ab701deda656343e770f4e/tutorials/opencv/Python/sample_08_DOG_image/dog_fg.png -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_08_DOG_image/dog_gray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/Video-Analytics-OpenCV/7a89c3d4c8bd105a48ab701deda656343e770f4e/tutorials/opencv/Python/sample_08_DOG_image/dog_gray.png -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_08_DOG_image/inv_mask.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/Video-Analytics-OpenCV/7a89c3d4c8bd105a48ab701deda656343e770f4e/tutorials/opencv/Python/sample_08_DOG_image/inv_mask.png -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_08_DOG_image/ocv_dog_img.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import OpenCV and other needed Python modules 8 | import numpy as np 9 | import cv2 10 | 11 | try: 12 | # Digital On-Screen Graphic aka DOG 13 | # Load two images 14 | # First image is the one that needs to be watermarked 15 | # Image Download URL - https://iqglobal.intel.com/iq-content-library/wp-content/uploads/sites/18/2017/04/Blue-Ridge-2-Skyglow-Desktop-Wallpapers.jpg 16 | # Rename as Intel_Wall.jpg 17 | img = cv2.imread('Intel_Wall.jpg') 18 | # Create a named window to show the source image 19 | cv2.namedWindow('Source Image', cv2.WINDOW_NORMAL) 20 | # Display the source image 21 | cv2.imshow('Source Image',img) 22 | # Second image is a watermark. Note that the image is PNG with transparent background. 23 | # https://newsroom.intel.com/wp-content/themes/newsroom/dist/images/favicon/mstile-150x150.png 24 | # Rename as Intel_Logo.png 25 | dog = cv2.imread('Intel_Logo.png') 26 | # Create a named window to handle intermediate outputs and resizing 27 | cv2.namedWindow('Result Image', cv2.WINDOW_NORMAL) 28 | # To put logo on top-left corner, create a Region of Interest (ROI) 29 | rows,cols,channels = dog.shape 30 | roi = img[0:rows, 0:cols ] 31 | # Convert the logo to grayscale 32 | dog_gray = cv2.cvtColor(dog,cv2.COLOR_BGR2GRAY) 33 | # Create a mask of the logo and its inverse mask 34 | ret, mask = cv2.threshold(dog_gray, 10, 255, cv2.THRESH_BINARY) 35 | mask_inv = cv2.bitwise_not(mask) 36 | # Now blackout the area of logo in ROI 37 | img_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) 38 | # Now just extract the logo 39 | dog_fg = cv2.bitwise_and(dog,dog,mask = mask) 40 | # Next add the logo to the source image 41 | dst = cv2.add(img_bg,dog_fg) 42 | img[0:rows, 0:cols ] = dst 43 | # Display the Result 44 | 
cv2.imshow('Result Image',img) 45 | # Wait until windows are dismissed 46 | cv2.waitKey(0) 47 | # Release all resources used 48 | cv2.destroyAllWindows() 49 | 50 | except cv2.error as e: 51 | print('Please correct OpenCV Error') -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_09_DOG_video/.ipynb_checkpoints/ocv_dog_vid-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 9: ocv_dog_vid\n", 8 | "\n", 9 | "[Sample 09](ocv_dog_vid.py) is a program that overlays a Digital On-Screen Graphic (DOG) on the video display stream. This program uses the same principles as used for the previous example for still images.\n", 10 | "\n", 11 | "In fact, you can mash sample_04 and sample_08 together to create this sample. It's so simple! The procedure to load and process the image and to extract it from the background is only done once outside of the while loop. This is so you don't slow down the frame rate of the video. \n", 12 | "\n", 13 | "Inside the while loop, all that is done is lacking out the logo area and adding the logo to each frame. Replacing the camera device id with a filename and path in cv2.VideoCapture() function allows you to watermark any video file from disk. 
You can write the resulting video back to disk with the watermark added using the write() method from cv2.VideoCapture().\n", 14 | "\n", 15 | "Since it is so simple we will just run the program:" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 3, 21 | "metadata": { 22 | "collapsed": false 23 | }, 24 | "outputs": [ 25 | { 26 | "name": "stdout", 27 | "output_type": "stream", 28 | "text": [ 29 | "Sucess: Grabbing the camera\n", 30 | "Quitting ...\n" 31 | ] 32 | } 33 | ], 34 | "source": [ 35 | "#!/usr/bin/env python\n", 36 | "\n", 37 | "# Python 2/3 compatibility\n", 38 | "from __future__ import print_function\n", 39 | "# Allows use of print like a function in Python 2.x\n", 40 | "\n", 41 | "# Import OpenCV and Numpy modules\n", 42 | "import numpy as np\n", 43 | "import cv2\n", 44 | "\n", 45 | " \n", 46 | "try:\n", 47 | " # Create a named window to display video output\n", 48 | " cv2.namedWindow('Watermark', cv2.WINDOW_NORMAL)\n", 49 | " # Load logo image\n", 50 | " dog = cv2.imread('Intel_Logo.png')\n", 51 | " # \n", 52 | " rows,cols,channels = dog.shape\n", 53 | " # Convert the logo to grayscale\n", 54 | " dog_gray = cv2.cvtColor(dog,cv2.COLOR_BGR2GRAY)\n", 55 | " # Create a mask of the logo and its inverse mask\n", 56 | " ret, mask = cv2.threshold(dog_gray, 10, 255, cv2.THRESH_BINARY)\n", 57 | " mask_inv = cv2.bitwise_not(mask)\n", 58 | " # Now just extract the logo\n", 59 | " dog_fg = cv2.bitwise_and(dog,dog,mask = mask)\n", 60 | " # Initialize Default Video Web Camera for capture.\n", 61 | " webcam = cv2.VideoCapture(0)\n", 62 | " # Check if Camera initialized correctly\n", 63 | " success = webcam.isOpened()\n", 64 | " if success == False:\n", 65 | " print('Error: Camera could not be opened')\n", 66 | " else:\n", 67 | " print('Sucess: Grabbing the camera')\n", 68 | " webcam.set(cv2.CAP_PROP_FPS,30);\n", 69 | " webcam.set(cv2.CAP_PROP_FRAME_WIDTH,1024);\n", 70 | " webcam.set(cv2.CAP_PROP_FRAME_HEIGHT,768);\n", 71 | "\n", 72 | " 
while(True):\n", 73 | " # Read each frame in video stream\n", 74 | " ret, frame = webcam.read()\n", 75 | " # Perform operations on the video frames here\n", 76 | " # To put logo on top-left corner, create a Region of Interest (ROI)\n", 77 | " roi = frame[0:rows, 0:cols ] \n", 78 | " # Now blackout the area of logo in ROI\n", 79 | " frm_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)\n", 80 | " # Next add the logo to each video frame\n", 81 | " dst = cv2.add(frm_bg,dog_fg)\n", 82 | " frame[0:rows, 0:cols ] = dst\n", 83 | " # Overlay Text on the video frame with Exit instructions\n", 84 | " font = cv2.FONT_HERSHEY_SIMPLEX\n", 85 | " cv2.putText(frame, \"Type q to Quit:\",(50,700), font, 1,(255,255,255),2,cv2.LINE_AA)\n", 86 | " # Display the resulting frame\n", 87 | " cv2.imshow('Watermark',frame)\n", 88 | " # Wait for exit key \"q\" to quit\n", 89 | " if cv2.waitKey(1) & 0xFF == ord('q'):\n", 90 | " print('Quitting ...')\n", 91 | " break\n", 92 | "\n", 93 | " # Release all resources used\n", 94 | " webcam.release()\n", 95 | " cv2.destroyAllWindows()\n", 96 | "\n", 97 | "except cv2.error as e:\n", 98 | " print('Please correct OpenCV Error')" 99 | ] 100 | } 101 | ], 102 | "metadata": { 103 | "kernelspec": { 104 | "display_name": "Python 2", 105 | "language": "python", 106 | "name": "python2" 107 | }, 108 | "language_info": { 109 | "codemirror_mode": { 110 | "name": "ipython", 111 | "version": 2 112 | }, 113 | "file_extension": ".py", 114 | "mimetype": "text/x-python", 115 | "name": "python", 116 | "nbconvert_exporter": "python", 117 | "pygments_lexer": "ipython2", 118 | "version": "2.7.13" 119 | } 120 | }, 121 | "nbformat": 4, 122 | "nbformat_minor": 2 123 | } 124 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_09_DOG_video/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample 9: ocv_dog_vid 2 | [Sample 09](ocv_dog_vid.py) is a program 
that overlays a **Digital On-Screen Graphic (DOG)** on the video display stream. 3 | 4 | ## Usage: 5 | Launch the interactive tutorial by typing the following command in your comand window: 6 | 7 | ``` 8 | jupyter notebook ./ocv_dog_vid.ipynb 9 | ``` 10 | OR 11 | 12 | You may run the script using the command: 13 | 14 | ``` 15 | python ./ocv_dog_vid.py 16 | ``` 17 | ## Code Walkthrough: 18 | This program uses the same principles as used for the previous example for still images. 19 | 20 | In fact, you can mash sample_04 and sample_08 together to create this sample. It's so simple! The procedure to load and process the image and to extract it from the background is only done once outside of the while loop. This is so you don't slow down the frame rate of the video. 21 | 22 | Inside the while loop, all that is done is blacking out the logo area and adding the logo to each frame. Replacing the camera device id with a filename and path in **cv2.VideoCapture()** function allows you to watermark any video file from disk. You can write the resulting video back to disk with the watermark added using the **write()** method from **cv2.VideoCapture()**. 23 | 24 | ``` 25 | #!/usr/bin/env python 26 | 27 | # Python 2/3 compatibility 28 | from __future__ import print_function 29 | # Allows use of print like a function in Python 2.x 30 | 31 | # Import OpenCV and Numpy modules 32 | import numpy as np 33 | import cv2 34 | 35 | try: 36 | # Create a named window to display video output 37 | cv2.namedWindow('Watermark', cv2.WINDOW_NORMAL) 38 | # This section is the same from previous Image example. 
39 | # Load logo image 40 | dog = cv2.imread('Intel_Logo.png') 41 | # 42 | rows,cols,channels = dog.shape 43 | # Convert the logo to grayscale 44 | dog_gray = cv2.cvtColor(dog,cv2.COLOR_BGR2GRAY) 45 | # Create a mask of the logo and its inverse mask 46 | ret, mask = cv2.threshold(dog_gray, 10, 255, cv2.THRESH_BINARY) 47 | mask_inv = cv2.bitwise_not(mask) 48 | # Now just extract the logo 49 | dog_fg = cv2.bitwise_and(dog,dog,mask = mask) 50 | 51 | # Initialize Default Video Web Camera for capture. 52 | webcam = cv2.VideoCapture(0) 53 | # Check if Camera initialized correctly 54 | success = webcam.isOpened() 55 | if success == False: 56 | print('Error: Camera could not be opened') 57 | else: 58 | print('Sucess: Grabbing the camera') 59 | webcam.set(cv2.CAP_PROP_FPS,30); 60 | webcam.set(cv2.CAP_PROP_FRAME_WIDTH,1024); 61 | webcam.set(cv2.CAP_PROP_FRAME_HEIGHT,768); 62 | 63 | while(True): 64 | # Read each frame in video stream 65 | ret, frame = webcam.read() 66 | # Perform operations on the video frames here 67 | # To put logo on top-left corner, create a Region of Interest (ROI) 68 | roi = frame[0:rows, 0:cols ] 69 | # Now blackout the area of logo in ROI 70 | frm_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) 71 | # Next add the logo to each video frame 72 | dst = cv2.add(frm_bg,dog_fg) 73 | frame[0:rows, 0:cols ] = dst 74 | # Overlay Text on the video frame with Exit instructions 75 | font = cv2.FONT_HERSHEY_SIMPLEX 76 | cv2.putText(frame, "Type q to Quit:",(50,700), font, 1,(255,255,255),2,cv2.LINE_AA) 77 | # Display the resulting frame 78 | cv2.imshow('Watermark',frame) 79 | # Wait for exit key "q" to quit 80 | if cv2.waitKey(1) & 0xFF == ord('q'): 81 | print('Quitting ...') 82 | break 83 | 84 | # Release all resources used 85 | webcam.release() 86 | cv2.destroyAllWindows() 87 | 88 | except cv2.error as e: 89 | print('Please correct OpenCV Error') 90 | -------------------------------------------------------------------------------- 
/tutorials/opencv/Python/sample_09_DOG_video/ocv_dog_vid.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 9: ocv_dog_vid\n", 8 | "\n", 9 | "[Sample 09](ocv_dog_vid.py) is a program that overlays a Digital On-Screen Graphic (DOG) on the video display stream. This program uses the same principles as used for the previous example for still images.\n", 10 | "\n", 11 | "In fact, you can mash sample_04 and sample_08 together to create this sample. It's so simple! The procedure to load and process the image and to extract it from the background is only done once outside of the while loop. This is so you don't slow down the frame rate of the video. \n", 12 | "\n", 13 | "Inside the while loop, all that is done is blacking out the logo area and adding the logo to each frame. Replacing the camera device id with a filename and path in cv2.VideoCapture() function allows you to watermark any video file from disk. 
You can write the resulting video back to disk with the watermark added using the write() method from cv2.VideoCapture().\n", 14 | "\n", 15 | "Since it is so simple we will just run the program:" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 3, 21 | "metadata": { 22 | "collapsed": false 23 | }, 24 | "outputs": [ 25 | { 26 | "name": "stdout", 27 | "output_type": "stream", 28 | "text": [ 29 | "Sucess: Grabbing the camera\n", 30 | "Quitting ...\n" 31 | ] 32 | } 33 | ], 34 | "source": [ 35 | "#!/usr/bin/env python\n", 36 | "\n", 37 | "# Python 2/3 compatibility\n", 38 | "from __future__ import print_function\n", 39 | "# Allows use of print like a function in Python 2.x\n", 40 | "\n", 41 | "# Import OpenCV and Numpy modules\n", 42 | "import numpy as np\n", 43 | "import cv2\n", 44 | "\n", 45 | " \n", 46 | "try:\n", 47 | " # Create a named window to display video output\n", 48 | " cv2.namedWindow('Watermark', cv2.WINDOW_NORMAL)\n", 49 | " # Load logo image\n", 50 | " dog = cv2.imread('Intel_Logo.png')\n", 51 | " # \n", 52 | " rows,cols,channels = dog.shape\n", 53 | " # Convert the logo to grayscale\n", 54 | " dog_gray = cv2.cvtColor(dog,cv2.COLOR_BGR2GRAY)\n", 55 | " # Create a mask of the logo and its inverse mask\n", 56 | " ret, mask = cv2.threshold(dog_gray, 10, 255, cv2.THRESH_BINARY)\n", 57 | " mask_inv = cv2.bitwise_not(mask)\n", 58 | " # Now just extract the logo\n", 59 | " dog_fg = cv2.bitwise_and(dog,dog,mask = mask)\n", 60 | " # Initialize Default Video Web Camera for capture.\n", 61 | " webcam = cv2.VideoCapture(0)\n", 62 | " # Check if Camera initialized correctly\n", 63 | " success = webcam.isOpened()\n", 64 | " if success == False:\n", 65 | " print('Error: Camera could not be opened')\n", 66 | " else:\n", 67 | " print('Sucess: Grabbing the camera')\n", 68 | " webcam.set(cv2.CAP_PROP_FPS,30);\n", 69 | " webcam.set(cv2.CAP_PROP_FRAME_WIDTH,1024);\n", 70 | " webcam.set(cv2.CAP_PROP_FRAME_HEIGHT,768);\n", 71 | "\n", 72 | " 
while(True):\n", 73 | " # Read each frame in video stream\n", 74 | " ret, frame = webcam.read()\n", 75 | " # Perform operations on the video frames here\n", 76 | " # To put logo on top-left corner, create a Region of Interest (ROI)\n", 77 | " roi = frame[0:rows, 0:cols ] \n", 78 | " # Now blackout the area of logo in ROI\n", 79 | " frm_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)\n", 80 | " # Next add the logo to each video frame\n", 81 | " dst = cv2.add(frm_bg,dog_fg)\n", 82 | " frame[0:rows, 0:cols ] = dst\n", 83 | " # Overlay Text on the video frame with Exit instructions\n", 84 | " font = cv2.FONT_HERSHEY_SIMPLEX\n", 85 | " cv2.putText(frame, \"Type q to Quit:\",(50,700), font, 1,(255,255,255),2,cv2.LINE_AA)\n", 86 | " # Display the resulting frame\n", 87 | " cv2.imshow('Watermark',frame)\n", 88 | " # Wait for exit key \"q\" to quit\n", 89 | " if cv2.waitKey(1) & 0xFF == ord('q'):\n", 90 | " print('Quitting ...')\n", 91 | " break\n", 92 | "\n", 93 | " # Release all resources used\n", 94 | " webcam.release()\n", 95 | " cv2.destroyAllWindows()\n", 96 | "\n", 97 | "except cv2.error as e:\n", 98 | " print('Please correct OpenCV Error')" 99 | ] 100 | } 101 | ], 102 | "metadata": { 103 | "kernelspec": { 104 | "display_name": "Python 2", 105 | "language": "python", 106 | "name": "python2" 107 | }, 108 | "language_info": { 109 | "codemirror_mode": { 110 | "name": "ipython", 111 | "version": 2 112 | }, 113 | "file_extension": ".py", 114 | "mimetype": "text/x-python", 115 | "name": "python", 116 | "nbconvert_exporter": "python", 117 | "pygments_lexer": "ipython2", 118 | "version": "2.7.13" 119 | } 120 | }, 121 | "nbformat": 4, 122 | "nbformat_minor": 2 123 | } 124 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_09_DOG_video/ocv_dog_vid.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ 
import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import OpenCV and Numpy modules 8 | import numpy as np 9 | import cv2 10 | 11 | 12 | try: 13 | # Create a named window to display video output 14 | cv2.namedWindow('Watermark', cv2.WINDOW_NORMAL) 15 | # Load logo image 16 | dog = cv2.imread('Intel_Logo.png') 17 | # 18 | rows,cols,channels = dog.shape 19 | # Convert the logo to grayscale 20 | dog_gray = cv2.cvtColor(dog,cv2.COLOR_BGR2GRAY) 21 | # Create a mask of the logo and its inverse mask 22 | ret, mask = cv2.threshold(dog_gray, 10, 255, cv2.THRESH_BINARY) 23 | mask_inv = cv2.bitwise_not(mask) 24 | # Now just extract the logo 25 | dog_fg = cv2.bitwise_and(dog,dog,mask = mask) 26 | # Initialize Default Video Web Camera for capture. 27 | webcam = cv2.VideoCapture(0) 28 | # Check if Camera initialized correctly 29 | success = webcam.isOpened() 30 | if success == False: 31 | print('Error: Camera could not be opened') 32 | else: 33 | print('Sucess: Grabbing the camera') 34 | webcam.set(cv2.CAP_PROP_FPS,30); 35 | webcam.set(cv2.CAP_PROP_FRAME_WIDTH,1024); 36 | webcam.set(cv2.CAP_PROP_FRAME_HEIGHT,768); 37 | 38 | while(True): 39 | # Read each frame in video stream 40 | ret, frame = webcam.read() 41 | # Perform operations on the video frames here 42 | # To put logo on top-left corner, create a Region of Interest (ROI) 43 | roi = frame[0:rows, 0:cols ] 44 | # Now blackout the area of logo in ROI 45 | frm_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) 46 | # Next add the logo to each video frame 47 | dst = cv2.add(frm_bg,dog_fg) 48 | frame[0:rows, 0:cols ] = dst 49 | # Overlay Text on the video frame with Exit instructions 50 | font = cv2.FONT_HERSHEY_SIMPLEX 51 | cv2.putText(frame, "Type q to Quit:",(50,700), font, 1,(255,255,255),2,cv2.LINE_AA) 52 | # Display the resulting frame 53 | # Display the resulting frame 54 | cv2.imshow('Watermark',frame) 55 | # Wait for exit key "q" to quit 56 | if cv2.waitKey(1) & 0xFF == ord('q'): 57 | 
break 58 | 59 | # Release all resources used 60 | webcam.release() 61 | cv2.destroyAllWindows() 62 | 63 | except cv2.error as e: 64 | print('Please correct OpenCV Error') -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_10_face_eye_detection_still/.ipynb_checkpoints/ocv_face_img-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 10: ocv_face_img\n", 8 | "\n", 9 | "[Sample 10](ocv_face_img.py) is a basic Face and Eye Detection program that uses OpenCV to analyze an image and detect human faces and eyes. The detected areas or Regions of Interest (ROI) are demarcated with rectangles. The program uses the OpenCV built-in pre-trained Haar feature-based cascade classifiers in order to perform this task.\n", 10 | "\n", 11 | "### What are Cascade Classifiers?\n", 12 | "\n", 13 | "Cascade Classifiers are a form of ensemble learning systems. Such systems use a collection of a large number of simple classifiers in a cascade. This leads to accurate yet computationally efficient detection systems.\n", 14 | "\n", 15 | "### What are Haar feature-based Cascade Classifiers?\n", 16 | "\n", 17 | "Haar features are named after Haar wavelets in mathematics. The are patterns in the pixel values of an image such as edges, lines and neighbors that are used with a windowing technique to extract features from an image. Since the features could be different, a collection of specialized but simple pattern classifiers are used in a cascade to perform the feature detection.\n", 18 | "\n", 19 | "### References:\n", 20 | "\n", 21 | "1. Rapid Object Detection using a Boosted Cascade of Simple Features [pdf](http://wearables.cc.gatech.edu/paper_of_week/viola01rapid.pdf) \n", 22 | " [_This is the original paper by Prof. Viola and Prof. Jones_]\n", 23 | "2. 
An Analysis of the Viola-Jones Face Detection Algorithm [pdf](http://www.ipol.im/pub/art/2014/104/article.pdf)\n", 24 | "3. A review on Face Detection and study of Viola Jones method [pdf](http://www.ijcttjournal.org/2015/Volume25/number-1/IJCTT-V25P110.pdf)\n", 25 | "4. Explaining AdaBoost [pdf](http://rob.schapire.net/papers/explaining-adaboost.pdf)\n", 26 | "5. Face detection using Haar Cascades [Tutorial link](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html)\n", 27 | "\n", 28 | "## Sample Code\n", 29 | "\n", 30 | "First we do the usual initializations ..." 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 1, 36 | "metadata": { 37 | "collapsed": true 38 | }, 39 | "outputs": [], 40 | "source": [ 41 | "from __future__ import print_function\n", 42 | "#!/usr/bin/env python\n", 43 | "\n", 44 | "# Python 2/3 compatibility\n", 45 | "from __future__ import print_function\n", 46 | "# Allows use of print like a function in Python 2.x\n", 47 | "\n", 48 | "# Import the Numby and OpenCV2 Python modules\n", 49 | "import numpy as np\n", 50 | "import cv2" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "Select the pre-trained Haar Cascade Classifier file to use for face and eye detection respectively and pass it to the OpenCV API [cv2.CascadeClassifier()](http://docs.opencv.org/3.0-last-rst/modules/objdetect/doc/cascade_classification.html#cv2.CascadeClassifier)" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 2, 63 | "metadata": { 64 | "collapsed": true 65 | }, 66 | "outputs": [], 67 | "source": [ 68 | "# This section selects the Haar Cascade Classifer File to use\n", 69 | "# Ensure that the path to the xml files are correct\n", 70 | "# In this example, the files have been copied to the local folder\n", 71 | "face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n", 72 | "eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')" 73 | ] 74 | 
}, 75 | { 76 | "cell_type": "markdown", 77 | "metadata": {}, 78 | "source": [ 79 | "Next load an image to analyze. Several examples are provided. Make sure that only one cv2.imread() command is active and all the rest are commented out. The example images have all been copied to the local folder." 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 3, 85 | "metadata": { 86 | "collapsed": true 87 | }, 88 | "outputs": [], 89 | "source": [ 90 | "img = cv2.imread('brian-krzanich_2.jpg')\n", 91 | "#img = cv2.imread('Intel_Board_of_Directors.jpg')\n", 92 | "#img = cv2.imread('bmw-group-intel-mobileye-3.jpg')" 93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "metadata": {}, 98 | "source": [ 99 | "Now convert the image to Grayscale to make it easier to process" 100 | ] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": 4, 105 | "metadata": { 106 | "collapsed": true 107 | }, 108 | "outputs": [], 109 | "source": [ 110 | "gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)" 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "metadata": {}, 116 | "source": [ 117 | "The [detectMultiScale](http://docs.opencv.org/3.0-last-rst/modules/objdetect/doc/cascade_classification.html#cv2.CascadeClassifier.detectMultiScale) method of the OpenCV Cascade Classifier API detects features of different sizes in the input image. 
The detected objects are returned as a list of rectangles.\n", 118 | "\n", 119 | "cv2.CascadeClassifier.detectMultiScale(image[,scaleFactor[,minNeighbors[,flags[,minSize[,maxSize]]]]]) -> objects" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": 5, 125 | "metadata": { 126 | "collapsed": true 127 | }, 128 | "outputs": [], 129 | "source": [ 130 | "faces = face_cascade.detectMultiScale(gray, 1.3, 5)" 131 | ] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "Draw the rectangles around detected Regions of Interest [ROI], namely faces and eyes using [cv2.rectangle()](http://docs.opencv.org/3.0-last-rst/modules/imgproc/doc/drawing_functions.html#cv2.rectangle) for all detected objects in the image returned by the classifiers.\n", 138 | "\n", 139 | "cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img\n", 140 | "\n", 141 | ">Note: Since the eyes are a part of the face, we nest the classifier for the eyes. So we only look for eyes in areas identified as the face. This improves the accuracy."
142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": 6, 147 | "metadata": { 148 | "collapsed": true 149 | }, 150 | "outputs": [], 151 | "source": [ 152 | "# Draw the rectangles around detected Regions of Interest [ROI] - faces\n", 153 | "for (x,y,w,h) in faces:\n", 154 | " cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n", 155 | " roi_gray = gray[y:y+h, x:x+w]\n", 156 | " roi_color = img[y:y+h, x:x+w]\n", 157 | " # Since eyes are a part of face, limit eye detection to face regions to improve accuracy\n", 158 | " eyes = eye_cascade.detectMultiScale(roi_gray)\n", 159 | " for (ex,ey,ew,eh) in eyes:\n", 160 | " # Draw the rectangles around detected Regions of Interest [ROI] - eyes\n", 161 | " cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": {}, 167 | "source": [ 168 | "Finally display the result until dismissed and release all reseources used." 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": 7, 174 | "metadata": { 175 | "collapsed": true 176 | }, 177 | "outputs": [], 178 | "source": [ 179 | "# Display the result \n", 180 | "cv2.imshow('img',img)\n", 181 | "# Show image until dismissed using GUI exit window\n", 182 | "cv2.waitKey(0)\n", 183 | "# Release all resources used\n", 184 | "cv2.destroyAllWindows()" 185 | ] 186 | } 187 | ], 188 | "metadata": { 189 | "kernelspec": { 190 | "display_name": "Python 2", 191 | "language": "python", 192 | "name": "python2" 193 | }, 194 | "language_info": { 195 | "codemirror_mode": { 196 | "name": "ipython", 197 | "version": 2 198 | }, 199 | "file_extension": ".py", 200 | "mimetype": "text/x-python", 201 | "name": "python", 202 | "nbconvert_exporter": "python", 203 | "pygments_lexer": "ipython2", 204 | "version": "2.7.13" 205 | } 206 | }, 207 | "nbformat": 4, 208 | "nbformat_minor": 2 209 | } 210 | -------------------------------------------------------------------------------- 
/tutorials/opencv/Python/sample_10_face_eye_detection_still/Intel_Board_of_Directors.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/Video-Analytics-OpenCV/7a89c3d4c8bd105a48ab701deda656343e770f4e/tutorials/opencv/Python/sample_10_face_eye_detection_still/Intel_Board_of_Directors.jpg -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_10_face_eye_detection_still/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample 10: ocv_face_img 2 | 3 | [Sample 10](ocv_face_img.py) is a basic Face and Eye Detection program that uses OpenCV to analyze an image and detect human faces and eyes. The detected areas or Regions of Interest (ROI) are demarcated with rectangles. The program uses the OpenCV built-in pre-trained Haar feature-based cascade classifiers in order to perform this task. 4 | 5 | ## What are Cascade Classifiers? 6 | Cascade Classifiers are a form of ensemble learning systems. Such systems use a collection of a large number of simple classifiers in a cascade. This leads to accurate yet computationally efficient detection systems. 7 | 8 | ## What are Haar feature-based Cascade Classifiers? 9 | Haar features are named after Haar wavelets in mathematics. The are patterns in the pixel values of an image such as edges, lines and neighbors that are used with a windowing technique to extract features from an image. Since the features could be different, a collection of specialized but simple pattern classifiers are used in a cascade to perform the feature detection. 10 | 11 | The Viola–Jones object detection framework was the first object detection framework using these Haar features to provide acceptable object detection rates in real-time and were proposed in 2001 by Prof. Paul Viola and Prof. Michael Jones. 12 | 13 | ## References: 14 | 1. 
Rapid Object Detection using a Boosted Cascade of Simple Features [pdf](http://wearables.cc.gatech.edu/paper_of_week/viola01rapid.pdf) 15 | [_This is the original paper by Prof. Viola and Prof. Jones_] 16 | 2. An Analysis of the Viola-Jones Face Detection Algorithm [pdf](http://www.ipol.im/pub/art/2014/104/article.pdf) 17 | 3. A review on Face Detection and study of Viola Jones method [pdf](http://www.ijcttjournal.org/2015/Volume25/number-1/IJCTT-V25P110.pdf) 18 | 4. Explaining AdaBoost [pdf](http://rob.schapire.net/papers/explaining-adaboost.pdf) 19 | 5. Face detection using Haar Cascades [Tutorial link](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) 20 | 21 | ## Usage: 22 | Launch the interactive tutorial by typing the following command in your command window: 23 | 24 | ``` 25 | jupyter notebook ./ocv_face_img.ipynb 26 | ``` 27 | OR 28 | 29 | You may run the script using the command: 30 | 31 | ``` 32 | python ./ocv_face_img.py 33 | ``` 34 | ## Code Walkthrough: 35 | 36 | First we do the usual initializations ... 37 | ``` 38 | from __future__ import print_function 39 | #!/usr/bin/env python 40 | 41 | # Python 2/3 compatibility 42 | from __future__ import print_function 43 | # Allows use of print like a function in Python 2.x 44 | 45 | # Import the Numby and OpenCV2 Python modules 46 | import numpy as np 47 | import cv2 48 | ``` 49 | Select the pre-trained Haar Cascade Classifier file to use for face and eye detection respectively and pass it to the OpenCV API cv2.CascadeClassifier() 50 | 51 | This section selects the Haar Cascade Classifier File to use. Ensure that the path to the xml files are correct. In this example, the files have been copied to the local folder 52 | 53 | ``` 54 | face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') 55 | eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml') 56 | ``` 57 | Next load an image to analyze. Several examples are provided.
Make sure that only one **cv2.imread()** command is active and all the rest are commented out. These example images have all been copied to the local folder. 58 | ``` 59 | img = cv2.imread('brian-krzanich_2.jpg') 60 | #img = cv2.imread('Intel_Board_of_Directors.jpg') 61 | #img = cv2.imread('bmw-group-intel-mobileye-3.jpg') 62 | ``` 63 | Now convert the image to Grayscale to make it easier to process 64 | ``` 65 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 66 | ``` 67 | The detectMultiScale method of the OpenCV Cascade Classifier API detects features of different sizes in the input image. The detected objects are returned as a list of rectangles. 68 | 69 | **cv2.CascadeClassifier.detectMultiScale(image[,scaleFactor[,minNeighbors[,flags[,minSize[,maxSize]]]]]) -> objects** 70 | ``` 71 | faces = face_cascade.detectMultiScale(gray, 1.3, 5) 72 | ``` 73 | Draw the rectangles around detected Regions of Interest [ROI], namely faces amd eyes using cv2.rectangle() for all detected objects in the image returned by the classifiers. 74 | ``` 75 | cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img 76 | ``` 77 | >Note: Since the eyes are a part of the face, we nest the classifier for the eyes. So we only look for eyes in areas identified as the face. This improves the accuracy. 78 | 79 | Next, draw the rectangles around detected Regions of Interest [ROI] - faces 80 | ``` 81 | for (x,y,w,h) in faces: 82 | cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) 83 | roi_gray = gray[y:y+h, x:x+w] 84 | roi_color = img[y:y+h, x:x+w] 85 | ``` 86 | 87 | Since eyes are a part of face, limit eye detection to face regions to improve accuracy 88 | 89 | ``` 90 | eyes = eye_cascade.detectMultiScale(roi_gray) 91 | for (ex,ey,ew,eh) in eyes: 92 | # Draw the rectangles around detected Regions of Interest [ROI] - eyes 93 | cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) 94 | ``` 95 | Finally display the result until dismissed and release all reseources used. 
96 | ``` 97 | cv2.imshow('img',img) 98 | # Show image until dismissed using GUI exit window 99 | cv2.waitKey(0) 100 | # Release all resources used 101 | cv2.destroyAllWindows() 102 | ``` 103 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_10_face_eye_detection_still/bmw-group-intel-mobileye-3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/Video-Analytics-OpenCV/7a89c3d4c8bd105a48ab701deda656343e770f4e/tutorials/opencv/Python/sample_10_face_eye_detection_still/bmw-group-intel-mobileye-3.jpg -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_10_face_eye_detection_still/brian-krzanich_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/Video-Analytics-OpenCV/7a89c3d4c8bd105a48ab701deda656343e770f4e/tutorials/opencv/Python/sample_10_face_eye_detection_still/brian-krzanich_2.jpg -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_10_face_eye_detection_still/intel-and-nervana.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/Video-Analytics-OpenCV/7a89c3d4c8bd105a48ab701deda656343e770f4e/tutorials/opencv/Python/sample_10_face_eye_detection_still/intel-and-nervana.jpg -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_10_face_eye_detection_still/ocv_face_img.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 10: ocv_face_img\n", 8 | "\n", 9 | "[Sample 10](ocv_face_img.py) is a basic Face and Eye Detection 
program that uses OpenCV to analyze an image and detect human faces and eyes. The detected areas or Regions of Interest (ROI) are demarcated with rectangles. The program uses the OpenCV built-in pre-trained Haar feature-based cascade classifiers in order to perform this task.\n", 10 | "\n", 11 | "### What are Cascade Classifiers?\n", 12 | "\n", 13 | "Cascade Classifiers are a form of ensemble learning systems. Such systems use a collection of a large number of simple classifiers in a cascade. This leads to accurate yet computationally efficient detection systems.\n", 14 | "\n", 15 | "### What are Haar feature-based Cascade Classifiers?\n", 16 | "\n", 17 | "Haar features are named after Haar wavelets in mathematics. These are patterns in the pixel values of an image such as edges, lines and neighbors that are used with a windowing technique to extract features from an image. Since the features could be different, a collection of specialized but simple pattern classifiers are used in a cascade to perform the feature detection.\n", 18 | "\n", 19 | "### References:\n", 20 | "\n", 21 | "1. Rapid Object Detection using a Boosted Cascade of Simple Features [pdf](http://wearables.cc.gatech.edu/paper_of_week/viola01rapid.pdf) \n", 22 | " [_This is the original paper by Prof. Viola and Prof. Jones_]\n", 23 | "2. An Analysis of the Viola-Jones Face Detection Algorithm [pdf](http://www.ipol.im/pub/art/2014/104/article.pdf)\n", 24 | "3. A review on Face Detection and study of Viola Jones method [pdf](http://www.ijcttjournal.org/2015/Volume25/number-1/IJCTT-V25P110.pdf)\n", 25 | "4. Explaining AdaBoost [pdf](http://rob.schapire.net/papers/explaining-adaboost.pdf)\n", 26 | "5. Face detection using Haar Cascades [Tutorial link](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html)\n", 27 | "\n", 28 | "## Sample Code\n", 29 | "\n", 30 | "First we do the usual initializations ..."
31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 1, 36 | "metadata": { 37 | "collapsed": true 38 | }, 39 | "outputs": [], 40 | "source": [ 41 | "from __future__ import print_function\n", 42 | "#!/usr/bin/env python\n", 43 | "\n", 44 | "# Python 2/3 compatibility\n", 45 | "from __future__ import print_function\n", 46 | "# Allows use of print like a function in Python 2.x\n", 47 | "\n", 48 | "# Import the Numby and OpenCV2 Python modules\n", 49 | "import numpy as np\n", 50 | "import cv2" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "Select the pre-trained Haar Cascade Classifier file to use for face and eye detection respectively and pass it to the OpenCV API [cv2.CascadeClassifier()](http://docs.opencv.org/3.0-last-rst/modules/objdetect/doc/cascade_classification.html#cv2.CascadeClassifier)" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 2, 63 | "metadata": { 64 | "collapsed": true 65 | }, 66 | "outputs": [], 67 | "source": [ 68 | "# This section selects the Haar Cascade Classifer File to use\n", 69 | "# Ensure that the path to the xml files are correct\n", 70 | "# In this example, the files have been copied to the local folder\n", 71 | "face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n", 72 | "eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')" 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "metadata": {}, 78 | "source": [ 79 | "Next load an image to analyze. Several examples are provided. Make sure that only one cv2.imread() command is active and all the rest are commented out. The example images have all been copied to the local folder." 
80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 3, 85 | "metadata": { 86 | "collapsed": true 87 | }, 88 | "outputs": [], 89 | "source": [ 90 | "img = cv2.imread('brian-krzanich_2.jpg')\n", 91 | "#img = cv2.imread('Intel_Board_of_Directors.jpg')\n", 92 | "#img = cv2.imread('bmw-group-intel-mobileye-3.jpg')" 93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "metadata": {}, 98 | "source": [ 99 | "Now convert the image to Grayscale to make it easier to process" 100 | ] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": 4, 105 | "metadata": { 106 | "collapsed": true 107 | }, 108 | "outputs": [], 109 | "source": [ 110 | "gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)" 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "metadata": {}, 116 | "source": [ 117 | "The [detectMultiScale](http://docs.opencv.org/3.0-last-rst/modules/objdetect/doc/cascade_classification.html#cv2.CascadeClassifier.detectMultiScale) method of the OpenCV Cascade Classifier API detects features of different sizes in the input image. 
The detected objects are returned as a list of rectangles.\n", 118 | "\n", 119 | "cv2.CascadeClassifier.detectMultiScale(image[,scaleFactor[,minNeighbors[,flags[,minSize[,maxSize]]]]]) -> objects" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": 5, 125 | "metadata": { 126 | "collapsed": true 127 | }, 128 | "outputs": [], 129 | "source": [ 130 | "faces = face_cascade.detectMultiScale(gray, 1.3, 5)" 131 | ] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "Draw the rectangles around detected Regions of Interest [ROI], namely faces and eyes using [cv2.rectangle()](http://docs.opencv.org/3.0-last-rst/modules/imgproc/doc/drawing_functions.html#cv2.rectangle) for all detected objects in the image returned by the classifiers.\n", 138 | "\n", 139 | "cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img\n", 140 | "\n", 141 | ">Note: Since the eyes are a part of the face, we nest the classifier for the eyes. So we only look for eyes in areas identified as the face. This improves the accuracy."
142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": 6, 147 | "metadata": { 148 | "collapsed": true 149 | }, 150 | "outputs": [], 151 | "source": [ 152 | "# Draw the rectangles around detected Regions of Interest [ROI] - faces\n", 153 | "for (x,y,w,h) in faces:\n", 154 | " cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n", 155 | " roi_gray = gray[y:y+h, x:x+w]\n", 156 | " roi_color = img[y:y+h, x:x+w]\n", 157 | " # Since eyes are a part of face, limit eye detection to face regions to improve accuracy\n", 158 | " eyes = eye_cascade.detectMultiScale(roi_gray)\n", 159 | " for (ex,ey,ew,eh) in eyes:\n", 160 | " # Draw the rectangles around detected Regions of Interest [ROI] - eyes\n", 161 | " cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": {}, 167 | "source": [ 168 | "Finally display the result until dismissed and release all reseources used." 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": 7, 174 | "metadata": { 175 | "collapsed": true 176 | }, 177 | "outputs": [], 178 | "source": [ 179 | "# Display the result \n", 180 | "cv2.imshow('img',img)\n", 181 | "# Show image until dismissed using GUI exit window\n", 182 | "cv2.waitKey(0)\n", 183 | "# Release all resources used\n", 184 | "cv2.destroyAllWindows()" 185 | ] 186 | } 187 | ], 188 | "metadata": { 189 | "kernelspec": { 190 | "display_name": "Python 2", 191 | "language": "python", 192 | "name": "python2" 193 | }, 194 | "language_info": { 195 | "codemirror_mode": { 196 | "name": "ipython", 197 | "version": 2 198 | }, 199 | "file_extension": ".py", 200 | "mimetype": "text/x-python", 201 | "name": "python", 202 | "nbconvert_exporter": "python", 203 | "pygments_lexer": "ipython2", 204 | "version": "2.7.13" 205 | } 206 | }, 207 | "nbformat": 4, 208 | "nbformat_minor": 2 209 | } 210 | -------------------------------------------------------------------------------- 
/tutorials/opencv/Python/sample_10_face_eye_detection_still/ocv_face_img.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | #!/usr/bin/env python 3 | 4 | # Python 2/3 compatibility 5 | from __future__ import print_function 6 | # Allows use of print like a function in Python 2.x 7 | 8 | # Import the Numby and OpenCV2 Python modules 9 | import numpy as np 10 | import cv2 11 | 12 | # This section selects the Haar Cascade Classifer File to use 13 | # Ensure that the path to the xml files are correct 14 | # In this example, the files have been copied to the local folder 15 | face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') 16 | eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml') 17 | 18 | # Some Additional Samples - Only one imread should be active. So be sure to comment out all others. 19 | # Image download URL - http://blogs.intel.com/iot/files/2016/04/ASBRparticpants.jpg 20 | #img = cv2.imread('ASBRparticpants.jpg') 21 | # Image download URL - http://blogs.intel.com/iot/files/2016/09/bmw-group-intel-mobileye-3.jpg 22 | #img = cv2.imread('bmw-group-intel-mobileye-3.jpg') 23 | # Image download URL - https://upload.wikimedia.org/wikipedia/commons/1/17/Intel_Board_of_Directors.jpg 24 | #img = cv2.imread('Intel_Board_of_Directors.jpg') 25 | try: 26 | # Image download URL - https://simplecore.intel.com/newsroom/wp-content/uploads/sites/11/brian-krzanich_2.jpg 27 | # Load image to analyze 28 | img = cv2.imread('brian-krzanich_2.jpg') 29 | # Converion to Grayscale 30 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 31 | 32 | # Face detection using Haar Cascades 33 | # Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles. 
34 | # cv2.CascadeClassifier.detectMultiScale(image[,scaleFactor[,minNeighbors[,flags[,minSize[,maxSize]]]]]) -> objects 35 | faces = face_cascade.detectMultiScale(gray, 1.3, 5) 36 | # Draw the rectangles around detected Regions of Interest [ROI] - faces 37 | # cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img 38 | for (x,y,w,h) in faces: 39 | cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) 40 | roi_gray = gray[y:y+h, x:x+w] 41 | roi_color = img[y:y+h, x:x+w] 42 | # Since eyes are a part of face, limit eye detection to face regions to improve accuracy 43 | eyes = eye_cascade.detectMultiScale(roi_gray) 44 | for (ex,ey,ew,eh) in eyes: 45 | # Draw the rectangles around detected Regions of Interest [ROI] - eyes 46 | cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) 47 | 48 | # Display the result 49 | cv2.imshow('img',img) 50 | # Show image until dismissed using GUI exit window 51 | cv2.waitKey(0) 52 | # Release all resources used 53 | cv2.destroyAllWindows() 54 | 55 | 56 | except cv2.error as e: 57 | print('Please correct OpenCV Error') -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_11_real-time_face_detection_and_tracking/.ipynb_checkpoints/ocv_face_vid-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 11: ocv_face_vid\n", 8 | "\n", 9 | "[Sample 11](sample_11/ocv_face_vid.py) is a basic Face and Eye Detection program that uses OpenCV to analyze real-time video and detect human faces and eyes. The detected areas or Regions of Interest (ROI) are demarcated with rectangles. 
The program uses the OpenCV built-in pre-trained Haar feature-based cascade classifiers in order to perform this task.\n", 10 | "\n", 11 | "This sample uses the same basic procedures from the previous samples to detect faces and eyes in real-time video. The detection is performed on every video frame.\n", 12 | "\n", 13 | "The OS and SYS modules are also loaded in this sample in order to automatically locate the OpenCV libraries and use the Haar Cascade Classifier files.\n", 14 | "\n", 15 | "Here is the initialization code." 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 1, 21 | "metadata": { 22 | "collapsed": false 23 | }, 24 | "outputs": [ 25 | { 26 | "name": "stdout", 27 | "output_type": "stream", 28 | "text": [ 29 | "Face and Eyes Tracker for Real-Time Video\n", 30 | "Type Esc to Exit Program ...\n", 31 | "C:\\opencv_pre\\\n" 32 | ] 33 | } 34 | ], 35 | "source": [ 36 | "#!/usr/bin/env python2\n", 37 | "\n", 38 | "# Python 2/3 compatibility\n", 39 | "from __future__ import print_function\n", 40 | "# Allows use of print like a function in Python 2.x\n", 41 | "\n", 42 | "# Import Python modules\n", 43 | "import numpy as np\n", 44 | "import cv2\n", 45 | "import sys\n", 46 | "import os\n", 47 | "print('Face and Eyes Tracker for Real-Time Video')\n", 48 | "print('Type Esc to Exit Program ...')\n", 49 | "try:\n", 50 | " # Checks to see if OpenCV can be found\n", 51 | " ocv = os.getenv(\"OPENCV_DIR\")\n", 52 | " print(ocv)\n", 53 | "except KeyError:\n", 54 | " print('Cannot find OpenCV')\n", 55 | "# This automatically locates the cascade files within OpenCV\n", 56 | "pri_cascade_file = os.path.join(ocv,'build\\etc\\haarcascades\\haarcascade_frontalface_default.xml')\n", 57 | "sec_cascade_file = os.path.join(ocv,'build\\etc\\haarcascades\\haarcascade_eye_tree_eyeglasses.xml')\n", 58 | "\n", 59 | "# Uncomment for Debug if needed\n", 60 | "#print(pri_cascade_file)\n", 61 | "#print(sec_cascade_file)" 62 | ] 63 | }, 64 | { 65 | "cell_type": 
"markdown", 66 | "metadata": {}, 67 | "source": [ 68 | "Setup the classifiers to use. We are still using the pre-trained classifiers provided as part of OpenCV." 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 2, 74 | "metadata": { 75 | "collapsed": true 76 | }, 77 | "outputs": [], 78 | "source": [ 79 | "face_cascade = cv2.CascadeClassifier(pri_cascade_file)\n", 80 | "eye_cascade = cv2.CascadeClassifier(sec_cascade_file)" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": {}, 86 | "source": [ 87 | "Now we grab the webcam and configure it" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": 3, 93 | "metadata": { 94 | "collapsed": false 95 | }, 96 | "outputs": [ 97 | { 98 | "name": "stdout", 99 | "output_type": "stream", 100 | "text": [ 101 | "Grabbing Camera ..\n" 102 | ] 103 | } 104 | ], 105 | "source": [ 106 | "# Initialize Default Camera\n", 107 | "webcam = cv2.VideoCapture(0)\n", 108 | "# Check if Camera initialized correctly\n", 109 | "success = webcam.isOpened()\n", 110 | "if success == True:\n", 111 | " print('Grabbing Camera ..')\n", 112 | " # Uncomment and adjust according to your webcam capabilities\n", 113 | " #webcam.set(cv2.CAP_PROP_FPS,30);\n", 114 | " #webcam.set(cv2.CAP_PROP_FRAME_WIDTH,1024);\n", 115 | " #webcam.set(cv2.CAP_PROP_FRAME_HEIGHT,768);\n", 116 | "elif success == False:\n", 117 | " print('Error: Camera could not be opened')" 118 | ] 119 | }, 120 | { 121 | "cell_type": "markdown", 122 | "metadata": {}, 123 | "source": [ 124 | "This section is a mashup of the video camera test sample_04 and the previous sample_10 for face and eye detection on a still image. Only difference is that it is done on each video frame within the while loop.\n", 125 | "\n", 126 | "Video is converted to grayscale and histogram equalization filter is applied to improve the contrast. This helps the Haar Cascade Classifiers. Everything else stays the same." 
127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": 4, 132 | "metadata": { 133 | "collapsed": false 134 | }, 135 | "outputs": [ 136 | { 137 | "name": "stdout", 138 | "output_type": "stream", 139 | "text": [ 140 | "Quitting ...\n" 141 | ] 142 | } 143 | ], 144 | "source": [ 145 | "while(True):\n", 146 | " # Read each frame in video stream\n", 147 | " ret, frame = webcam.read()\n", 148 | " # Perform operations on the frame here\n", 149 | " # First convert to Grayscale \n", 150 | " gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n", 151 | " # Next run filters\n", 152 | " gray = cv2.equalizeHist(gray)\n", 153 | " # Uncomment for Debug if needed\n", 154 | " #cv2.imshow('Grayscale', gray)\n", 155 | " # Face detection using Haar Cascades\n", 156 | " # Detects objects of different sizes in the input image which are returned as a list of rectangles.\n", 157 | " # cv2.CascadeClassifier.detectMultiScale(image[,scaleFactor[,minNeighbors[,flags[,minSize[,maxSize]]]]])\n", 158 | " faces = face_cascade.detectMultiScale(gray, 1.3, 5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)\n", 159 | " # Draw the rectangles around detected Regions of Interest [ROI] - faces\n", 160 | " # cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]])\n", 161 | " out = frame.copy()\n", 162 | " for (x,y,w,h) in faces:\n", 163 | " cv2.rectangle(out,(x,y),(x+w,y+h),(255,0,0),2)\n", 164 | " roi_gray = gray[y:y+h, x:x+w]\n", 165 | " roi_color = out[y:y+h, x:x+w]\n", 166 | " # Since eyes are a part of face, limit eye detection to face regions to improve accuracy\n", 167 | " eyes = eye_cascade.detectMultiScale(roi_gray)\n", 168 | " for (ex,ey,ew,eh) in eyes:\n", 169 | " # Draw the rectangles around detected Regions of Interest [ROI] - eyes\n", 170 | " cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n", 171 | "\n", 172 | " cv2.imshow('Facetracker', out)\n", 173 | " # Wait for Esc Key to quit\n", 174 | " if cv2.waitKey(5) == 27:\n", 175 | " print('Quitting 
...')\n", 176 | " break\n", 177 | "# Release all resources used\n", 178 | "webcam.release()\n", 179 | "cv2.destroyAllWindows()" 180 | ] 181 | }, 182 | { 183 | "cell_type": "code", 184 | "execution_count": null, 185 | "metadata": { 186 | "collapsed": true 187 | }, 188 | "outputs": [], 189 | "source": [] 190 | } 191 | ], 192 | "metadata": { 193 | "kernelspec": { 194 | "display_name": "Python 2", 195 | "language": "python", 196 | "name": "python2" 197 | }, 198 | "language_info": { 199 | "codemirror_mode": { 200 | "name": "ipython", 201 | "version": 2 202 | }, 203 | "file_extension": ".py", 204 | "mimetype": "text/x-python", 205 | "name": "python", 206 | "nbconvert_exporter": "python", 207 | "pygments_lexer": "ipython2", 208 | "version": "2.7.13" 209 | } 210 | }, 211 | "nbformat": 4, 212 | "nbformat_minor": 2 213 | } 214 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_11_real-time_face_detection_and_tracking/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample 11: ocv_face_vid 2 | [Sample 11](sample_11/ocv_face_vid.py) is a basic Face and Eye Detection program that uses OpenCV to analyze real-time video and detect human faces and eyes. The detected areas or Regions of Interest (ROI) are demarcated with rectangles. The program uses the OpenCV built-in pre-trained Haar feature-based cascade classifiers in order to perform this task. 3 | 4 | This sample uses the same basic procedures from the previous samples to detect faces and eyes in real-time video. The detection is performed on every video frame. 5 | 6 | The OS and SYS modules are also loaded in this sample in order to automatically locate the OpenCV libraries and use the Haar Cascade Classifier files. 
7 | 8 | ## Usage: 9 | 10 | Launch the interactive tutorial by typing the following command in your comand window: 11 | 12 | ``` 13 | jupyter notebook ./ocv_face_vid.ipynb 14 | ``` 15 | OR 16 | 17 | You may run the script using the command: 18 | 19 | ``` 20 | python ./ocv_face_vid.py 21 | ``` 22 | 23 | ## Code Walkthrough: 24 | First the init code... 25 | In this version we automatically find the OpenCV installation using the environment variables. We also print out some debug messages. 26 | 27 | ``` 28 | #!/usr/bin/env python2 29 | 30 | # Python 2/3 compatibility 31 | from __future__ import print_function 32 | # Allows use of print like a function in Python 2.x 33 | 34 | # Import Python modules 35 | import numpy as np 36 | import cv2 37 | import sys 38 | import os 39 | print('Face and Eyes Tracker for Real-Time Video') 40 | print('Type Esc to Exit Program ...') 41 | try: 42 | # Checks to see if OpenCV can be found 43 | ocv = os.getenv("OPENCV_DIR") 44 | print(ocv) 45 | except KeyError: 46 | print('Cannot find OpenCV') 47 | # This automatically locates the cascade files within OpenCV 48 | pri_cascade_file = os.path.join(ocv,'build\etc\haarcascades\haarcascade_frontalface_default.xml') 49 | sec_cascade_file = os.path.join(ocv,'build\etc\haarcascades\haarcascade_eye_tree_eyeglasses.xml') 50 | 51 | # Uncomment for Debug if needed 52 | #print(pri_cascade_file) 53 | #print(sec_cascade_file) 54 | ``` 55 | **Console Output:** If all goes well ... 56 | ``` 57 | Face and Eyes Tracker for Real-Time Video 58 | Type Esc to Exit Program ... 59 | C:\opencv_pre\ 60 | ``` 61 | 62 | Setup the classifiers to use. We are still using the pre-trained classifiers provided as part of OpenCV. 
63 | ``` 64 | face_cascade = cv2.CascadeClassifier(pri_cascade_file) 65 | eye_cascade = cv2.CascadeClassifier(sec_cascade_file) 66 | ``` 67 | Now we grab the webcam and configure it 68 | ``` 69 | webcam = cv2.VideoCapture(0) 70 | ``` 71 | Check if Camera initialized correctly 72 | ``` 73 | success = webcam.isOpened() 74 | if success == True: 75 | print('Grabbing Camera ..') 76 | # Uncomment and adjust according to your webcam capabilities 77 | #webcam.set(cv2.CAP_PROP_FPS,30); 78 | #webcam.set(cv2.CAP_PROP_FRAME_WIDTH,1024); 79 | #webcam.set(cv2.CAP_PROP_FRAME_HEIGHT,768); 80 | elif success == False: 81 | print('Error: Camera could not be opened') 82 | ``` 83 | **Console Output:** Assuming all goes well ... 84 | ``` 85 | Grabbing Camera .. 86 | ``` 87 | This section is a mashup of the video camera test sample_04 and the previous sample_10 for face and eye detection on a still image. Only difference is that it is done on each video frame within the while loop. 88 | 89 | Video is converted to grayscale and histogram equalization filter is applied to improve the contrast. This helps the Haar Cascade Classifiers. Everything else stays the same. 90 | 91 | ``` 92 | while(True): 93 | # Read each frame in video stream 94 | ret, frame = webcam.read() 95 | # Perform operations on the frame here 96 | # First convert to Grayscale 97 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 98 | # Next run filters 99 | gray = cv2.equalizeHist(gray) 100 | # Uncomment for Debug if needed 101 | #cv2.imshow('Grayscale', gray) 102 | # Face detection using Haar Cascades 103 | # Detects objects of different sizes in the input image which are returned as a list of rectangles. 
104 | # cv2.CascadeClassifier.detectMultiScale(image[,scaleFactor[,minNeighbors[,flags[,minSize[,maxSize]]]]]) 105 | faces = face_cascade.detectMultiScale(gray, 1.3, 5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE) 106 | # Draw the rectangles around detected Regions of Interest [ROI] - faces 107 | # cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) 108 | out = frame.copy() 109 | for (x,y,w,h) in faces: 110 | cv2.rectangle(out,(x,y),(x+w,y+h),(255,0,0),2) 111 | roi_gray = gray[y:y+h, x:x+w] 112 | roi_color = out[y:y+h, x:x+w] 113 | # Since eyes are a part of face, limit eye detection to face regions to improve accuracy 114 | eyes = eye_cascade.detectMultiScale(roi_gray) 115 | for (ex,ey,ew,eh) in eyes: 116 | # Draw the rectangles around detected Regions of Interest [ROI] - eyes 117 | cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) 118 | 119 | cv2.imshow('Facetracker', out) 120 | # Wait for Esc Key to quit 121 | if cv2.waitKey(5) == 27: 122 | print('Quitting ...') 123 | break 124 | # Release all resources used 125 | webcam.release() 126 | cv2.destroyAllWindows() 127 | ``` 128 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_11_real-time_face_detection_and_tracking/ocv_face_vid.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# OpenCV Tutorial Sample 11: ocv_face_vid\n", 8 | "\n", 9 | "[Sample 11](sample_11/ocv_face_vid.py) is a basic Face and Eye Detection program that uses OpenCV to analyze real-time video and detect human faces and eyes. The detected areas or Regions of Interest (ROI) are demarcated with rectangles. 
The program uses the OpenCV built-in pre-trained Haar feature-based cascade classifiers in order to perform this task.\n", 10 | "\n", 11 | "This sample uses the same basic procedures from the previous samples to detect faces and eyes in real-time video. The detection is performed on every video frame.\n", 12 | "\n", 13 | "The OS and SYS modules are also loaded in this sample in order to automatically locate the OpenCV libraries and use the Haar Cascade Classifier files.\n", 14 | "\n", 15 | "Here is the initialization code." 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 1, 21 | "metadata": { 22 | "collapsed": false 23 | }, 24 | "outputs": [ 25 | { 26 | "name": "stdout", 27 | "output_type": "stream", 28 | "text": [ 29 | "Face and Eyes Tracker for Real-Time Video\n", 30 | "Type Esc to Exit Program ...\n", 31 | "C:\\opencv_pre\\\n" 32 | ] 33 | } 34 | ], 35 | "source": [ 36 | "#!/usr/bin/env python2\n", 37 | "\n", 38 | "# Python 2/3 compatibility\n", 39 | "from __future__ import print_function\n", 40 | "# Allows use of print like a function in Python 2.x\n", 41 | "\n", 42 | "# Import Python modules\n", 43 | "import numpy as np\n", 44 | "import cv2\n", 45 | "import sys\n", 46 | "import os\n", 47 | "print('Face and Eyes Tracker for Real-Time Video')\n", 48 | "print('Type Esc to Exit Program ...')\n", 49 | "try:\n", 50 | " # Checks to see if OpenCV can be found\n", 51 | " ocv = os.getenv(\"OPENCV_DIR\")\n", 52 | " print(ocv)\n", 53 | "except KeyError:\n", 54 | " print('Cannot find OpenCV')\n", 55 | "# This automatically locates the cascade files within OpenCV\n", 56 | "pri_cascade_file = os.path.join(ocv,'build\\etc\\haarcascades\\haarcascade_frontalface_default.xml')\n", 57 | "sec_cascade_file = os.path.join(ocv,'build\\etc\\haarcascades\\haarcascade_eye_tree_eyeglasses.xml')\n", 58 | "\n", 59 | "# Uncomment for Debug if needed\n", 60 | "#print(pri_cascade_file)\n", 61 | "#print(sec_cascade_file)" 62 | ] 63 | }, 64 | { 65 | "cell_type": 
"markdown", 66 | "metadata": {}, 67 | "source": [ 68 | "Setup the classifiers to use. We are still using the pre-trained classifiers provided as part of OpenCV." 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 2, 74 | "metadata": { 75 | "collapsed": true 76 | }, 77 | "outputs": [], 78 | "source": [ 79 | "face_cascade = cv2.CascadeClassifier(pri_cascade_file)\n", 80 | "eye_cascade = cv2.CascadeClassifier(sec_cascade_file)" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": {}, 86 | "source": [ 87 | "Now we grab the webcam and configure it" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": 3, 93 | "metadata": { 94 | "collapsed": false 95 | }, 96 | "outputs": [ 97 | { 98 | "name": "stdout", 99 | "output_type": "stream", 100 | "text": [ 101 | "Grabbing Camera ..\n" 102 | ] 103 | } 104 | ], 105 | "source": [ 106 | "# Initialize Default Camera\n", 107 | "webcam = cv2.VideoCapture(0)\n", 108 | "# Check if Camera initialized correctly\n", 109 | "success = webcam.isOpened()\n", 110 | "if success == True:\n", 111 | " print('Grabbing Camera ..')\n", 112 | " # Uncomment and adjust according to your webcam capabilities\n", 113 | " #webcam.set(cv2.CAP_PROP_FPS,30);\n", 114 | " #webcam.set(cv2.CAP_PROP_FRAME_WIDTH,1024);\n", 115 | " #webcam.set(cv2.CAP_PROP_FRAME_HEIGHT,768);\n", 116 | "elif success == False:\n", 117 | " print('Error: Camera could not be opened')" 118 | ] 119 | }, 120 | { 121 | "cell_type": "markdown", 122 | "metadata": {}, 123 | "source": [ 124 | "This section is a mashup of the video camera test sample_04 and the previous sample_10 for face and eye detection on a still image. Only difference is that it is done on each video frame within the while loop.\n", 125 | "\n", 126 | "Video is converted to grayscale and histogram equalization filter is applied to improve the contrast. This helps the Haar Cascade Classifiers. Everything else stays the same." 
127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": 4, 132 | "metadata": { 133 | "collapsed": false 134 | }, 135 | "outputs": [ 136 | { 137 | "name": "stdout", 138 | "output_type": "stream", 139 | "text": [ 140 | "Quitting ...\n" 141 | ] 142 | } 143 | ], 144 | "source": [ 145 | "while(True):\n", 146 | " # Read each frame in video stream\n", 147 | " ret, frame = webcam.read()\n", 148 | " # Perform operations on the frame here\n", 149 | " # First convert to Grayscale \n", 150 | " gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n", 151 | " # Next run filters\n", 152 | " gray = cv2.equalizeHist(gray)\n", 153 | " # Uncomment for Debug if needed\n", 154 | " #cv2.imshow('Grayscale', gray)\n", 155 | " # Face detection using Haar Cascades\n", 156 | " # Detects objects of different sizes in the input image which are returned as a list of rectangles.\n", 157 | " # cv2.CascadeClassifier.detectMultiScale(image[,scaleFactor[,minNeighbors[,flags[,minSize[,maxSize]]]]])\n", 158 | " faces = face_cascade.detectMultiScale(gray, 1.3, 5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)\n", 159 | " # Draw the rectangles around detected Regions of Interest [ROI] - faces\n", 160 | " # cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]])\n", 161 | " out = frame.copy()\n", 162 | " for (x,y,w,h) in faces:\n", 163 | " cv2.rectangle(out,(x,y),(x+w,y+h),(255,0,0),2)\n", 164 | " roi_gray = gray[y:y+h, x:x+w]\n", 165 | " roi_color = out[y:y+h, x:x+w]\n", 166 | " # Since eyes are a part of face, limit eye detection to face regions to improve accuracy\n", 167 | " eyes = eye_cascade.detectMultiScale(roi_gray)\n", 168 | " for (ex,ey,ew,eh) in eyes:\n", 169 | " # Draw the rectangles around detected Regions of Interest [ROI] - eyes\n", 170 | " cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n", 171 | "\n", 172 | " cv2.imshow('Facetracker', out)\n", 173 | " # Wait for Esc Key to quit\n", 174 | " if cv2.waitKey(5) == 27:\n", 175 | " print('Quitting 
...')\n", 176 | " break\n", 177 | "# Release all resources used\n", 178 | "webcam.release()\n", 179 | "cv2.destroyAllWindows()" 180 | ] 181 | }, 182 | { 183 | "cell_type": "code", 184 | "execution_count": null, 185 | "metadata": { 186 | "collapsed": true 187 | }, 188 | "outputs": [], 189 | "source": [] 190 | } 191 | ], 192 | "metadata": { 193 | "kernelspec": { 194 | "display_name": "Python 2", 195 | "language": "python", 196 | "name": "python2" 197 | }, 198 | "language_info": { 199 | "codemirror_mode": { 200 | "name": "ipython", 201 | "version": 2 202 | }, 203 | "file_extension": ".py", 204 | "mimetype": "text/x-python", 205 | "name": "python", 206 | "nbconvert_exporter": "python", 207 | "pygments_lexer": "ipython2", 208 | "version": "2.7.13" 209 | } 210 | }, 211 | "nbformat": 4, 212 | "nbformat_minor": 2 213 | } 214 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_11_real-time_face_detection_and_tracking/ocv_face_vid.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import Python modules 8 | import numpy as np 9 | import cv2 10 | import sys 11 | import os 12 | print('Face and Eyes Tracker for Real-Time Video') 13 | print('Type Esc to Exit Program ...') 14 | try: 15 | # Checks to see if OpenCV can be found 16 | ocv = os.getenv("OPENCV_DIR") 17 | print(ocv) 18 | except KeyError: 19 | print('Cannot find OpenCV') 20 | # This automatically locates the cascade files within OpenCV 21 | pri_cascade_file = os.path.join(ocv,'build\etc\haarcascades\haarcascade_frontalface_default.xml') 22 | sec_cascade_file = os.path.join(ocv,'build\etc\haarcascades\haarcascade_eye_tree_eyeglasses.xml') 23 | 24 | # Uncomment for Debug if needed 25 | #print(pri_cascade_file) 26 | #print(sec_cascade_file) 27 | 28 | # Setup 
Classifiers 29 | face_cascade = cv2.CascadeClassifier(pri_cascade_file) 30 | eye_cascade = cv2.CascadeClassifier(sec_cascade_file) 31 | 32 | try: 33 | # Initialize Default Camera 34 | webcam = cv2.VideoCapture(0) 35 | # Check if Camera initialized correctly 36 | success = webcam.isOpened() 37 | if success == True: 38 | print('Grabbing Camera ..') 39 | # Uncomment and adjust according to your webcam capabilities 40 | #webcam.set(cv2.CAP_PROP_FPS,30); 41 | #webcam.set(cv2.CAP_PROP_FRAME_WIDTH,1024); 42 | #webcam.set(cv2.CAP_PROP_FRAME_HEIGHT,768); 43 | elif success == False: 44 | print('Error: Camera could not be opened') 45 | 46 | while(True): 47 | # Read each frame in video stream 48 | ret, frame = webcam.read() 49 | # Perform operations on the frame here 50 | # First convert to Grayscale 51 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 52 | # Next run filters 53 | gray = cv2.equalizeHist(gray) 54 | # Uncomment for Debug if needed 55 | #cv2.imshow('Grayscale', gray) 56 | # Face detection using Haar Cascades 57 | # Detects objects of different sizes in the input image which are returned as a list of rectangles. 
58 | # cv2.CascadeClassifier.detectMultiScale(image[,scaleFactor[,minNeighbors[,flags[,minSize[,maxSize]]]]]) 59 | faces = face_cascade.detectMultiScale(gray, 1.3, 5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE) 60 | # Draw the rectangles around detected Regions of Interest [ROI] - faces 61 | # cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) 62 | out = frame.copy() 63 | for (x,y,w,h) in faces: 64 | cv2.rectangle(out,(x,y),(x+w,y+h),(255,0,0),2) 65 | roi_gray = gray[y:y+h, x:x+w] 66 | roi_color = out[y:y+h, x:x+w] 67 | # Since eyes are a part of face, limit eye detection to face regions to improve accuracy 68 | eyes = eye_cascade.detectMultiScale(roi_gray) 69 | for (ex,ey,ew,eh) in eyes: 70 | # Draw the rectangles around detected Regions of Interest [ROI] - faces 71 | cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) 72 | 73 | cv2.imshow('Facetracker', out) 74 | # Wait for Esc Key to quit 75 | if cv2.waitKey(5) == 27: 76 | break 77 | # Release all resources used 78 | webcam.release() 79 | cv2.destroyAllWindows() 80 | 81 | except cv2.error as e: 82 | print('Please correct OpenCV Error') -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_12_real-time_people_counter/README.md: -------------------------------------------------------------------------------- 1 | # OpenCV Tutorial Sample 12: ocv_face_cnt_vid 2 | [Sample 12](sample_12/ocv_face_cnt_vid.py) is a basic People Counter using the previous Face and Eye Detection program that uses OpenCV to analyze real-time video and detect human faces and eyes. In addition to detecting Faces and Eyes, the program also returns the number of faces detected to the console. 
3 | 4 | ## Usage: 5 | 6 | Launch the interactive tutorial by typing the following command in your command window: 7 | 8 | ``` 9 | jupyter notebook ./ocv_face_cnt_vid.ipynb 10 | ``` 11 | OR 12 | 13 | You may run the script using the command: 14 | 15 | ``` 16 | python ./ocv_face_cnt_vid.py 17 | ``` 18 | 19 | ## Code Walkthrough: 20 | This program counts the number of faces seen in a frame and sends the output to console. It does not perform a cumulative count. This is because the detection is done on every frame of the video and unless faces were recognized and ignored in the following frame, each face would be counted multiple times per frame and produce erroneous results. 21 | 22 | As such, a people counter that counts faces needs to also have face recognition capabilities to be robust and perform a cumulative count. 23 | In the context of digital signage, this example can be used to detect whether a sign is being seen by a single individual or more than one individual. 24 | 25 | This sample is identical to the previous sample with the following exception: It uses the Python built-in **len()** function to count the number of elements in the array of rectangles for the faces detected after the cascade classifier is run. 
26 | 27 | **print('Number of faces detected: ' + str(len(faces)))** 28 | 29 | ``` 30 | #!/usr/bin/env python 31 | 32 | # Python 2/3 compatibility 33 | from __future__ import print_function 34 | # Allows use of print like a function in Python 2.x 35 | 36 | # Import Python modules 37 | import numpy as np 38 | import cv2 39 | import sys 40 | import os 41 | print('Face and Eyes Tracker for Real-Time Video') 42 | print('Type Esc to Exit Program ...') 43 | try: 44 | # Checks to see if OpenCV can be found 45 | ocv = os.getenv("OPENCV_DIR") 46 | print(ocv) 47 | except KeyError: 48 | print('Cannot find OpenCV') 49 | # This automatically locates the cascade files within OpenCV 50 | pri_cascade_file = os.path.join(ocv,'build\etc\haarcascades\haarcascade_frontalface_default.xml') 51 | sec_cascade_file = os.path.join(ocv,'build\etc\haarcascades\haarcascade_eye_tree_eyeglasses.xml') 52 | 53 | # Uncomment for Debug if needed 54 | #print(pri_cascade_file) 55 | #print(sec_cascade_file) 56 | 57 | # Setup Classifiers 58 | face_cascade = cv2.CascadeClassifier(pri_cascade_file) 59 | eye_cascade = cv2.CascadeClassifier(sec_cascade_file) 60 | 61 | try: 62 | # Initialize Default Camera 63 | webcam = cv2.VideoCapture(0) 64 | # Check if Camera initialized correctly 65 | success = webcam.isOpened() 66 | if success == True: 67 | print('Grabbing Camera ..') 68 | # Uncomment and adjust according to your webcam capabilities 69 | #webcam.set(cv2.CAP_PROP_FPS,30); 70 | #webcam.set(cv2.CAP_PROP_FRAME_WIDTH,1024); 71 | #webcam.set(cv2.CAP_PROP_FRAME_HEIGHT,768); 72 | elif success == False: 73 | print('Error: Camera could not be opened') 74 | 75 | while(True): 76 | # Read each frame in video stream 77 | ret, frame = webcam.read() 78 | # Perform operations on the frame here 79 | # First convert to Grayscale 80 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 81 | # Next run filters 82 | gray = cv2.equalizeHist(gray) 83 | # Uncomment for Debug if needed 84 | #cv2.imshow('Grayscale', gray) 85 | # Face 
detection using Haar Cascades 86 | # Detects objects of different sizes in the input image which are returned as a list of rectangles. 87 | # cv2.CascadeClassifier.detectMultiScale(image[,scaleFactor[,minNeighbors[,flags[,minSize[,maxSize]]]]]) 88 | faces = face_cascade.detectMultiScale(gray, 1.3, 5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE) 89 | # Print to console the number of faces detected 90 | print('Number of faces detected: ' + str(len(faces))) 91 | # Draw the rectangles around detected Regions of Interest [ROI] - faces 92 | # cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) 93 | out = frame.copy() 94 | for (x,y,w,h) in faces: 95 | cv2.rectangle(out,(x,y),(x+w,y+h),(255,0,0),2) 96 | roi_gray = gray[y:y+h, x:x+w] 97 | roi_color = out[y:y+h, x:x+w] 98 | # Since eyes are a part of face, limit eye detection to face regions to improve accuracy 99 | eyes = eye_cascade.detectMultiScale(roi_gray) 100 | for (ex,ey,ew,eh) in eyes: 101 | # Draw the rectangles around detected Regions of Interest [ROI] - faces 102 | cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) 103 | 104 | cv2.imshow('Facetracker', out) 105 | # Wait for Esc Key to quit 106 | if cv2.waitKey(5) == 27: 107 | break 108 | # Release all resources used 109 | webcam.release() 110 | cv2.destroyAllWindows() 111 | 112 | except cv2.error as e: 113 | print('Please correct OpenCV Error') 114 | ``` 115 | 116 | **Console Output:** 117 | ``` 118 | Face and Eyes Tracker for Real-Time Video 119 | Type Esc to Exit Program ... 120 | C:\opencv_pre\ 121 | Grabbing Camera .. 
122 | Number of faces detected: 1 123 | Number of faces detected: 1 124 | Number of faces detected: 1 125 | Number of faces detected: 1 126 | Number of faces detected: 1 127 | Number of faces detected: 0 128 | Number of faces detected: 0 129 | Number of faces detected: 0 130 | Number of faces detected: 0 131 | Number of faces detected: 1 132 | Number of faces detected: 1 133 | Number of faces detected: 1 134 | Number of faces detected: 2 135 | Number of faces detected: 2 136 | Number of faces detected: 2 137 | Number of faces detected: 1 138 | Number of faces detected: 1 139 | Number of faces detected: 1 140 | Number of faces detected: 1 141 | Number of faces detected: 1 142 | Number of faces detected: 1 143 | Number of faces detected: 1 144 | Number of faces detected: 1 145 | Number of faces detected: 1 146 | ``` 147 | -------------------------------------------------------------------------------- /tutorials/opencv/Python/sample_12_real-time_people_counter/ocv_face_cnt_vid.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Python 2/3 compatibility 4 | from __future__ import print_function 5 | # Allows use of print like a function in Python 2.x 6 | 7 | # Import Python modules 8 | import numpy as np 9 | import cv2 10 | import sys 11 | import os 12 | print('Face and Eyes Tracker for Real-Time Video') 13 | print('Type Esc to Exit Program ...') 14 | try: 15 | # Checks to see if OpenCV can be found 16 | ocv = os.getenv("OPENCV_DIR") 17 | print(ocv) 18 | except KeyError: 19 | print('Cannot find OpenCV') 20 | # This automatically locates the cascade files within OpenCV 21 | pri_cascade_file = os.path.join(ocv,'build\etc\haarcascades\haarcascade_frontalface_default.xml') 22 | sec_cascade_file = os.path.join(ocv,'build\etc\haarcascades\haarcascade_eye_tree_eyeglasses.xml') 23 | 24 | # Uncomment for Debug if needed 25 | #print(pri_cascade_file) 26 | #print(sec_cascade_file) 27 | 28 | # Setup Classifiers 
29 | face_cascade = cv2.CascadeClassifier(pri_cascade_file) 30 | eye_cascade = cv2.CascadeClassifier(sec_cascade_file) 31 | 32 | try: 33 | # Initialize Default Camera 34 | webcam = cv2.VideoCapture(0) 35 | # Check if Camera initialized correctly 36 | success = webcam.isOpened() 37 | if success == True: 38 | print('Grabbing Camera ..') 39 | # Uncomment and adjust according to your webcam capabilities 40 | #webcam.set(cv2.CAP_PROP_FPS,30); 41 | #webcam.set(cv2.CAP_PROP_FRAME_WIDTH,1024); 42 | #webcam.set(cv2.CAP_PROP_FRAME_HEIGHT,768); 43 | elif success == False: 44 | print('Error: Camera could not be opened') 45 | 46 | while(True): 47 | # Read each frame in video stream 48 | ret, frame = webcam.read() 49 | # Perform operations on the frame here 50 | # First convert to Grayscale 51 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 52 | # Next run filters 53 | gray = cv2.equalizeHist(gray) 54 | # Uncomment for Debug if needed 55 | #cv2.imshow('Grayscale', gray) 56 | # Face detection using Haar Cascades 57 | # Detects objects of different sizes in the input image which are returned as a list of rectangles. 
58 | # cv2.CascadeClassifier.detectMultiScale(image[,scaleFactor[,minNeighbors[,flags[,minSize[,maxSize]]]]]) 59 | faces = face_cascade.detectMultiScale(gray, 1.3, 5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE) 60 | # Print to console the number of faces detected 61 | print('Number of faces detected: ' + str(len(faces))) 62 | # Draw the rectangles around detected Regions of Interest [ROI] - faces 63 | # cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) 64 | out = frame.copy() 65 | for (x,y,w,h) in faces: 66 | cv2.rectangle(out,(x,y),(x+w,y+h),(255,0,0),2) 67 | roi_gray = gray[y:y+h, x:x+w] 68 | roi_color = out[y:y+h, x:x+w] 69 | # Since eyes are a part of face, limit eye detection to face regions to improve accuracy 70 | eyes = eye_cascade.detectMultiScale(roi_gray) 71 | for (ex,ey,ew,eh) in eyes: 72 | # Draw the rectangles around detected Regions of Interest [ROI] - faces 73 | cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) 74 | 75 | cv2.imshow('Facetracker', out) 76 | # Wait for Esc Key to quit 77 | if cv2.waitKey(5) == 27: 78 | break 79 | # Release all resources used 80 | webcam.release() 81 | cv2.destroyAllWindows() 82 | 83 | except cv2.error as e: 84 | print('Please correct OpenCV Error') --------------------------------------------------------------------------------