├── docs └── images │ ├── color.jpg │ ├── crack.jpg │ ├── grafana1.png │ ├── grafana2.png │ ├── grafana3.png │ ├── grafana4.png │ ├── jupyter.png │ ├── jupyter_code.png │ ├── orientation.jpg │ └── architectural_diagram.png ├── resources └── config.json ├── LICENSE ├── setup.sh ├── README.md ├── Jupyter ├── README.md ├── object_flaw_detector.py └── object_flaw_detector.ipynb ├── flaw_detector.json └── application └── object_flaw_detector.py /docs/images/color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/object-flaw-detector-python/HEAD/docs/images/color.jpg -------------------------------------------------------------------------------- /docs/images/crack.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/object-flaw-detector-python/HEAD/docs/images/crack.jpg -------------------------------------------------------------------------------- /docs/images/grafana1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/object-flaw-detector-python/HEAD/docs/images/grafana1.png -------------------------------------------------------------------------------- /docs/images/grafana2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/object-flaw-detector-python/HEAD/docs/images/grafana2.png -------------------------------------------------------------------------------- /docs/images/grafana3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/object-flaw-detector-python/HEAD/docs/images/grafana3.png -------------------------------------------------------------------------------- /docs/images/grafana4.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/object-flaw-detector-python/HEAD/docs/images/grafana4.png -------------------------------------------------------------------------------- /docs/images/jupyter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/object-flaw-detector-python/HEAD/docs/images/jupyter.png -------------------------------------------------------------------------------- /docs/images/jupyter_code.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/object-flaw-detector-python/HEAD/docs/images/jupyter_code.png -------------------------------------------------------------------------------- /docs/images/orientation.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/object-flaw-detector-python/HEAD/docs/images/orientation.jpg -------------------------------------------------------------------------------- /resources/config.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | "inputs": [ 4 | { 5 | "video": "../resources/bolt-detection.mp4" 6 | } 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /docs/images/architectural_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel-iot-devkit/object-flaw-detector-python/HEAD/docs/images/architectural_diagram.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2021, Intel Corporation 4 | All rights reserved. 
5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright (c) 2018 Intel Corporation. 
4 | # Permission is hereby granted, free of charge, to any person obtaining 5 | # a copy of this software and associated documentation files (the 6 | # "Software"), to deal in the Software without restriction, including 7 | # without limitation the rights to use, copy, modify, merge, publish, 8 | # distribute, sublicense, and/or sell copies of the Software, and to 9 | # permit persons to whom the Software is furnished to do so, subject to 10 | # the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be 13 | # included in all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16 | # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 17 | # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 18 | # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 19 | # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 20 | # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 21 | # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
22 | 23 | #Install the dependencies 24 | sudo apt-get update 25 | sudo apt install curl 26 | sudo curl -sL https://repos.influxdata.com/influxdb.key | sudo apt-key add - 27 | source /etc/lsb-release 28 | echo "deb https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list 29 | sudo apt-get install influxdb 30 | sudo service influxdb start 31 | wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.3.2_amd64.deb 32 | sudo apt-get install -y adduser libfontconfig 33 | sudo dpkg -i grafana_5.3.2_amd64.deb 34 | sudo /bin/systemctl start grafana-server 35 | sudo apt-get install python3-pip 36 | sudo pip3 install influxdb numpy jupyter 37 | 38 | #Download the video 39 | cd resources 40 | wget -O bolt-detection.mp4 https://raw.githubusercontent.com/intel-iot-devkit/sample-videos/master/bolt-detection.mp4 41 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DISCONTINUATION OF PROJECT # 2 | This project will no longer be maintained by Intel. 3 | Intel has ceased development and contributions including, but not limited to, maintenance, bug fixes, new releases, or updates, to this project. 4 | Intel no longer accepts patches to this project. 5 | 6 | # Object Flaw Detector 7 | 8 | | Details | | 9 | | --------------------- | ----------------- | 10 | | Target OS: | Ubuntu* 18.04 LTS | 11 | | Programming Language: | Python* 3.6 | 12 | | Time to complete: | 30 min | 13 | 14 | This reference implementation is also [available in C++](https://github.com/intel-iot-devkit/reference-implementation-private/blob/object-flaw-measurement/object-flaw-detector-measurement/README.MD). 15 | 16 | ## What it does 17 | 18 | The object flaw detector application detects the anomalies such as color, crack, and orientation of the object moving on a conveyor belt. 
Anomalies are marked as defective and saved in the color, crack, orientation folders respectively. Also objects with no defects are saved in no_defect folder. 19 | These anomalies data is sent to InfluxDB* database and is visualized on Grafana*. 20 | This application also measures length and width of the object in millimeters. 21 | 22 | ## Requirements 23 | 24 | - Ubuntu 18.04 25 | - Intel® Distribution of OpenVINO™ toolkit 2020 R3 Release 26 | - Grafana* v5.3.2 27 | - InfluxDB* v1.6.2 28 | 29 | ## Setup 30 | 31 | ### Install Intel® Distribution of OpenVINO™ toolkit 32 | 33 | Refer to [ Install the Intel® Distribution of OpenVINO™ toolkit for Linux*](https://software.intel.com/en-us/articles/OpenVINO-Install-Linux) for more information on how to install and set up the Intel® Distribution of OpenVINO™ toolkit 34 | 35 | ## How It works 36 | 37 | This application takes the input from a video camera or a video file for processing. 38 | 39 | ![Data Flow Diagram](./docs/images/architectural_diagram.png) 40 | 41 | **Orientation defect detection:** Get the frame and change the color space to HSV format. Threshold the image based on the color of the object using [inRange](https://docs.opencv.org/master/df/d9d/tutorial_py_colorspaces.html) function to create a mask. Perform morphological opening and closing on the mask and find the contours using [findContours](https://docs.opencv.org/master/d4/d73/tutorial_py_contours_begin.html) function. Filter the contours based on the area. Perform [PCA](https://docs.opencv.org/master/d1/dee/tutorial_introduction_to_pca.html) (Principal Component Analysis) on the contours to get the orientation of the object. 42 | 43 | ![orientation](./docs/images/orientation.jpg) 44 | 45 | **Color defect detection:** Threshold the image based on the defective color of the object using [inRange](https://docs.opencv.org/master/df/d9d/tutorial_py_colorspaces.html) function. 
Use the mask obtained from the [inRange](https://docs.opencv.org/master/df/d9d/tutorial_py_colorspaces.html) function to find the defective area. 46 | 47 | ![color](./docs/images/color.jpg) 48 | 49 | **Crack detection:** Transform the image from BGR to Grayscale format using [cvtColor](https://docs.opencv.org/master/df/d9d/tutorial_py_colorspaces.html) function. Blur the image using [blur](https://docs.opencv.org/master/d4/d13/tutorial_py_filtering.html) function to remove the noises. Use the contours found on the blurred image to detect the cracks. 50 | 51 | ![crack](./docs/images/crack.jpg) 52 | 53 | Save the images of defective objects in their respective folders. For example, objects with color defect are saved in **color** folder, objects with cracks are saved in **crack** folder, objects with orientation defect are saved in **orientation** folder and objects with no defect are stored in **no_defect** folder. 54 | 55 | ## Setup 56 | 57 | ### Get the code 58 | 59 | Steps to clone the reference implementation: (object-flaw-detector-python) 60 | 61 | sudo apt-get update && sudo apt-get install git 62 | git clone https://github.com/intel-iot-devkit/object-flaw-detector-python.git 63 | 64 | ### Install Intel® Distribution of OpenVINO™ toolkit 65 | Before running the application, install the Intel® Distribution of OpenVINO™ toolkit. For details, see [Installing the Intel® Distribution of OpenVINO™ toolkit for Linux*](https://software.intel.com/en-us/openvino-toolkit/choose-download/free-download-linux) 66 | 67 | ### Other dependencies 68 | #### InfluxDB* 69 | InfluxDB is a time series database designed to handle high write and query loads. It is an integral component of the TICK stack. InfluxDB is meant to be used as a backing store for any use case involving large amounts of timestamped data, including DevOps monitoring, application metrics, IoT sensor data, and real-time analytics. 
70 | 71 | #### Grafana* 72 | Grafana is an open-source, general purpose dashboard and graph composer, which runs as a web application. It supports Graphite, InfluxDB, Prometheus, Google Stackdriver, AWS CloudWatch, Azure Monitor, Loki, MySQL, PostgreSQL, Microsoft SQL Server, Testdata, Mixed, OpenTSDB and Elasticsearch as backends. Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored. 73 | 74 | To install the dependencies of the RI, run the below command: 75 | ``` 76 | cd 77 | ./setup.sh 78 | ``` 79 | ### The Config File 80 | 81 | The _resources/config.json_ contains the path to the videos that will be used by the application. 82 | The _config.json_ file is of the form name/value pair, `video: ` 83 | 84 | Example of the _config.json_ file: 85 | 86 | ``` 87 | { 88 | 89 | "inputs": [ 90 | { 91 | "video": "videos/video1.mp4" 92 | } 93 | ] 94 | } 95 | ``` 96 | 97 | ### Which Input video to use 98 | 99 | The application works with any input video. Find sample videos for object detection [here](https://github.com/intel-iot-devkit/sample-videos/). 100 | 101 | For first-use, we recommend using the [bolt-detection](https://github.com/intel-iot-devkit/sample-videos/blob/master/bolt-detection.mp4) video.The video is automatically downloaded to the `resources/` folder. 102 | For example:
103 | The config.json would be: 104 | 105 | ``` 106 | { 107 | 108 | "inputs": [ 109 | { 110 | "video": "sample-videos/bolt-detection.mp4" 111 | } 112 | ] 113 | } 114 | ``` 115 | To use any other video, specify the path in config.json file 116 | 117 | ### Using the Camera instead of video 118 | 119 | Replace the path/to/video in the _resources/config.json_ file with the camera ID, where the ID is taken from the video device (the number X in /dev/videoX). 120 | 121 | On Ubuntu, list all available video devices with the following command: 122 | 123 | ``` 124 | ls /dev/video* 125 | ``` 126 | 127 | For example, if the output of above command is /dev/video0, then config.json would be:: 128 | 129 | ``` 130 | { 131 | 132 | "inputs": [ 133 | { 134 | "video": "0" 135 | } 136 | ] 137 | } 138 | ``` 139 | 140 | ### Setup the Environment 141 | 142 | Configure the environment to use the Intel® Distribution of OpenVINO™ toolkit once per session by running the **source** command on the command line: 143 | ``` 144 | source /opt/intel/openvino/bin/setupvars.sh 145 | ``` 146 | __Note__: This command needs to be executed only once in the terminal where the application will be executed. If the terminal is closed, the command needs to be executed again. 147 | 148 | ## Run the Application 149 | 150 | - Change the current directory to the git-cloned application code location on your system: 151 | 152 | ``` 153 | cd /application 154 | ``` 155 | 156 | - To see a list of the help options: 157 | 158 | ``` 159 | python3 object_flaw_detector.py --help 160 | ``` 161 | 162 | - Input source can be a video file or a camera. 
163 | 164 | - To save defective images in a specific directory 165 | 166 | ``` 167 | python3 object_flaw_detector.py -dir 168 | ``` 169 | 170 | - To save defective images in current working directory 171 | 172 | ``` 173 | python3 object_flaw_detector.py 174 | ``` 175 | 176 | **Optional:** If field of view and distance between the object and camera are available use ```-fv``` and ```-dis``` command line arguments respectively. Otherwise camera of 96 pixels per inch is considered by default. For example: 177 | 178 | python3 object_flaw_detector.py -f 60 -d 50 179 | 180 | **Note:** User can get field of view from camera specifications. The values for ```-f``` and ```-d``` should be in __degrees__ and __millimeters__ respectively. 181 | 182 | - To check the data on InfluxDB, run the following commands: 183 | 184 | ``` 185 | influx 186 | show databases 187 | use obj_flaw_database 188 | select * from obj_flaw_detector 189 | ``` 190 | 191 | ### Visualize on Grafana 192 | 193 | - If you wish to import settings to visualise the data on Grafana, follow steps below. 194 | 195 | 1. On the terminal, run the following command: 196 | 197 | ``` 198 | sudo service grafana-server start 199 | ``` 200 | 201 | 2. In your browser, go to localhost:3000. 202 | 203 | 3. Log in with user as **admin** and password as **admin**. 204 | 205 | 4. Click on **Configuration**. 206 | 207 | 5. Select **“Data Sources”**. 208 | 209 | 6. Click on **“+ Add data source”** and provide inputs below. 210 | 211 | - *Name*: Obj_flaw_detector 212 | - *Type*: InfluxDB 213 | - *URL*: http://localhost:8086 214 | - *Database*: obj_flaw_database 215 | - Click on “Save and Test” 216 | 217 | 7. Click on **+** icon present on the left side of the browser, select **import**. 218 | 219 | 8. Click on **Upload.json File**. 220 | 221 | 9. Select the file name "flaw_detector.json" from object-flaw-detector-python directory. 222 | 223 | 10. Click on import. 224 | 225 | 11. 
Run the python code again on the terminal to visualize data on grafana. 226 | 227 | - If you wish to start from scratch to visualize data on Grafana, follow the steps below. 228 | 229 | 1. On the terminal, run the following command. 230 | 231 | ``` 232 | sudo service grafana-server start 233 | ``` 234 | 235 | 2. Open the browser, go to **localhost:3000**. 236 | 237 | 3. Log in with user as **admin** and password as **admin**. 238 | 239 | 4. Click on the **Configuration** icon and Select **“Data Sources”**. 240 | 241 | 5. Click on **“+ Add data source”** and provide inputs below. 242 | 243 | - *Name*: Obj_flaw_detector 244 | - *Type*: InfluxDB 245 | - *URL*: http://localhost:8086 246 | - *Database*: obj_flaw_database 247 | - Click on “Save and Test” 248 | 249 | ![Grafana1](./docs/images/grafana1.png) 250 | 251 | 6. To create a new Dashboard 252 | 253 | - Select **+** icon from the side menu bar which is under grafana icon and select **Dashboard**. 254 | - Select **Graph**, click on the **Panel Title** and select **Edit**. 255 | - On the **Metrics** tab 256 | 1. From **Datasource** choose **obj_flaw_detector**. 257 | 2. Click on the row just below the tab, starting with **“A”**. 258 | 3. Click on **select measurement** and select **obj_flaw_detector**. 259 | 4. From **SELECT** row, click on **fields** and select **Color**. Also click on **+** from the same row, select **aggregations** and click on **distinct()**. From **GROUP BY** row, click on **time** and select **1s**. Name the query as **color** in the **ALIAS BY** row. 260 | 5. Similarly from **Metrics** tab configure for **Crack**, **Orientation**, **No defect** and **Object Number** by clicking **Add Query**. 261 | - On the **Time range** tab, change the **override relative time** to **100s**. 262 | - Save the dashboard with name **flaw_detector**. 263 | 264 | ![Grafana2](./docs/images/grafana2.png) 265 | 266 | 7. Click on the **add panel** icon on the top menu. 
267 | 268 | - Select **Table** , Click on the **Panel Title** and select **Edit** and follow the steps mentioned in the previous step for configuring **Metric** and **Time range** tab. 269 | - From the **Column Styles** tab, click on **+Add** and in the **Apply to columns named** give the name **color**, and also value **0** in the **Decimals**. 270 | - Similarly from **Column Styles** tab configure for **Crack**, **Orientation**, **No defect** and **Object Number** by clicking **+Add**. 271 | - Save the dashboard and click on **Back to dashboard** icon which is on right corner of the top menu. 272 | 273 | 8. Click on the **add panel** icon on the top menu. 274 | 275 | - Select **Singlestat**, Click on the **Panel Title** and select **Edit**. 276 | 1. From **Datasource** choose **obj_flaw_detector** and click on the row just below the tab, starting with **“A”**. 277 | 2. Click on **select measurement** and select **obj_flaw_detector**. 278 | 3. From **SELECT** row, click on **fields** and select **Object Number**. Also click on **+** from the same row, select **aggregations** and click on **sum()**. From **GROUP BY** row, click on **time** and select **1s**. Name the query as **Object Count** in the **ALIAS BY** row. 279 | - On the **Options** tab, select **show** under **Gauge** option and change the value of **decimals** to **0** under **Value** option. 280 | - Save the dashboard and click on **Back to dashboard** icon. 281 | 282 | 9. Mark the current directory as favourite by clicking on **Mark as favorite** icon on the top menu. 283 | 284 | 10. Select **Time picker** from the top menu of dashboard. Under **Custom range** change the **From** value to **now-10s** and **Refreshing every:** to **5s**, click on **Apply** and save the dashboard. 285 | 286 | 11. For re-testing, follow the steps below: 287 | 288 | - In a new browser tab or window, go to **http://localhost:3000/**. 289 | - Log in with user as **admin** and password as **admin**. 
290 | - The **“Home Dashboard”** shows up the list of starred and Recently viewed dashboards. Select **flaw_detector**. 291 | 292 | ![Grafana3](./docs/images/grafana3.png) 293 | 294 | 12. Run the Python code again on the terminal to visualize data on Grafana. 295 | 296 | ![Grafana4](./docs/images/grafana4.png) 297 | 298 | **NOTE :** From the top right corner of the dashboard, select the **Range** option and set the **Refreshing every** option to **5s**. 299 | 300 | ## (Optional) Save Data to the Cloud 301 | 302 | As an optional step, send data results to an Amazon Web Services (AWS)* instance for graphing. 303 | 304 | 1. Make an EC2 Linux* instance on AWS. Steps are found [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_GetStarted.html). 305 | 2. Install InfluxDB on EC2 Linux instance. Download [here](https://github.com/influxdata/influxdb). 306 | 3. Download and install Grafana on EC2 Linux instance. Download [here](https://grafana.com/get). 307 | -------------------------------------------------------------------------------- /Jupyter/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Object Flaw Detector 3 | 4 | | Details | | 5 | | --------------------- | ----------------- | 6 | | Target OS: | Ubuntu* 18.04 LTS | 7 | | Programming Language: | Python* 3.6 | 8 | | Time to complete: | 30 min | 9 | 10 | This reference implementation is also [available in C++](https://github.com/intel-iot-devkit/reference-implementation-private/blob/object-flaw-measurement/object-flaw-detector-measurement/README.MD). 11 | 12 | ## What it does 13 | 14 | The object flaw detector application detects the anomalies such as color, crack, and orientation of the object moving on a conveyor belt. Anomalies are marked as defective and saved in the color, crack, orientation folders respectively. Also objects with no defects are saved in no_defect folder. 
15 | These anomalies data is sent to InfluxDB* database and is visualized on Grafana*. 16 | This application also measures length and width of the object in millimeters. 17 | 18 | ## Requirements 19 | 20 | - Ubuntu 18.04 21 | - Intel® Distribution of OpenVINO™ toolkit 2020 R3 Release 22 | - Grafana* v5.3.2 23 | - InfluxDB* v1.6.2 24 | - Jupyter* Notebook v5.7.0 25 | 26 | ## Setup 27 | 28 | ### Install Intel® Distribution of OpenVINO™ toolkit 29 | 30 | Refer to [ Install the Intel® Distribution of OpenVINO™ toolkit for Linux*](https://software.intel.com/en-us/articles/OpenVINO-Install-Linux) for more information on how to install and set up the Intel® Distribution of OpenVINO™ toolkit 31 | 32 | ## How It works 33 | 34 | This application takes the input from a video camera or a video file for processing. 35 | 36 | ![Data Flow Diagram](../docs/images/architectural_diagram.png) 37 | 38 | **Orientation defect detection:** Get the frame and change the color space to HSV format. Threshold the image based on the color of the object using [inRange](https://docs.opencv.org/master/df/d9d/tutorial_py_colorspaces.html) function to create a mask. Perform morphological opening and closing on the mask and find the contours using [findContours](https://docs.opencv.org/master/d4/d73/tutorial_py_contours_begin.html) function. Filter the contours based on the area. Perform [PCA](https://docs.opencv.org/master/d1/dee/tutorial_introduction_to_pca.html) (Principal Component Analysis) on the contours to get the orientation of the object. 39 | 40 | ![orientation](../docs/images/orientation.jpg) 41 | 42 | **Color defect detection:** Threshold the image based on the defective color of the object using [inRange](https://docs.opencv.org/master/df/d9d/tutorial_py_colorspaces.html) function. Use the mask obtained from the [inRange](https://docs.opencv.org/master/df/d9d/tutorial_py_colorspaces.html) function to find the defective area. 
43 | 44 | ![color](../docs/images/color.jpg) 45 | 46 | **Crack detection:** Transform the image from BGR to Grayscale format using [cvtColor](https://docs.opencv.org/master/df/d9d/tutorial_py_colorspaces.html) function. Blur the image using [blur](https://docs.opencv.org/master/d4/d13/tutorial_py_filtering.html) function to remove the noises. Use the contours found on the blurred image to detect the cracks. 47 | 48 | ![crack](../docs/images/crack.jpg) 49 | 50 | Save the images of defective objects in their respective folders. For example, objects with color defect are saved in **color** folder, objects with cracks are saved in **crack** folder, objects with orientation defect are saved in **orientation** folder and objects with no defect are stored in **no_defect** folder. 51 | 52 | ## Setup 53 | 54 | ### Get the code 55 | 56 | Steps to clone the reference implementation: (object-flaw-detector-python) 57 | 58 | sudo apt-get update && sudo apt-get install git 59 | git clone https://gitlab.devtools.intel.com/reference-implementations/object-flaw-detector-python.git 60 | 61 | ### Install Intel® Distribution of OpenVINO™ toolkit 62 | Before running the application, install the Intel® Distribution of OpenVINO™ toolkit. For details, see [Installing the Intel® Distribution of OpenVINO™ toolkit for Linux*](https://software.intel.com/en-us/openvino-toolkit/choose-download/free-download-linux) 63 | 64 | ### Other dependencies 65 | #### InfluxDB* 66 | InfluxDB is a time series database designed to handle high write and query loads. It is an integral component of the TICK stack. InfluxDB is meant to be used as a backing store for any use case involving large amounts of timestamped data, including DevOps monitoring, application metrics, IoT sensor data, and real-time analytics. 67 | 68 | #### Grafana* 69 | Grafana is an open-source, general purpose dashboard and graph composer, which runs as a web application. 
It supports Graphite, InfluxDB, Prometheus, Google Stackdriver, AWS CloudWatch, Azure Monitor, Loki, MySQL, PostgreSQL, Microsoft SQL Server, Testdata, Mixed, OpenTSDB and Elasticsearch as backends. Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored. 70 | 71 | To install the dependencies of the RI, run the below command: 72 | ``` 73 | cd 74 | ./setup.sh 75 | ``` 76 | ### The Config File 77 | 78 | The _resources/config.json_ contains the path to the videos that will be used by the application. 79 | The _config.json_ file is of the form name/value pair, `video: ` 80 | 81 | Example of the _config.json_ file: 82 | 83 | ``` 84 | { 85 | 86 | "inputs": [ 87 | { 88 | "video": "videos/video1.mp4" 89 | } 90 | ] 91 | } 92 | ``` 93 | 94 | ### Which Input video to use 95 | 96 | The application works with any input video. Find sample videos for object detection [here](https://github.com/intel-iot-devkit/sample-videos/). 97 | 98 | For first-use, we recommend using the [bolt-detection](https://github.com/intel-iot-devkit/sample-videos/blob/master/bolt-detection.mp4) video.The video is automatically downloaded to the `resources/` folder. 99 | For example:
100 | The config.json would be: 101 | 102 | ``` 103 | { 104 | 105 | "inputs": [ 106 | { 107 | "video": "sample-videos/bolt-detection.mp4" 108 | } 109 | ] 110 | } 111 | ``` 112 | To use any other video, specify the path in config.json file 113 | 114 | ### Using the Camera instead of video 115 | 116 | Replace the path/to/video in the _resources/config.json_ file with the camera ID, where the ID is taken from the video device (the number X in /dev/videoX). 117 | 118 | On Ubuntu, list all available video devices with the following command: 119 | 120 | ``` 121 | ls /dev/video* 122 | ``` 123 | 124 | For example, if the output of above command is /dev/video0, then config.json would be:: 125 | 126 | ``` 127 | { 128 | 129 | "inputs": [ 130 | { 131 | "video": "0" 132 | } 133 | ] 134 | } 135 | ``` 136 | 137 | ### Setup the Environment 138 | 139 | Configure the environment to use the Intel® Distribution of OpenVINO™ toolkit once per session by running the **source** command on the command line: 140 | ``` 141 | source /opt/intel/openvino/bin/setupvars.sh 142 | ``` 143 | 144 | __Note__: This command needs to be executed only once in the terminal where the application will be executed. If the terminal is closed, the command needs to be executed again. 145 | 146 | ## Run the Code on Juptyer* 147 | 148 | * Go to the _object-flaw-detector-python directory_ and open the Jupyter notebook by running the following commands: 149 | 150 | ``` 151 | cd /Jupyter 152 | jupyter notebook 153 | ``` 154 | 155 | **Follow the steps to run the code on jupyter:** 156 | 157 | 1. Click on **New** button on the right side of the jupyter window. 158 | 159 | ![Jupyter Notebook](../docs/images/jupyter.png) 160 | 161 | 2. Click on **Python 3** option from the drop down list. 162 | 163 | 3. In the first cell type **import os** and press **Shift+Enter** from the keyboard. 164 | 165 | 4. 
If user wants to save defect image folders in specific directory then export environment variable (directory) in the next cell as given below and press **Shift+Enter**.
166 | **Note:** If user skips this step then defective image folders are saved in the working directory itself.
167 | %env directory =
168 | 169 | 5. If values of field of view of the camera in **degrees** and distance between object and camera in **millimeters** are available then user can export these environment variables(fieldofview, distance respectively) as given below to get accurate results and press **Shift+Enter**.
170 | **Note:** If user skips this step, these values are set to default values.
171 | %env fieldofview = 60
172 | %env distance = 30
173 | 174 | 6. Copy the code from **object_flaw_detector_jupyter.py** and paste it in the next cell and press **Shift+Enter**. 175 | 176 | 7. Alternatively, code can be run in the following way. 177 | 178 | i. Click on the **object_flaw_detector_jupyter.ipynb** file in the jupyter notebook window. 179 | 180 | ii. Click on the **Kernel** menu and then select **Restart & Run All** from the drop down list. 181 | 182 | iii. Click on **Restart and Run All Cells**. 183 | 184 | ![Jupyter Notebook](../docs/images/jupyter_code.png) 185 | 186 | - To check the data on InfluxDB, run the following commands: 187 | 188 | ``` 189 | influx 190 | show databases 191 | use obj_flaw_database 192 | select * from obj_flaw_detector 193 | ``` 194 | 195 | 196 | ### Data Visualization 197 | 198 | - If you wish to import settings to visualise the data on Grafana, follow steps below. 199 | 200 | 1. On the terminal, run the following command: 201 | 202 | ``` 203 | sudo service grafana-server start 204 | ``` 205 | 206 | 2. In your browser, go to localhost:3000. 207 | 208 | 3. Log in with user as **admin** and password as **admin**. 209 | 210 | 4. Click on **Configuration**. 211 | 212 | 5. Select **“Data Sources”**. 213 | 214 | 6. Click on **“+ Add data source”** and provide inputs below. 215 | 216 | - *Name*: Obj_flaw_detector 217 | - *Type*: InfluxDB 218 | - *URL*: http://localhost:8086 219 | - *Database*: obj_flaw_database 220 | - Click on “Save and Test” 221 | 222 | 7. Click on **+** icon present on the left side of the browser, select **import**. 223 | 224 | 8. Click on **Upload.json File**. 225 | 226 | 9. Select the file name "flaw_detector.json" from object-flaw-detector-python directory. 227 | 228 | 10. Click on import. 229 | 230 | 11. Run the python code again on the terminal to visualize data on grafana. 231 | 232 | - If you wish to start from scratch to visualize data on Grafana, follow the steps below. 233 | 234 | 1. On the terminal, run the following command. 
235 | 236 | ``` 237 | sudo service grafana-server start 238 | ``` 239 | 240 | 2. Open the browser, go to **localhost:3000**. 241 | 242 | 3. Log in with user as **admin** and password as **admin**. 243 | 244 | 4. Click on the **Configuration** icon and Select **“Data Sources”**. 245 | 246 | 5. Click on **“+ Add data source”** and provide inputs below. 247 | 248 | - *Name*: Obj_flaw_detector 249 | - *Type*: InfluxDB 250 | - *URL*: http://localhost:8086 251 | - *Database*: obj_flaw_database 252 | - Click on “Save and Test” 253 | 254 | ![Grafana1](../docs/images/grafana1.png) 255 | 256 | 6. To create a new Dashboard. 257 | 258 | - Select **+** icon from the side menu bar which is under grafana icon and select **Dashboard**. 259 | - Select **Graph**, click on the **Panel Title** and select **Edit**. 260 | - On the **Metrics** tab. 261 | 1. From **Datasource** choose **obj_flaw_detector**. 262 | 2. Click on the row just below the tab, starting with **“A”**. 263 | 3. Click on **select measurement** and select **obj_flaw_detector** . 264 | 4. From **SELECT** row, click on **fields** and select **Color**. Also click on **+** from the same row, select **aggregations** and click on **distinct()**. From **GROUP BY** row, click on **time** and select **1s**. Name the query as **color** in the **ALIAS BY** row. 265 | 5. Similarly from **Metrics** tab configure for **Crack**, **Orientation**, **No defect** and **Object Number** by clicking **Add Query**. 266 | - On the **Time range** tab, change the **override relative time** to **100s**. 267 | - Save the dashboard with name **flaw_detector**. 268 | 269 | 270 | ![Grafana2](../docs/images/grafana2.png) 271 | 272 | 273 | 7. Click on the **add panel** icon on the top menu. 274 | 275 | - Select **Table** , Click on the **Panel Title** and select **Edit** and follow the steps mentioned in the previous step for configuring **Metric** and **Time range** tab.
276 | - From the **Column Styles** tab, click on **+Add** and in the **Apply to columns named** give the name **color**, and also value **0** in the **Decimals**. 277 | - Similarly from **Column Styles** tab configure for **Crack**, **Orientation**, **No defect** and **Object Number** by clicking **+Add**. 278 | - Save the dashboard and click on **Back to dashboard** icon which is on right corner of the top menu. 279 | 280 | 8. Click on the **add panel** icon on the top menu. 281 | 282 | - Select **Singlestat**, Click on the **Panel Title** and select **Edit**. 283 | 1. From **Datasource** choose **obj_flaw_detector** and click on the row just below the tab, starting with **“A”**. 284 | 2. Click on **select measurement** and select **obj_flaw_detector**. 285 | 3. From **SELECT** row, click on **fields** and select **Object Number**. Also click on **+** from the same row, select **aggregations** and click on **sum()**. From **GROUP BY** row, click on **time** and select **1s**. Name the query as **Object Count** in the **ALIAS BY** row. 286 | - On the **Options** tab, select **show** under **Gauge** option and change the value of **decimals** to **0** under **Value** option. 287 | - Save the dashboard and click on **Back to dashboard** icon. 288 | 289 | 9. Mark the current directory as favourite by clicking on **Mark as favorite** icon on the top menu. 290 | 291 | 10. Select **Time picker** from the top menu of dashboard. Under **Custom range** change the **From** value to **now-10s** and **Refreshing every:** to **5s**, click on **Apply** and save the dashboard. 292 | 293 | 11. For re-testing, follow the steps below: 294 | 295 | - In a new browser tab or window, go to **http://localhost:3000/**. 296 | - Log in with user as **admin** and password as **admin**. 297 | - The **“Home Dashboard”** shows up the list of starred and Recently viewed dashboards. Select **flaw_detector**. 298 | 299 | ![Grafana3](../docs/images/grafana3.png) 300 | 301 | 12. 
Run the Python code again on the terminal to visualize data on Grafana. 302 | 303 | ![Grafana4](../docs/images/grafana4.png) 304 | 305 | **NOTE :** From the top right corner of the dashboard, select the **Range** option and set the **Refreshing every** option to **5s**. 306 | 307 | ## (Optional) Save Data to the Cloud 308 | 309 | As an optional step, send data results to an Amazon Web Services (AWS)* instance for graphing. 310 | 1. Make an EC2 Linux* instance on AWS. Steps are found [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_GetStarted.html). 311 | 2. Install InfluxDB on EC2 Linux instance. Download [here](https://github.com/influxdata/influxdb). 312 | 3. Download and install Grafana on EC2 Linux instance. Download [here](https://grafana.com/get). 313 | -------------------------------------------------------------------------------- /flaw_detector.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "type": "dashboard" 12 | } 13 | ] 14 | }, 15 | "editable": true, 16 | "gnetId": null, 17 | "graphTooltip": 0, 18 | "id": 5, 19 | "links": [], 20 | "panels": [ 21 | { 22 | "aliasColors": {}, 23 | "bars": false, 24 | "dashLength": 10, 25 | "dashes": false, 26 | "datasource": "Obj_flaw_detector", 27 | "fill": 1, 28 | "gridPos": { 29 | "h": 9, 30 | "w": 12, 31 | "x": 0, 32 | "y": 0 33 | }, 34 | "id": 6, 35 | "legend": { 36 | "avg": false, 37 | "current": false, 38 | "max": false, 39 | "min": false, 40 | "show": true, 41 | "total": false, 42 | "values": false 43 | }, 44 | "lines": true, 45 | "linewidth": 1, 46 | "nullPointMode": "null", 47 | "percentage": false, 48 | "pointradius": 5, 49 | "points": false, 50 | "renderer": "flot", 51 | "seriesOverrides": [], 52 | "spaceLength": 10, 53 | "stack": false, 54 
| "steppedLine": false, 55 | "targets": [ 56 | { 57 | "alias": "color", 58 | "groupBy": [ 59 | { 60 | "params": [ 61 | "1s" 62 | ], 63 | "type": "time" 64 | }, 65 | { 66 | "params": [ 67 | "null" 68 | ], 69 | "type": "fill" 70 | } 71 | ], 72 | "measurement": "obj_flaw_detector", 73 | "orderByTime": "ASC", 74 | "policy": "default", 75 | "refId": "A", 76 | "resultFormat": "time_series", 77 | "select": [ 78 | [ 79 | { 80 | "params": [ 81 | "Color" 82 | ], 83 | "type": "field" 84 | }, 85 | { 86 | "params": [], 87 | "type": "distinct" 88 | } 89 | ] 90 | ], 91 | "tags": [] 92 | }, 93 | { 94 | "alias": "crack", 95 | "groupBy": [ 96 | { 97 | "params": [ 98 | "1s" 99 | ], 100 | "type": "time" 101 | }, 102 | { 103 | "params": [ 104 | "null" 105 | ], 106 | "type": "fill" 107 | } 108 | ], 109 | "measurement": "obj_flaw_detector", 110 | "orderByTime": "ASC", 111 | "policy": "default", 112 | "refId": "B", 113 | "resultFormat": "time_series", 114 | "select": [ 115 | [ 116 | { 117 | "params": [ 118 | "Crack" 119 | ], 120 | "type": "field" 121 | }, 122 | { 123 | "params": [], 124 | "type": "distinct" 125 | } 126 | ] 127 | ], 128 | "tags": [] 129 | }, 130 | { 131 | "alias": "orientation", 132 | "groupBy": [ 133 | { 134 | "params": [ 135 | "1s" 136 | ], 137 | "type": "time" 138 | }, 139 | { 140 | "params": [ 141 | "null" 142 | ], 143 | "type": "fill" 144 | } 145 | ], 146 | "measurement": "obj_flaw_detector", 147 | "orderByTime": "ASC", 148 | "policy": "default", 149 | "refId": "C", 150 | "resultFormat": "time_series", 151 | "select": [ 152 | [ 153 | { 154 | "params": [ 155 | "Orientation" 156 | ], 157 | "type": "field" 158 | }, 159 | { 160 | "params": [], 161 | "type": "distinct" 162 | } 163 | ] 164 | ], 165 | "tags": [] 166 | }, 167 | { 168 | "alias": "no defect", 169 | "groupBy": [ 170 | { 171 | "params": [ 172 | "1s" 173 | ], 174 | "type": "time" 175 | }, 176 | { 177 | "params": [ 178 | "null" 179 | ], 180 | "type": "fill" 181 | } 182 | ], 183 | "measurement": "obj_flaw_detector", 
184 | "orderByTime": "ASC", 185 | "policy": "default", 186 | "refId": "D", 187 | "resultFormat": "time_series", 188 | "select": [ 189 | [ 190 | { 191 | "params": [ 192 | "No defect" 193 | ], 194 | "type": "field" 195 | }, 196 | { 197 | "params": [], 198 | "type": "distinct" 199 | } 200 | ] 201 | ], 202 | "tags": [] 203 | }, 204 | { 205 | "alias": "object number", 206 | "groupBy": [ 207 | { 208 | "params": [ 209 | "1s" 210 | ], 211 | "type": "time" 212 | }, 213 | { 214 | "params": [ 215 | "null" 216 | ], 217 | "type": "fill" 218 | } 219 | ], 220 | "measurement": "obj_flaw_detector", 221 | "orderByTime": "ASC", 222 | "policy": "default", 223 | "refId": "E", 224 | "resultFormat": "time_series", 225 | "select": [ 226 | [ 227 | { 228 | "params": [ 229 | "Object Number" 230 | ], 231 | "type": "field" 232 | }, 233 | { 234 | "params": [], 235 | "type": "distinct" 236 | } 237 | ] 238 | ], 239 | "tags": [] 240 | } 241 | ], 242 | "thresholds": [], 243 | "timeFrom": "100s", 244 | "timeShift": null, 245 | "title": "Panel Title", 246 | "tooltip": { 247 | "shared": true, 248 | "sort": 0, 249 | "value_type": "individual" 250 | }, 251 | "type": "graph", 252 | "xaxis": { 253 | "buckets": null, 254 | "mode": "time", 255 | "name": null, 256 | "show": true, 257 | "values": [] 258 | }, 259 | "yaxes": [ 260 | { 261 | "format": "short", 262 | "label": null, 263 | "logBase": 1, 264 | "max": null, 265 | "min": null, 266 | "show": true 267 | }, 268 | { 269 | "format": "short", 270 | "label": null, 271 | "logBase": 1, 272 | "max": null, 273 | "min": null, 274 | "show": true 275 | } 276 | ], 277 | "yaxis": { 278 | "align": false, 279 | "alignLevel": null 280 | } 281 | }, 282 | { 283 | "columns": [], 284 | "datasource": "Obj_flaw_detector", 285 | "fontSize": "100%", 286 | "gridPos": { 287 | "h": 9, 288 | "w": 12, 289 | "x": 12, 290 | "y": 0 291 | }, 292 | "id": 2, 293 | "pageSize": null, 294 | "scroll": false, 295 | "showHeader": true, 296 | "sort": { 297 | "col": null, 298 | "desc": false 299 
| }, 300 | "styles": [ 301 | { 302 | "alias": "Time", 303 | "dateFormat": "YYYY-MM-DD HH:mm:ss", 304 | "pattern": "Time", 305 | "type": "date" 306 | }, 307 | { 308 | "alias": "", 309 | "colorMode": null, 310 | "colors": [ 311 | "rgba(245, 54, 54, 0.9)", 312 | "rgba(237, 129, 40, 0.89)", 313 | "rgba(50, 172, 45, 0.97)" 314 | ], 315 | "dateFormat": "YYYY-MM-DD HH:mm:ss", 316 | "decimals": 0, 317 | "mappingType": 1, 318 | "pattern": "color", 319 | "thresholds": [], 320 | "type": "number", 321 | "unit": "short" 322 | }, 323 | { 324 | "alias": "", 325 | "colorMode": null, 326 | "colors": [ 327 | "rgba(245, 54, 54, 0.9)", 328 | "rgba(237, 129, 40, 0.89)", 329 | "rgba(50, 172, 45, 0.97)" 330 | ], 331 | "dateFormat": "YYYY-MM-DD HH:mm:ss", 332 | "decimals": 0, 333 | "mappingType": 1, 334 | "pattern": "crack", 335 | "thresholds": [], 336 | "type": "number", 337 | "unit": "short" 338 | }, 339 | { 340 | "alias": "", 341 | "colorMode": null, 342 | "colors": [ 343 | "rgba(245, 54, 54, 0.9)", 344 | "rgba(237, 129, 40, 0.89)", 345 | "rgba(50, 172, 45, 0.97)" 346 | ], 347 | "dateFormat": "YYYY-MM-DD HH:mm:ss", 348 | "decimals": 0, 349 | "mappingType": 1, 350 | "pattern": "orientation", 351 | "thresholds": [], 352 | "type": "number", 353 | "unit": "short" 354 | }, 355 | { 356 | "alias": "", 357 | "colorMode": null, 358 | "colors": [ 359 | "rgba(245, 54, 54, 0.9)", 360 | "rgba(237, 129, 40, 0.89)", 361 | "rgba(50, 172, 45, 0.97)" 362 | ], 363 | "dateFormat": "YYYY-MM-DD HH:mm:ss", 364 | "decimals": 0, 365 | "mappingType": 1, 366 | "pattern": "no defect", 367 | "thresholds": [], 368 | "type": "number", 369 | "unit": "short" 370 | }, 371 | { 372 | "alias": "", 373 | "colorMode": null, 374 | "colors": [ 375 | "rgba(245, 54, 54, 0.9)", 376 | "rgba(237, 129, 40, 0.89)", 377 | "rgba(50, 172, 45, 0.97)" 378 | ], 379 | "dateFormat": "YYYY-MM-DD HH:mm:ss", 380 | "decimals": 0, 381 | "mappingType": 1, 382 | "pattern": "object number", 383 | "thresholds": [], 384 | "type": "number", 385 | 
"unit": "short" 386 | }, 387 | { 388 | "alias": "", 389 | "colorMode": null, 390 | "colors": [ 391 | "rgba(245, 54, 54, 0.9)", 392 | "rgba(237, 129, 40, 0.89)", 393 | "rgba(50, 172, 45, 0.97)" 394 | ], 395 | "decimals": 2, 396 | "pattern": "/.*/", 397 | "thresholds": [], 398 | "type": "number", 399 | "unit": "short" 400 | } 401 | ], 402 | "targets": [ 403 | { 404 | "alias": "color", 405 | "groupBy": [ 406 | { 407 | "params": [ 408 | "1s" 409 | ], 410 | "type": "time" 411 | }, 412 | { 413 | "params": [ 414 | "null" 415 | ], 416 | "type": "fill" 417 | } 418 | ], 419 | "measurement": "obj_flaw_detector", 420 | "orderByTime": "ASC", 421 | "policy": "default", 422 | "refId": "A", 423 | "resultFormat": "time_series", 424 | "select": [ 425 | [ 426 | { 427 | "params": [ 428 | "Color" 429 | ], 430 | "type": "field" 431 | }, 432 | { 433 | "params": [], 434 | "type": "distinct" 435 | } 436 | ] 437 | ], 438 | "tags": [] 439 | }, 440 | { 441 | "alias": "crack", 442 | "groupBy": [ 443 | { 444 | "params": [ 445 | "1s" 446 | ], 447 | "type": "time" 448 | }, 449 | { 450 | "params": [ 451 | "null" 452 | ], 453 | "type": "fill" 454 | } 455 | ], 456 | "measurement": "obj_flaw_detector", 457 | "orderByTime": "ASC", 458 | "policy": "default", 459 | "refId": "B", 460 | "resultFormat": "time_series", 461 | "select": [ 462 | [ 463 | { 464 | "params": [ 465 | "Crack" 466 | ], 467 | "type": "field" 468 | }, 469 | { 470 | "params": [], 471 | "type": "distinct" 472 | } 473 | ] 474 | ], 475 | "tags": [] 476 | }, 477 | { 478 | "alias": "orientation", 479 | "groupBy": [ 480 | { 481 | "params": [ 482 | "1s" 483 | ], 484 | "type": "time" 485 | }, 486 | { 487 | "params": [ 488 | "null" 489 | ], 490 | "type": "fill" 491 | } 492 | ], 493 | "measurement": "obj_flaw_detector", 494 | "orderByTime": "ASC", 495 | "policy": "default", 496 | "refId": "C", 497 | "resultFormat": "time_series", 498 | "select": [ 499 | [ 500 | { 501 | "params": [ 502 | "Orientation" 503 | ], 504 | "type": "field" 505 | }, 506 | 
{ 507 | "params": [], 508 | "type": "distinct" 509 | } 510 | ] 511 | ], 512 | "tags": [] 513 | }, 514 | { 515 | "alias": "no defect", 516 | "groupBy": [ 517 | { 518 | "params": [ 519 | "1s" 520 | ], 521 | "type": "time" 522 | }, 523 | { 524 | "params": [ 525 | "null" 526 | ], 527 | "type": "fill" 528 | } 529 | ], 530 | "measurement": "obj_flaw_detector", 531 | "orderByTime": "ASC", 532 | "policy": "default", 533 | "refId": "D", 534 | "resultFormat": "time_series", 535 | "select": [ 536 | [ 537 | { 538 | "params": [ 539 | "No defect" 540 | ], 541 | "type": "field" 542 | }, 543 | { 544 | "params": [], 545 | "type": "distinct" 546 | } 547 | ] 548 | ], 549 | "tags": [] 550 | }, 551 | { 552 | "alias": "object number", 553 | "groupBy": [ 554 | { 555 | "params": [ 556 | "1s" 557 | ], 558 | "type": "time" 559 | }, 560 | { 561 | "params": [ 562 | "null" 563 | ], 564 | "type": "fill" 565 | } 566 | ], 567 | "measurement": "obj_flaw_detector", 568 | "orderByTime": "ASC", 569 | "policy": "default", 570 | "refId": "E", 571 | "resultFormat": "time_series", 572 | "select": [ 573 | [ 574 | { 575 | "params": [ 576 | "Object Number" 577 | ], 578 | "type": "field" 579 | }, 580 | { 581 | "params": [], 582 | "type": "distinct" 583 | } 584 | ] 585 | ], 586 | "tags": [] 587 | } 588 | ], 589 | "timeFrom": "100s", 590 | "title": "Panel Title", 591 | "transform": "timeseries_to_columns", 592 | "type": "table" 593 | }, 594 | { 595 | "cacheTimeout": null, 596 | "colorBackground": false, 597 | "colorValue": false, 598 | "colors": [ 599 | "#299c46", 600 | "rgba(237, 129, 40, 0.89)", 601 | "#d44a3a" 602 | ], 603 | "datasource": "Obj_flaw_detector", 604 | "decimals": 0, 605 | "format": "none", 606 | "gauge": { 607 | "maxValue": 100, 608 | "minValue": 0, 609 | "show": true, 610 | "thresholdLabels": false, 611 | "thresholdMarkers": true 612 | }, 613 | "gridPos": { 614 | "h": 9, 615 | "w": 12, 616 | "x": 0, 617 | "y": 9 618 | }, 619 | "id": 4, 620 | "interval": null, 621 | "links": [], 622 | 
"mappingType": 1, 623 | "mappingTypes": [ 624 | { 625 | "name": "value to text", 626 | "value": 1 627 | }, 628 | { 629 | "name": "range to text", 630 | "value": 2 631 | } 632 | ], 633 | "maxDataPoints": 100, 634 | "nullPointMode": "connected", 635 | "nullText": null, 636 | "postfix": "", 637 | "postfixFontSize": "50%", 638 | "prefix": "", 639 | "prefixFontSize": "50%", 640 | "rangeMaps": [ 641 | { 642 | "from": "null", 643 | "text": "N/A", 644 | "to": "null" 645 | } 646 | ], 647 | "sparkline": { 648 | "fillColor": "rgba(31, 118, 189, 0.18)", 649 | "full": false, 650 | "lineColor": "rgb(31, 120, 193)", 651 | "show": false 652 | }, 653 | "tableColumn": "", 654 | "targets": [ 655 | { 656 | "alias": "Object count", 657 | "groupBy": [ 658 | { 659 | "params": [ 660 | "1s" 661 | ], 662 | "type": "time" 663 | }, 664 | { 665 | "params": [ 666 | "null" 667 | ], 668 | "type": "fill" 669 | } 670 | ], 671 | "measurement": "obj_flaw_detector", 672 | "orderByTime": "ASC", 673 | "policy": "default", 674 | "refId": "A", 675 | "resultFormat": "time_series", 676 | "select": [ 677 | [ 678 | { 679 | "params": [ 680 | "Object Number" 681 | ], 682 | "type": "field" 683 | }, 684 | { 685 | "params": [], 686 | "type": "sum" 687 | } 688 | ] 689 | ], 690 | "tags": [] 691 | } 692 | ], 693 | "thresholds": "", 694 | "title": "Panel Title", 695 | "type": "singlestat", 696 | "valueFontSize": "80%", 697 | "valueMaps": [ 698 | { 699 | "op": "=", 700 | "text": "N/A", 701 | "value": "null" 702 | } 703 | ], 704 | "valueName": "avg" 705 | } 706 | ], 707 | "refresh": "5s", 708 | "schemaVersion": 16, 709 | "style": "dark", 710 | "tags": [], 711 | "templating": { 712 | "list": [] 713 | }, 714 | "time": { 715 | "from": "now-10s", 716 | "to": "now" 717 | }, 718 | "timepicker": { 719 | "refresh_intervals": [ 720 | "5s", 721 | "10s", 722 | "30s", 723 | "1m", 724 | "5m", 725 | "15m", 726 | "30m", 727 | "1h", 728 | "2h", 729 | "1d" 730 | ], 731 | "time_options": [ 732 | "5m", 733 | "15m", 734 | "1h", 735 | "6h", 
736 | "12h", 737 | "24h", 738 | "2d", 739 | "7d", 740 | "30d" 741 | ] 742 | }, 743 | "timezone": "", 744 | "title": "OBJECT FLAW DETECTOR", 745 | "uid": "vgCy0uBmz", 746 | "version": 9 747 | } -------------------------------------------------------------------------------- /Jupyter/object_flaw_detector.py: -------------------------------------------------------------------------------- 1 | """Object flaw detector.""" 2 | """ 3 | * Copyright (c) 2018 Intel Corporation. 4 | * 5 | * Permission is hereby granted, free of charge, to any person obtaining 6 | * a copy of this software and associated documentation files (the 7 | * "Software"), to deal in the Software without restriction, including 8 | * without limitation the rights to use, copy, modify, merge, publish, 9 | * distribute, sublicense, and/or sell copies of the Software, and to 10 | * permit persons to whom the Software is furnished to do so, subject to 11 | * the following conditions: 12 | * 13 | * The above copyright notice and this permission notice shall be 14 | * included in all copies or substantial portions of the Software. 15 | * 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
"""

import socket
import math
import sys
import cv2
import os
import json
from argparse import ArgumentParser  # NOTE(review): imported but unused in the visible code
from influxdb import InfluxDBClient
from math import atan2

import numpy as np

# GLOBAL Variables
CONFIG_FILE = '../resources/config.json'

# Bounding boxes outside this pixel-area range are ignored as non-objects.
OBJECT_AREA_MIN = 9000
OBJECT_AREA_MAX = 50000
LOW_H = 0
LOW_S = 0
LOW_V = 47
# Thresholding of an Image in a color range
HIGH_H = 179
HIGH_S = 255
HIGH_V = 255
# Lower and upper value of color Range of the object
# for color thresholding to detect the object
LOWER_COLOR_RANGE = (0, 0, 0)
UPPER_COLOR_RANGE = (174, 73, 255)
COUNT_OBJECT = 0
HEIGHT_OF_OBJ = 0
WIDTH_OF_OBJ = 0

# Overlay label shown on every output frame; refreshed per object in
# flaw_detection().
OBJECT_COUNT = "Object Number : {}".format(COUNT_OBJECT)

# NOTE(review): `input` shadows the Python builtin of the same name.
input = ''
base_dir = os.getcwd()
distance = 0
fieldofview = 0


def dimensions(box):
    """
    Return the length and width of the object.

    :param box: consists of top left, right and bottom left, right co-ordinates
    :return: Length and width of the object (longer side first)
    """
    (tl, tr, br, bl) = box
    # Euclidean length of the two adjacent sides of the rotated box.
    x = int(
        math.sqrt(math.pow((bl[0] - tl[0]), 2) + math.pow((bl[1] - tl[1]), 2)))
    y = int(
        math.sqrt(math.pow((tl[0] - tr[0]), 2) + math.pow((tl[1] - tr[1]), 2)))

    if x > y:
        return x, y
    else:
        return y, x


def get_ip_address():
    """
    Return IP address details of the server.

    :return: tuple (ipaddress, port, proxy) where port is the InfluxDB
             default 8086 and proxy is an http proxy mapping for it
    """
    hostname = socket.gethostname()
    ipaddress = socket.gethostbyname(hostname)
    port = 8086
    proxy = {"http": "http://{}:{}".format(ipaddress, port)}
    return ipaddress, port, proxy


def get_orientation(contours):
    """
    Gives the angle of the orientation of the object in radians.
    Step 1: Convert 3D matrix of contours to 2D.
    Step 2: Apply PCA algorithm to find angle of the data points.

    :param contours: contour of the object from the frame
    :return: angle of orientation of the object in radians
    """
    size_points = len(contours)
    # data_pts stores contour values in 2D
    data_pts = np.empty((size_points, 2), dtype=np.float64)
    for i in range(data_pts.shape[0]):
        data_pts[i, 0] = contours[i, 0, 0]
        data_pts[i, 1] = contours[i, 0, 1]
    # Use PCA algorithm to find angle of the data points
    mean, eigenvector = cv2.PCACompute(data_pts, mean=None)
    # Angle of the principal axis relative to the x axis.
    angle = atan2(eigenvector[0, 1], eigenvector[0, 0])
    return angle


def detect_orientation(frame, contours):
    """
    Identifies the Orientation of the object based on the detected angle.
    If angle is greater than 0.5 radians the object is flagged, annotated
    and a crop is saved to the "orientation" folder under base_dir.

    :param frame: Input frame from video
    :param contours: contour of the object from the frame
    :return: defect_flag, defect
    """
    defect = "Orientation"
    global OBJECT_COUNT
    # Find the orientation of each contour
    angle = get_orientation(contours)
    # If angle is less than 0.5 then no orientation defect is present
    if angle < 0.5:
        defect_flag = False
    else:
        x, y, w, h = cv2.boundingRect(contours)
        print("Orientation defect detected in object {}".format(COUNT_OBJECT))
        defect_flag = True
        # Save the cropped defective object for later inspection.
        cv2.imwrite("{}/orientation/Orientation_{}.png"
                    .format(base_dir, COUNT_OBJECT),
                    frame[y: y + h, x: x + w])
        cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Defect: {}".format(defect), (5, 140),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Length (mm): {}".format(HEIGHT_OF_OBJ), (5, 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Width (mm): {}".format(WIDTH_OF_OBJ), (5, 110),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.imshow("Out", frame)
        # Hold the annotated frame on screen for 2 seconds.
        cv2.waitKey(2000)
    return defect_flag, defect


def detect_color(frame, cnt):
    """
    Identifies the color defect W.R.T the set default color of the object.
    Step 1: Increase the brightness of the image.
    Step 2: Convert the image to HSV Format. HSV color space gives more
            information about the colors of the image.
            It helps to identify distinct colors in the image.
    Step 3: Threshold the image based on the color using "inRange" function.
            Range of the color, which is considered as a defect for object, is
            passed as one of the argument to inRange function to create a mask.
    Step 4: Morphological opening and closing is done on the mask to remove
            noises and fill the gaps.
    Step 5: Find the contours on the mask image. Contours are filtered based on
            the area to get the contours of defective area. Contour of the
            defective area is then drawn on the original image to visualize.
    Step 6: Save the image in "color" folder if it has a color defect.

    :param frame: Input frame from the video
    :param cnt: Contours of the object
    :return: color_flag, defect
    """
    defect = "Color"
    global OBJECT_COUNT
    color_flag = False
    # Increase the brightness of the image
    cv2.convertScaleAbs(frame, frame, 1, 20)
    # Convert the captured frame from BGR to HSV
    img_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Threshold the image
    img_threshold = cv2.inRange(img_hsv, LOWER_COLOR_RANGE, UPPER_COLOR_RANGE)
    # Morphological opening (remove small objects from the foreground)
    img_threshold = cv2.erode(img_threshold,
                              kernel=cv2.getStructuringElement(
                                  cv2.MORPH_ELLIPSE, (5, 5)))
    img_threshold = cv2.dilate(img_threshold,
                               kernel=cv2.getStructuringElement(
                                   cv2.MORPH_ELLIPSE, (5, 5)))
    contours, hierarchy = cv2.findContours(img_threshold, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_NONE)
    for i in range(len(contours)):
        area = cv2.contourArea(contours[i])
        # Only mid-sized blobs count as a color defect region.
        if 2000 < area < 10000:
            cv2.drawContours(frame, contours[i], -1, (0, 0, 255), 2)
            color_flag = True
    if color_flag:
        x, y, w, h = cv2.boundingRect(cnt)
        print("Color defect detected in object {}".format(COUNT_OBJECT))
        cv2.imwrite("{}/color/Color_{}.png".format(base_dir, COUNT_OBJECT),
                    frame[y: y + h, x: x + w])
        cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Defect: {}".format(defect), (5, 140),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Length (mm): {}".format(HEIGHT_OF_OBJ), (5, 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Width (mm): {}".format(WIDTH_OF_OBJ), (5, 110),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.imshow("Out", frame)
        cv2.waitKey(2000)
    return color_flag, defect


def detect_crack(frame, cnt):
    """
    Identify the Crack defect on the object.
    Step 1: Convert the image to gray scale.
    Step 2: Blur the gray image to remove the noises.
    Step 3: Find the edges on the blurred image to get the contours of
            possible cracks.
    Step 4: Filter the contours to get the contour of the crack.
    Step 5: Draw the contour on the orignal image for visualization.
    Step 6: Save the image in "crack" folder if it has crack defect.

    :param frame: Input frame from the video
    :param cnt: Contours of the object
    :return: defect_flag, defect
    """
    defect = "Crack"
    global OBJECT_COUNT
    defect_flag = False
    low_threshold = 130
    kernel_size = 3
    ratio = 3
    # Convert the captured frame from BGR to GRAY
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    img = cv2.blur(img, (7, 7))
    # Find the edges
    detected_edges = cv2.Canny(img, low_threshold,
                               low_threshold * ratio, kernel_size)
    # Find the contours
    contours, hierarchy = cv2.findContours(detected_edges, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_NONE)

    if len(contours) != 0:
        for i in range(len(contours)):
            area = cv2.contourArea(contours[i])
            # NOTE(review): this keeps contours OUTSIDE the 9..20 area band
            # (very small or very large edge blobs) — confirm the intent.
            if area > 20 or area < 9:
                cv2.drawContours(frame, contours, i, (0, 255, 0), 2)
                defect_flag = True

    if defect_flag:
        x, y, w, h = cv2.boundingRect(cnt)
        print("Crack defect detected in object {}".format(COUNT_OBJECT))
        cv2.imwrite("{}/crack/Crack_{}.png".format(base_dir, COUNT_OBJECT),
                    frame[y: y + h, x: x + w])
        cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Defect: {}".format(defect), (5, 140),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Length (mm): {}".format(HEIGHT_OF_OBJ),
                    (5, 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Width (mm): {}".format(WIDTH_OF_OBJ), (5, 110),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.imshow("Out", frame)
        cv2.waitKey(2000)
    return defect_flag, defect


def update_data(input_data):
    """
    To update database with input_data.
    Step 1: Write given data points to the database.
    Step 2: Use SELECT statement to query the database.

    :param input_data: JSON body consisting of object number and defect values
    """
    # NOTE(review): `client` (InfluxDBClient) is created by startup code
    # outside this view — confirm it exists before calling.
    client.write_points([input_data])
    client.query('SELECT * from "obj_flaw_detector"')


def flaw_detection():
    """
    Measurement and defects such as color, crack and orientation of the object
    are found.

    :return: None
    """
    global HEIGHT_OF_OBJ
    global WIDTH_OF_OBJ
    global COUNT_OBJECT
    global OBJ_DEFECT
    global FRAME_COUNT
    global OBJECT_COUNT

    # NOTE(review): cap, frame_number, one_pixel_length, FRAME_COUNT and
    # OBJ_DEFECT are expected to be initialized by the startup code (partly
    # outside this view) — confirm before calling standalone.
    while cap.isOpened():
        # Read the frame from the stream
        ret, frame = cap.read()

        if not ret:
            break

        FRAME_COUNT += 1

        # Check every given frame number
        # (Number chosen based on the frequency of object on conveyor belt)
        if FRAME_COUNT % frame_number == 0:
            HEIGHT_OF_OBJ = 0
            WIDTH_OF_OBJ = 0
            OBJ_DEFECT = []
            data_base = []
            # Convert BGR image to HSV color space
            img_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            # Thresholding of an Image in a color range
            img_threshold = cv2.inRange(img_hsv, (LOW_H, LOW_S, LOW_V),
                                        (HIGH_H, HIGH_S, HIGH_V))

            # Morphological opening(remove small objects from the foreground)
            img_threshold = cv2.erode(img_threshold,
                                      cv2.getStructuringElement(
                                          cv2.MORPH_ELLIPSE, (5, 5)))
            img_threshold = cv2.dilate(img_threshold,
                                       cv2.getStructuringElement(
                                           cv2.MORPH_ELLIPSE, (5, 5)))

            # Morphological closing(fill small holes in the foreground)
            img_threshold = cv2.dilate(img_threshold,
                                       cv2.getStructuringElement(
                                           cv2.MORPH_ELLIPSE, (5, 5)))
            img_threshold = cv2.erode(img_threshold,
                                      cv2.getStructuringElement(
                                          cv2.MORPH_ELLIPSE, (5, 5)))

            # Find the contours on the image
            contours, hierarchy = cv2.findContours(img_threshold,
                                                   cv2.RETR_LIST,
                                                   cv2.CHAIN_APPROX_NONE)

            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                if OBJECT_AREA_MAX > w * h > OBJECT_AREA_MIN:
                    box = cv2.minAreaRect(cnt)
                    box = cv2.boxPoints(box)
                    height, width = dimensions(np.array(box, dtype='int'))
                    # one_pixel_length is in cm/pixel; *10 converts to mm.
                    HEIGHT_OF_OBJ = round(height * one_pixel_length * 10, 2)
                    WIDTH_OF_OBJ = round(width * one_pixel_length * 10, 2)
                    COUNT_OBJECT += 1
                    # Each detector annotates its own copy of the frame.
                    frame_orient = frame.copy()
                    frame_clr = frame.copy()
                    frame_crack = frame.copy()
                    frame_nodefect = frame.copy()
                    OBJECT_COUNT = "Object Number : {}".format(COUNT_OBJECT)

                    # Check for the orientation of the object
                    orientation_flag, orientation_defect = \
                        detect_orientation(frame_orient, cnt)
                    if orientation_flag:
                        value = 1
                        data_base.append(value)
                        OBJ_DEFECT.append(str(orientation_defect))
                    else:
                        value = 0
                        data_base.append(value)

                    # Check for the color defect of the object
                    color_flag, color_defect = detect_color(frame_clr, cnt)
                    if color_flag:
                        value = 1
                        data_base.append(value)
                        OBJ_DEFECT.append(str(color_defect))
                    else:
                        value = 0
                        data_base.append(value)

                    # Check for the crack defect of the object
                    crack_flag, crack_defect = detect_crack(frame_crack, cnt)
                    if crack_flag:
                        value = 1
                        data_base.append(value)
                        OBJ_DEFECT.append(str(crack_defect))
                    else:
                        value = 0
                        data_base.append(value)

                    # Check if none of the defect is found
                    if not OBJ_DEFECT:
                        value = 1
                        data_base.append(value)
                        defect = "No Defect"
                        OBJ_DEFECT.append(defect)
                        print("No defect detected in object {}"
                              .format(COUNT_OBJECT))
                        cv2.putText(frame_nodefect,
                                    "Length (mm): {}".format(HEIGHT_OF_OBJ),
                                    (5, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                    (255, 255, 255), 2)
                        cv2.putText(frame_nodefect,
                                    "Width (mm): {}".format(WIDTH_OF_OBJ),
                                    (5, 110), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                    (255, 255, 255), 2)
                        cv2.imwrite("{}/no_defect/Nodefect_{}.png".format(
                            base_dir, COUNT_OBJECT),
                            frame[y: y + h,
                                  x: x + w])
                    else:
                        value = 0
                        data_base.append(value)
                    print("Length (mm) = {}, width (mm) = {}".format(
                        HEIGHT_OF_OBJ, WIDTH_OF_OBJ))
                    if not OBJ_DEFECT:
                        continue

                    # Create json_body to store the defects
                    else:
                        json_body = {
                            "measurement": "obj_flaw_detector",
                            "tags": {
                                "user": "User"
                            },

                            "fields": {
                                "Object Number": COUNT_OBJECT,
                                "Orientation": data_base[0],
                                "Color": data_base[1],
                                "Crack": data_base[2],
                                "No defect": data_base[3]
                            }
                        }
                        # Send json_body to influxdb
                        update_data(json_body)

        all_defects = " ".join(OBJ_DEFECT)
        cv2.putText(frame, "Press q to quit", (410, 50),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
        cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Defect: {}".format(all_defects), (5, 140),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Length (mm): {}".format(HEIGHT_OF_OBJ), (5, 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.putText(frame, "Width (mm): {}".format(WIDTH_OF_OBJ), (5, 110),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
        cv2.imshow("Out", frame)
        # 113 = 'q', 81 = 'Q'
        keypressed = cv2.waitKey(40)
        if keypressed == 113 or keypressed == 81:
            break
    cv2.destroyAllWindows()
    cap.release()


if __name__ == '__main__':

    # Optional overrides supplied through environment variables
    # (see the README / %env instructions).
    if "distance" in os.environ:
        distance = float(os.environ["distance"])
    if "directory" in os.environ:
        base_dir = os.environ["directory"]
    if "fieldofview" in os.environ:
        fieldofview = float(os.environ["fieldofview"])

    assert os.path.isfile(CONFIG_FILE), "{} file doesn't exist".format(CONFIG_FILE)
    config = json.loads(open(CONFIG_FILE).read())

    for idx, item in enumerate(config['inputs']):
        # A numeric "video" entry selects a camera index, otherwise a file.
        if item['video'].isdigit():
            input_stream = int(item['video'])
            cap = cv2.VideoCapture(input_stream)
            if not cap.isOpened():
                print("\nCamera not plugged in... Exiting...\n")
                sys.exit(0)
            fps = cap.get(cv2.CAP_PROP_FPS)
            delay = int(1000 / fps)
        else:
            input_stream = item['video']
            cap = cv2.VideoCapture(input_stream)
            if not cap.isOpened():
                print("\nUnable to open video file... Exiting...\n")
                sys.exit(0)
            fps = cap.get(cv2.CAP_PROP_FPS)
            delay = int(1000 / fps)

    if distance != 0 and fieldofview != 0:
        width_of_video = cap.get(3)
        height_of_video = cap.get(4)
        # Convert degrees to radians
        radians = (fieldofview / 2) * 0.0174533
        # Calculate the diagonal length of image in millimeters using
        # field of view of camera and distance between object and camera.
        diagonal_length_of_image_plane = abs(2 * (distance / 10) * math.tan(radians))
        # Calculate diagonal length of image in pixel
        diagonal_length_in_pixel = math.sqrt(math.pow(width_of_video, 2)
                                             + math.pow(height_of_video, 2))
        # Convert one pixel value in millimeters
        one_pixel_length = (diagonal_length_of_image_plane / diagonal_length_in_pixel)
    # If distance between camera and object and field of view of camera
    # are not provided, then 96 pixels per inch is considered.
504 | # pixel_lengh = 2.54 cm (1 inch) / 96 pixels 505 | else: 506 | one_pixel_length = 0.0264583333 507 | 508 | dir_names = ["crack", "color", "orientation", "no_defect"] 509 | OBJ_DEFECT = [] 510 | frame_number = 40 511 | FRAME_COUNT = 0 512 | 513 | # Get ipaddress from the get_ip_address 514 | ipaddress, port, proxy, = get_ip_address() 515 | database = 'obj_flaw_database' 516 | client = InfluxDBClient(host=ipaddress, port=port, 517 | database=database, proxies=proxy) 518 | client.create_database(database) 519 | 520 | # create folders with the given dir_names to save defective objects 521 | for i in range(len(dir_names)): 522 | if not os.path.exists(os.path.join(base_dir, dir_names[i])): 523 | os.makedirs(os.path.join(base_dir, dir_names[i])) 524 | else: 525 | file_list = os.listdir(os.path.join(base_dir, dir_names[i])) 526 | for f in file_list: 527 | os.remove(os.path.join(base_dir, dir_names[i], f)) 528 | # Find dimensions and flaw detections such as color, crack and orientation 529 | # of the object. 530 | flaw_detection() 531 | -------------------------------------------------------------------------------- /application/object_flaw_detector.py: -------------------------------------------------------------------------------- 1 | """Object flaw detector.""" 2 | """ 3 | * Copyright (c) 2018 Intel Corporation. 4 | * 5 | * Permission is hereby granted, free of charge, to any person obtaining 6 | * a copy of this software and associated documentation files (the 7 | * "Software"), to deal in the Software without restriction, including 8 | * without limitation the rights to use, copy, modify, merge, publish, 9 | * distribute, sublicense, and/or sell copies of the Software, and to 10 | * permit persons to whom the Software is furnished to do so, subject to 11 | * the following conditions: 12 | * 13 | * The above copyright notice and this permission notice shall be 14 | * included in all copies or substantial portions of the Software. 
15 | * 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | * 24 | """ 25 | 26 | 27 | import socket 28 | import math 29 | import sys 30 | import cv2 31 | import os 32 | import json 33 | from argparse import ArgumentParser 34 | from influxdb import InfluxDBClient 35 | from math import atan2 36 | 37 | import numpy as np 38 | 39 | # GLOBAL Variables 40 | CONFIG_FILE = '../resources/config.json' 41 | 42 | OBJECT_AREA_MIN = 9000 43 | OBJECT_AREA_MAX = 50000 44 | LOW_H = 0 45 | LOW_S = 0 46 | LOW_V = 47 47 | # Thresholding of an Image in a color range 48 | HIGH_H = 179 49 | HIGH_S = 255 50 | HIGH_V = 255 51 | # Lower and upper value of color Range of the object 52 | # for color thresholding to detect the object 53 | LOWER_COLOR_RANGE = (0, 0, 0) 54 | UPPER_COLOR_RANGE = (174, 73, 255) 55 | COUNT_OBJECT = 0 56 | HEIGHT_OF_OBJ = 0 57 | WIDTH_OF_OBJ = 0 58 | 59 | OBJECT_COUNT = "Object Number : {}".format(COUNT_OBJECT) 60 | 61 | 62 | def build_argparser(): 63 | """ 64 | Parse the command line arguments. 
65 | 66 | :return: command line arguments 67 | """ 68 | parser = ArgumentParser() 69 | parser.add_argument('-dir', '--directory', 70 | required=False, 71 | help="Name of the directory to " 72 | "which defective images are saved") 73 | parser.add_argument("-d", "--distance", 74 | required=False, 75 | type=float, 76 | default=None, 77 | help="Distance between camera " 78 | "and object in millimeters") 79 | parser.add_argument("-f", "--fieldofview", 80 | required=False, 81 | type=float, 82 | default=None, 83 | help="Field of view of camera") 84 | 85 | return parser 86 | 87 | 88 | def dimensions(box): 89 | """ 90 | Return the length and width of the object. 91 | 92 | :param box: consists of top left, right and bottom left, right co-ordinates 93 | :return: Length and width of the object 94 | """ 95 | (tl, tr, br, bl) = box 96 | x = int(math.sqrt(math.pow((bl[0] - tl[0]), 2) + math.pow((bl[1] - tl[1]), 2))) 97 | y = int(math.sqrt(math.pow((tl[0] - tr[0]), 2) + math.pow((tl[1] - tr[1]), 2))) 98 | 99 | if x > y: 100 | return x, y 101 | else: 102 | return y, x 103 | 104 | 105 | def get_ip_address(): 106 | """ 107 | Return IP address of the server. 108 | 109 | :return: None 110 | """ 111 | hostname = socket.gethostname() 112 | ipaddress = socket.gethostbyname(hostname) 113 | port = 8086 114 | proxy = {"http": "http://{}:{}".format(ipaddress, port)} 115 | return ipaddress, port, proxy 116 | 117 | 118 | def get_orientation(contours): 119 | """ 120 | Gives the angle of the orientation of the object in radians. 121 | Step 1: Convert 3D matrix of contours to 2D. 122 | Step 2: Apply PCA algorithm to find angle of the data points. 123 | Step 3: If angle is greater than 0.5, return_flag is made to True 124 | else false. 125 | Step 4: Save the image in "Orientation" folder if it has a 126 | orientation defect. 
127 | 128 | :param contours: contour of the object from the frame 129 | :return: angle of orientation of the object in radians 130 | """ 131 | size_points = len(contours) 132 | # data_pts stores contour values in 2D 133 | data_pts = np.empty((size_points, 2), dtype=np.float64) 134 | for i in range(data_pts.shape[0]): 135 | data_pts[i, 0] = contours[i, 0, 0] 136 | data_pts[i, 1] = contours[i, 0, 1] 137 | # Use PCA algorithm to find angle of the data points 138 | mean, eigenvector = cv2.PCACompute(data_pts, mean=None) 139 | angle = atan2(eigenvector[0, 1], eigenvector[0, 0]) 140 | return angle 141 | 142 | 143 | def detect_orientation(frame, contours): 144 | """ 145 | Identifies the Orientation of the object based on the detected angle. 146 | 147 | :param frame: Input frame from video 148 | :param contours: contour of the object from the frame 149 | :return: defect_flag, defect 150 | """ 151 | defect = "Orientation" 152 | global OBJECT_COUNT 153 | # Find the orientation of each contour 154 | angle = get_orientation(contours) 155 | # If angle is less than 0.5 then no orientation defect is present 156 | if angle < 0.5: 157 | defect_flag = False 158 | else: 159 | x, y, w, h = cv2.boundingRect(contours) 160 | print("Orientation defect detected in object {}".format(COUNT_OBJECT)) 161 | defect_flag = True 162 | cv2.imwrite("{}/orientation/Orientation_{}.png" 163 | .format(base_dir, COUNT_OBJECT), 164 | frame[y: y + h , x : x + w]) 165 | cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 166 | 0.75, (255, 255, 255), 2) 167 | cv2.putText(frame, "Defect: {}".format(defect), (5, 140), 168 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 169 | cv2.putText(frame, "Length (mm): {}".format(HEIGHT_OF_OBJ), (5, 80), 170 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 171 | cv2.putText(frame, "Width (mm): {}".format(WIDTH_OF_OBJ), (5, 110), 172 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 173 | cv2.imshow("Out", frame) 174 | cv2.waitKey(2000) 175 | 
return defect_flag, defect 176 | 177 | 178 | def detect_color(frame, cnt): 179 | """ 180 | Identifies the color defect W.R.T the set default color of the object. 181 | Step 1: Increase the brightness of the image. 182 | Step 2: Convert the image to HSV Format. HSV color space gives more 183 | information about the colors of the image. 184 | It helps to identify distinct colors in the image. 185 | Step 3: Threshold the image based on the color using "inRange" function. 186 | Range of the color, which is considered as a defect for object, is 187 | passed as one of the argument to inRange function to create a mask. 188 | Step 4: Morphological opening and closing is done on the mask to remove 189 | noises and fill the gaps. 190 | Step 5: Find the contours on the mask image. Contours are filtered based on 191 | the area to get the contours of defective area. Contour of the 192 | defective area is then drawn on the original image to visualize. 193 | Step 6: Save the image in "color" folder if it has a color defect. 
194 | 195 | :param frame: Input frame from the video 196 | :param cnt: Contours of the object 197 | :return: color_flag, defect 198 | """ 199 | defect = "Color" 200 | global OBJECT_COUNT 201 | color_flag = False 202 | # Increase the brightness of the image 203 | cv2.convertScaleAbs(frame, frame, 1, 20) 204 | # Convert the captured frame from BGR to HSV 205 | img_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) 206 | # Threshold the image 207 | img_threshold = cv2.inRange(img_hsv, LOWER_COLOR_RANGE, UPPER_COLOR_RANGE) 208 | # Morphological opening (remove small objects from the foreground) 209 | img_threshold = cv2.erode(img_threshold, 210 | kernel=cv2.getStructuringElement( 211 | cv2.MORPH_ELLIPSE, (5, 5))) 212 | img_threshold = cv2.dilate(img_threshold, 213 | kernel=cv2.getStructuringElement( 214 | cv2.MORPH_ELLIPSE, (5, 5))) 215 | contours, hierarchy = cv2.findContours(img_threshold, cv2.RETR_LIST, 216 | cv2.CHAIN_APPROX_NONE) 217 | for i in range(len(contours)): 218 | area = cv2.contourArea(contours[i]) 219 | if 2000 < area < 10000: 220 | cv2.drawContours(frame, contours[i], -1, (0, 0, 255), 2) 221 | color_flag = True 222 | if color_flag: 223 | x, y, w, h = cv2.boundingRect(cnt) 224 | print("Color defect detected in object {}".format(COUNT_OBJECT)) 225 | cv2.imwrite("{}/color/Color_{}.png".format(base_dir, COUNT_OBJECT), 226 | frame[y : y + h, x : x + w]) 227 | cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 228 | 0.75, (255, 255, 255), 2) 229 | cv2.putText(frame, "Defect: {}".format(defect), (5, 140), 230 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 231 | cv2.putText(frame, "Length (mm): {}".format(HEIGHT_OF_OBJ), (5, 80), 232 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 233 | cv2.putText(frame, "Width (mm): {}".format(WIDTH_OF_OBJ), (5, 110), 234 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 235 | cv2.imshow("Out", frame) 236 | cv2.waitKey(2000) 237 | return color_flag, defect 238 | 239 | 240 | def detect_crack(frame, 
cnt): 241 | """ 242 | Identify the Crack defect on the object. 243 | Step 1: Convert the image to gray scale. 244 | Step 2: Blur the gray image to remove the noises. 245 | Step 3: Find the edges on the blurred image to get the contours of 246 | possible cracks. 247 | Step 4: Filter the contours to get the contour of the crack. 248 | Step 5: Draw the contour on the orignal image for visualization. 249 | Step 6: Save the image in "crack" folder if it has crack defect. 250 | 251 | :param frame: Input frame from the video 252 | :param cnt: Contours of the object 253 | :return: defect_flag, defect, cnt 254 | """ 255 | defect = "Crack" 256 | global OBJECT_COUNT 257 | defect_flag = False 258 | low_threshold = 130 259 | kernel_size = 3 260 | ratio = 3 261 | # Convert the captured frame from BGR to GRAY 262 | img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 263 | img = cv2.blur(img, (7, 7)) 264 | # Find the edges 265 | detected_edges = cv2.Canny(img, low_threshold, 266 | low_threshold * ratio, kernel_size) 267 | # Find the contours 268 | contours, hierarchy = cv2.findContours(detected_edges, cv2.RETR_TREE, 269 | cv2.CHAIN_APPROX_NONE) 270 | 271 | if len(contours) != 0: 272 | for i in range(len(contours)): 273 | area = cv2.contourArea(contours[i]) 274 | if area > 20 or area < 9: 275 | cv2.drawContours(frame, contours, i, (0, 255, 0), 2) 276 | defect_flag = True 277 | 278 | if defect_flag: 279 | x, y, w, h = cv2.boundingRect(cnt) 280 | print("Crack defect detected in object {}".format(COUNT_OBJECT)) 281 | cv2.imwrite("{}/crack/Crack_{}.png".format(base_dir, COUNT_OBJECT), 282 | frame[y : y + h , x : x + w ]) 283 | cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 284 | 0.75, (255, 255, 255), 2) 285 | cv2.putText(frame, "Defect: {}".format(defect), (5, 140), 286 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 287 | cv2.putText(frame, "Length (mm): {}".format(HEIGHT_OF_OBJ), 288 | (5, 80), 289 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 290 | 
cv2.putText(frame, "Width (mm): {}".format(WIDTH_OF_OBJ), (5, 110), 291 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 292 | cv2.imshow("Out", frame) 293 | cv2.waitKey(2000) 294 | return defect_flag, defect 295 | 296 | 297 | def update_data(input_data): 298 | """ 299 | To update database with input_data. 300 | Step 1: Write given data points to the database. 301 | Step 2: Use SELECT statement to query the database. 302 | 303 | :param input_data: JSON body consisting of object number and defect values 304 | """ 305 | client.write_points([input_data]) 306 | client.query('SELECT * from "obj_flaw_detector"') 307 | 308 | 309 | def flaw_detection(): 310 | """ 311 | Measurement and defects such as color, crack and orientation of the object 312 | are found. 313 | 314 | :return: None 315 | """ 316 | global HEIGHT_OF_OBJ 317 | global WIDTH_OF_OBJ 318 | global COUNT_OBJECT 319 | global OBJ_DEFECT 320 | global FRAME_COUNT 321 | global OBJECT_COUNT 322 | 323 | while cap.isOpened(): 324 | # Read the frame from the stream 325 | ret, frame = cap.read() 326 | 327 | if not ret: 328 | break 329 | 330 | FRAME_COUNT += 1 331 | 332 | # Check every given frame number 333 | # (Number chosen based on the frequency of object on conveyor belt) 334 | if FRAME_COUNT % frame_number == 0: 335 | HEIGHT_OF_OBJ = 0 336 | WIDTH_OF_OBJ = 0 337 | OBJ_DEFECT = [] 338 | data_base = [] 339 | # Convert BGR image to HSV color space 340 | img_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) 341 | 342 | # Thresholding of an Image in a color range 343 | img_threshold = cv2.inRange(img_hsv, (LOW_H, LOW_S, LOW_V), 344 | (HIGH_H, HIGH_S, HIGH_V)) 345 | 346 | # Morphological opening(remove small objects from the foreground) 347 | img_threshold = cv2.erode(img_threshold, 348 | cv2.getStructuringElement( 349 | cv2.MORPH_ELLIPSE, (5, 5))) 350 | img_threshold = cv2.dilate(img_threshold, 351 | cv2.getStructuringElement( 352 | cv2.MORPH_ELLIPSE, (5, 5))) 353 | 354 | # Morphological closing(fill small holes in the 
foreground) 355 | img_threshold = cv2.dilate(img_threshold, 356 | cv2.getStructuringElement( 357 | cv2.MORPH_ELLIPSE, (5, 5))) 358 | img_threshold = cv2.erode(img_threshold, 359 | cv2.getStructuringElement( 360 | cv2.MORPH_ELLIPSE, (5, 5))) 361 | 362 | # Find the contours on the image 363 | contours, hierarchy = cv2.findContours(img_threshold, 364 | cv2.RETR_LIST, 365 | cv2.CHAIN_APPROX_NONE) 366 | 367 | for cnt in contours: 368 | x, y, w, h = cv2.boundingRect(cnt) 369 | if OBJECT_AREA_MAX > w * h > OBJECT_AREA_MIN: 370 | box = cv2.minAreaRect(cnt) 371 | box = cv2.boxPoints(box) 372 | height, width = dimensions(np.array(box, dtype='int')) 373 | HEIGHT_OF_OBJ = round(height * one_pixel_length * 10, 2) 374 | WIDTH_OF_OBJ = round(width * one_pixel_length * 10, 2) 375 | COUNT_OBJECT += 1 376 | frame_orient = frame.copy() 377 | frame_clr = frame.copy() 378 | frame_crack = frame.copy() 379 | frame_nodefect = frame.copy() 380 | OBJECT_COUNT = "Object Number : {}".format(COUNT_OBJECT) 381 | 382 | # Check for the orientation of the object 383 | orientation_flag, orientation_defect = \ 384 | detect_orientation(frame_orient, cnt) 385 | if orientation_flag: 386 | value = 1 387 | data_base.append(value) 388 | OBJ_DEFECT.append(str(orientation_defect)) 389 | else: 390 | value = 0 391 | data_base.append(value) 392 | 393 | # Check for the color defect of the object 394 | color_flag, color_defect = detect_color(frame_clr, cnt) 395 | if color_flag: 396 | value = 1 397 | data_base.append(value) 398 | OBJ_DEFECT.append(str(color_defect)) 399 | else: 400 | value = 0 401 | data_base.append(value) 402 | 403 | # Check for the crack defect of the object 404 | crack_flag, crack_defect = detect_crack(frame_crack, cnt) 405 | if crack_flag: 406 | value = 1 407 | data_base.append(value) 408 | OBJ_DEFECT.append(str(crack_defect)) 409 | else: 410 | value = 0 411 | data_base.append(value) 412 | 413 | # Check if none of the defect is found 414 | if not OBJ_DEFECT: 415 | value = 1 416 | 
data_base.append(value) 417 | defect = "No Defect" 418 | OBJ_DEFECT.append(defect) 419 | print("No defect detected in object {}" 420 | .format(COUNT_OBJECT)) 421 | cv2.putText(frame_nodefect, "Length (mm): {}".format(HEIGHT_OF_OBJ), 422 | (5, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, 423 | (255, 255, 255), 2) 424 | cv2.putText(frame_nodefect, "Width (mm): {}".format(WIDTH_OF_OBJ), 425 | (5, 110), cv2.FONT_HERSHEY_SIMPLEX, 0.75, 426 | (255, 255, 255), 2) 427 | cv2.imwrite("{}/no_defect/Nodefect_{}.png".format( 428 | base_dir, COUNT_OBJECT), 429 | frame[y : y + h, 430 | x : x + w]) 431 | else: 432 | value = 0 433 | data_base.append(value) 434 | print("Length (mm) = {}, width (mm) = {}".format( 435 | HEIGHT_OF_OBJ, WIDTH_OF_OBJ)) 436 | if not OBJ_DEFECT: 437 | continue 438 | 439 | # Create json_body to store the defects 440 | else: 441 | json_body = { 442 | "measurement": "obj_flaw_detector", 443 | "tags": { 444 | "user": "User" 445 | }, 446 | 447 | "fields": { 448 | "Object Number": COUNT_OBJECT, 449 | "Orientation": data_base[0], 450 | "Color": data_base[1], 451 | "Crack": data_base[2], 452 | "No defect": data_base[3] 453 | } 454 | } 455 | # Send json_body to influxdb 456 | update_data(json_body) 457 | 458 | all_defects = " ".join(OBJ_DEFECT) 459 | cv2.putText(frame, "Press q to quit", (410, 50), 460 | cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2) 461 | cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 462 | 0.75, (255, 255, 255), 2) 463 | cv2.putText(frame, "Defect: {}".format(all_defects), (5, 140), 464 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 465 | cv2.putText(frame, "Length (mm): {}".format(HEIGHT_OF_OBJ), (5, 80), 466 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 467 | cv2.putText(frame, "Width (mm): {}".format(WIDTH_OF_OBJ), (5, 110), 468 | cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2) 469 | cv2.imshow("Out", frame) 470 | keypressed = cv2.waitKey(40) 471 | if keypressed == 113 or keypressed == 81: 472 | break 473 | 
cv2.destroyAllWindows() 474 | cap.release() 475 | 476 | 477 | if __name__ == '__main__': 478 | 479 | args = build_argparser().parse_args() 480 | 481 | base_dir = args.directory 482 | 483 | if not base_dir: 484 | base_dir = os.getcwd() 485 | 486 | # Checks for the video file 487 | assert os.path.isfile(CONFIG_FILE), "{} file doesn't exist".format(CONFIG_FILE) 488 | config = json.loads(open(CONFIG_FILE).read()) 489 | 490 | for idx, item in enumerate(config['inputs']): 491 | if item['video'].isdigit(): 492 | input_stream = int(item['video']) 493 | cap = cv2.VideoCapture(input_stream) 494 | if not cap.isOpened(): 495 | print("\nCamera not plugged in... Exiting...\n") 496 | sys.exit(0) 497 | fps = cap.get(cv2.CAP_PROP_FPS) 498 | delay = (int)(1000 / fps) 499 | else: 500 | input_stream = item['video'] 501 | cap = cv2.VideoCapture(input_stream) 502 | if not cap.isOpened(): 503 | print("\nUnable to open video file... Exiting...\n") 504 | sys.exit(0) 505 | fps = cap.get(cv2.CAP_PROP_FPS) 506 | delay = (int)(1000 / fps) 507 | 508 | if args.distance and args.fieldofview: 509 | width_of_video = cap.get(3) 510 | height_of_video = cap.get(4) 511 | # Convert degrees to radians 512 | radians = (args.fieldofview / 2) * 0.0174533 513 | # Calculate the diagonal length of image in millimeters using 514 | # field of view of camera and distance between object and camera. 515 | diagonal_length_of_image_plane = abs( 516 | 2 * (args.distance / 10) * math.tan(radians)) 517 | # Calculate diagonal length of image in pixel 518 | diagonal_length_in_pixel = math.sqrt( 519 | math.pow(width_of_video, 2) + math.pow(height_of_video, 2)) 520 | # Convert one pixel value in millimeters 521 | one_pixel_length = (diagonal_length_of_image_plane / 522 | diagonal_length_in_pixel) 523 | # If distance between camera and object and field of view of camera 524 | # are not provided, then 96 pixels per inch is considered. 
525 | # pixel_lengh = 2.54 cm (1 inch) / 96 pixels 526 | else: 527 | one_pixel_length = 0.0264583333 528 | 529 | dir_names = ["crack", "color", "orientation", "no_defect"] 530 | OBJ_DEFECT = [] 531 | frame_number = 40 532 | FRAME_COUNT = 0 533 | 534 | # Get ipaddress from the get_ip_address 535 | ipaddress, port, proxy, = get_ip_address() 536 | database = 'obj_flaw_database' 537 | client = InfluxDBClient(host=ipaddress, port=port, 538 | database=database, proxies=proxy) 539 | client.create_database(database) 540 | 541 | # create folders with the given dir_names to save defective objects 542 | for i in range(len(dir_names)): 543 | if not os.path.exists(os.path.join(base_dir, dir_names[i])): 544 | os.makedirs(os.path.join(base_dir, dir_names[i])) 545 | else: 546 | file_list = os.listdir(os.path.join(base_dir, dir_names[i])) 547 | for f in file_list: 548 | os.remove(os.path.join(base_dir, dir_names[i], f)) 549 | # Find dimensions and flaw detections such as color, crack and orientation 550 | # of the object. 
551 | flaw_detection() 552 | 553 | -------------------------------------------------------------------------------- /Jupyter/object_flaw_detector.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "\"\"\"Object flaw detector.\"\"\"\n", 19 | "\"\"\"\n", 20 | "* Copyright (c) 2018 Intel Corporation.\n", 21 | "*\n", 22 | "* Permission is hereby granted, free of charge, to any person obtaining\n", 23 | "* a copy of this software and associated documentation files (the\n", 24 | "* \"Software\"), to deal in the Software without restriction, including\n", 25 | "* without limitation the rights to use, copy, modify, merge, publish,\n", 26 | "* distribute, sublicense, and/or sell copies of the Software, and to\n", 27 | "* permit persons to whom the Software is furnished to do so, subject to\n", 28 | "* the following conditions:\n", 29 | "*\n", 30 | "* The above copyright notice and this permission notice shall be\n", 31 | "* included in all copies or substantial portions of the Software.\n", 32 | "*\n", 33 | "* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n", 34 | "* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n", 35 | "* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n", 36 | "* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n", 37 | "* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n", 38 | "* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n", 39 | "* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n", 40 | "*\n", 41 | "\"\"\"\n", 42 | "\n", 43 | "import socket\n", 44 | "import math\n", 45 | "import sys\n", 46 | "import cv2\n", 47 | "import os\n", 48 | "import json\n", 49 | "from argparse import ArgumentParser\n", 50 | "from influxdb import InfluxDBClient\n", 51 | "from math import atan2\n", 52 | "\n", 53 | "import numpy as np\n", 54 | "\n", 55 | "# GLOBAL Variables\n", 56 | "CONFIG_FILE = '../resources/config.json'\n", 57 | "\n", 58 | "OBJECT_AREA_MIN = 9000\n", 59 | "OBJECT_AREA_MAX = 50000\n", 60 | "LOW_H = 0\n", 61 | "LOW_S = 0\n", 62 | "LOW_V = 47\n", 63 | "# Thresholding of an Image in a color range\n", 64 | "HIGH_H = 179\n", 65 | "HIGH_S = 255\n", 66 | "HIGH_V = 255\n", 67 | "# Lower and upper value of color Range of the object\n", 68 | "# for color thresholding to detect the object\n", 69 | "LOWER_COLOR_RANGE = (0, 0, 0)\n", 70 | "UPPER_COLOR_RANGE = (174, 73, 255)\n", 71 | "COUNT_OBJECT = 0\n", 72 | "HEIGHT_OF_OBJ = 0\n", 73 | "WIDTH_OF_OBJ = 0\n", 74 | "\n", 75 | "OBJECT_COUNT = \"Object Number : {}\".format(COUNT_OBJECT)\n", 76 | "\n", 77 | "input = ''\n", 78 | "base_dir = os.getcwd()\n", 79 | "distance = 0\n", 80 | "fieldofview = 0\n", 81 | "\n", 82 | "\n", 83 | "def dimensions(box):\n", 84 | " \"\"\"\n", 85 | " Return the length and width of the object.\n", 86 | "\n", 87 | " :param box: consists of top left, right and bottom left, right co-ordinates\n", 88 | " :return: Length and width of the object\n", 89 | " \"\"\"\n", 90 | " (tl, tr, br, bl) = box\n", 91 | " x = int(\n", 92 | " math.sqrt(math.pow((bl[0] - tl[0]), 2) + math.pow((bl[1] - tl[1]), 2)))\n", 93 | " y = int(\n", 94 | " math.sqrt(math.pow((tl[0] - tr[0]), 2) + math.pow((tl[1] - tr[1]), 2)))\n", 
95 | "\n", 96 | " if x > y:\n", 97 | " return x, y\n", 98 | " else:\n", 99 | " return y, x\n", 100 | "\n", 101 | "\n", 102 | "def get_ip_address():\n", 103 | " \"\"\"\n", 104 | " Return IP address of the server.\n", 105 | "\n", 106 | " :return: None\n", 107 | " \"\"\"\n", 108 | " hostname = socket.gethostname()\n", 109 | " ipaddress = socket.gethostbyname(hostname)\n", 110 | " port = 8086\n", 111 | " proxy = {\"http\": \"http://{}:{}\".format(ipaddress, port)}\n", 112 | " return ipaddress, port, proxy\n", 113 | "\n", 114 | "\n", 115 | "def get_orientation(contours):\n", 116 | " \"\"\"\n", 117 | " Gives the angle of the orientation of the object in radians.\n", 118 | " Step 1: Convert 3D matrix of contours to 2D.\n", 119 | " Step 2: Apply PCA algorithm to find angle of the data points.\n", 120 | " Step 3: If angle is greater than 0.5, return_flag is made to True\n", 121 | " else false.\n", 122 | " Step 4: Save the image in \"Orientation\" folder if it has a\n", 123 | " orientation defect.\n", 124 | "\n", 125 | " :param contours: contour of the object from the frame\n", 126 | " :return: angle of orientation of the object in radians\n", 127 | " \"\"\"\n", 128 | " size_points = len(contours)\n", 129 | " # data_pts stores contour values in 2D\n", 130 | " data_pts = np.empty((size_points, 2), dtype=np.float64)\n", 131 | " for i in range(data_pts.shape[0]):\n", 132 | " data_pts[i, 0] = contours[i, 0, 0]\n", 133 | " data_pts[i, 1] = contours[i, 0, 1]\n", 134 | " # Use PCA algorithm to find angle of the data points\n", 135 | " mean, eigenvector = cv2.PCACompute(data_pts, mean=None)\n", 136 | " angle = atan2(eigenvector[0, 1], eigenvector[0, 0])\n", 137 | " return angle\n", 138 | "\n", 139 | "\n", 140 | "def detect_orientation(frame, contours):\n", 141 | " \"\"\"\n", 142 | " Identifies the Orientation of the object based on the detected angle.\n", 143 | "\n", 144 | " :param frame: Input frame from video\n", 145 | " :param contours: contour of the object from the frame\n", 
146 | " :return: defect_flag, defect\n", 147 | " \"\"\"\n", 148 | " defect = \"Orientation\"\n", 149 | " global OBJECT_COUNT\n", 150 | " # Find the orientation of each contour\n", 151 | " angle = get_orientation(contours)\n", 152 | " # If angle is less than 0.5 then no orientation defect is present\n", 153 | " if angle < 0.5:\n", 154 | " defect_flag = False\n", 155 | " else:\n", 156 | " x, y, w, h = cv2.boundingRect(contours)\n", 157 | " print(\"Orientation defect detected in object {}\".format(COUNT_OBJECT))\n", 158 | " defect_flag = True\n", 159 | " cv2.imwrite(\"{}/orientation/Orientation_{}.png\"\n", 160 | " .format(base_dir, COUNT_OBJECT),\n", 161 | " frame[y: y + h, x: x + w])\n", 162 | " cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX,\n", 163 | " 0.75, (255, 255, 255), 2)\n", 164 | " cv2.putText(frame, \"Defect: {}\".format(defect), (5, 140),\n", 165 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 166 | " cv2.putText(frame, \"Length (mm): {}\".format(HEIGHT_OF_OBJ), (5, 80),\n", 167 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 168 | " cv2.putText(frame, \"Width (mm): {}\".format(WIDTH_OF_OBJ), (5, 110),\n", 169 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 170 | " cv2.imshow(\"Out\", frame)\n", 171 | " cv2.waitKey(2000)\n", 172 | " return defect_flag, defect\n", 173 | "\n", 174 | "\n", 175 | "def detect_color(frame, cnt):\n", 176 | " \"\"\"\n", 177 | " Identifies the color defect W.R.T the set default color of the object.\n", 178 | " Step 1: Increase the brightness of the image.\n", 179 | " Step 2: Convert the image to HSV Format. 
HSV color space gives more\n", 180 | " information about the colors of the image.\n", 181 | " It helps to identify distinct colors in the image.\n", 182 | " Step 3: Threshold the image based on the color using \"inRange\" function.\n", 183 | " Range of the color, which is considered as a defect for object, is\n", 184 | " passed as one of the argument to inRange function to create a mask.\n", 185 | " Step 4: Morphological opening and closing is done on the mask to remove\n", 186 | " noises and fill the gaps.\n", 187 | " Step 5: Find the contours on the mask image. Contours are filtered based on\n", 188 | " the area to get the contours of defective area. Contour of the\n", 189 | " defective area is then drawn on the original image to visualize.\n", 190 | " Step 6: Save the image in \"color\" folder if it has a color defect.\n", 191 | "\n", 192 | " :param frame: Input frame from the video\n", 193 | " :param cnt: Contours of the object\n", 194 | " :return: color_flag, defect\n", 195 | " \"\"\"\n", 196 | " defect = \"Color\"\n", 197 | " global OBJECT_COUNT\n", 198 | " color_flag = False\n", 199 | " # Increase the brightness of the image\n", 200 | " cv2.convertScaleAbs(frame, frame, 1, 20)\n", 201 | " # Convert the captured frame from BGR to HSV\n", 202 | " img_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n", 203 | " # Threshold the image\n", 204 | " img_threshold = cv2.inRange(img_hsv, LOWER_COLOR_RANGE, UPPER_COLOR_RANGE)\n", 205 | " # Morphological opening (remove small objects from the foreground)\n", 206 | " img_threshold = cv2.erode(img_threshold,\n", 207 | " kernel=cv2.getStructuringElement(\n", 208 | " cv2.MORPH_ELLIPSE, (5, 5)))\n", 209 | " img_threshold = cv2.dilate(img_threshold,\n", 210 | " kernel=cv2.getStructuringElement(\n", 211 | " cv2.MORPH_ELLIPSE, (5, 5)))\n", 212 | " contours, hierarchy = cv2.findContours(img_threshold, cv2.RETR_LIST,\n", 213 | " cv2.CHAIN_APPROX_NONE)\n", 214 | " for i in range(len(contours)):\n", 215 | " area = 
cv2.contourArea(contours[i])\n", 216 | " if 2000 < area < 10000:\n", 217 | " cv2.drawContours(frame, contours[i], -1, (0, 0, 255), 2)\n", 218 | " color_flag = True\n", 219 | " if color_flag:\n", 220 | " x, y, w, h = cv2.boundingRect(cnt)\n", 221 | " print(\"Color defect detected in object {}\".format(COUNT_OBJECT))\n", 222 | " cv2.imwrite(\"{}/color/Color_{}.png\".format(base_dir, COUNT_OBJECT),\n", 223 | " frame[y: y + h, x: x + w])\n", 224 | " cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX,\n", 225 | " 0.75, (255, 255, 255), 2)\n", 226 | " cv2.putText(frame, \"Defect: {}\".format(defect), (5, 140),\n", 227 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 228 | " cv2.putText(frame, \"Length (mm): {}\".format(HEIGHT_OF_OBJ), (5, 80),\n", 229 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 230 | " cv2.putText(frame, \"Width (mm): {}\".format(WIDTH_OF_OBJ), (5, 110),\n", 231 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 232 | " cv2.imshow(\"Out\", frame)\n", 233 | " cv2.waitKey(2000)\n", 234 | " return color_flag, defect\n", 235 | "\n", 236 | "\n", 237 | "def detect_crack(frame, cnt):\n", 238 | " \"\"\"\n", 239 | " Identify the Crack defect on the object.\n", 240 | " Step 1: Convert the image to gray scale.\n", 241 | " Step 2: Blur the gray image to remove the noises.\n", 242 | " Step 3: Find the edges on the blurred image to get the contours of\n", 243 | " possible cracks.\n", 244 | " Step 4: Filter the contours to get the contour of the crack.\n", 245 | " Step 5: Draw the contour on the original image for visualization.\n", 246 | " Step 6: Save the image in \"crack\" folder if it has crack defect.\n", 247 | "\n", 248 | " :param frame: Input frame from the video\n", 249 | " :param cnt: Contours of the object\n", 250 | " :return: defect_flag, defect\n", 251 | " \"\"\"\n", 252 | " defect = \"Crack\"\n", 253 | " global OBJECT_COUNT\n", 254 | " defect_flag = False\n", 255 | " low_threshold = 130\n", 256 | 
kernel_size = 3\n", 257 | " ratio = 3\n", 258 | " # Convert the captured frame from BGR to GRAY\n", 259 | " img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n", 260 | " img = cv2.blur(img, (7, 7))\n", 261 | " # Find the edges\n", 262 | " detected_edges = cv2.Canny(img, low_threshold,\n", 263 | " low_threshold * ratio, kernel_size)\n", 264 | " # Find the contours\n", 265 | " contours, hierarchy = cv2.findContours(detected_edges, cv2.RETR_TREE,\n", 266 | " cv2.CHAIN_APPROX_NONE)\n", 267 | "\n", 268 | " if len(contours) != 0:\n", 269 | " for i in range(len(contours)):\n", 270 | " area = cv2.contourArea(contours[i])\n", 271 | " if area > 20 or area < 9:\n", 272 | " cv2.drawContours(frame, contours, i, (0, 255, 0), 2)\n", 273 | " defect_flag = True\n", 274 | "\n", 275 | " if defect_flag:\n", 276 | " x, y, w, h = cv2.boundingRect(cnt)\n", 277 | " print(\"Crack defect detected in object {}\".format(COUNT_OBJECT))\n", 278 | " cv2.imwrite(\"{}/crack/Crack_{}.png\".format(base_dir, COUNT_OBJECT),\n", 279 | " frame[y: y + h, x: x + w])\n", 280 | " cv2.putText(frame, OBJECT_COUNT, (5, 50), cv2.FONT_HERSHEY_SIMPLEX,\n", 281 | " 0.75, (255, 255, 255), 2)\n", 282 | " cv2.putText(frame, \"Defect: {}\".format(defect), (5, 140),\n", 283 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 284 | " cv2.putText(frame, \"Length (mm): {}\".format(HEIGHT_OF_OBJ),\n", 285 | " (5, 80),\n", 286 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 287 | " cv2.putText(frame, \"Width (mm): {}\".format(WIDTH_OF_OBJ), (5, 110),\n", 288 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 289 | " cv2.imshow(\"Out\", frame)\n", 290 | " cv2.waitKey(2000)\n", 291 | " return defect_flag, defect\n", 292 | "\n", 293 | "\n", 294 | "def update_data(input_data):\n", 295 | " \"\"\"\n", 296 | " To update database with input_data.\n", 297 | " Step 1: Write given data points to the database.\n", 298 | " Step 2: Use SELECT statement to query the database.\n", 299 | "\n", 300 | " :param 
input_data: JSON body consisting of object number and defect values\n", 301 | " \"\"\"\n", 302 | " client.write_points([input_data])\n", 303 | " client.query('SELECT * from \"obj_flaw_detector\"')\n", 304 | "\n", 305 | "\n", 306 | "def flaw_detection():\n", 307 | " \"\"\"\n", 308 | " Measurement and defects such as color, crack and orientation of the object\n", 309 | " are found.\n", 310 | "\n", 311 | " :return: None\n", 312 | " \"\"\"\n", 313 | " global HEIGHT_OF_OBJ\n", 314 | " global WIDTH_OF_OBJ\n", 315 | " global COUNT_OBJECT\n", 316 | " global OBJ_DEFECT\n", 317 | " global FRAME_COUNT\n", 318 | " global OBJECT_COUNT\n", 319 | "\n", 320 | " while cap.isOpened():\n", 321 | " # Read the frame from the stream\n", 322 | " ret, frame = cap.read()\n", 323 | "\n", 324 | " if not ret:\n", 325 | " break\n", 326 | "\n", 327 | " FRAME_COUNT += 1\n", 328 | "\n", 329 | " # Check every given frame number\n", 330 | " # (Number chosen based on the frequency of object on conveyor belt)\n", 331 | " if FRAME_COUNT % frame_number == 0:\n", 332 | " HEIGHT_OF_OBJ = 0\n", 333 | " WIDTH_OF_OBJ = 0\n", 334 | " OBJ_DEFECT = []\n", 335 | " data_base = []\n", 336 | " # Convert BGR image to HSV color space\n", 337 | " img_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n", 338 | "\n", 339 | " # Thresholding of an Image in a color range\n", 340 | " img_threshold = cv2.inRange(img_hsv, (LOW_H, LOW_S, LOW_V),\n", 341 | " (HIGH_H, HIGH_S, HIGH_V))\n", 342 | "\n", 343 | " # Morphological opening(remove small objects from the foreground)\n", 344 | " img_threshold = cv2.erode(img_threshold,\n", 345 | " cv2.getStructuringElement(\n", 346 | " cv2.MORPH_ELLIPSE, (5, 5)))\n", 347 | " img_threshold = cv2.dilate(img_threshold,\n", 348 | " cv2.getStructuringElement(\n", 349 | " cv2.MORPH_ELLIPSE, (5, 5)))\n", 350 | "\n", 351 | " # Morphological closing(fill small holes in the foreground)\n", 352 | " img_threshold = cv2.dilate(img_threshold,\n", 353 | " cv2.getStructuringElement(\n", 354 | " 
cv2.MORPH_ELLIPSE, (5, 5)))\n", 355 | " img_threshold = cv2.erode(img_threshold,\n", 356 | " cv2.getStructuringElement(\n", 357 | " cv2.MORPH_ELLIPSE, (5, 5)))\n", 358 | "\n", 359 | " # Find the contours on the image\n", 360 | " contours, hierarchy = cv2.findContours(img_threshold,\n", 361 | " cv2.RETR_LIST,\n", 362 | " cv2.CHAIN_APPROX_NONE)\n", 363 | "\n", 364 | " for cnt in contours:\n", 365 | " x, y, w, h = cv2.boundingRect(cnt)\n", 366 | " if OBJECT_AREA_MAX > w * h > OBJECT_AREA_MIN:\n", 367 | " box = cv2.minAreaRect(cnt)\n", 368 | " box = cv2.boxPoints(box)\n", 369 | " height, width = dimensions(np.array(box, dtype='int'))\n", 370 | " HEIGHT_OF_OBJ = round(height * one_pixel_length * 10, 2)\n", 371 | " WIDTH_OF_OBJ = round(width * one_pixel_length * 10, 2)\n", 372 | " COUNT_OBJECT += 1\n", 373 | " frame_orient = frame.copy()\n", 374 | " frame_clr = frame.copy()\n", 375 | " frame_crack = frame.copy()\n", 376 | " frame_nodefect = frame.copy()\n", 377 | " OBJECT_COUNT = \"Object Number : {}\".format(COUNT_OBJECT)\n", 378 | "\n", 379 | " # Check for the orientation of the object\n", 380 | " orientation_flag, orientation_defect = \\\n", 381 | " detect_orientation(frame_orient, cnt)\n", 382 | " if orientation_flag:\n", 383 | " value = 1\n", 384 | " data_base.append(value)\n", 385 | " OBJ_DEFECT.append(str(orientation_defect))\n", 386 | " else:\n", 387 | " value = 0\n", 388 | " data_base.append(value)\n", 389 | "\n", 390 | " # Check for the color defect of the object\n", 391 | " color_flag, color_defect = detect_color(frame_clr, cnt)\n", 392 | " if color_flag:\n", 393 | " value = 1\n", 394 | " data_base.append(value)\n", 395 | " OBJ_DEFECT.append(str(color_defect))\n", 396 | " else:\n", 397 | " value = 0\n", 398 | " data_base.append(value)\n", 399 | "\n", 400 | " # Check for the crack defect of the object\n", 401 | " crack_flag, crack_defect = detect_crack(frame_crack, cnt)\n", 402 | " if crack_flag:\n", 403 | " value = 1\n", 404 | " data_base.append(value)\n", 405 
| " OBJ_DEFECT.append(str(crack_defect))\n", 406 | " else:\n", 407 | " value = 0\n", 408 | " data_base.append(value)\n", 409 | "\n", 410 | " # Check if none of the defect is found\n", 411 | " if not OBJ_DEFECT:\n", 412 | " value = 1\n", 413 | " data_base.append(value)\n", 414 | " defect = \"No Defect\"\n", 415 | " OBJ_DEFECT.append(defect)\n", 416 | " print(\"No defect detected in object {}\"\n", 417 | " .format(COUNT_OBJECT))\n", 418 | " cv2.putText(frame_nodefect,\n", 419 | " \"Length (mm): {}\".format(HEIGHT_OF_OBJ),\n", 420 | " (5, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,\n", 421 | " (255, 255, 255), 2)\n", 422 | " cv2.putText(frame_nodefect,\n", 423 | " \"Width (mm): {}\".format(WIDTH_OF_OBJ),\n", 424 | " (5, 110), cv2.FONT_HERSHEY_SIMPLEX, 0.75,\n", 425 | " (255, 255, 255), 2)\n", 426 | " cv2.imwrite(\"{}/no_defect/Nodefect_{}.png\".format(\n", 427 | " base_dir, COUNT_OBJECT),\n", 428 | " frame[y: y + h,\n", 429 | " x: x + w])\n", 430 | " else:\n", 431 | " value = 0\n", 432 | " data_base.append(value)\n", 433 | " print(\"Length (mm) = {}, width (mm) = {}\".format(\n", 434 | " HEIGHT_OF_OBJ, WIDTH_OF_OBJ))\n", 435 | " if not OBJ_DEFECT:\n", 436 | " continue\n", 437 | "\n", 438 | " # Create json_body to store the defects\n", 439 | " else:\n", 440 | " json_body = {\n", 441 | " \"measurement\": \"obj_flaw_detector\",\n", 442 | " \"tags\": {\n", 443 | " \"user\": \"User\"\n", 444 | " },\n", 445 | "\n", 446 | " \"fields\": {\n", 447 | " \"Object Number\": COUNT_OBJECT,\n", 448 | " \"Orientation\": data_base[0],\n", 449 | " \"Color\": data_base[1],\n", 450 | " \"Crack\": data_base[2],\n", 451 | " \"No defect\": data_base[3]\n", 452 | " }\n", 453 | " }\n", 454 | " # Send json_body to influxdb\n", 455 | " update_data(json_body)\n", 456 | "\n", 457 | " all_defects = \" \".join(OBJ_DEFECT)\n", 458 | " cv2.putText(frame, \"Press q to quit\", (410, 50),\n", 459 | " cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\n", 460 | " cv2.putText(frame, OBJECT_COUNT, (5, 50), 
cv2.FONT_HERSHEY_SIMPLEX,\n", 461 | " 0.75, (255, 255, 255), 2)\n", 462 | " cv2.putText(frame, \"Defect: {}\".format(all_defects), (5, 140),\n", 463 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 464 | " cv2.putText(frame, \"Length (mm): {}\".format(HEIGHT_OF_OBJ), (5, 80),\n", 465 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 466 | " cv2.putText(frame, \"Width (mm): {}\".format(WIDTH_OF_OBJ), (5, 110),\n", 467 | " cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)\n", 468 | " cv2.imshow(\"Out\", frame)\n", 469 | " keypressed = cv2.waitKey(40)\n", 470 | " if keypressed == 113 or keypressed == 81:\n", 471 | " break\n", 472 | " cv2.destroyAllWindows()\n", 473 | " cap.release()\n", 474 | "\n", 475 | "\n", 476 | "if __name__ == '__main__':\n", 477 | "\n", 478 | " if \"distance\" in os.environ:\n", 479 | " distance = float(os.environ[\"distance\"])\n", 480 | " if \"directory\" in os.environ:\n", 481 | " base_dir = os.environ[\"directory\"]\n", 482 | " if \"fieldofview\" in os.environ:\n", 483 | " fieldofview = float(os.environ[\"fieldofview\"])\n", 484 | "\n", 485 | " assert os.path.isfile(CONFIG_FILE), \"{} file doesn't exist\".format(CONFIG_FILE)\n", 486 | " config = json.loads(open(CONFIG_FILE).read())\n", 487 | "\n", 488 | " for idx, item in enumerate(config['inputs']):\n", 489 | " if item['video'].isdigit():\n", 490 | " input_stream = int(item['video'])\n", 491 | " cap = cv2.VideoCapture(input_stream)\n", 492 | " if not cap.isOpened():\n", 493 | " print(\"\\nCamera not plugged in... Exiting...\\n\")\n", 494 | " sys.exit(0)\n", 495 | " fps = cap.get(cv2.CAP_PROP_FPS)\n", 496 | " delay = int(1000 / fps)\n", 497 | " else:\n", 498 | " input_stream = item['video']\n", 499 | " cap = cv2.VideoCapture(input_stream)\n", 500 | " if not cap.isOpened():\n", 501 | " print(\"\\nUnable to open video file... 
Exiting...\\n\")\n", 502 | " sys.exit(0)\n", 503 | " fps = cap.get(cv2.CAP_PROP_FPS)\n", 504 | " delay = int(1000 / fps)\n", 505 | "\n", 506 | " if distance != 0 and fieldofview != 0:\n", 507 | " width_of_video = cap.get(3)\n", 508 | " height_of_video = cap.get(4)\n", 509 | " # Convert degrees to radians\n", 510 | " radians = (fieldofview / 2) * 0.0174533\n", 511 | " # Calculate the diagonal length of image in millimeters using\n", 512 | " # field of view of camera and distance between object and camera.\n", 513 | " diagonal_length_of_image_plane = abs(2 * (distance / 10) * math.tan(radians))\n", 514 | " # Calculate diagonal length of image in pixel\n", 515 | " diagonal_length_in_pixel = math.sqrt(math.pow(width_of_video, 2)\n", 516 | " + math.pow(height_of_video, 2))\n", 517 | " # Convert one pixel value in millimeters\n", 518 | " one_pixel_length = (diagonal_length_of_image_plane / diagonal_length_in_pixel)\n", 519 | " # If distance between camera and object and field of view of camera\n", 520 | " # are not provided, then 96 pixels per inch is considered.\n", 521 | " # pixel_lengh = 2.54 cm (1 inch) / 96 pixels\n", 522 | " else:\n", 523 | " one_pixel_length = 0.0264583333\n", 524 | "\n", 525 | " dir_names = [\"crack\", \"color\", \"orientation\", \"no_defect\"]\n", 526 | " OBJ_DEFECT = []\n", 527 | " frame_number = 40\n", 528 | " FRAME_COUNT = 0\n", 529 | "\n", 530 | " # Get ipaddress from the get_ip_address\n", 531 | " ipaddress, port, proxy, = get_ip_address()\n", 532 | " database = 'obj_flaw_database'\n", 533 | " client = InfluxDBClient(host=ipaddress, port=port,\n", 534 | " database=database, proxies=proxy)\n", 535 | " client.create_database(database)\n", 536 | "\n", 537 | " # create folders with the given dir_names to save defective objects\n", 538 | " for i in range(len(dir_names)):\n", 539 | " if not os.path.exists(os.path.join(base_dir, dir_names[i])):\n", 540 | " os.makedirs(os.path.join(base_dir, dir_names[i]))\n", 541 | " else:\n", 542 | " file_list = 
os.listdir(os.path.join(base_dir, dir_names[i]))\n", 543 | " for f in file_list:\n", 544 | " os.remove(os.path.join(base_dir, dir_names[i], f))\n", 545 | " # Find dimensions and flaw detections such as color, crack and orientation\n", 546 | " # of the object.\n", 547 | " flaw_detection()\n" 548 | ] 549 | }, 550 | { 551 | "cell_type": "code", 552 | "execution_count": null, 553 | "metadata": {}, 554 | "outputs": [], 555 | "source": [] 556 | } 557 | ], 558 | "metadata": { 559 | "kernelspec": { 560 | "display_name": "Python 3", 561 | "language": "python", 562 | "name": "python3" 563 | }, 564 | "language_info": { 565 | "codemirror_mode": { 566 | "name": "ipython", 567 | "version": 3 568 | }, 569 | "file_extension": ".py", 570 | "mimetype": "text/x-python", 571 | "name": "python", 572 | "nbconvert_exporter": "python", 573 | "pygments_lexer": "ipython3", 574 | "version": "3.5.2" 575 | } 576 | }, 577 | "nbformat": 4, 578 | "nbformat_minor": 2 579 | } 580 | --------------------------------------------------------------------------------