├── docs
│   └── images
│       ├── output.png
│       ├── jupyter.png
│       ├── jupyter_code.png
│       └── architectural-diagram.png
├── resources
│   └── config.json
├── LICENSE
├── setup.sh
├── Jupyter
│   ├── inference.py
│   ├── restricted_zone_notifier_jupyter.py
│   ├── README.md
│   └── restricted_zone_notifier_jupyter.ipynb
├── application
│   ├── inference.py
│   └── restricted_zone_notifier.py
└── README.md
/docs/images/output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/restricted-zone-notifier-python/HEAD/docs/images/output.png
--------------------------------------------------------------------------------
/docs/images/jupyter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/restricted-zone-notifier-python/HEAD/docs/images/jupyter.png
--------------------------------------------------------------------------------
/docs/images/jupyter_code.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/restricted-zone-notifier-python/HEAD/docs/images/jupyter_code.png
--------------------------------------------------------------------------------
/docs/images/architectural-diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/restricted-zone-notifier-python/HEAD/docs/images/architectural-diagram.png
--------------------------------------------------------------------------------
/resources/config.json:
--------------------------------------------------------------------------------
1 | {
2 |
3 |     "inputs": [
4 |
5 |         {
6 |             "video": "../resources/worker-zone-detection.mp4"
7 |         }
8 |     ]
9 |
10 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2021, Intel Corporation
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Copyright (c) 2018 Intel Corporation.
4 | # Permission is hereby granted, free of charge, to any person obtaining
5 | # a copy of this software and associated documentation files (the
6 | # "Software"), to deal in the Software without restriction, including
7 | # without limitation the rights to use, copy, modify, merge, publish,
8 | # distribute, sublicense, and/or sell copies of the Software, and to
9 | # permit persons to whom the Software is furnished to do so, subject to
10 | # the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be
13 | # included in all copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 | # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 | # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 | # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19 | # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20 | # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21 | # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 |
23 | # Install the dependencies
24 | sudo apt-get update
25 | sudo apt-get install python3-pip
26 | sudo apt-get install mosquitto mosquitto-clients
27 | sudo pip3 install numpy paho-mqtt jupyter
28 |
29 | # Download the video
30 | cd resources
31 | wget -O worker-zone-detection.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/worker-zone-detection.mp4
32 |
33 | # Download the model
34 | cd /opt/intel/openvino/deployment_tools/tools/model_downloader
35 | sudo ./downloader.py --name person-detection-retail-0013
36 |
--------------------------------------------------------------------------------
/Jupyter/inference.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Copyright (c) 2018 Intel Corporation.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining
6 | a copy of this software and associated documentation files (the
7 | "Software"), to deal in the Software without restriction, including
8 | without limitation the rights to use, copy, modify, merge, publish,
9 | distribute, sublicense, and/or sell copies of the Software, and to
10 | permit persons to whom the Software is furnished to do so, subject to
11 | the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be
14 | included in all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 | """
24 |
25 | import os
26 | import sys
27 | import logging as log
28 | from openvino.inference_engine import IENetwork, IECore
29 |
30 |
31 | class Network:
32 | """
33 | Load and configure inference plugins for the specified target devices
34 | and performs synchronous and asynchronous modes for the specified infer requests.
35 | """
36 |
37 | def __init__(self):
38 | self.net = None
39 | self.plugin = None
40 | self.input_blob = None
41 | self.out_blob = None
42 | self.net_plugin = None
43 | self.infer_request_handle = None
44 |
45 | def load_model(self, model, device, input_size, output_size, num_requests, cpu_extension=None, plugin=None):
46 | """
47 | Loads a network and an image to the Inference Engine plugin.
48 | :param model: .xml file of the pre-trained model
49 | :param cpu_extension: extension for the CPU device
50 | :param device: Target device
51 | :param input_size: Number of input layers
52 | :param output_size: Number of output layers
53 | :param num_requests: Number of infer requests. Limited to device capabilities.
54 | :param plugin: Plugin for specified device
55 | :return: The plugin and the shape of the input layer
56 | """
57 |
58 | model_xml = model
59 | model_bin = os.path.splitext(model_xml)[0] + ".bin"
60 | # Plugin initialization for specified device
61 | # and load extensions library if specified
62 | if not plugin:
63 | log.info("Initializing plugin for {} device...".format(device))
64 | self.plugin = IECore()
65 | else:
66 | self.plugin = plugin
67 |
68 | if cpu_extension and 'CPU' in device:
69 | self.plugin.add_extension(cpu_extension, "CPU")
70 |
71 | # Read IR
72 | log.info("Reading IR...")
73 | self.net = self.plugin.read_network(model=model_xml, weights=model_bin)
74 | log.info("Loading IR to the plugin...")
75 |
76 | if "CPU" in device:
77 | supported_layers = self.plugin.query_network(self.net, "CPU")
78 | not_supported_layers = \
79 | [l for l in self.net.layers.keys() if l not in supported_layers]
80 | if len(not_supported_layers) != 0:
81 | log.error("Following layers are not supported by "
82 | "the plugin for specified device {}:\n {}".
83 | format(device,
84 | ', '.join(not_supported_layers)))
85 | log.error("Please try to specify cpu extensions library path"
86 | " in command line parameters using -l "
87 | "or --cpu_extension command line argument")
88 | sys.exit(1)
89 |
90 | if num_requests == 0:
91 | # Loads network read from IR to the plugin
92 | self.net_plugin = self.plugin.load_network(network=self.net, device_name=device)
93 | else:
94 | self.net_plugin = self.plugin.load_network(network=self.net, num_requests=num_requests, device_name=device)
95 |
96 | self.input_blob = next(iter(self.net.inputs))
97 | self.out_blob = next(iter(self.net.outputs))
98 | assert len(self.net.inputs.keys()) == input_size, \
99 | "Supports only {} input topologies".format(len(self.net.inputs))
100 | assert len(self.net.outputs) == output_size, \
101 | "Supports only {} output topologies".format(len(self.net.outputs))
102 |
103 | return self.plugin, self.get_input_shape()
104 |
105 | def get_input_shape(self):
106 | """
107 | Gives the shape of the input layer of the network.
108 | :return: Shape of the input layer
109 | """
110 | return self.net.inputs[self.input_blob].shape
111 |
112 | def performance_counter(self, request_id):
113 | """
114 | Queries performance measures per layer to get feedback of what is the
115 | most time consuming layer.
116 | :param request_id: Index of Infer request value. Limited to device capabilities
117 | :return: Performance of the layer
118 | """
119 | perf_count = self.net_plugin.requests[request_id].get_perf_counts()
120 | return perf_count
121 |
122 | def exec_net(self, request_id, frame):
123 | """
124 | Starts asynchronous inference for specified request.
125 | :param request_id: Index of Infer request value. Limited to device capabilities.
126 | :param frame: Input image
127 | :return: Instance of Executable Network class
128 | """
129 | self.infer_request_handle = self.net_plugin.start_async(
130 | request_id=request_id, inputs={self.input_blob: frame})
131 | return self.net_plugin
132 |
133 | def wait(self, request_id):
134 | """
135 | Waits for the result to become available.
136 | :param request_id: Index of Infer request value. Limited to device capabilities.
137 | :return: Timeout value
138 | """
139 | wait_process = self.net_plugin.requests[request_id].wait(-1)
140 | return wait_process
141 |
142 | def get_output(self, request_id, output=None):
143 | """
144 | Gives a list of results for the output layer of the network.
145 | :param request_id: Index of Infer request value. Limited to device capabilities.
146 | :param output: Name of the output layer
147 | :return: Results for the specified request
148 | """
149 | if output:
150 | res = self.infer_request_handle.outputs[output]
151 | else:
152 | res = self.net_plugin.requests[request_id].outputs[self.out_blob]
153 | return res
154 |
155 | def clean(self):
156 | """
157 | Deletes all the instances
158 | :return: None
159 | """
160 | del self.net_plugin
161 | del self.plugin
162 | del self.net
163 |
--------------------------------------------------------------------------------
/application/inference.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Copyright (c) 2018 Intel Corporation.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining
6 | a copy of this software and associated documentation files (the
7 | "Software"), to deal in the Software without restriction, including
8 | without limitation the rights to use, copy, modify, merge, publish,
9 | distribute, sublicense, and/or sell copies of the Software, and to
10 | permit persons to whom the Software is furnished to do so, subject to
11 | the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be
14 | included in all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 | """
24 |
25 | import os
26 | import sys
27 | import logging as log
28 | from openvino.inference_engine import IENetwork, IECore
29 |
30 |
31 | class Network:
32 | """
33 | Load and configure inference plugins for the specified target devices
34 | and performs synchronous and asynchronous modes for the specified infer requests.
35 | """
36 |
37 | def __init__(self):
38 | self.net = None
39 | self.plugin = None
40 | self.input_blob = None
41 | self.out_blob = None
42 | self.net_plugin = None
43 | self.infer_request_handle = None
44 |
45 | def load_model(self, model, device, input_size, output_size, num_requests, cpu_extension=None, plugin=None):
46 | """
47 | Loads a network and an image to the Inference Engine plugin.
48 | :param model: .xml file of the pre-trained model
49 | :param cpu_extension: extension for the CPU device
50 | :param device: Target device
51 | :param input_size: Number of input layers
52 | :param output_size: Number of output layers
53 | :param num_requests: Number of infer requests. Limited to device capabilities.
54 | :param plugin: Plugin for specified device
55 | :return: The plugin and the shape of the input layer
56 | """
57 |
58 | model_xml = model
59 | model_bin = os.path.splitext(model_xml)[0] + ".bin"
60 | # Plugin initialization for specified device
61 | # and load extensions library if specified
62 | if not plugin:
63 | log.info("Initializing plugin for {} device...".format(device))
64 | self.plugin = IECore()
65 | else:
66 | self.plugin = plugin
67 |
68 | if cpu_extension and 'CPU' in device:
69 | self.plugin.add_extension(cpu_extension, "CPU")
70 |
71 | # Read IR
72 | log.info("Reading IR...")
73 | self.net = self.plugin.read_network(model=model_xml, weights=model_bin)
74 | log.info("Loading IR to the plugin...")
75 |
76 | if "CPU" in device:
77 | supported_layers = self.plugin.query_network(self.net, "CPU")
78 | not_supported_layers = \
79 | [l for l in self.net.layers.keys() if l not in supported_layers]
80 | if len(not_supported_layers) != 0:
81 | log.error("Following layers are not supported by "
82 | "the plugin for specified device {}:\n {}".
83 | format(device,
84 | ', '.join(not_supported_layers)))
85 | log.error("Please try to specify cpu extensions library path"
86 | " in command line parameters using -l "
87 | "or --cpu_extension command line argument")
88 | sys.exit(1)
89 |
90 | if num_requests == 0:
91 | # Loads network read from IR to the plugin
92 | self.net_plugin = self.plugin.load_network(network=self.net, device_name=device)
93 | else:
94 | self.net_plugin = self.plugin.load_network(network=self.net, num_requests=num_requests, device_name=device)
95 |
96 | self.input_blob = next(iter(self.net.inputs))
97 | self.out_blob = next(iter(self.net.outputs))
98 | assert len(self.net.inputs.keys()) == input_size, \
99 | "Supports only {} input topologies".format(len(self.net.inputs))
100 | assert len(self.net.outputs) == output_size, \
101 | "Supports only {} output topologies".format(len(self.net.outputs))
102 |
103 | return self.plugin, self.get_input_shape()
104 |
105 | def get_input_shape(self):
106 | """
107 | Gives the shape of the input layer of the network.
108 | :return: Shape of the input layer
109 | """
110 | return self.net.inputs[self.input_blob].shape
111 |
112 | def performance_counter(self, request_id):
113 | """
114 | Queries performance measures per layer to get feedback of what is the
115 | most time consuming layer.
116 | :param request_id: Index of Infer request value. Limited to device capabilities
117 | :return: Performance of the layer
118 | """
119 | perf_count = self.net_plugin.requests[request_id].get_perf_counts()
120 | return perf_count
121 |
122 | def exec_net(self, request_id, frame):
123 | """
124 | Starts asynchronous inference for specified request.
125 | :param request_id: Index of Infer request value. Limited to device capabilities.
126 | :param frame: Input image
127 | :return: Instance of Executable Network class
128 | """
129 | self.infer_request_handle = self.net_plugin.start_async(
130 | request_id=request_id, inputs={self.input_blob: frame})
131 | return self.net_plugin
132 |
133 | def wait(self, request_id):
134 | """
135 | Waits for the result to become available.
136 | :param request_id: Index of Infer request value. Limited to device capabilities.
137 | :return: Timeout value
138 | """
139 | wait_process = self.net_plugin.requests[request_id].wait(-1)
140 | return wait_process
141 |
142 | def get_output(self, request_id, output=None):
143 | """
144 | Gives a list of results for the output layer of the network.
145 | :param request_id: Index of Infer request value. Limited to device capabilities.
146 | :param output: Name of the output layer
147 | :return: Results for the specified request
148 | """
149 | if output:
150 | res = self.infer_request_handle.outputs[output]
151 | else:
152 | res = self.net_plugin.requests[request_id].outputs[self.out_blob]
153 | return res
154 |
155 | def clean(self):
156 | """
157 | Deletes all the instances
158 | :return: None
159 | """
160 | del self.net_plugin
161 | del self.plugin
162 | del self.net
163 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DISCONTINUATION OF PROJECT #
2 | This project will no longer be maintained by Intel.
3 | Intel has ceased development of, and contributions to, this project, including, but not limited to, maintenance, bug fixes, new releases, and updates.
4 | Intel no longer accepts patches to this project.
5 |
6 | # Restricted Zone Notifier
7 |
8 | | Details | |
9 | |-----------------------|---------------|
10 | | Target OS: | Ubuntu\* 18.04 LTS |
11 | | Programming Language: | Python\* 3.5 |
12 | | Time to Complete: | 30 min |
13 |
14 | 
15 |
16 | ## What it does
17 | This application detects humans present in a predefined assembly line area. If a person enters the marked area, it raises an alert and sends it over MQTT. It is intended to demonstrate how to use computer vision to improve assembly line safety for human operators and factory workers.
18 |
19 | ## Requirements
20 |
21 | ### Hardware
22 |
23 | * 6th to 8th generation Intel® Core™ processors with Iris® Pro graphics or Intel® HD Graphics
24 |
25 | ### Software
26 |
27 | * [Ubuntu\* 18.04 LTS](http://releases.ubuntu.com/18.04/)
28 |
29 | * OpenCL™ Runtime package
30 |
31 | *Note*: We recommend using a 4.14+ kernel to use this software. Run the following command to determine your kernel version:
32 |
33 | uname -a
34 |
35 | * Intel® Distribution of OpenVINO™ toolkit 2020 R3 Release
36 |
37 | ## How it works
38 |
39 | This restricted zone notifier application uses the Inference Engine included in the Intel® Distribution of OpenVINO™ toolkit and the Intel® Deep Learning Deployment Toolkit. A trained neural network detects people within a marked assembly area; the application is designed for a machine-mounted camera system and sends an alert if at least one person is detected in the marked area. The user can select the area coordinates via command-line parameters, or, once the application has started, select the region of interest (ROI) by pressing the `c` key. This pauses the application and opens a separate window in which the user can drag the mouse from the upper-left corner of the ROI to whatever size the area needs to cover. By default the whole frame is selected. Worker safety and alert signal data are sent to a local web server using the Paho\* MQTT Python client library.
40 | The program creates two threads for concurrency:
41 |
42 | - Main thread that performs the video I/O and processes video frames using the trained neural network.
43 | - Worker thread that publishes MQTT messages (sketched below).
44 |
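The following minimal sketch condenses that producer/consumer arrangement. It is an illustration only, not the full application; the topic name, port, and update rate mirror the defaults in `application/restricted_zone_notifier.py`:

```
import json
import time
from threading import Thread

import paho.mqtt.client as mqtt

KEEP_RUNNING = True        # cleared by the main thread on shutdown
INFO = {"safe": True}      # safety state shared with the inference loop


def message_runner(client, topic, rate=1):
    """Worker thread: publish the latest safety state every `rate` seconds."""
    while KEEP_RUNNING:
        time.sleep(rate)
        client.publish(topic, payload=json.dumps({"Worker safe": INFO["safe"]}))


client = mqtt.Client()
client.connect("localhost", 1883, 60)
worker = Thread(target=message_runner, args=(client, "Restricted_zone_python"))
worker.daemon = True       # do not block process exit
worker.start()
# ... the main thread performs video I/O and inference here,
# updating INFO["safe"] after each processed frame ...
```
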
45 | 
46 | **Architectural Diagram**
47 |
48 | ## Setup
49 |
50 | ### Get the code
51 |
52 | Steps to clone the reference implementation:
53 | ```
54 | sudo apt-get update && sudo apt-get install git
55 | git clone https://github.com/intel-iot-devkit/restricted-zone-notifier-python.git
56 | ```
57 | ### Install Intel® Distribution of OpenVINO™ toolkit
58 |
59 | Refer to https://software.intel.com/en-us/articles/OpenVINO-Install-Linux for more information about how to install and setup the Intel® Distribution of OpenVINO™ toolkit.
60 |
61 | You will need the OpenCL™ Runtime package if you plan to run inference on the GPU. It is not mandatory for CPU inference.
62 |
63 | ### Other dependencies
64 | #### Mosquitto*
65 | Mosquitto is an open source message broker that implements the MQTT protocol. The MQTT protocol provides a lightweight method of carrying out messaging using a publish/subscribe model.
66 |
67 | ### Which model to use
68 | This application uses the [person-detection-retail-0013](https://docs.openvinotoolkit.org/2020.3/_models_intel_person_detection_retail_0013_description_person_detection_retail_0013.html)
69 | Intel® pre-trained model, which can be downloaded using the **model downloader**. The **model downloader** downloads the __.xml__ and __.bin__ files that will be used by the application.
70 |
71 | To install the dependencies of the RI and to download the **person-detection-retail-0013** Intel® model, run the following command:
72 |
73 | cd restricted-zone-notifier-python
74 | ./setup.sh
75 |
76 | The model will be downloaded inside the following directory:
77 |
78 | /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/
79 |
80 | ### The Config File
81 |
82 | The _resources/config.json_ file contains the paths to the videos that will be used by the application.
83 | The _config.json_ file consists of name/value pairs, where the `video` key holds the path to an input video (or a camera ID, as described below).
84 |
85 | Example of the _config.json_ file:
86 |
87 | ```
88 | {
89 |
90 |     "inputs": [
91 |         {
92 |             "video": "videos/video1.mp4"
93 |         }
94 |     ]
95 | }
96 | ```
97 |
98 | ### Which Input video to use
99 |
100 | The application works with any input video. Find sample videos for object detection [here](https://github.com/intel-iot-devkit/sample-videos/).
101 |
102 | For first use, we recommend using the [worker-zone-detection](https://github.com/intel-iot-devkit/sample-videos/blob/master/worker-zone-detection.mp4) video. The video is downloaded automatically to the `resources/` folder by `setup.sh`.
103 | For example, the _config.json_ would be:
104 |
105 |
106 | ```
107 | {
108 |
109 |     "inputs": [
110 |         {
111 |             "video": "../resources/worker-zone-detection.mp4"
112 |         }
113 |     ]
114 | }
115 | ```
116 | To use any other video, specify its path in the _config.json_ file.
117 |
118 | ### Using the Camera instead of video
119 |
120 | Replace the path/to/video in the _resources/config.json_ file with the camera ID, where the ID is taken from the video device (the number X in /dev/videoX).
121 |
122 | On Ubuntu, list all available video devices with the following command:
123 |
124 | ```
125 | ls /dev/video*
126 | ```
127 |
128 | For example, if the output of the above command is /dev/video0, then config.json would be:
129 |
130 | ```
131 | {
132 |
133 |     "inputs": [
134 |         {
135 |             "video": "0"
136 |         }
137 |     ]
138 | }
139 | ```
140 |
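For reference, this is how the application resolves each `video` entry (a condensed excerpt of the logic in `restricted_zone_notifier.py`; the config path shown assumes you run from the repository root): a purely numeric value is treated as a camera index, anything else as a path to a video file.

```
import json

import cv2

config = json.loads(open("resources/config.json").read())
for item in config["inputs"]:
    if item["video"].isdigit():
        # e.g. "0" selects the camera at /dev/video0
        input_stream = int(item["video"])
    else:
        # e.g. "../resources/worker-zone-detection.mp4"
        input_stream = item["video"]
cap = cv2.VideoCapture(input_stream)
```
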
141 | ## Setup the environment
142 | You must configure the environment to use the Intel® Distribution of OpenVINO™ toolkit one time per session by running the following command:
143 |
144 | source /opt/intel/openvino/bin/setupvars.sh
145 |
146 | __Note__: This command needs to be executed only once in the terminal where the application will be executed. If the terminal is closed, the command needs to be executed again.
147 |
148 | ## Run the application
149 |
150 | Change the current directory to the git-cloned application code location on your system:
151 |
152 | cd restricted-zone-notifier-python/application
153 |
154 | To see a list of the various options:
155 |
156 | python3 restricted_zone_notifier.py --help
157 |
158 | ### Running on the CPU
159 | A user can specify the target device to run on by using the command-line argument `-d` followed by one of the values `CPU`, `GPU`, `MYRIAD`, or `HDDL`.
160 |
161 | Though the application runs on the CPU by default, this can also be explicitly specified through the `-d CPU` command-line argument:
162 |
163 | python3 restricted_zone_notifier.py -m /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml -d CPU
164 |
165 | To run the application in sync mode, use `-f sync` as a command-line argument. By default, the application runs in async mode.
166 |
167 | You can select an area to be used as the "off-limits" area by pressing the `c` key once the program is running. A new window will open showing a still image from the video capture device. Drag the mouse from the top-left corner to cover the desired area, and once done (a blue rectangle is drawn), press `ENTER` or `SPACE` to proceed with monitoring.
168 |
169 | Once you have selected the "off-limits" area the coordinates will be displayed in the terminal window like this:
170 |
171 | Assembly Area Selection: -x=429 -y=101 -ht=619 -w=690
172 |
173 | You can run the application using those coordinates by using the `-x`, `-y`, `-ht`, and `-w` flags to select the area.
174 |
175 | For example:
176 |
177 | python3 restricted_zone_notifier.py -m /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml -x 429 -y 101 -ht 619 -w 690
178 |
179 | If you do not select or specify an area, the entire window is selected as the off-limits area by default.
180 | To run with multiple devices, use `-d MULTI:device1,device2`. For example: `-d MULTI:CPU,GPU,MYRIAD`
181 |
182 | ### Running on the GPU
183 | * To run on the integrated Intel® GPU with floating point precision 32 (FP32), use the `-d GPU` command-line argument:
184 | ```
185 | python3 restricted_zone_notifier.py -m /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml -d GPU
186 | ```
187 | **FP32**: FP32 is single-precision floating-point arithmetic that uses 32 bits to represent numbers: 1 bit for the sign, 8 bits for the exponent, and 23 bits for the fraction. For more information, [click here](https://en.wikipedia.org/wiki/Single-precision_floating-point_format)
188 |
189 | * To run on the integrated Intel® GPU with floating point precision 16 (FP16):
190 | ```
191 | python3 restricted_zone_notifier.py -m /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP16/person-detection-retail-0013.xml -d GPU
192 | ```
193 | **FP16**: FP16 is half-precision floating-point arithmetic that uses 16 bits: 1 bit for the sign, 5 bits for the exponent, and 10 bits for the fraction. For more information, [click here](https://en.wikipedia.org/wiki/Half-precision_floating-point_format)
194 |
195 | ### Running on the Intel® Neural Compute Stick
196 | To run on the Intel® Neural Compute Stick, use the `-d MYRIAD` command-line argument:
197 |
198 | python3 restricted_zone_notifier.py -m /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP16/person-detection-retail-0013.xml -d MYRIAD
199 |
200 | ### Running on the Intel® Movidius™ VPU
201 | To run on the Intel® Movidius™ VPU, use the `-d HDDL` command-line argument:
202 |
203 | python3 restricted_zone_notifier.py -m /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP16/person-detection-retail-0013.xml -d HDDL
204 |
205 | **Note:** The Intel® Movidius™ VPU can only run FP16 models. The model passed to the application through the `-m` command-line argument must be of data type FP16.
206 |
207 | ## Machine to Machine Messaging with MQTT
208 |
209 | If you wish to use an MQTT server to publish data, set the following environment variables in a terminal before running the program:
210 |
211 | export MQTT_SERVER=localhost:1883
212 | export MQTT_CLIENT_ID=cvservice
213 |
214 | Change the `MQTT_SERVER` to a value that matches the MQTT server you are connecting to.
215 |
216 | You should change the `MQTT_CLIENT_ID` to a unique value for each monitoring station, so you can track the data for individual locations. For example:
217 |
218 | export MQTT_CLIENT_ID=zone1337
219 |
220 | If you want to monitor the MQTT messages sent to your local server, and you have the `mosquitto` client utilities installed, run the following command in a new terminal while the code is executing:
221 |
222 | mosquitto_sub -h localhost -t Restricted_zone_python
223 |
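Alternatively, the same `paho-mqtt` package installed by `setup.sh` can watch the topic from Python. A minimal sketch (adjust the host and topic if you changed them):

```
import paho.mqtt.client as mqtt


def on_message(client, userdata, msg):
    # Print each worker-safety update as it arrives
    print(msg.topic, msg.payload.decode())


client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883, 60)
client.subscribe("Restricted_zone_python")
client.loop_forever()   # blocks; press Ctrl+C to stop
```
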
--------------------------------------------------------------------------------
/Jupyter/restricted_zone_notifier_jupyter.py:
--------------------------------------------------------------------------------
1 | """Restricted Zone Notifier."""
2 |
3 | """
4 | Copyright (c) 2018 Intel Corporation.
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining
7 | a copy of this software and associated documentation files (the
8 | "Software"), to deal in the Software without restriction, including
9 | without limitation the rights to use, copy, modify, merge, publish,
10 | distribute, sublicense, and/or sell copies of the Software, and to
11 | permit persons to whom the Software is furnished to do so, subject to
12 | the following conditions:
13 |
14 | The above copyright notice and this permission notice shall be
15 | included in all copies or substantial portions of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 |
25 | """
26 |
27 | import os
28 | import sys
29 | import json
30 | import time
31 | import socket
32 | import cv2
33 |
34 | import logging as log
35 | import paho.mqtt.client as mqtt
36 |
37 | from threading import Thread
38 | from collections import namedtuple
39 | from argparse import ArgumentParser
40 | from inference import Network
41 |
42 | # assemblyinfo holds the shared assembly-line state that is published
43 | # over MQTT: 'safe' flags whether the marked zone is clear of people,
44 | # and 'alert' flags whether a notification should be raised. It is
45 | # updated by ssd_out() and read by the message_runner() thread.
46 | MyStruct = namedtuple("assemblyinfo", "safe, alert")
47 | INFO = MyStruct(True, False)
48 |
49 |
50 | # MQTT server environment variables
51 | HOSTNAME = socket.gethostname()
52 | IPADDRESS = socket.gethostbyname(HOSTNAME)
53 | TOPIC = "Restricted_zone_python"
54 | MQTT_HOST = IPADDRESS
55 | MQTT_PORT = 1883
56 | MQTT_KEEPALIVE_INTERVAL = 60
57 |
58 | # Global variables
59 | accepted_devices = ['CPU', 'GPU', 'MYRIAD', 'HETERO:FPGA,CPU', 'HDDL']
60 | TARGET_DEVICE = 'CPU'
61 | is_async_mode = True
62 | CONFIG_FILE = '../resources/config.json'
63 |
64 | # Flag to control background thread
65 | KEEP_RUNNING = True
66 |
67 | DELAY = 5
68 |
69 |
70 | def ssd_out(res, initial_wh, selected_region):
71 | """
72 | Parse SSD output.
73 |
74 | :param res: Detection results
75 | :param initial_wh: Initial width and height of the frame,
76 | used to rescale the normalized detection coordinates
77 | :param selected_region: Selected region coordinates
78 | :return: None
79 | """
80 |
81 | global INFO
82 | person = []
83 | INFO = INFO._replace(safe=True)
84 | INFO = INFO._replace(alert=False)
85 |
86 | for obj in res[0][0]:
87 | # Consider only detections with probability above the specified threshold
88 | if obj[2] > prob_threshold:
89 | xmin = int(obj[3] * initial_wh[0])
90 | ymin = int(obj[4] * initial_wh[1])
91 | xmax = int(obj[5] * initial_wh[0])
92 | ymax = int(obj[6] * initial_wh[1])
93 | person.append([xmin, ymin, xmax, ymax])
94 |
95 | for p in person:
96 | # area_of_person gives area of the detected person
97 | area_of_person = (p[2] - p[0]) * (p[3] - p[1])
98 | x_max = max(p[0], selected_region[0])
99 | x_min = min(p[2], selected_region[0] + selected_region[2])
100 | y_min = min(p[3], selected_region[1] + selected_region[3])
101 | y_max = max(p[1], selected_region[1])
102 | point_x = x_min - x_max
103 | point_y = y_min - y_max
104 | # area_of_intersection gives area of intersection of the
105 | # detected person and the selected area
106 | area_of_intersection = point_x * point_y
107 | if point_x < 0 or point_y < 0:
108 | continue
109 | else:
110 | if area_of_person > area_of_intersection:
111 | # assembly line area flags
112 | INFO = INFO._replace(safe=True)
113 | INFO = INFO._replace(alert=False)
114 |
115 | else:
116 | # assembly line area flags
117 | INFO = INFO._replace(safe=False)
118 | INFO = INFO._replace(alert=True)
119 |
120 |
121 | def message_runner():
122 | """
123 | Publish worker status to MQTT topic.
124 | Pauses for rate second(s) between updates
125 |
126 | :return: None
127 | """
128 | while KEEP_RUNNING:
129 | s = json.dumps({"Worker safe": INFO.safe, "Alert": INFO.alert})
130 | time.sleep(rate)
131 | CLIENT.publish(TOPIC, payload=s)
132 |
133 |
134 | def main():
135 | """
136 | Load the network and parse the output.
137 |
138 | :return: None
139 | """
140 | global CLIENT
141 | global KEEP_RUNNING
142 | global DELAY
143 | global SIG_CAUGHT
144 | global prob_threshold
145 | global rate
146 | global TARGET_DEVICE
147 | global is_async_mode
148 |
149 | CLIENT = mqtt.Client()
150 | CLIENT.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
151 | CLIENT.subscribe(TOPIC)
152 |
153 | try:
154 | pointx = int(os.environ['POINTX'])
155 | pointy = int(os.environ['POINTY'])
156 | width = int(os.environ['WIDTH'])
157 | height = int(os.environ['HEIGHT'])
158 | except KeyError:
159 | pointx = 0
160 | pointy = 0
161 | width = 0
162 | height = 0
163 | try:
164 | # Number of seconds between data updates to MQTT server
165 | rate = float(os.environ['RATE'])
166 | except KeyError:
167 | rate = 1
168 | try:
169 | # Probability threshold for detections filtering
170 | prob_threshold = float(os.environ['PROB_THRESHOLD'])
171 | except KeyError:
172 | prob_threshold = 0.7
173 |
174 | if 'DEVICE' in os.environ.keys():
175 | TARGET_DEVICE = os.environ['DEVICE']
176 |
177 | if 'MULTI' not in TARGET_DEVICE and TARGET_DEVICE not in accepted_devices:
178 | print("Unsupported device: " + TARGET_DEVICE)
179 | sys.exit(2)
180 | elif 'MULTI' in TARGET_DEVICE:
181 | target_devices = TARGET_DEVICE.split(':')[1].split(',')
182 | for multi_device in target_devices:
183 | if multi_device not in accepted_devices:
184 | print("Unsupported device: " + TARGET_DEVICE)
185 | sys.exit(2)
186 |
187 | cpu_extension = os.environ['CPU_EXTENSION'] if 'CPU_EXTENSION' in os.environ.keys() else None
188 |
189 | model = os.environ["MODEL"]
190 | if 'FLAG' in os.environ.keys():
191 | async_mode = os.environ['FLAG']
192 | if async_mode == "sync":
193 | is_async_mode = False
194 | else:
195 | is_async_mode = True
196 |
197 | log.basicConfig(format="[ %(levelname)s ] %(message)s",
198 | level=log.INFO, stream=sys.stdout)
199 | logger = log.getLogger()
200 | render_time = 0
201 | roi_x = pointx
202 | roi_y = pointy
203 | roi_w = width
204 | roi_h = height
205 |
206 | assert os.path.isfile(CONFIG_FILE), "{} file doesn't exist".format(CONFIG_FILE)
207 | config = json.loads(open(CONFIG_FILE).read())
208 |
209 | for idx, item in enumerate(config['inputs']):
210 | if item['video'].isdigit():
211 | input_stream = int(item['video'])
212 | else:
213 | input_stream = item['video']
214 |
215 | cap = cv2.VideoCapture(input_stream)
216 |
217 | if not cap.isOpened():
218 | logger.error("ERROR! Unable to open video source")
219 | sys.exit(1)
220 |
221 | # Init inference request IDs
222 | cur_request_id = 0
223 | next_request_id = 1
224 |
225 | # Initialise the class
226 | infer_network = Network()
227 | # Load the network to IE plugin to get shape of input layer
228 | n, c, h, w = infer_network.load_model(model, TARGET_DEVICE, 1, 1, 2, cpu_extension)[1]
229 |
230 | message_thread = Thread(target=message_runner, args=())
231 | message_thread.daemon = True
232 | message_thread.start()
233 |
234 | if is_async_mode:
235 | print("Application running in async mode...")
236 | else:
237 | print("Application running in sync mode...")
238 |
239 | ret, frame = cap.read()
240 | while ret:
241 |
242 | ret, next_frame = cap.read()
243 | if not ret:
244 | KEEP_RUNNING = False
245 | break
246 |
247 | initial_wh = [cap.get(3), cap.get(4)]
248 |
249 | if next_frame is None:
250 | KEEP_RUNNING = False
251 | log.error("ERROR! blank FRAME grabbed")
252 | break
253 |
254 | # If either default values or negative numbers are given,
255 | # then we will default to start of the FRAME
256 | if roi_x <= 0 or roi_y <= 0:
257 | roi_x = 0
258 | roi_y = 0
259 | if roi_w <= 0:
260 | roi_w = next_frame.shape[1]
261 | if roi_h <= 0:
262 | roi_h = next_frame.shape[0]
263 | key_pressed = cv2.waitKey(1)
264 |
265 | # 'c' key pressed
266 | if key_pressed == 99:
267 | # Give the operator a chance to change the area
268 | # Select the rectangle from the upper-left corner (fromCenter=False)
269 | ROI = cv2.selectROI("Assembly Selection", frame, True, False)
270 | print("Assembly Area Selection: -x = {}, -y = {}, -w = {},"
271 | " -h = {}".format(ROI[0], ROI[1], ROI[2], ROI[3]))
272 | roi_x = ROI[0]
273 | roi_y = ROI[1]
274 | roi_w = ROI[2]
275 | roi_h = ROI[3]
276 | cv2.destroyAllWindows()
277 |
278 | cv2.rectangle(frame, (roi_x, roi_y),
279 | (roi_x + roi_w, roi_y + roi_h), (0, 0, 255), 2)
280 | selected_region = [roi_x, roi_y, roi_w, roi_h]
281 |
282 | in_frame_fd = cv2.resize(next_frame, (w, h))
283 | # Change data layout from HWC to CHW
284 | in_frame_fd = in_frame_fd.transpose((2, 0, 1))
285 | in_frame_fd = in_frame_fd.reshape((n, c, h, w))
286 |
287 | # Start asynchronous inference for specified request.
288 | inf_start = time.time()
289 | if is_async_mode:
290 | # Async enabled and only one video capture
291 | infer_network.exec_net(next_request_id, in_frame_fd)
292 | else:
293 | # Async disabled
294 | infer_network.exec_net(cur_request_id, in_frame_fd)
295 | # Wait for the result
296 | infer_network.wait(cur_request_id)
297 | det_time = time.time() - inf_start
298 | # Results of the output layer of the network
299 | res = infer_network.get_output(cur_request_id)
300 | # Parse SSD output
301 | ssd_out(res, initial_wh, selected_region)
302 |
303 | # Draw performance stats
304 | inf_time_message = "Inference time: N/A for async mode" if is_async_mode else \
305 | "Inference time: {:.3f} ms".format(det_time * 1000)
306 | render_time_message = "OpenCV rendering time: {:.3f} ms". \
307 | format(render_time * 1000)
308 |
309 | if not INFO.safe:
310 | warning = "HUMAN IN ASSEMBLY AREA: PAUSE THE MACHINE!"
311 | cv2.putText(frame, warning, (15, 100), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 2)
312 |
313 | log_message = "Async mode is on." if is_async_mode else \
314 | "Async mode is off."
315 | cv2.putText(frame, log_message, (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
316 | cv2.putText(frame, inf_time_message, (15, 35), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
317 | cv2.putText(frame, render_time_message, (15, 55), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
318 | cv2.putText(frame, "Worker Safe: {}".format(INFO.safe), (15, 75), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
319 |
320 | render_start = time.time()
321 | cv2.imshow("Restricted Zone Notifier", frame)
322 | render_end = time.time()
323 | render_time = render_end - render_start
324 |
325 | frame = next_frame
326 |
327 | if key_pressed == 27:
328 | print("Attempting to stop background threads")
329 | KEEP_RUNNING = False
330 | break
331 | # Tab key pressed
332 | if key_pressed == 9:
333 | is_async_mode = not is_async_mode
334 | print("Switched to {} mode".format("async" if is_async_mode else "sync"))
335 |
336 | if is_async_mode:
337 | # Swap infer request IDs
338 | cur_request_id, next_request_id = next_request_id, cur_request_id
339 |
340 | infer_network.clean()
341 | message_thread.join()
342 | cap.release()
343 | cv2.destroyAllWindows()
344 | CLIENT.disconnect()
345 |
346 |
347 | if __name__ == '__main__':
348 | main()
349 |
350 |
351 |
--------------------------------------------------------------------------------
/application/restricted_zone_notifier.py:
--------------------------------------------------------------------------------
1 | """Restricted Zone Notifier."""
2 |
3 | """
4 | Copyright (c) 2018 Intel Corporation.
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining
7 | a copy of this software and associated documentation files (the
8 | "Software"), to deal in the Software without restriction, including
9 | without limitation the rights to use, copy, modify, merge, publish,
10 | distribute, sublicense, and/or sell copies of the Software, and to
11 | permit persons to whom the Software is furnished to do so, subject to
12 | the following conditions:
13 |
14 | The above copyright notice and this permission notice shall be
15 | included in all copies or substantial portions of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 |
25 | """
26 |
27 | import os
28 | import sys
29 | import json
30 | import time
31 | import socket
32 | import cv2
33 |
34 | import logging as log
35 | import paho.mqtt.client as mqtt
36 |
37 | from threading import Thread
38 | from collections import namedtuple
39 | from argparse import ArgumentParser
40 | from inference import Network
41 |
42 | # Assemblyinfo contains information about assembly area
43 | MyStruct = namedtuple("assemblyinfo", "safe")
44 | INFO = MyStruct(True)
45 |
46 | # MQTT server environment variables
47 | HOSTNAME = socket.gethostname()
48 | IPADDRESS = socket.gethostbyname(HOSTNAME)
49 | TOPIC = "Restricted_zone_python"
50 | MQTT_HOST = IPADDRESS
51 | MQTT_PORT = 1883
52 | MQTT_KEEPALIVE_INTERVAL = 60
53 |
54 | # Global variables
55 | TARGET_DEVICE = 'CPU'
56 | accepted_devices = ['CPU', 'GPU', 'MYRIAD', 'HETERO:FPGA,CPU', 'HDDL']
57 | is_async_mode = True
58 | CONFIG_FILE = '../resources/config.json'
59 |
60 | # Flag to control background thread
61 | KEEP_RUNNING = True
62 |
63 | DELAY = 5
64 |
65 |
66 | def build_argparser():
67 | """
68 | Parse command line arguments.
69 |
70 | :return: Command line arguments
71 | """
72 | parser = ArgumentParser()
73 | parser.add_argument("-m", "--model", required=True, type=str,
74 | help="Path to an .xml file with a trained model.")
75 | parser.add_argument("-l", "--cpu_extension", type=str, default=None,
76 | help="MKLDNN (CPU)-targeted custom layers. Absolute "
77 | "path to a shared library with the kernels impl.")
78 | parser.add_argument("-d", "--device", default="CPU", type=str,
79 | help="Specify the target device to infer on; "
80 | "CPU, GPU, FPGA, HDDL, MYRIAD is acceptable. To run with multiple devices use "
81 | "MULTI:,,etc. Application "
82 | "will look for a suitable plugin for device specified"
83 | "(CPU by default)")
84 | parser.add_argument("-th", "--prob_threshold", default=0.5, type=float,
85 | help="Probability threshold for detections filtering")
86 | parser.add_argument('-x', '--pointx', default=0, type=int,
87 | help="X coordinate of the top left point of assembly"
88 | " area on camera feed.")
89 | parser.add_argument('-y', '--pointy', default=0, type=int,
90 | help="Y coordinate of the top left point of assembly"
91 | " area on camera feed.")
92 | parser.add_argument('-w', '--width', default=0, type=int,
93 | help="Width of the assembly area in pixels.")
94 | parser.add_argument('-ht', '--height', default=0, type=int,
95 | help="Height of the assembly area in pixels.")
96 | parser.add_argument('-r', '--rate', default=1, type=int,
97 | help="Number of seconds between data updates "
98 | "to MQTT server")
99 | parser.add_argument("-f", "--flag", help="sync or async", default="async", type=str)
100 |
101 | global TARGET_DEVICE, is_async_mode
102 | args = parser.parse_args()
103 | if args.device:
104 | TARGET_DEVICE = args.device
105 | if args.flag == "sync":
106 | is_async_mode = False
107 | else:
108 | is_async_mode = True
109 | return parser
110 |
111 |
112 | def check_args():
113 | # ArgumentParser checks the device
114 |
115 | global TARGET_DEVICE
116 | if 'MULTI' not in TARGET_DEVICE and TARGET_DEVICE not in accepted_devices:
117 | print("Unsupported device: " + TARGET_DEVICE)
118 | sys.exit(2)
119 | elif 'MULTI' in TARGET_DEVICE:
120 | target_devices = TARGET_DEVICE.split(':')[1].split(',')
121 | for multi_device in target_devices:
122 | if multi_device not in accepted_devices:
123 | print("Unsupported device: " + TARGET_DEVICE)
124 | sys.exit(2)
125 |
126 |
127 | def ssd_out(res, args, initial_wh, selected_region):
128 | """
129 | Parse SSD output.
130 |
131 | :param res: Detection results
132 | :param args: Parsed arguments
133 | :param initial_wh: Initial width and height of the frame
134 | :param selected_region: Selected region coordinates
135 | :return: None
136 | """
137 | global INFO
138 | person = []
139 | INFO = INFO._replace(safe=True)
140 |
141 | for obj in res[0][0]:
142 | # Consider only detections with probability above the specified threshold
143 | if obj[2] > args.prob_threshold:
144 | xmin = int(obj[3] * initial_wh[0])
145 | ymin = int(obj[4] * initial_wh[1])
146 | xmax = int(obj[5] * initial_wh[0])
147 | ymax = int(obj[6] * initial_wh[1])
148 | person.append([xmin, ymin, xmax, ymax])
149 |
150 | for p in person:
151 | # area_of_person gives area of the detected person
152 | area_of_person = (p[2] - p[0]) * (p[3] - p[1])
153 | x_max = max(p[0], selected_region[0])
154 | x_min = min(p[2], selected_region[0] + selected_region[2])
155 | y_min = min(p[3], selected_region[1] + selected_region[3])
156 | y_max = max(p[1], selected_region[1])
157 | point_x = x_min - x_max
158 | point_y = y_min - y_max
159 | # area_of_intersection gives area of intersection of the
160 | # detected person and the selected area
161 | area_of_intersection = point_x * point_y
162 | if point_x < 0 or point_y < 0:
163 | continue
164 | else:
165 | if area_of_person > area_of_intersection:
166 | # assembly line area flags
167 | INFO = INFO._replace(safe=True)
168 | else:
169 | # assembly line area flags
170 | INFO = INFO._replace(safe=False)
171 |
172 |
173 | def message_runner():
174 | """
175 | Publish worker status to MQTT topic.
176 | Pauses for rate second(s) between updates
177 |
178 | :return: None
179 | """
180 | while KEEP_RUNNING:
181 | time.sleep(1)
182 | CLIENT.publish(TOPIC, payload=json.dumps({"Worker safe": INFO.safe,
183 | "Alert": not INFO.safe}))
184 |
185 |
186 | def main():
187 | """
188 | Load the network and parse the output.
189 |
190 | :return: None
191 | """
192 | global DELAY
193 | global CLIENT
194 | global SIG_CAUGHT
195 | global KEEP_RUNNING
196 | global TARGET_DEVICE
197 | global is_async_mode
198 | CLIENT = mqtt.Client()
199 | CLIENT.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
200 | CLIENT.subscribe(TOPIC)
201 | log.basicConfig(format="[ %(levelname)s ] %(message)s",
202 | level=log.INFO, stream=sys.stdout)
203 | args = build_argparser().parse_args()
204 | logger = log.getLogger()
205 | render_time = 0
206 | roi_x = args.pointx
207 | roi_y = args.pointy
208 | roi_w = args.width
209 | roi_h = args.height
210 | check_args()
211 |
212 | assert os.path.isfile(CONFIG_FILE), "{} file doesn't exist".format(CONFIG_FILE)
213 | config = json.loads(open(CONFIG_FILE).read())
214 |
215 | for idx, item in enumerate(config['inputs']):
216 | if item['video'].isdigit():
217 | input_stream = int(item['video'])
218 | else:
219 | input_stream = item['video']
220 |
221 | cap = cv2.VideoCapture(input_stream)
222 | if not cap.isOpened():
223 | logger.error("ERROR! Unable to open video source")
224 | sys.exit(1)
225 |
226 | # Init inference request IDs
227 | cur_request_id = 0
228 | next_request_id = 1
229 |
230 | # Initialise the class
231 | infer_network = Network()
232 | # Load the network to IE plugin to get shape of input layer
233 | n, c, h, w = infer_network.load_model(args.model, TARGET_DEVICE, 1, 1, 2, args.cpu_extension)[1]
234 |
235 | message_thread = Thread(target=message_runner, args=())
236 | message_thread.daemon = True
237 | message_thread.start()
238 |
239 | if is_async_mode:
240 | print("Application running in async mode...")
241 | else:
242 | print("Application running in sync mode...")
243 |
244 | ret, frame = cap.read()
245 | while ret:
246 |
247 | ret, next_frame = cap.read()
248 | if not ret:
249 | KEEP_RUNNING = False
250 | break
251 |
252 | initial_wh = [cap.get(3), cap.get(4)]
253 |
254 | if next_frame is None:
255 | KEEP_RUNNING = False
256 | log.error("ERROR! blank FRAME grabbed")
257 | break
258 |
259 | # If either default values or negative numbers are given,
260 | # then we will default to start of the FRAME
261 | if roi_x <= 0 or roi_y <= 0:
262 | roi_x = 0
263 | roi_y = 0
264 | if roi_w <= 0:
265 | roi_w = next_frame.shape[1]
266 | if roi_h <= 0:
267 | roi_h = next_frame.shape[0]
268 | key_pressed = cv2.waitKey(1)
269 |
270 | # 'c' key pressed
271 | if key_pressed == 99:
272 | # Give the operator a chance to change the area
273 | # Select the rectangle from the upper-left corner (fromCenter=False)
274 | ROI = cv2.selectROI("Assembly Selection", frame, True, False)
275 | print("Assembly Area Selection: -x = {}, -y = {}, -w = {},"
276 | " -h = {}".format(ROI[0], ROI[1], ROI[2], ROI[3]))
277 | roi_x = ROI[0]
278 | roi_y = ROI[1]
279 | roi_w = ROI[2]
280 | roi_h = ROI[3]
281 | cv2.destroyAllWindows()
282 |
283 | cv2.rectangle(frame, (roi_x, roi_y),
284 | (roi_x + roi_w, roi_y + roi_h), (0, 0, 255), 2)
285 | selected_region = [roi_x, roi_y, roi_w, roi_h]
286 |
287 | in_frame_fd = cv2.resize(next_frame, (w, h))
288 | # Change data layout from HWC to CHW
289 | in_frame_fd = in_frame_fd.transpose((2, 0, 1))
290 | in_frame_fd = in_frame_fd.reshape((n, c, h, w))
291 |
292 | # Start asynchronous inference for specified request.
293 | inf_start = time.time()
294 | if is_async_mode:
295 | # Async enabled and only one video capture
296 | infer_network.exec_net(next_request_id, in_frame_fd)
297 | else:
298 | # Async disabled
299 | infer_network.exec_net(cur_request_id, in_frame_fd)
300 | # Wait for the result
301 | infer_network.wait(cur_request_id)
302 | det_time = time.time() - inf_start
303 | # Results of the output layer of the network
304 | res = infer_network.get_output(cur_request_id)
305 | # Parse SSD output
306 | ssd_out(res, args, initial_wh, selected_region)
307 |
308 | # Draw performance stats
309 | inf_time_message = "Inference time: N/A for async mode" if is_async_mode else \
310 | "Inference time: {:.3f} ms".format(det_time * 1000)
311 | render_time_message = "OpenCV rendering time: {:.3f} ms". \
312 | format(render_time * 1000)
313 |
314 | if not INFO.safe:
315 | warning = "HUMAN IN ASSEMBLY AREA: PAUSE THE MACHINE!"
316 | cv2.putText(frame, warning, (15, 100), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 2)
317 |
318 | log_message = "Async mode is on." if is_async_mode else \
319 | "Async mode is off."
320 | cv2.putText(frame, log_message, (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
321 | cv2.putText(frame, inf_time_message, (15, 35), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
322 | cv2.putText(frame, render_time_message, (15, 55), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
323 | cv2.putText(frame, "Worker Safe: {}".format(INFO.safe), (15, 75), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
324 |
325 | render_start = time.time()
326 | cv2.imshow("Restricted Zone Notifier", frame)
327 | render_end = time.time()
328 | render_time = render_end - render_start
329 |
330 | frame = next_frame
331 |
332 | if key_pressed == 27:
333 | print("Attempting to stop background threads")
334 | KEEP_RUNNING = False
335 | break
336 | # Tab key pressed
337 | if key_pressed == 9:
338 | is_async_mode = not is_async_mode
339 | print("Switched to {} mode".format("async" if is_async_mode else "sync"))
340 |
341 | if is_async_mode:
342 | # Swap infer request IDs
343 | cur_request_id, next_request_id = next_request_id, cur_request_id
344 |
345 | infer_network.clean()
346 | message_thread.join()
347 | cap.release()
348 | cv2.destroyAllWindows()
349 | CLIENT.disconnect()
350 |
351 |
352 | if __name__ == '__main__':
353 | main()
354 |
355 |
356 |
357 |
358 |
--------------------------------------------------------------------------------
/Jupyter/README.md:
--------------------------------------------------------------------------------
1 | # Restricted Zone Notifier
2 |
3 | | Details | |
4 | |-----------------------|---------------|
5 | | Target OS: | Ubuntu\* 18.04 LTS |
6 | | Programming Language: | Python\* 3.5 |
7 | | Time to Complete: | 30 min |
8 |
9 | 
10 |
11 | ## What it does
12 | This application detects humans present in a predefined assembly line area. If a person enters the marked area, it raises an alert and sends it over MQTT. It is intended to demonstrate how to use computer vision to improve assembly line safety for human operators and factory workers.
13 |
14 | ## Requirements
15 |
16 | ### Hardware
17 |
18 | * 6th to 8th generation Intel® Core™ processors with Iris® Pro graphics or Intel® HD Graphics
19 |
20 | ### Software
21 |
22 | * [Ubuntu\* 18.04 LTS](http://releases.ubuntu.com/18.04/)
23 |
24 | * OpenCL™ Runtime package
25 |
26 | *Note*: We recommend using a 4.14+ kernel to use this software. Run the following command to determine your kernel version:
27 |
28 | uname -a
29 |
30 | * Intel® Distribution of OpenVINO™ toolkit 2020 R3 Release
31 | * Jupyter* Notebook v5.7.0
32 |
33 | ## How it works
34 |
35 | This restricted zone notifier application uses the Inference Engine included in the Intel® Distribution of OpenVINO™ toolkit and the Intel® Deep Learning Deployment Toolkit. A trained neural network detects people within a marked assembly area; the application is designed for a machine-mounted camera system and sends an alert if at least one person is detected in the marked area. The user can select the area coordinates via command-line parameters, or, once the application has started, select the region of interest (ROI) by pressing the `c` key. This pauses the application and opens a separate window in which the user can drag the mouse from the upper-left corner of the ROI to whatever size the area needs to cover. By default the whole frame is selected. Worker safety and alert signal data are sent to a local web server using the Paho\* MQTT Python client library.
36 | The program creates two threads for concurrency:
37 |
38 | - Main thread that performs the video I/O and processes video frames using the trained neural network.
39 | - Worker thread that publishes MQTT messages.
40 |
41 | 
42 | **Architectural Diagram**
43 |
44 | ## Setup
45 |
46 | ### Get the code
47 |
48 | Steps to clone the reference implementation:
49 | ```
50 | sudo apt-get update && sudo apt-get install git
51 | git clone https://github.com/intel-iot-devkit/restricted-zone-notifier-python.git
52 | ```
53 | ### Install Intel® Distribution of OpenVINO™ toolkit
54 |
55 | Refer to https://software.intel.com/en-us/articles/OpenVINO-Install-Linux for more information about how to install and set up the Intel® Distribution of OpenVINO™ toolkit.
56 |
57 | You will need the OpenCL™ Runtime package if you plan to run inference on the GPU. It is not mandatory for CPU inference.
58 |
59 | ### Other dependencies
60 | #### Mosquitto*
61 | Mosquitto is an open source message broker that implements the MQTT protocol. The MQTT protocol provides a lightweight method of carrying out messaging using a publish/subscribe model.
62 |
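Once Mosquitto is installed, you can exercise the publish/subscribe model directly with its client utilities (the topic name here is hypothetical). Subscribe in one terminal:

    mosquitto_sub -h localhost -t test/topic

and publish from a second terminal:

    mosquitto_pub -h localhost -t test/topic -m "hello"
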
63 | ### Which model to use
64 | This application uses the [person-detection-retail-0013](https://docs.openvinotoolkit.org/2020.3/_models_intel_person_detection_retail_0013_description_person_detection_retail_0013.html)
65 | Intel® pre-trained model, which can be accessed using the **model downloader**. The **model downloader** downloads the __.xml__ and __.bin__ files that will be used by the application.
66 |
67 | To install the dependencies of the RI and to download the **person-detection-retail-0013** Intel® model, run the following command:
68 |
69 | cd
70 | ./setup.sh
71 |
72 | The model will be downloaded inside the following directory:
73 |
74 | /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/
75 |
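If you prefer to fetch the model manually instead of running `setup.sh`, the Model Downloader can be invoked directly. A hypothetical equivalent, assuming the 2020 R3 directory layout shown above:

    sudo python3 /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py --name person-detection-retail-0013
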
76 | ### The Config File
77 |
78 | The _resources/config.json_ file contains the path to the videos that will be used by the application.
79 | The _config.json_ file consists of name/value pairs, where the `video` key specifies the path to an input video (or a camera ID, as described below).
80 |
81 | Example of the _config.json_ file:
82 |
83 | ```
84 | {
85 |
86 | "inputs": [
87 | {
88 | "video": "videos/video1.mp4"
89 | }
90 | ]
91 | }
92 | ```
93 |
94 | ### Which Input video to use
95 |
96 | The application works with any input video. Find sample videos for object detection [here](https://github.com/intel-iot-devkit/sample-videos/).
97 |
98 | For first use, we recommend the [worker-zone-detection](https://github.com/intel-iot-devkit/sample-videos/blob/master/worker-zone-detection.mp4) video. The video is automatically downloaded to the `resources/` folder.
99 | For example, the _config.json_ would be:
100 | 
101 |
102 | ```
103 | {
104 |
105 | "inputs": [
106 | {
107 | "video": "sample-videos/worker-zone-detection.mp4"
108 | }
109 | ]
110 | }
111 | ```
112 | To use any other video, specify its path in the _config.json_ file.
113 |
114 | ### Using the Camera instead of video
115 |
116 | Replace the path/to/video in the _resources/config.json_ file with the camera ID, where the ID is taken from the video device (the number X in /dev/videoX).
117 |
118 | On Ubuntu, list all available video devices with the following command:
119 |
120 | ```
121 | ls /dev/video*
122 | ```
123 |
124 | For example, if the output of the above command is /dev/video0, then config.json would be:
125 |
126 | ```
127 | {
128 |
129 | "inputs": [
130 | {
131 | "video": "0"
132 | }
133 | ]
134 | }
135 | ```
136 |
137 | ## Setup the environment
138 | You must configure the environment to use the Intel® Distribution of OpenVINO™ toolkit one time per session by running the following command:
139 |
140 | source /opt/intel/openvino/bin/setupvars.sh
141 |
142 | __Note__: This command needs to be executed only once in the terminal where the application will be executed. If the terminal is closed, the command needs to be executed again.
143 |
144 | ## Run the application on Jupyter*
145 |
146 | * Go to the _Jupyter_ directory inside the cloned _restricted-zone-notifier-python_ repository and open the Jupyter notebook by running the following commands:
147 | 
148 |       cd restricted-zone-notifier-python/Jupyter
149 | 
150 |       jupyter notebook
151 |
182 | **Follow the steps to run the code on Jupyter:**
183 |
184 | 
185 |
186 | 1. Click on the **New** button on the right side of the Jupyter window.
187 | 
188 | 2. Click on the **Python 3** option from the drop-down list.
189 | 
190 | 3. In the first cell, type **import os** and press **Shift+Enter**.
191 | 
192 | 4. Export the below environment variables in the second cell and press **Shift+Enter**.
193 |
194 | %env DEVICE = CPU
195 | %env MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml
196 |
197 | 5. The user can set the probability threshold for detections (PROB_THRESHOLD),
198 | the number of seconds between data updates to the MQTT server (RATE), and the "off-limits" area coordinates (POINTX, POINTY, WIDTH, HEIGHT).
199 | Export these environment variables as shown below if required; otherwise skip this step and the default values are used.
200 |
201 | %env PROB_THRESHOLD = 0.4
202 | %env RATE = 1.00
203 | %env POINTX = 429
204 | %env POINTY = 101
205 | %env WIDTH = 619
206 | %env HEIGHT = 690
207 |
208 | To run the application in sync mode, export the environment variable **%env FLAG = sync**. By default, the application runs in async mode.
209 |
210 | 6. Copy the code from **restricted_zone_notifier_jupyter.py** and paste it in the next cell and press **Shift+Enter**.
211 |
212 | 7. Once the video runs, the user can select an area to be used as the "off-limits" area by pressing the `c` key. A new window will open showing a still image from the video capture device. Drag the mouse from the top-left corner to cover the desired area and, once done (a blue rectangle is drawn), press `ENTER` or `SPACE` to proceed with monitoring. A minimal sketch of this ROI selection is shown after these steps.
213 |
214 | 8. Alternatively, code can be run in the following way.
215 |
216 | i. Click on the **restricted_zone_notifier_jupyter.ipynb** file in the Jupyter notebook window.
217 |
218 | ii. Click on the **Kernel** menu and then select **Restart & Run All** from the drop-down list.
219 | 
220 | iii. When prompted, click on **Restart and Run All Cells** to confirm.
221 |
222 | 
223 |
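The ROI selection in step 7 uses OpenCV's `selectROI`. A minimal standalone sketch (the image file name is hypothetical):

```
import cv2

frame = cv2.imread("still_frame.png")  # hypothetical still image from the capture
# Arguments: window name, image, showCrosshair, fromCenter
x, y, w, h = cv2.selectROI("Assembly Selection", frame, True, False)
cv2.destroyAllWindows()
print("off-limits area:", x, y, w, h)
```
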
224 | **NOTE:**
225 |
226 | 1. To run the application on **GPU**:
227 | * With the floating point precision 32 (FP32), change the **%env DEVICE = CPU** to **%env DEVICE = GPU**
228 | **FP32**: FP32 is single-precision floating-point arithmetic that uses 32 bits to represent numbers: 1 bit for the sign, 8 bits for the exponent and 23 bits for the fraction. For more information, [click here](https://en.wikipedia.org/wiki/Single-precision_floating-point_format)
229 |
230 | * With the floating point precision 16 (FP16), change the environment variables as given below:
231 |
232 | %env DEVICE = GPU
233 | %env MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP16/person-detection-retail-0013.xml
234 | **FP16**: FP16 is half-precision floating-point arithmetic that uses 16 bits: 1 bit for the sign, 5 bits for the exponent and 10 bits for the fraction. For more information, [click here](https://en.wikipedia.org/wiki/Half-precision_floating-point_format). A short precision demo follows these notes.
235 |
236 |
237 | 2. To run the application on **Intel® Neural Compute Stick**:
238 | * Change the **%env DEVICE = CPU** to **%env DEVICE = MYRIAD**
239 | * The Intel® Neural Compute Stick can only run FP16 models. Hence change the environment variable for the model as shown below.
240 |
241 | %env MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP16/person-detection-retail-0013.xml
242 |
243 | 3. To run the application on **Intel® Movidius™ VPU**:
244 | * Change the **%env DEVICE = CPU** to **%env DEVICE = HDDL**
245 | * The Intel® Movidius™ VPU can only run FP16 models. Hence change the environment variable for the model as shown below.
246 |
247 | %env MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP16/person-detection-retail-0013.xml
248 |
249 |
256 |
257 | 5. **%env RATE** should always be a float value (e.g., 0.02, 1.00).
258 |
259 | 6. To run the application on multiple devices:
260 | For example:
261 | * Change the **%env DEVICE = CPU** to **%env DEVICE = MULTI:CPU,GPU,MYRIAD**
262 | * With the **floating point precision 16 (FP16)**, change the path of the model in the environment variable **MODEL** as given below:
263 | %env MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP16/person-detection-retail-0013.xml
264 |
265 |
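As a quick illustration of the FP32 vs. FP16 precision trade-off mentioned in the notes above, here is a minimal demo (it assumes NumPy is installed and is not part of the application):

```
import numpy as np

# FP32: 23 fraction bits, roughly 7 significant decimal digits
print(np.float32(1.0) / np.float32(3.0))   # 0.33333334

# FP16: 10 fraction bits, roughly 3 significant decimal digits
print(np.float16(1.0) / np.float16(3.0))   # 0.3333
```
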
266 | ## Machine to Machine Messaging with MQTT
267 |
268 | If you wish to use an MQTT server to publish data, you should set the following environment variables in a terminal before opening the Jupyter notebook:
269 |
270 | export MQTT_SERVER=localhost:1883
271 | export MQTT_CLIENT_ID=cvservice
272 |
273 | Change the `MQTT_SERVER` to a value that matches the MQTT server you are connecting to.
274 |
275 | You should change the `MQTT_CLIENT_ID` to a unique value for each monitoring station, so that you can track the data for individual locations. For example:
276 |
277 | export MQTT_CLIENT_ID=zone1337
278 |
279 | If you want to monitor the MQTT messages sent to your local server, and you have the `mosquitto` client utilities installed, you can run the following command in a new terminal while executing the code:
280 |
281 | mosquitto_sub -h localhost -t Restricted_zone_python
282 |
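If the `mosquitto` client utilities are not installed, a minimal paho-mqtt subscriber achieves the same; this is a sketch that assumes the broker runs on localhost:1883:

```
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # Print every worker-status message published by the application
    print(msg.topic, msg.payload.decode())

client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883, 60)
client.subscribe("Restricted_zone_python")
client.loop_forever()  # press Ctrl+C to stop
```
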
--------------------------------------------------------------------------------
/Jupyter/restricted_zone_notifier_jupyter.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import os"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "%env DEVICE = CPU\n",
19 | "%env MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": null,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "\"\"\"Restricted Zone Notifier.\"\"\"\n",
29 | "\n",
30 | "\"\"\"\n",
31 | " Copyright (c) 2018 Intel Corporation.\n",
32 | "\n",
33 | " Permission is hereby granted, free of charge, to any person obtaining\n",
34 | " a copy of this software and associated documentation files (the\n",
35 | " \"Software\"), to deal in the Software without restriction, including\n",
36 | " without limitation the rights to use, copy, modify, merge, publish,\n",
37 | " distribute, sublicense, and/or sell copies of the Software, and to\n",
38 | " permit person to whom the Software is furnished to do so, subject to\n",
39 | " the following conditions:\n",
40 | "\n",
41 | " The above copyright notice and this permission notice shall be\n",
42 | " included in all copies or substantial portions of the Software.\n",
43 | "\n",
44 | " THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n",
45 | " EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n",
46 | " MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n",
47 | " NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n",
48 | " LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n",
49 | " OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n",
50 | " WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n",
51 | "\n",
52 | "\"\"\"\n",
53 | "\n",
54 | "import os\n",
55 | "import sys\n",
56 | "import json\n",
57 | "import time\n",
58 | "import socket\n",
59 | "import cv2\n",
60 | "\n",
61 | "import logging as log\n",
62 | "import paho.mqtt.client as mqtt\n",
63 | "\n",
64 | "from threading import Thread\n",
65 | "from collections import namedtuple\n",
66 | "from argparse import ArgumentParser\n",
67 | "from inference import Network\n",
68 | "\n",
69 | "# Assemblyinfo contains information about assembly area\n",
70 | "MyStruct = namedtuple(\"assemblyinfo\", \"safe\")\n",
71 | "INFO = MyStruct(True)\n",
72 | "\n",
73 | "# Assemblyinfo contains information about assembly line\n",
74 | "MyStruct = namedtuple(\"assemblyinfo\", \"safe, alert\")\n",
75 | "INFO = MyStruct(True, False)\n",
76 | "\n",
77 | "# MQTT server environment variables\n",
78 | "HOSTNAME = socket.gethostname()\n",
79 | "IPADDRESS = socket.gethostbyname(HOSTNAME)\n",
80 | "TOPIC = \"Restricted_zone_python\"\n",
81 | "MQTT_HOST = IPADDRESS\n",
82 | "MQTT_PORT = 1883\n",
83 | "MQTT_KEEPALIVE_INTERVAL = 60\n",
84 | "\n",
85 | "# Global variables\n",
86 | "accepted_devices = ['CPU', 'GPU', 'MYRIAD', 'HETERO:FPGA,CPU', 'HDDL']\n",
87 | "TARGET_DEVICE = 'CPU'\n",
88 | "is_async_mode = True\n",
89 | "CONFIG_FILE = '../resources/config.json'\n",
90 | "\n",
91 | "# Flag to control background thread\n",
92 | "KEEP_RUNNING = True\n",
93 | "\n",
94 | "DELAY = 5\n",
95 | "\n",
96 | "\n",
97 | "def ssd_out(res, initial_wh, selected_region):\n",
98 | " \"\"\"\n",
99 | " Parse SSD output.\n",
100 | "\n",
101 | " :param res: Detection results\n",
102 | " :param args: Parsed arguments\n",
103 | " :param initial_wh: Initial width and height of the frame\n",
104 | " :param selected_region: Selected region coordinates\n",
105 | " :return: None\n",
106 | " \"\"\"\n",
107 | "\n",
108 | " global INFO\n",
109 | " person = []\n",
110 | " INFO = INFO._replace(safe=True)\n",
111 | " INFO = INFO._replace(alert=False)\n",
112 | "\n",
113 | " for obj in res[0][0]:\n",
114 | " # Draw only objects when probability more than specified threshold\n",
115 | " if obj[2] > prob_threshold:\n",
116 | " xmin = int(obj[3] * initial_wh[0])\n",
117 | " ymin = int(obj[4] * initial_wh[1])\n",
118 | " xmax = int(obj[5] * initial_wh[0])\n",
119 | " ymax = int(obj[6] * initial_wh[1])\n",
120 | " person.append([xmin, ymin, xmax, ymax])\n",
121 | "\n",
122 | " for p in person:\n",
123 | " # area_of_person gives area of the detected person\n",
124 | " area_of_person = (p[2] - p[0]) * (p[3] - p[1])\n",
125 | " x_max = max(p[0], selected_region[0])\n",
126 | " x_min = min(p[2], selected_region[0] + selected_region[2])\n",
127 | " y_min = min(p[3], selected_region[1] + selected_region[3])\n",
128 | " y_max = max(p[1], selected_region[1])\n",
129 | " point_x = x_min - x_max\n",
130 | " point_y = y_min - y_max\n",
131 | " # area_of_intersection gives area of intersection of the\n",
132 | " # detected person and the selected area\n",
133 | " area_of_intersection = point_x * point_y\n",
134 | " if point_x < 0 or point_y < 0:\n",
135 | " continue\n",
136 | " else:\n",
137 | " if area_of_person > area_of_intersection:\n",
138 | " # assembly line area flags\n",
139 | " INFO = INFO._replace(safe=True)\n",
140 | " INFO = INFO._replace(alert=False)\n",
141 | "\n",
142 | " else:\n",
143 | " # assembly line area flags\n",
144 | " INFO = INFO._replace(safe=False)\n",
145 | " INFO = INFO._replace(alert=True)\n",
146 | "\n",
147 | "\n",
148 | "def message_runner():\n",
149 | " \"\"\"\n",
150 | " Publish worker status to MQTT topic.\n",
151 | " Pauses for rate second(s) between updates\n",
152 | "\n",
153 | " :return: None\n",
154 | " \"\"\"\n",
155 | " while KEEP_RUNNING:\n",
156 | " s = json.dumps({\"Worker safe\": INFO.safe, \"Alert\": INFO.alert})\n",
157 | " time.sleep(rate)\n",
158 | " CLIENT.publish(TOPIC, payload=s)\n",
159 | "\n",
160 | "\n",
161 | "def main():\n",
162 | " \"\"\"\n",
163 | " Load the network and parse the output.\n",
164 | "\n",
165 | " :return: None\n",
166 | " \"\"\"\n",
167 | " global CLIENT\n",
168 | " global KEEP_RUNNING\n",
169 | " global DELAY\n",
170 | " global SIG_CAUGHT\n",
171 | " global prob_threshold\n",
172 | " global rate\n",
173 | " global TARGET_DEVICE\n",
174 | " global is_async_mode\n",
175 | "\n",
176 | " CLIENT = mqtt.Client()\n",
177 | " CLIENT.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\n",
178 | " CLIENT.subscribe(TOPIC)\n",
179 | " \n",
180 | " try:\n",
181 | " pointx = int(os.environ['POINTX'])\n",
182 | " pointy = int(os.environ['POINTY'])\n",
183 | " width = int(os.environ['WIDTH'])\n",
184 | " height = int(os.environ['HEIGHT'])\n",
185 | " except KeyError:\n",
186 | " pointx = 0\n",
187 | " pointy = 0 \n",
188 | " width = 0\n",
189 | " height = 0 \n",
190 | " try:\n",
191 | " # Number of seconds between data updates to MQTT server\n",
192 | " rate = float(os.environ['RATE'])\n",
193 | " except KeyError:\n",
194 | " rate = 1 \n",
195 | " try:\n",
196 | " # Probability threshold for detections filtering\n",
197 | " prob_threshold = float(os.environ['PROB_THRESHOLD'])\n",
198 | " except KeyError:\n",
199 | " prob_threshold = 0.7\n",
200 | "\n",
201 | " if 'DEVICE' in os.environ.keys():\n",
202 | " TARGET_DEVICE = os.environ['DEVICE']\n",
203 | "\n",
204 | " if 'MULTI' not in TARGET_DEVICE and TARGET_DEVICE not in accepted_devices:\n",
205 | " print(\"Unsupported device: \" + TARGET_DEVICE)\n",
206 | " sys.exit(2)\n",
207 | " elif 'MULTI' in TARGET_DEVICE:\n",
208 | " target_devices = TARGET_DEVICE.split(':')[1].split(',')\n",
209 | " for multi_device in target_devices:\n",
210 | " if multi_device not in accepted_devices:\n",
211 | " print(\"Unsupported device: \" + TARGET_DEVICE)\n",
212 | " sys.exit(2)\n",
213 | "\n",
214 | " cpu_extension = os.environ['CPU_EXTENSION'] if 'CPU_EXTENSION' in os.environ.keys() else None\n",
215 | "\n",
216 | " model = os.environ[\"MODEL\"]\n",
217 | " if 'FLAG' in os.environ.keys():\n",
218 | " async_mode = os.environ['FLAG']\n",
219 | " if async_mode == \"sync\":\n",
220 | " is_async_mode = False\n",
221 | " else:\n",
222 | " is_async_mode = True\n",
223 | "\n",
224 | " log.basicConfig(format=\"[ %(levelname)s ] %(message)s\",\n",
225 | " level=log.INFO, stream=sys.stdout)\n",
226 | " logger = log.getLogger()\n",
227 | " render_time = 0\n",
228 | " roi_x = pointx\n",
229 | " roi_y = pointy\n",
230 | " roi_w = width\n",
231 | " roi_h = height\n",
232 | "\n",
233 | " assert os.path.isfile(CONFIG_FILE), \"{} file doesn't exist\".format(CONFIG_FILE)\n",
234 | " config = json.loads(open(CONFIG_FILE).read())\n",
235 | "\n",
236 | " for idx, item in enumerate(config['inputs']):\n",
237 | " if item['video'].isdigit():\n",
238 | " input_stream = int(item['video'])\n",
239 | " else:\n",
240 | " input_stream = item['video']\n",
241 | "\n",
242 | " cap = cv2.VideoCapture(input_stream)\n",
243 | "\n",
244 | " if not cap.isOpened():\n",
245 | " logger.error(\"ERROR! Unable to open video source\")\n",
246 | " sys.exit(1)\n",
247 | "\n",
248 | " # Init inference request IDs\n",
249 | " cur_request_id = 0\n",
250 | " next_request_id = 1\n",
251 | "\n",
252 | " # Initialise the class\n",
253 | " infer_network = Network()\n",
254 | " # Load the network to IE plugin to get shape of input layer\n",
255 | " n, c, h, w = infer_network.load_model(model, TARGET_DEVICE, 1, 1, 2, cpu_extension)[1]\n",
256 | "\n",
257 | " message_thread = Thread(target=message_runner, args=())\n",
258 | " message_thread.setDaemon(True)\n",
259 | " message_thread.start()\n",
260 | "\n",
261 | " if is_async_mode:\n",
262 | " print(\"Application running in async mode...\")\n",
263 | " else:\n",
264 | " print(\"Application running in sync mode...\")\n",
265 | "\n",
266 | " ret, frame = cap.read()\n",
267 | " while ret:\n",
268 | "\n",
269 | " ret, next_frame = cap.read()\n",
270 | " if not ret:\n",
271 | " KEEP_RUNNING = False\n",
272 | " break\n",
273 | "\n",
274 | " initial_wh = [cap.get(3), cap.get(4)]\n",
275 | "\n",
276 | " if next_frame is None:\n",
277 | " KEEP_RUNNING = False\n",
278 | " log.error(\"ERROR! blank FRAME grabbed\")\n",
279 | " break\n",
280 | "\n",
281 | " # If either default values or negative numbers are given,\n",
282 | " # then we will default to start of the FRAME\n",
283 | " if roi_x <= 0 or roi_y <= 0:\n",
284 | " roi_x = 0\n",
285 | " roi_y = 0\n",
286 | " if roi_w <= 0:\n",
287 | " roi_w = next_frame.shape[1]\n",
288 | " if roi_h <= 0:\n",
289 | " roi_h = next_frame.shape[0]\n",
290 | " key_pressed = cv2.waitKey(1)\n",
291 | "\n",
292 | " # 'c' key pressed\n",
293 | " if key_pressed == 99:\n",
294 | " # Give operator chance to change the area\n",
295 | " # Select rectangle from left upper corner, dont display crosshair\n",
296 | " ROI = cv2.selectROI(\"Assembly Selection\", frame, True, False)\n",
297 | " print(\"Assembly Area Selection: -x = {}, -y = {}, -w = {},\"\n",
298 | " \" -h = {}\".format(ROI[0], ROI[1], ROI[2], ROI[3]))\n",
299 | " roi_x = ROI[0]\n",
300 | " roi_y = ROI[1]\n",
301 | " roi_w = ROI[2]\n",
302 | " roi_h = ROI[3]\n",
303 | " cv2.destroyAllWindows()\n",
304 | "\n",
305 | " cv2.rectangle(frame, (roi_x, roi_y),\n",
306 | " (roi_x + roi_w, roi_y + roi_h), (0, 0, 255), 2)\n",
307 | " selected_region = [roi_x, roi_y, roi_w, roi_h]\n",
308 | "\n",
309 | " in_frame_fd = cv2.resize(next_frame, (w, h))\n",
310 | " # Change data layout from HWC to CHW\n",
311 | " in_frame_fd = in_frame_fd.transpose((2, 0, 1))\n",
312 | " in_frame_fd = in_frame_fd.reshape((n, c, h, w))\n",
313 | "\n",
314 | " # Start asynchronous inference for specified request.\n",
315 | " inf_start = time.time()\n",
316 | " if is_async_mode:\n",
317 | " # Async enabled and only one video capture\n",
318 | " infer_network.exec_net(next_request_id, in_frame_fd)\n",
319 | " else:\n",
320 | " # Async disabled\n",
321 | " infer_network.exec_net(cur_request_id, in_frame_fd)\n",
322 | " # Wait for the result\n",
323 | " infer_network.wait(cur_request_id)\n",
324 | " det_time = time.time() - inf_start\n",
325 | " # Results of the output layer of the network\n",
326 | " res = infer_network.get_output(cur_request_id)\n",
327 | " # Parse SSD output\n",
328 | " ssd_out(res, initial_wh, selected_region)\n",
329 | "\n",
330 | " # Draw performance stats\n",
331 | " inf_time_message = \"Inference time: N\\A for async mode\" if is_async_mode else \\\n",
332 | " \"Inference time: {:.3f} ms\".format(det_time * 1000)\n",
333 | " render_time_message = \"OpenCV rendering time: {:.3f} ms\". \\\n",
334 | " format(render_time * 1000)\n",
335 | "\n",
336 | " if not INFO.safe:\n",
337 | " warning = \"HUMAN IN ASSEMBLY AREA: PAUSE THE MACHINE!\"\n",
338 | " cv2.putText(frame, warning, (15, 100), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 2)\n",
339 | "\n",
340 | " log_message = \"Async mode is on.\" if is_async_mode else \\\n",
341 | " \"Async mode is off.\"\n",
342 | " cv2.putText(frame, log_message, (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)\n",
343 | " cv2.putText(frame, inf_time_message, (15, 35), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)\n",
344 | " cv2.putText(frame, render_time_message, (15, 55), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)\n",
345 | " cv2.putText(frame, \"Worker Safe: {}\".format(INFO.safe), (15, 75), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)\n",
346 | "\n",
347 | " render_start = time.time()\n",
348 | " cv2.imshow(\"Restricted Zone Notifier\", frame)\n",
349 | " render_end = time.time()\n",
350 | " render_time = render_end - render_start\n",
351 | "\n",
352 | " frame = next_frame\n",
353 | "\n",
354 | " if key_pressed == 27:\n",
355 | " print(\"Attempting to stop background threads\")\n",
356 | " KEEP_RUNNING = False\n",
357 | " break\n",
358 | " # Tab key pressed\n",
359 | " if key_pressed == 9:\n",
360 | " is_async_mode = not is_async_mode\n",
361 | " print(\"Switched to {} mode\".format(\"async\" if is_async_mode else \"sync\"))\n",
362 | "\n",
363 | " if is_async_mode:\n",
364 | " # Swap infer request IDs\n",
365 | " cur_request_id, next_request_id = next_request_id, cur_request_id\n",
366 | "\n",
367 | " infer_network.clean()\n",
368 | " message_thread.join()\n",
369 | " cap.release()\n",
370 | " cv2.destroyAllWindows()\n",
371 | " CLIENT.disconnect()\n",
372 | "\n",
373 | "\n",
374 | "if __name__ == '__main__':\n",
375 | " main()\n",
376 | "\n"
377 | ]
378 | },
379 | {
380 | "cell_type": "code",
381 | "execution_count": null,
382 | "metadata": {},
383 | "outputs": [],
384 | "source": []
385 | }
386 | ],
387 | "metadata": {
388 | "kernelspec": {
389 | "display_name": "Python 3",
390 | "language": "python",
391 | "name": "python3"
392 | },
393 | "language_info": {
394 | "codemirror_mode": {
395 | "name": "ipython",
396 | "version": 3
397 | },
398 | "file_extension": ".py",
399 | "mimetype": "text/x-python",
400 | "name": "python",
401 | "nbconvert_exporter": "python",
402 | "pygments_lexer": "ipython3",
403 | "version": "3.5.2"
404 | }
405 | },
406 | "nbformat": 4,
407 | "nbformat_minor": 2
408 | }
409 |
--------------------------------------------------------------------------------