├── docs
│   └── images
│       ├── grafana1.png
│       ├── grafana2.png
│       ├── jupyter.png
│       ├── jupyter_code1.png
│       ├── jupyter_code2.png
│       ├── retail-analytics.png
│       └── architectural-diagram.png
├── docker
│   ├── images
│   │   ├── grafana1.png
│   │   ├── grafana2.png
│   │   └── grafana3.png
│   ├── DockerGrafanaInfluxKit
│   │   ├── influxdb
│   │   │   ├── Dockerfile
│   │   │   └── entrypoint.sh
│   │   ├── grafana
│   │   │   ├── datasources
│   │   │   │   └── influx.json
│   │   │   ├── Dockerfile
│   │   │   └── entrypoint.sh
│   │   └── configuration.env
│   ├── README.md
│   └── DockerOpenvino
│       └── Dockerfile
├── templates
│   └── index.html
├── resources
│   ├── labels.txt
│   └── config.json
├── Dockerfile
├── LICENSE
├── docker-compose.yml
├── setup.sh
├── Jupyter
│   ├── inference.py
│   ├── README.md
│   └── smart_retail_analytics_jupyter.py
├── application
│   ├── inference.py
│   └── smart_retail_analytics.py
├── README.md
└── retail-analytics.json
/docs/images/grafana1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/smart-retail-analytics/HEAD/docs/images/grafana1.png
--------------------------------------------------------------------------------
/docs/images/grafana2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/smart-retail-analytics/HEAD/docs/images/grafana2.png
--------------------------------------------------------------------------------
/docs/images/jupyter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/smart-retail-analytics/HEAD/docs/images/jupyter.png
--------------------------------------------------------------------------------
/docker/images/grafana1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/smart-retail-analytics/HEAD/docker/images/grafana1.png
--------------------------------------------------------------------------------
/docker/images/grafana2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/smart-retail-analytics/HEAD/docker/images/grafana2.png
--------------------------------------------------------------------------------
/docker/images/grafana3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/smart-retail-analytics/HEAD/docker/images/grafana3.png
--------------------------------------------------------------------------------
/docs/images/jupyter_code1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/smart-retail-analytics/HEAD/docs/images/jupyter_code1.png
--------------------------------------------------------------------------------
/docs/images/jupyter_code2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/smart-retail-analytics/HEAD/docs/images/jupyter_code2.png
--------------------------------------------------------------------------------
/docs/images/retail-analytics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/smart-retail-analytics/HEAD/docs/images/retail-analytics.png
--------------------------------------------------------------------------------
/docs/images/architectural-diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel-iot-devkit/smart-retail-analytics/HEAD/docs/images/architectural-diagram.png
--------------------------------------------------------------------------------
/templates/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
 }})
5 |
6 |
7 |
--------------------------------------------------------------------------------
/docker/DockerGrafanaInfluxKit/influxdb/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM influxdb:1.3.1-alpine
2 |
3 | WORKDIR /app
4 | COPY entrypoint.sh ./
5 | RUN chmod u+x entrypoint.sh
6 |
7 | ENTRYPOINT ["/app/entrypoint.sh"]
8 |
--------------------------------------------------------------------------------
/resources/labels.txt:
--------------------------------------------------------------------------------
1 | plane
2 | bicycle
3 | bird
4 | boat
5 | bottle
6 | bus
7 | car
8 | cat
9 | chair
10 | cow
11 | table
12 | dog
13 | horse
14 | motorcycle
15 | person
16 | plant
17 | sheep
18 | sofa
19 | train
20 | monitor
21 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:16.04
2 |
3 | ADD . /app
4 | WORKDIR /app
5 | ENV http_proxy 'http://proxy-iind.intel.com:911'
6 | ENV https_proxy 'http://proxy-iind.intel.com:911'
7 | RUN apt-get update
8 | RUN apt-get install -y python3-pip
9 | RUN pip3 install numpy requests influxdb flask pyyaml pathlib
10 |
--------------------------------------------------------------------------------
/docker/DockerGrafanaInfluxKit/grafana/datasources/influx.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Retail_Analytics",
3 | "type": "influxdb",
4 | "url": "http://influxdb:8086",
5 | "access": "proxy",
6 | "user": "$INFLUX_USER",
7 | "password": "$INFLUX_PASSWORD",
8 | "database": "$INFLUX_DB",
9 | "basicAuth": false
10 | }
11 |
--------------------------------------------------------------------------------
/docker/DockerGrafanaInfluxKit/configuration.env:
--------------------------------------------------------------------------------
1 | ###################
2 | # Grafana options
3 | ###################
4 |
5 | GF_SECURITY_ADMIN_USER=admin
6 | GF_SECURITY_ADMIN_PASSWORD=admin
7 | GF_INSTALL_PLUGINS=ryantxu-ajax-panel
8 |
9 | ####################
10 | # InfluxDB options
11 | ####################
12 |
13 | INFLUX_USER=admin
14 | INFLUX_PASSWORD=admin
15 | INFLUX_DB=Retail_Analytics
16 | INFLUXDB_HTTP_ENABLED=true
17 |
--------------------------------------------------------------------------------
/docker/DockerGrafanaInfluxKit/grafana/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM grafana/grafana:5.3.1
2 |
3 | USER root
4 | ENV http_proxy 'http://proxy-iind.intel.com:911'
5 | ENV https_proxy 'http://proxy-iind.intel.com:911'
6 | RUN apt-get update && apt-get install -y curl gettext-base && rm -rf /var/lib/apt/lists/*
7 |
8 | WORKDIR /etc/grafana
9 | COPY datasources ./datasources
10 |
11 | WORKDIR /app
12 | COPY entrypoint.sh ./
13 | RUN chmod u+x entrypoint.sh
14 |
15 | ENTRYPOINT ["/app/entrypoint.sh"]
16 |
--------------------------------------------------------------------------------
/resources/config.json:
--------------------------------------------------------------------------------
1 | {
2 |
3 | "inputs": [
4 |
5 | {
6 | "video": "../resources/head-pose-face-detection-female.mp4",
7 | "type": "shopper"
8 | },
9 | {
10 | "video": "../resources/bottle-detection.mp4",
11 | "label": "bottle",
12 | "type": "shelf"
13 | },
14 | {
15 | "video": "../resources/face-demographics-walking.mp4",
16 | "type": "traffic"
17 | }
18 | ]
19 |
20 | }
--------------------------------------------------------------------------------
/docker/DockerGrafanaInfluxKit/influxdb/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | if [ ! -f "/var/lib/influxdb/.init" ]; then
4 | exec influxd $@ &
5 |
6 | until wget -q "http://localhost:8086/ping" 2> /dev/null; do
7 | sleep 1
8 | done
9 |
10 | influx -host=localhost -port=8086 -execute="CREATE USER ${INFLUX_USER} WITH PASSWORD '${INFLUX_PASSWORD}' WITH ALL PRIVILEGES"
11 | influx -host=localhost -port=8086 -execute="CREATE DATABASE ${INFLUX_DB}"
12 |
13 | touch "/var/lib/influxdb/.init"
14 |
15 | kill -s TERM %1
16 | fi
17 |
18 | exec influxd $@
19 |
--------------------------------------------------------------------------------
/docker/DockerGrafanaInfluxKit/grafana/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | url="http://$GF_SECURITY_ADMIN_USER:$GF_SECURITY_ADMIN_PASSWORD@localhost:3000"
4 |
5 | post() {
6 | curl -s -X POST -d "$1" \
7 | -H 'Content-Type: application/json;charset=UTF-8' \
8 | "$url$2" 2> /dev/null
9 | }
10 |
11 | if [ ! -f "/var/lib/grafana/.init" ]; then
12 | exec /run.sh $@ &
13 |
14 | until curl -s "$url/api/datasources" 2> /dev/null; do
15 | sleep 1
16 | done
17 |
18 | for datasource in /etc/grafana/datasources/*; do
19 | post "$(envsubst < $datasource)" "/api/datasources"
20 | done
21 |
22 | touch "/var/lib/grafana/.init"
23 |
24 | kill $(pgrep grafana)
25 | fi
26 |
27 | exec /run.sh $@
28 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2021, Intel Corporation
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.2'
2 |
3 | services:
4 | influxdb:
5 | build: ./docker/DockerGrafanaInfluxKit/influxdb
6 | env_file: ./docker/DockerGrafanaInfluxKit/configuration.env
7 | hostname: influxdb
8 | networks:
9 | - network
10 | ports:
11 | - '8086:8086'
12 | volumes:
13 | - influxdb_data:/var/lib/influxdb
14 |
15 | grafana:
16 | networks:
17 | - network
18 | build: ./docker/DockerGrafanaInfluxKit/grafana
19 | env_file: ./docker/DockerGrafanaInfluxKit/configuration.env
20 | links:
21 | - influxdb
22 | ports:
23 | - '3000:3000'
24 | volumes:
25 | - grafana_data:/var/lib/grafana
26 | depends_on:
27 | - influxdb
28 |
29 | retail-analytics:
30 | image: retail-analytics:latest
31 | ports:
32 | - "5000:5000"
33 | container_name: retail-analytics
34 | networks:
35 | - network
36 | depends_on:
37 | - influxdb
38 | - grafana
39 | command: ["bash", "-c", "source /opt/intel/openvino/bin/setupvars.sh && cd application && python3 smart_retail_analytics.py -fm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml -pm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml -mm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml -om ../resources/FP32/mobilenet-ssd.xml -pr /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0002/FP32/person-detection-retail-0002.xml -lb ../resources/labels.txt -ip influxdb"]
40 |
41 | volumes:
42 | grafana_data: {}
43 | influxdb_data: {}
44 |
45 | networks:
46 | network:
47 | driver: "bridge"
48 |
49 |
--------------------------------------------------------------------------------
/docker/README.md:
--------------------------------------------------------------------------------
1 | # Containerization
2 |
3 | ## Install docker
4 | To install docker, please refer to the following link: [https://docs.docker.com/install/linux/docker-ce/ubuntu/](https://docs.docker.com/install/linux/docker-ce/ubuntu/)
5 |
6 | ### Install docker-compose
7 | Install docker-compose using the commands below:
8 | ```
9 | sudo curl -L "https://github.com/docker/compose/releases/download/1.24.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
10 | sudo chmod +x /usr/local/bin/docker-compose
11 | ```
12 |
13 | ## Containerize the Application
14 | The application has three parts:
15 | * OpenVINO application
16 | * InfluxDB
17 | * Grafana
18 |
19 | Each part of the application will run in a separate container.
20 |
21 |
22 | To containerize the application:
23 | 1. Go to the smart-retail-analytics-python directory.
24 |
25 | ```
26 | cd
27 | ```
28 |
29 | 2. Build the docker image with the name __retail-analytics__.
30 | ```
31 | docker build -t retail-analytics -f docker/DockerOpenvino/Dockerfile .
32 | ```
33 |
34 | 3. To run the retail-analytics container along with the influxdb and grafana containers, run the command below:
35 | ```
36 | docker-compose up
37 | ```
38 | * The Docker Compose tool is used to define and run multi-container Docker applications.
39 |
40 | 4. To see the output of the application running in the container, configure the Grafana dashboard.
41 |
42 | * In your browser, go to [localhost:3000](http://localhost:3000).
43 |
44 | * Log in with user as **admin** and password as **admin**.
45 |
46 | * Click on **Configuration**.
47 |
48 | * Select **“Data Sources”**.
49 |
50 | * Click on **“+ Add data source”** and provide inputs below.
51 |
52 | - *Name*: Retail_Analytics
53 | - *Type*: InfluxDB
54 | - *URL*: http://influxdb:8086
55 | - *Database*: Retail_Analytics
56 | - Click on “Save and Test”
57 |
58 | 
59 |
60 | * Click on the **+** icon on the left side of the browser and select **import**.
61 |
62 | * Click on **Upload.json File**.
63 |
64 | * Select the file name __retail-analytics.json__ from smart-retail-analytics-python directory.
65 |
66 | * Select "Retail_Analytics" in **Select a influxDB data source**.
67 |
68 | 
69 |
70 | * Click on import.
71 |
72 | 
73 |
--------------------------------------------------------------------------------
/docker/DockerOpenvino/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:16.04
2 |
3 | ADD . /app
4 | WORKDIR /app
5 | ENV http_proxy 'http://proxy-iind.intel.com:911'
6 | ENV https_proxy 'http://proxy-iind.intel.com:911'
7 |
8 | ARG INSTALL_DIR=/opt/intel/openvino
9 | ARG OPENVINO=l_openvino_toolkit_p_2020.3.194.tgz
10 | ARG COUNTRY=1
11 | ARG IMAGE=mssi
12 | ARG VERSION=1.0
13 |
14 | RUN apt-get update && apt-get -y upgrade && apt-get autoremove
15 |
16 | #Install needed dependences
17 | RUN apt-get install -y --no-install-recommends \
18 | build-essential \
19 | cpio \
20 | curl \
21 | git \
22 | lsb-release \
23 | pciutils \
24 | python3.5 \
25 | python3.5-dev \
26 | python3-pip \
27 | python3-setuptools \
28 | wget \
29 | sudo
30 |
31 | #Upgrading the pip version
32 | RUN pip3 install --upgrade pip
33 |
34 | #Downloading Openvino toolkit
35 | RUN cd /app && \
36 | wget -O $OPENVINO http://registrationcenter-download.intel.com/akdlm/irc_nas/16670/$OPENVINO
37 |
38 | RUN tar -xvzf $OPENVINO
39 |
40 | # Installing OpenVINO dependencies
41 | RUN cd /app/l_openvino_toolkit* && \
42 | ./install_openvino_dependencies.sh
43 |
44 | RUN pip3 install numpy requests influxdb flask pyyaml pathlib
45 |
46 | # Installing OpenVINO itself
47 | RUN cd /app/l_openvino_toolkit* && \
48 | sed -i 's/decline/accept/g' silent.cfg && \
49 | ./install.sh --silent silent.cfg
50 |
51 | RUN /bin/bash -c "source $INSTALL_DIR/bin/setupvars.sh"
52 |
53 | RUN echo "source $INSTALL_DIR/bin/setupvars.sh" >> /root/.bashrc
54 |
55 | RUN cd $INSTALL_DIR/deployment_tools/model_optimizer/install_prerequisites/ && \
56 | ./install_prerequisites.sh && \
57 | ./install_prerequisites_caffe.sh
58 |
59 | RUN cd $INSTALL_DIR/deployment_tools/tools/model_downloader && \
60 | ./downloader.py --name face-detection-adas-0001 && \
61 | ./downloader.py --name head-pose-estimation-adas-0001 && \
62 | ./downloader.py --name emotions-recognition-retail-0003 && \
63 | ./downloader.py --name person-detection-retail-0002 && \
64 | ./downloader.py --name mobilenet-ssd
65 |
66 | RUN cd $INSTALL_DIR/deployment_tools/model_optimizer/ && \
67 | ./mo_caffe.py --input_model $INSTALL_DIR/deployment_tools/open_model_zoo/tools/downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel -o /app/resources/FP32 --data_type FP32 --scale 256 --mean_values [127,127,127] && \
68 | ./mo_caffe.py --input_model $INSTALL_DIR/deployment_tools/open_model_zoo/tools/downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel -o /app/resources/FP16 --data_type FP16 --scale 256 --mean_values [127,127,127]
69 |
70 | RUN cd /app/resources && \
71 | wget -O face-demographics-walking.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/face-demographics-walking.mp4 && \
72 | wget -O bottle-detection.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/bottle-detection.mp4 && \
73 | wget -O head-pose-face-detection-female.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/head-pose-face-detection-female.mp4
74 |
75 | CMD ["/bin/bash"]
76 |
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Copyright (c) 2018 Intel Corporation.
4 | # Permission is hereby granted, free of charge, to any person obtaining
5 | # a copy of this software and associated documentation files (the
6 | # "Software"), to deal in the Software without restriction, including
7 | # without limitation the rights to use, copy, modify, merge, publish,
8 | # distribute, sublicense, and/or sell copies of the Software, and to
9 | # permit persons to whom the Software is furnished to do so, subject to
10 | # the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be
13 | # included in all copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 | # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 | # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 | # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19 | # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20 | # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21 | # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 |
23 | #Install the dependencies
24 | sudo apt-get update
25 | sudo apt install curl
26 | sudo curl -sL https://repos.influxdata.com/influxdb.key | sudo apt-key add -
27 | source /etc/lsb-release
28 | echo "deb https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
29 | sudo apt-get update && sudo apt-get install -y influxdb
30 | sudo service influxdb start
31 | wget -O grafana_5.3.2_amd64.deb https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.3.2_amd64.deb
32 | sudo apt-get install -y adduser libfontconfig
33 | sudo dpkg -i grafana_5.3.2_amd64.deb
34 | sudo /bin/systemctl start grafana-server
35 | sudo grafana-cli plugins install ryantxu-ajax-panel
36 | sudo apt-get install python3-pip
37 | sudo pip3 install influxdb numpy flask jupyter
38 | sudo pip3 install networkx
39 | BASE_DIR=`pwd`
40 |
41 | #Download the videos
42 | cd resources
43 | wget -O face-demographics-walking.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/face-demographics-walking.mp4
44 | wget -O bottle-detection.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/bottle-detection.mp4
45 | wget -O head-pose-face-detection-female.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/head-pose-face-detection-female.mp4
46 |
47 | #Download the models
48 | cd /opt/intel/openvino/deployment_tools/tools/model_downloader
49 | sudo ./downloader.py --name face-detection-adas-0001
50 | sudo ./downloader.py --name head-pose-estimation-adas-0001
51 | sudo ./downloader.py --name emotions-recognition-retail-0003
52 | sudo ./downloader.py --name person-detection-retail-0002
53 | sudo ./downloader.py --name mobilenet-ssd
54 |
55 | #Optimize the model
56 | cd /opt/intel/openvino/deployment_tools/model_optimizer/
57 | ./mo_caffe.py --input_model /opt/intel/openvino/deployment_tools/tools/model_downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel -o $BASE_DIR/resources/FP32 --data_type FP32 --scale 256 --mean_values [127,127,127]
58 | ./mo_caffe.py --input_model /opt/intel/openvino/deployment_tools/tools/model_downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel -o $BASE_DIR/resources/FP16 --data_type FP16 --scale 256 --mean_values [127,127,127]
59 |
--------------------------------------------------------------------------------
/Jupyter/inference.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Copyright (c) 2018 Intel Corporation.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining
6 | a copy of this software and associated documentation files (the
7 | "Software"), to deal in the Software without restriction, including
8 | without limitation the rights to use, copy, modify, merge, publish,
9 | distribute, sublicense, and/or sell copies of the Software, and to
10 | permit persons to whom the Software is furnished to do so, subject to
11 | the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be
14 | included in all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 | """
24 |
25 | import os
26 | import sys
27 | import logging as log
28 | from openvino.inference_engine import IENetwork, IECore
29 |
30 |
31 | log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
32 | logger = log.getLogger()
33 |
34 |
35 | class Network:
36 | """
37 | Load and configure inference plugins for the specified target devices
38 | and performs synchronous and asynchronous modes for the specified infer requests.
39 | """
40 |
41 | def __init__(self):
42 | self.net = None
43 | self.plugin = None
44 | self.input_blob = None
45 | self.out_blob = None
46 | self.net_plugin = None
47 | self.infer_request_handle = None
48 |
49 | def load_model(self, model, device, input_size, output_size, num_requests,
50 | cpu_extension=None, tag=None, plugin=None):
51 | """
52 | Loads a network and an image to the Inference Engine plugin.
53 | :param model: .xml file of pre trained model
54 | :param cpu_extension: extension for the CPU device
55 | :param device: Target device
56 | :param input_size: Number of input layers
57 | :param output_size: Number of output layers
58 | :param num_requests: Index of Infer request value. Limited to device capabilities.
59 | :param plugin: Plugin for specified device
60 | :return: Shape of input layer
61 | """
62 |
63 | model_xml = model
64 | model_bin = os.path.splitext(model_xml)[0] + ".bin"
65 | # Plugin initialization for specified device
66 | # and load extensions library if specified
67 | if not plugin:
68 | log.info("Initializing plugin for {} device...".format(device))
69 | self.plugin = IECore()
70 | else:
71 | self.plugin = plugin
72 |
73 | if cpu_extension and 'CPU' in device:
74 | self.plugin.add_extension(cpu_extension, "CPU")
75 | if not device == 'HDDL':
76 | tag = {}
77 | # Read IR
78 | log.info("Reading IR...")
79 | self.net = self.plugin.read_network(model=model_xml, weights=model_bin)
80 | log.info("Loading IR to the plugin...")
81 |
82 | if "CPU" in device:
83 | supported_layers = self.plugin.query_network(self.net, "CPU")
84 | not_supported_layers = \
85 | [l for l in self.net.layers.keys() if l not in supported_layers]
86 | if len(not_supported_layers) != 0:
87 | log.error("Following layers are not supported by "
88 | "the plugin for specified device {}:\n {}".
89 | format(device,
90 | ', '.join(not_supported_layers)))
91 | log.error("Please try to specify cpu extensions library path"
92 | " in command line parameters using -l "
93 | "or --cpu_extension command line argument")
94 | sys.exit(1)
95 |
96 | if num_requests == 0:
97 | # Loads network read from IR to the plugin
98 | self.net_plugin = self.plugin.load_network(network=self.net, device_name=device, config=tag)
99 | else:
100 | self.net_plugin = self.plugin.load_network(network=self.net, num_requests=num_requests, device_name=device, config=tag)
101 |
102 | self.input_blob = next(iter(self.net.inputs))
103 | if len(self.net.inputs.keys()) == 2:
104 | self.input_blob = "data"
105 | self.out_blob = next(iter(self.net.outputs))
106 | assert len(self.net.inputs.keys()) == input_size, \
107 | "Supports only {} input topologies".format(len(self.net.inputs))
108 | assert len(self.net.outputs) == output_size, \
109 | "Supports only {} output topologies".format(len(self.net.outputs))
110 |
111 | return self.plugin, self.get_input_shape()
112 |
113 | def get_input_shape(self):
114 | """
115 | Gives the shape of the input layer of the network.
116 | :return: Shape of the input layer
117 | """
118 | return self.net.inputs[self.input_blob].shape
119 |
120 | def performance_counter(self, request_id):
121 | """
122 | Queries performance measures per layer to get feedback of what is the
123 | most time consuming layer.
124 | :param request_id: Index of Infer request value. Limited to device capabilities
125 | :return: Performance of the layer
126 | """
127 | perf_count = self.net_plugin.requests[request_id].get_perf_counts()
128 | return perf_count
129 |
130 | def exec_net(self, request_id, frame, input_blob=None, initial_w=None, initial_h=None):
131 | """
132 | Starts asynchronous inference for specified request.
133 | :param request_id: Index of Infer request value. Limited to device capabilities.
134 | :param frame: Input image
135 | :return: Instance of Executable Network class
136 | """
137 | if input_blob:
138 | self.infer_request_handle = self.net_plugin.start_async(
139 | request_id=request_id, inputs={input_blob[0]: frame,
140 | input_blob[1]: [544, 992, 992/initial_w,
141 | 544/initial_h, 992/initial_w, 544/initial_h]})
142 | else:
143 | self.infer_request_handle = self.net_plugin.start_async(
144 | request_id=request_id, inputs={self.input_blob: frame})
145 | return self.net_plugin
146 |
147 | def wait(self, request_id):
148 | """
149 | Waits for the result to become available.
150 | :param request_id: Index of Infer request value. Limited to device capabilities.
151 | :return: Timeout value
152 | """
153 | wait_process = self.net_plugin.requests[request_id].wait(-1)
154 | return wait_process
155 |
156 | def get_output(self, request_id, output=None):
157 | """
158 | Gives a list of results for the output layer of the network.
159 | :param request_id: Index of Infer request value. Limited to device capabilities.
160 | :param output: Name of the output layer
161 | :return: Results for the specified request
162 | """
163 | if output:
164 | res = self.net_plugin.requests[request_id].outputs[output]
165 | else:
166 | res = self.net_plugin.requests[request_id].outputs[self.out_blob]
167 | return res
168 |
169 | def clean(self):
170 | """
171 | Deletes all the instances
172 | :return: None
173 | """
174 | del self.net_plugin
175 | del self.plugin
176 | del self.net
177 |
--------------------------------------------------------------------------------
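A minimal usage sketch for the `Network` wrapper above (the same class is duplicated in application/inference.py). The model path, input image and request count are illustrative assumptions; the real driver logic lives in smart_retail_analytics_jupyter.py / smart_retail_analytics.py.

```
# Hedged example: drive the Network class with one asynchronous request.
import cv2
from inference import Network

infer_network = Network()
# face-detection-adas-0001 has one input layer and one output layer.
plugin, (n, c, h, w) = infer_network.load_model(
    "face-detection-adas-0001.xml", "CPU", 1, 1, num_requests=2)

frame = cv2.imread("shopper.jpg")                      # any BGR frame works here
blob = cv2.resize(frame, (w, h)).transpose((2, 0, 1))  # HWC -> CHW
blob = blob.reshape((n, c, h, w))

infer_network.exec_net(0, blob)       # start async inference on request 0
if infer_network.wait(0) == 0:        # block until the request completes
    detections = infer_network.get_output(0)
infer_network.clean()
```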
/application/inference.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Copyright (c) 2018 Intel Corporation.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining
6 | a copy of this software and associated documentation files (the
7 | "Software"), to deal in the Software without restriction, including
8 | without limitation the rights to use, copy, modify, merge, publish,
9 | distribute, sublicense, and/or sell copies of the Software, and to
10 | permit persons to whom the Software is furnished to do so, subject to
11 | the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be
14 | included in all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 | """
24 |
25 | import os
26 | import sys
27 | import logging as log
28 | from openvino.inference_engine import IENetwork, IECore
29 |
30 |
31 | log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
32 | logger = log.getLogger()
33 |
34 |
35 | class Network:
36 | """
37 | Load and configure inference plugins for the specified target devices
38 | and performs synchronous and asynchronous modes for the specified infer requests.
39 | """
40 |
41 | def __init__(self):
42 | self.net = None
43 | self.plugin = None
44 | self.input_blob = None
45 | self.out_blob = None
46 | self.net_plugin = None
47 | self.infer_request_handle = None
48 |
49 | def load_model(self, model, device, input_size, output_size, num_requests,
50 | cpu_extension=None, tag=None, plugin=None):
51 | """
52 | Loads a network and an image to the Inference Engine plugin.
53 | :param model: .xml file of pre trained model
54 | :param cpu_extension: extension for the CPU device
55 | :param device: Target device
56 | :param input_size: Number of input layers
57 | :param output_size: Number of output layers
58 | :param num_requests: Index of Infer request value. Limited to device capabilities.
59 | :param plugin: Plugin for specified device
60 | :return: Shape of input layer
61 | """
62 |
63 | model_xml = model
64 | model_bin = os.path.splitext(model_xml)[0] + ".bin"
65 | # Plugin initialization for specified device
66 | # and load extensions library if specified
67 | if not plugin:
68 | log.info("Initializing plugin for {} device...".format(device))
69 | self.plugin = IECore()
70 | else:
71 | self.plugin = plugin
72 |
73 | if cpu_extension and 'CPU' in device:
74 | self.plugin.add_extension(cpu_extension, "CPU")
75 | if not device == 'HDDL':
76 | tag = {}
77 | # Read IR
78 | log.info("Reading IR...")
79 | self.net = self.plugin.read_network(model=model_xml, weights=model_bin)
80 | log.info("Loading IR to the plugin...")
81 |
82 | if "CPU" in device:
83 | supported_layers = self.plugin.query_network(self.net, "CPU")
84 | not_supported_layers = \
85 | [l for l in self.net.layers.keys() if l not in supported_layers]
86 | if len(not_supported_layers) != 0:
87 | log.error("Following layers are not supported by "
88 | "the plugin for specified device {}:\n {}".
89 | format(device,
90 | ', '.join(not_supported_layers)))
91 | log.error("Please try to specify cpu extensions library path"
92 | " in command line parameters using -l "
93 | "or --cpu_extension command line argument")
94 | sys.exit(1)
95 |
96 | if num_requests == 0:
97 | # Loads network read from IR to the plugin
98 | self.net_plugin = self.plugin.load_network(network=self.net, device_name=device, config=tag)
99 | else:
100 | self.net_plugin = self.plugin.load_network(network=self.net, num_requests=num_requests, device_name=device, config=tag)
101 |
102 | self.input_blob = next(iter(self.net.inputs))
103 | if len(self.net.inputs.keys()) == 2:
104 | self.input_blob = "data"
105 | self.out_blob = next(iter(self.net.outputs))
106 | assert len(self.net.inputs.keys()) == input_size, \
107 | "Supports only {} input topologies".format(len(self.net.inputs))
108 | assert len(self.net.outputs) == output_size, \
109 | "Supports only {} output topologies".format(len(self.net.outputs))
110 |
111 | return self.plugin, self.get_input_shape()
112 |
113 | def get_input_shape(self):
114 | """
115 | Gives the shape of the input layer of the network.
116 | :return: Shape of the input layer
117 | """
118 | return self.net.inputs[self.input_blob].shape
119 |
120 | def performance_counter(self, request_id):
121 | """
122 | Queries performance measures per layer to get feedback of what is the
123 | most time consuming layer.
124 | :param request_id: Index of Infer request value. Limited to device capabilities
125 | :return: Performance of the layer
126 | """
127 | perf_count = self.net_plugin.requests[request_id].get_perf_counts()
128 | return perf_count
129 |
130 | def exec_net(self, request_id, frame, input_blob=None, initial_w=None, initial_h=None):
131 | """
132 | Starts asynchronous inference for specified request.
133 | :param request_id: Index of Infer request value. Limited to device capabilities.
134 | :param frame: Input image
135 | :return: Instance of Executable Network class
136 | """
137 | if input_blob:
138 | self.infer_request_handle = self.net_plugin.start_async(
139 | request_id=request_id, inputs={input_blob[0]: frame,
140 | input_blob[1]: [544, 992, 992/initial_w,
141 | 544/initial_h, 992/initial_w, 544/initial_h]})
142 | else:
143 | self.infer_request_handle = self.net_plugin.start_async(
144 | request_id=request_id, inputs={self.input_blob: frame})
145 | return self.net_plugin
146 |
147 | def wait(self, request_id):
148 | """
149 | Waits for the result to become available.
150 | :param request_id: Index of Infer request value. Limited to device capabilities.
151 | :return: Timeout value
152 | """
153 | wait_process = self.net_plugin.requests[request_id].wait(-1)
154 | return wait_process
155 |
156 | def get_output(self, request_id, output=None):
157 | """
158 | Gives a list of results for the output layer of the network.
159 | :param request_id: Index of Infer request value. Limited to device capabilities.
160 | :param output: Name of the output layer
161 | :return: Results for the specified request
162 | """
163 | if output:
164 | res = self.net_plugin.requests[request_id].outputs[output]
165 | else:
166 | res = self.net_plugin.requests[request_id].outputs[self.out_blob]
167 | return res
168 |
169 | def clean(self):
170 | """
171 | Deletes all the instances
172 | :return: None
173 | """
174 | del self.net_plugin
175 | del self.plugin
176 | del self.net
177 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Retail Analytics
2 |
3 | | Details | |
4 | |-----------------------|---------------|
5 | | Target OS: | Ubuntu\* 18.04 LTS |
6 | | Programming Language: | Python* 3.5 |
7 | | Time to Complete: | 50-70min |
8 |
9 | 
10 |
11 | ## What it does
12 | This smart retail analytics application monitors people's activity, counts the total number of people inside a retail store, and keeps a check on the inventory by detecting the products specified by the user. It detects objects on any number of screens using video or camera resources.
13 |
14 | ## Requirements
15 | ### Hardware
16 | * 6th to 8th Generation Intel® Core™ processors with Iris® Pro graphics or Intel® HD Graphics
17 |
18 | ### Software
19 | * [Ubuntu\* 18.04 LTS](http://releases.ubuntu.com/18.04/)
20 | *Note*: We recommend using a 4.14+ Linux* kernel with this software. Run the following command to determine your kernel version:
21 | ```
22 | uname -a
23 | ```
24 | * OpenCL™ Runtime Package
25 | * Intel® Distribution of OpenVINO™ toolkit 2020 R3 release
26 | * Grafana* v5.3.2
27 | * InfluxDB* v1.6.2
28 |
29 | ## How It Works
30 | The application uses the Inference Engine included in the Intel® Distribution of OpenVINO™ toolkit. It accepts multiple video input feeds, and the user can specify the feed type for each video.
31 | The application supports three feed types:
32 | * Shopper: If the feed type of the video is shopper, the application grabs the frame from that input stream and uses a Deep Neural Network model to detect the faces in it. If anybody is present in the frame, they are counted as a shopper. Once a face is detected, the application uses a head-pose estimation model to check the head pose of the person. If the person is looking at the camera, their emotions are detected using an emotions recognition model. Using the data obtained from this, the application infers whether the person is interested or not and gives the total number of people detected. It also measures the duration for which the person is present in the frame and the duration for which they were looking at the camera.
33 |
34 | * Store traffic: If the video feed type is traffic, the application uses a Deep Neural Network model to detect people in the frame. The total number of people who visited and the number of people currently present in front of the camera are obtained from this.
35 |
36 | * Shelf: This feed type is used to keep a check on the product inventory. If the video feed type is shelf, an object detection model is used to detect the product specified by the user in the frame from this video stream. It detects the objects and gives the number of objects present in the frame.
37 |
38 | The application is capable of processing multiple video input feeds, each having a different feed type. The data obtained from these videos is stored in InfluxDB for analysis and visualized on Grafana. The application uses the Flask Python web framework to live-stream the output videos to Grafana.
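As a rough illustration of the storage side, the snippet below writes a data point to the `Retail_Analytics` database using the `influxdb` Python client installed by setup.sh; the measurement and field names are illustrative assumptions, not necessarily the exact ones used by `smart_retail_analytics.py`.

```
# Hedged sketch: push per-frame metrics into InfluxDB for Grafana to query.
# Measurement and field names below are assumptions for illustration only.
from influxdb import InfluxDBClient

client = InfluxDBClient(host="localhost", port=8086, database="Retail_Analytics")
client.create_database("Retail_Analytics")   # harmless if the database already exists

client.write_points([{
    "measurement": "shopper",                              # hypothetical measurement
    "fields": {"people_count": 3, "looking_at_camera": 1}  # hypothetical fields
}])
```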
39 |
40 |
41 | 
42 |
43 | **Architectural Diagram**
44 |
45 | ## Setup
46 |
47 | ### Get the code
48 |
49 | Steps to clone the reference implementation: (smart-retail-analytics)
50 |
51 | sudo apt-get update && sudo apt-get install git
52 | git clone https://github.com/intel-iot-devkit/smart-retail-analytics.git
53 |
54 | ### Install the Intel® Distribution of OpenVINO™ toolkit
55 | Refer to https://software.intel.com/en-us/articles/OpenVINO-Install-Linux on how to install and setup the Intel® Distribution of OpenVINO™ toolkit.
56 |
57 | You will need the OpenCL™ Runtime Package if you plan to run inference on the GPU. It is not mandatory for CPU inference.
58 |
59 | ### Other dependencies
60 | #### InfluxDB*
61 |
62 | InfluxDB is a time series database designed to handle high write and query loads. It is an integral component of the TICK stack. InfluxDB is meant to be used as a backing store for any use case involving large amounts of timestamped data, including DevOps monitoring, application metrics, IoT sensor data, and real-time analytics.
63 |
64 | #### Grafana*
65 |
66 | Grafana is an open-source, general purpose dashboard and graph composer, which runs as a web application. It supports Graphite, InfluxDB, Prometheus, Google Stackdriver, AWS CloudWatch, Azure Monitor, Loki, MySQL, PostgreSQL, Microsoft SQL Server, Testdata, Mixed, OpenTSDB and Elasticsearch as backends. Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored.
67 |
68 | #### AJAX*
69 |
70 | The AJAX Panel is a general way to load external content into a grafana dashboard.
71 |
72 | ### Which model to use
73 | The application uses Intel® Pre-Trained models for the feed type `shopper`, i.e. [face-detection-adas-0001](https://docs.openvinotoolkit.org/2020.3/_models_intel_face_detection_adas_0001_description_face_detection_adas_0001.html), [head-pose-estimation-adas-0001](https://docs.openvinotoolkit.org/2020.3/_models_intel_head_pose_estimation_adas_0001_description_head_pose_estimation_adas_0001.html) and [emotions-recognition-retail-0003](https://docs.openvinotoolkit.org/2020.3/_models_intel_emotions_recognition_retail_0003_description_emotions_recognition_retail_0003.html). For the feed type `traffic`, [person-detection-retail-0002](https://docs.openvinotoolkit.org/2020.3/person-detection-retail-0002.html) is used. These models can be downloaded using the **model downloader** script.
74 |
75 | For the video feed type __shelf__, the mobilenet-ssd model is used, which can be downloaded using the `downloader` script present in the Intel® Distribution of OpenVINO™ toolkit.
76 | The `mobilenet-ssd` model is a Single-Shot multibox Detection (SSD) network intended to perform object detection. This model is implemented using the Caffe\* framework. For details about this model, check out the [repository](https://github.com/chuanqi305/MobileNet-SSD).
77 |
78 | To install the dependencies, download the models and optimize the **mobilenet-ssd** model, run the command below:
79 | ```
80 | cd
81 | ./setup.sh
82 | ```
83 | * These models will be downloaded in the locations given below:
84 | * **face-detection**: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/face-detection-adas-0001/
85 | * **head-pose-estimation**: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/head-pose-estimation-adas-0001/
86 | * **emotions-recognition**: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/emotions-recognition-retail-0003/
87 | * **person-detection-retail**: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0002/
88 |
89 |
90 |
91 | ### The Labels file
92 | The shelf feed type in the application requires a _labels_ file associated with the model being used for detection. All detection models work with integer labels rather than string labels (e.g. for the ssd300 and mobilenet-ssd models, the number 15 represents the class "person"); that is why each model must have a _labels_ file, which associates an integer (the label the algorithm detects) with a string (denoting the human-readable label).
93 | The _labels_ file is a text file containing all the classes/labels that the model can recognize, in the order that it was trained to recognize them (one class per line).
94 | For the mobilenet-ssd model, the _labels.txt_ file is provided in the _resources_ directory.
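For illustration only, this is how such a file maps a numeric class ID back to its human-readable label; the indexing below assumes class IDs start at 1, matching the "15 = person" example above.

```
# Illustrative sketch: map a detection class ID to a label from labels.txt.
with open("resources/labels.txt") as f:
    labels = [line.strip() for line in f if line.strip()]

class_id = 15                  # e.g. mobilenet-ssd reports class 15 for "person"
print(labels[class_id - 1])    # -> "person"
```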
95 |
96 | ### The Config file
97 | The **resources/config.json** file contains the input videos along with the video feed type for each.
98 | The _config.json_ file consists of name/value pairs of the form `"video": ` and `"type": `
99 | For example:
100 | ```
101 | {
102 |
103 | "inputs": [
104 |
105 | {
106 | "video": "path-to-video",
107 | "type": "video-feed-type"
108 | }
109 | ]
110 |
111 | }
112 | ```
113 | The `path-to-video` is the path, on the local system, to a video to use as input.
114 |
115 | If the video type is shelf, the label of the class (person, bottle, etc.) to be detected in that video is provided with an additional `"label"` entry. The labels used in the _config.json_ file must be present in the _labels_ file.
116 |
117 | The application can use any number of videos for detection (i.e. the _config.json_ file can have any number of blocks), but the more videos the application processes in parallel, the lower the frame rate of each video becomes. This can be addressed by adding more computation power to the machine the application is running on.
118 |
119 | ### What input video to use
120 | The application works with any input video. Sample videos for object detection are provided [here](https://github.com/intel-iot-devkit/sample-videos/).
121 |
122 | For first-use, we recommend using the [face-demographics-walking](https://github.com/intel-iot-devkit/sample-videos/blob/master/face-demographics-walking.mp4), [head-pose-face-detection-female](https://github.com/intel-iot-devkit/sample-videos/blob/master/head-pose-face-detection-female.mp4), [bottle-detection](https://github.com/intel-iot-devkit/sample-videos/blob/master/bottle-detection.mp4) videos. The videos are automatically downloaded in the `resources/` folder by setup.sh.
123 | For example:
124 | The config.json would be:
125 |
126 | ```
127 | {
128 |
129 | "inputs": [
130 |
131 | {
132 | "video": "sample-videos/head-pose-face-detection-female.mp4",
133 | "type": "shopper"
134 | },
135 | {
136 | "video": "sample-videos/bottle-detection.mp4",
137 | "label": "bottle",
138 | "type": "shelf"
139 | },
140 | {
141 | "video": "sample-videos/face-demographics-walking.mp4",
142 | "type": "traffic"
143 | }
144 | ]
145 |
146 | }
147 | ```
148 | To use any other video, specify its path in the config.json file.
149 |
150 | ### Using camera stream instead of the video file
151 | Replace `path/to/video` in config.json with the camera ID, along with the label to be found, where the ID is taken from the video device (the number X in /dev/videoX).
152 | On Ubuntu, to list all available video devices use the following command:
153 | ```
154 | ls /dev/video*
155 | ```
156 | For example, if the output of above command is `/dev/video0`, then config.json would be:
157 |
158 | ```
159 | {
160 |
161 | "inputs": [
162 |
163 | {
164 | "video": "0",
165 | "type": "shopper"
166 | }
167 | ]
168 |
169 | }
170 | ```
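A minimal sketch of how such a value could be consumed (this is an assumption about the parsing logic, not the application's exact code): a purely numeric `"video"` entry is opened as a camera index, anything else as a video file path.

```
# Hedged sketch: open each input from config.json, treating numeric strings
# as camera indices and everything else as video file paths.
import json
import cv2

with open("resources/config.json") as f:
    cfg = json.load(f)

captures = []
for item in cfg["inputs"]:
    src = item["video"]
    captures.append(cv2.VideoCapture(int(src) if src.isdigit() else src))
```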
171 |
172 | ## Setup the environment
173 | You must configure the environment to use the Intel® Distribution of OpenVINO™ toolkit one time per session by running the following command:
174 | ```
175 | source /opt/intel/openvino/bin/setupvars.sh
176 | ```
177 | __Note__: This command needs to be executed only once in the terminal where the application will be executed. If the terminal is closed, the command needs to be executed again.
178 |
179 | ## Run the application
180 |
181 | Change the current directory to the git-cloned application code location on your system:
182 |
183 | ```
184 | cd /application
185 | ```
186 |
187 | A user can specify a target device for each model by using the device command-line arguments (`-d_fm`, `-d_pm`, `-d_mm`, `-d_om` or `-d_pd`) followed by one of the values `CPU`, `GPU`, `MYRIAD` or `HDDL`.
188 |
189 | If no target device is specified, all the models run on the CPU by default; the CPU can also be specified explicitly with the same command-line arguments.
190 |
191 | To run the application with the required models:
192 | ```
193 | python3 smart_retail_analytics.py -fm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml -pm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml -mm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml -om ../resources/FP32/mobilenet-ssd.xml -pr /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0002/FP32/person-detection-retail-0002.xml -lb ../resources/labels.txt
194 | ```
195 |
196 | Once the command is executed in the terminal, configure the Grafana dashboard using the instructions given in the next section to see the output.
197 | To run the application in sync mode, use **-f sync** as a command-line argument. By default, the application runs in async mode.
198 |
199 | ## Running on different hardware
200 |
201 | The application can use a different hardware accelerator for each model. The user can specify the target device for each model using the command-line arguments below:
202 | * `-d_fm `: Target device for Face Detection network (CPU, GPU, MYRIAD, HETERO:FPGA,CPU or HDDL).
203 | * `-d_pm `: Target device for Head Pose Estimation network (CPU, GPU, MYRIAD, HETERO:FPGA,CPU or HDDL).
204 | * `-d_mm `: Target device for Emotions Recognition network (CPU, GPU, MYRIAD, HETERO:FPGA,CPU or HDDL).
205 | * `-d_om `: Target device for mobilenet-ssd network (CPU, GPU, MYRIAD, HETERO:FPGA,CPU or HDDL).
206 | * `-d_pd `: Target device for Person Detection Retail network (CPU, GPU, MYRIAD, HETERO:FPGA,CPU or HDDL).
207 |
208 |
209 | __For example:__
210 | To run the Face Detection model with FP16 and the Emotions Recognition model with FP32 on the GPU, the Head Pose Estimation model on MYRIAD, and the mobilenet-ssd and person-detection models on the CPU, use the command below:
211 | ```
212 | python3 smart_retail_analytics.py -fm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml -pm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001.xml -mm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml -om ../resources/FP32/mobilenet-ssd.xml -pr /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0002/FP32/person-detection-retail-0002.xml -lb ../resources/labels.txt -d_fm GPU -d_pm MYRIAD -d_mm GPU -d_pd CPU -d_om CPU
213 | ```
214 |
215 | To run with multiple devices use MULTI:device1,device2. For example: `-d_fm MULTI:CPU,GPU,MYRIAD`
216 |
217 | **Note:**
218 | * The Intel® Neural Compute Stick and the Intel® Movidius™ VPU can only run FP16 models. The model that is passed to the application must be of data type FP16.
219 |
220 |
221 | **FP32**: Single-precision floating-point arithmetic that uses 32 bits to represent numbers: 1 bit for the sign, 8 bits for the exponent and 23 bits for the fraction. For more information, [click here](https://en.wikipedia.org/wiki/Single-precision_floating-point_format)
222 | **FP16**: Half-precision floating-point arithmetic that uses 16 bits: 1 bit for the sign, 5 bits for the exponent and 10 bits for the fraction. For more information, [click here](https://en.wikipedia.org/wiki/Half-precision_floating-point_format)
223 |
224 | ### Run on the Intel® Movidius™ VPU
225 |
226 | To run the application on Intel® Movidius™ VPU, configure the hddldaemon by following the below steps:
227 | * Open the hddl_service.config using the below command:
228 | ```
229 | sudo vi ${HDDL_INSTALL_DIR}/config/hddl_service.config
230 | ```
231 | * Update **"device_snapshot_mode": "None"** to **"device_snapshot_mode": "full"**.
232 | * Update HDDL configuration for tags.
233 | ```
234 | "graph_tag_map":{"tagFace":1,"tagPose":1,"tagMood":2,"tagMobile":2,"tagPerson":2}
235 | ```
236 | * Save and close the file.
237 |
238 | * Run hddldaemon.
239 | ```
240 | ${HDDL_INSTALL_DIR}/bin/hddldaemon
241 | ```
242 | To run the application on the Intel® Movidius™ VPU, pass `HDDL` as the target device for each model:
243 | ```
244 | python3 smart_retail_analytics.py -fm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml -pm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001.xml -mm /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/emotions-recognition-retail-0003/FP16/emotions-recognition-retail-0003.xml -om ../resources/FP16/mobilenet-ssd.xml -pr /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0002/FP16/person-detection-retail-0002.xml -lb ../resources/labels.txt -d_pd HDDL -d_fm HDDL -d_pm HDDL -d_mm HDDL -d_om HDDL
245 | ```
246 | ### Visualize on Grafana
247 |
248 | 1. Open a new tab on the terminal and start the Grafana server using the following command:
249 |
250 | ```
251 | sudo service grafana-server start
252 | ```
253 |
254 | 2. In your browser, go to [localhost:3000](http://localhost:3000).
255 |
256 | 3. Log in with user as **admin** and password as **admin**.
257 |
258 | 4. Click on **Configuration**.
259 |
260 | 5. Select **“Data Sources”**.
261 |
262 | 6. Click on **“+ Add data source”** and provide the inputs below (a scripted alternative for this step is sketched after these instructions).
263 |
264 | - *Name*: Retail_Analytics
265 | - *Type*: InfluxDB
266 | - *URL*: http://localhost:8086
267 | - *Database*: Retail_Analytics
268 | - Click on “Save and Test”
269 |
270 | 
271 |
272 | 7. Click on the **+** icon on the left side of the browser and select **import**.
273 |
274 | 8. Click on **Upload.json File**.
275 |
276 | 9. Select the file name __retail-analytics.json__ from smart-retail-analytics-python directory.
277 |
278 | 10. Select "Retail_Analytics" in **Select a influxDB data source**.
279 |
280 | 
281 |
282 | 11. Click on import.
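For reference, the data-source part of these steps can also be scripted against Grafana's HTTP API, which is what docker/DockerGrafanaInfluxKit/grafana/entrypoint.sh does with curl. The sketch below is an equivalent Python version using the `requests` package; the dashboard import itself goes through a separate endpoint and is not shown.

```
# Hedged sketch: register the InfluxDB data source via Grafana's HTTP API,
# mirroring docker/DockerGrafanaInfluxKit/grafana/datasources/influx.json.
import requests

datasource = {
    "name": "Retail_Analytics",
    "type": "influxdb",
    "url": "http://localhost:8086",
    "access": "proxy",
    "database": "Retail_Analytics",
    "basicAuth": False,
}
resp = requests.post("http://admin:admin@localhost:3000/api/datasources",
                     json=datasource)
print(resp.status_code, resp.text)
```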
283 |
284 |
285 | ## Containerize the Application
286 |
287 | To containerize the smart-retail-analytics-python application using Docker containers, follow the instructions provided [here](./docker).
288 |
--------------------------------------------------------------------------------
/Jupyter/README.md:
--------------------------------------------------------------------------------
1 | # Retail Analytics
2 |
3 | | Details | |
4 | |-----------------------|---------------|
5 | | Target OS: | Ubuntu\* 18.04 LTS |
6 | | Programming Language: | Python* 3.5 |
7 | | Time to Complete: | 50-70min |
8 |
9 | 
10 |
11 |
12 | ## What it does
13 | This smart retail analytics application monitors people's activity, counts the total number of people inside a retail store, and keeps a check on the inventory by detecting the products specified by the user. It detects objects on any number of screens using video or camera resources.
14 |
15 | ## Requirements
16 | ### Hardware
17 | * 6th to 8th Generation Intel® Core™ processors with Iris® Pro graphics or Intel® HD Graphics
18 |
19 | ### Software
20 | * [Ubuntu\* 18.04 LTS](http://releases.ubuntu.com/18.04/)
21 | *Note*: We recommend using a 4.14+ Linux* kernel with this software. Run the following command to determine your kernel version:
22 | ```
23 | uname -a
24 | ```
25 | * OpenCL™ Runtime Package
26 | * Intel® Distribution of OpenVINO™ toolkit 2020 R3 release
27 | * Grafana* v5.3.2
28 | * InfluxDB* v1.6.2
29 | * Jupyter* Notebook v5.7.0
30 |
31 | ## How It works
32 | The application uses the Inference Engine included in the Intel® Distribution of OpenVINO™ toolkit. It accepts multiple video input feeds and user can specify the feed type for each video.
33 | There are three feed types that application supports:
34 | * Shopper: If the feed type of the video is shopper, the application grabs the frame from that input stream and uses a Deep Neural Network model for detecting the faces in it. If there is anybody present in the frame, it is counted as a shopper. Once the face is detected, the application uses head-pose estimation model to check the head pose of the person. If the person is looking at the camera then his emotions are detected using emotions recognition model. Using the data obtained from this, it infers if the person is interested or not and gives the total number of people detected. It also measures the duration for which the person is present in the frame and the duration for which he was looking at the camera.
35 |
36 | * Store traffic: If the video feed type is traffic, the application uses a Deep Neural Network model to detect people in the frame. The total number of people visited and the number of people currently present in front the camera is obtained from this.
37 |
38 | * Shelf: This feed type is used to keep a check on the product inventory. If the video feed type is shelf, an object detection model is used to detect the product specified by the user in the frame from this video stream. It detects the objects and gives the number of objects present in the frame.
39 |
40 | The application is capable of processing multiple video input feeds, each with a different feed type. The data obtained from these videos is stored in InfluxDB for analysis and visualized on Grafana. The application uses the Flask Python web framework to live-stream the output videos to Grafana, as sketched below.
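
A minimal sketch of this streaming pattern (not the full application) is shown below. The `get_frames()` generator is a hypothetical stand-in for the real inference loop and the input path is only a placeholder; the actual implementation is the `video_feed` route in `smart_retail_analytics_jupyter.py`.

```
# Minimal sketch: serve processed frames as an MJPEG stream that the
# Grafana AJAX panel can embed. Mirrors the video_feed route in the
# included source; get_frames() stands in for the real inference loop.
import cv2
from flask import Flask, Response

app = Flask(__name__)


def get_frames():
    cap = cv2.VideoCapture("path-to-video")  # placeholder input
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        ok, jpg = cv2.imencode('.jpg', frame)  # encode the frame as JPEG
        if not ok:
            continue
        # Each chunk is one JPEG image in a multipart/x-mixed-replace stream
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')


@app.route('/video_feed')
def video_feed():
    return Response(get_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(host='0.0.0.0')  # the AJAX panel loads this /video_feed URL
```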
41 |
42 |
43 | 
44 |
45 | **Architectural Diagram**
46 |
47 | ## Setup
48 |
49 | ### Get the code
50 |
51 | Steps to clone the smart-retail-analytics reference implementation:
52 |
53 |     sudo apt-get update && sudo apt-get install git
54 |     git clone https://gitlab.devtools.intel.com/reference-implementations/smart-retail-analytics-python.git
55 |
56 | ### Install the Intel® Distribution of OpenVINO™ toolkit
57 | Refer to https://software.intel.com/en-us/articles/OpenVINO-Install-Linux on how to install and setup the Intel® Distribution of OpenVINO™ toolkit.
58 |
59 | You will need the OpenCL™ Runtime Package if you plan to run inference on the GPU. It is not mandatory for CPU inference.
60 |
61 | ### Other dependencies
62 | #### InfluxDB*
63 |
64 | InfluxDB is a time series database designed to handle high write and query loads. It is an integral component of the TICK stack. InfluxDB is meant to be used as a backing store for any use case involving large amounts of timestamped data, including DevOps monitoring, application metrics, IoT sensor data, and real-time analytics.
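
For illustration, a minimal sketch of how this application writes a measurement to InfluxDB with the Python client is shown below. It mirrors the `write_points` calls in the included source; the counts are placeholder values.

```
# Minimal sketch: write a detection count to InfluxDB, as the application does.
# Host, port and database name match the defaults used in this reference
# implementation; the counts below are placeholder values.
import time
from influxdb import InfluxDBClient

client = InfluxDBClient(host="localhost", port=8086, database="Retail_Analytics")
client.create_database("Retail_Analytics")  # safe to call if it already exists

point = [{
    "measurement": "shelf",
    "tags": {"object": "bottle"},
    "fields": {
        "time": time.time(),
        "Current Count": 3,   # placeholder
        "Total Count": 12,    # placeholder
    },
}]
client.write_points(point)  # Grafana panels query this measurement
```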
65 |
66 | #### Grafana*
67 |
68 | Grafana is an open-source, general purpose dashboard and graph composer, which runs as a web application. It supports Graphite, InfluxDB, Prometheus, Google Stackdriver, AWS CloudWatch, Azure Monitor, Loki, MySQL, PostgreSQL, Microsoft SQL Server, Testdata, Mixed, OpenTSDB and Elasticsearch as backends. Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored.
69 |
70 | #### AJAX*
71 |
72 | The AJAX Panel is a general way to load external content into a Grafana dashboard.
73 |
74 | ### Which model to use
75 | For the feed type `shopper`, the application uses Intel® pre-trained models, i.e. [face-detection-adas-0001](https://docs.openvinotoolkit.org/2020.3/_models_intel_face_detection_adas_0001_description_face_detection_adas_0001.html), [head-pose-estimation-adas-0001](https://docs.openvinotoolkit.org/2020.3/_models_intel_head_pose_estimation_adas_0001_description_head_pose_estimation_adas_0001.html) and [emotions-recognition-retail-0003](https://docs.openvinotoolkit.org/2020.3/_models_intel_emotions_recognition_retail_0003_description_emotions_recognition_retail_0003.html). For the feed type `traffic`, [person-detection-retail-0002](https://docs.openvinotoolkit.org/2020.3/person-detection-retail-0002.html) is used. All of these can be downloaded using the **model downloader** script.
76 |
77 | For the video feed type __shelf__, the mobilenet-ssd model is used, which can be downloaded using the `downloader` script included in the Intel® Distribution of OpenVINO™ toolkit.
78 | The `mobilenet-ssd` model is a Single-Shot multibox Detection (SSD) network intended to perform object detection. This model is implemented using the Caffe\* framework. For details about this model, check out the [repository](https://github.com/chuanqi305/MobileNet-SSD).
79 |
80 | To install the dependencies, download the models and optimize the **mobilenet-ssd** model, run the command below:
81 | ```
82 | cd <path_to_the_smart-retail-analytics-python_directory>
83 | ./setup.sh
84 | ```
85 | * These models will be downloaded in the locations given below:
86 | * **face-detection**: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/face-detection-adas-0001/
87 | * **head-pose-estimation**: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/head-pose-estimation-adas-0001/
88 | * **emotions-recognition**: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/emotions-recognition-retail-0003/
89 | * **person-detection-retail**: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0002/
90 |
91 |
92 |
93 | ### The Labels file
94 | The shelf feed type in the application requires a _labels_ file associated with the model being used for detection. All detection models work with integer labels, not string labels (e.g. for the ssd300 and mobilenet-ssd models, the number 15 represents the class "person"). That is why each model must have a _labels_ file, which associates an integer (the label the algorithm detects) with a string (the human-readable label).
95 | The _labels_ file is a text file containing all the classes/labels that the model can recognize, in the order that it was trained to recognize them (one class per line).
96 | For the mobilenet-ssd model, a _labels.txt_ file is provided in the _resources_ directory.
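
For example, a short sketch of how a class name is mapped to the integer label the model reports (this is essentially what `get_used_labels` does in the included source; "bottle" is just an example class and the relative path assumes the Jupyter working directory used later in this README):

```
# Minimal sketch: resolve a class name to the integer label the detector emits,
# using the labels file (one class per line).
with open("../resources/labels.txt") as label_file:
    labels = [line.strip() for line in label_file]

wanted = "bottle"  # example class, as used in the config.json example below
if wanted in labels:
    print("'{}' is class {}".format(wanted, labels.index(wanted)))
else:
    print("'{}' is not present in the labels file".format(wanted))
```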
97 |
98 | ### The Config file
99 | The **resources/config.json** file contains the input videos along with each video's feed type.
100 | The _config.json_ file is made up of name/value pairs, `"video": ` and `"type": `.
101 | For example:
102 | ```
103 | {
104 |
105 | "inputs": [
106 |
107 | {
108 | "video": "path-to-video",
109 | "type": "video-feed-type"
110 | }
111 | ]
112 |
113 | }
114 | ```
115 | The `path-to-video` is the path, on the local system, to a video to use as input.
116 |
117 | If the feed type is shelf, the label of the class (person, bottle, etc.) to be detected in that video is provided with an additional `"label"` key, as shown in the example below. The labels used in the _config.json_ file must be present in the _labels_ file.
118 |
119 | The application can use any number of videos for detection (i.e. the _config.json_ file can have any number of blocks), but the more videos the application processes in parallel, the lower the frame rate of each video becomes. This can be mitigated by adding more compute power to the machine the application is running on.
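
A minimal sketch of how these entries are read (mirroring `parse_conf_file` in the included source; the relative path assumes the Jupyter working directory used later in this README):

```
# Minimal sketch: read the config file and open one capture per entry.
# A numeric "video" value is treated as a camera ID, anything else as a file path.
import json
import cv2

with open("../resources/config.json") as conf:
    config = json.load(conf)

for item in config["inputs"]:
    source = item["video"]
    feed_type = item["type"]   # shopper, traffic or shelf
    label = item.get("label")  # only required for shelf feeds
    cap = cv2.VideoCapture(int(source) if source.isdigit() else source)
    print(feed_type, label, "opened:", cap.isOpened())
```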
120 |
121 | ### What input video to use
122 | The application works with any input video. Sample videos for object detection are provided [here](https://github.com/intel-iot-devkit/sample-videos/).
123 |
124 | For first use, we recommend using the [face-demographics-walking](https://github.com/intel-iot-devkit/sample-videos/blob/master/face-demographics-walking.mp4), [head-pose-face-detection-female](https://github.com/intel-iot-devkit/sample-videos/blob/master/head-pose-face-detection-female.mp4) and [bottle-detection](https://github.com/intel-iot-devkit/sample-videos/blob/master/bottle-detection.mp4) videos. These videos are automatically downloaded to the `resources/` folder by setup.sh.
125 | For example:
126 | The config.json would be:
127 |
128 | ```
129 | {
130 |
131 | "inputs": [
132 |
133 | {
134 | "video": "sample-videos/head-pose-face-detection-female.mp4",
135 | "type": "shopper"
136 | },
137 | {
138 | "video": "sample-videos/bottle-detection.mp4",
139 | "label": "bottle",
140 | "type": "shelf"
141 | },
142 | {
143 | "video": "sample-videos/face-demographics-walking.mp4",
144 | "type": "traffic"
145 | }
146 | ]
147 |
148 | }
149 | ```
150 | To use any other video, specify its path in the config.json file.
151 |
152 | ### Using camera stream instead of the video file
153 | Replace `path-to-video` in config.json with the camera ID (and, for shelf feeds, keep the label to be detected), where the ID is taken from the video device (the number X in /dev/videoX).
154 | On Ubuntu, to list all available video devices use the following command:
155 | ```
156 | ls /dev/video*
157 | ```
158 | For example, if the output of above command is `/dev/video0`, then config.json would be:
159 |
160 | ```
161 | {
162 |
163 | "inputs": [
164 |
165 | {
166 | "video": "0",
167 | "type": "shopper"
168 | }
169 | ]
170 |
171 | }
172 | ```
173 |
174 | ## Setup the environment
175 | You must configure the environment to use the Intel® Distribution of OpenVINO™ toolkit one time per session by running the following command:
176 | ```
177 | source /opt/intel/openvino/bin/setupvars.sh
178 | ```
179 | __Note__: This command needs to be executed only once in the terminal where the application will be executed. If the terminal is closed, the command needs to be executed again.
180 |
181 | ## Run the application on Jupyter*
182 |
183 | * Go to the _smart-retail-analytics-python_ directory and open the Jupyter notebook by running the following command:
184 |
185 | ```
186 | cd <path_to_the_smart-retail-analytics-python_directory>/Jupyter
187 |
188 | jupyter notebook
189 | ```
190 |
219 |
220 | 
221 |
222 |
223 | **Follow the below instructions to run the code on Jupyter:**
224 |
225 | 1. Click on **New** button on the right side of the Jupyter window.
226 |
227 | 2. Click on **Python 3** option from the drop down list.
228 |
229 | 3. Export the below environment variables in the first cell of Jupyter and press **Shift+Enter**.
230 |
231 | ```
232 | %env FACE_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml
233 | %env POSE_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml
234 | %env MOOD_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml
235 | %env PERSON_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0002/FP32/person-detection-retail-0002.xml
236 | %env OBJ_MODEL=../resources/FP32/mobilenet-ssd.xml
237 | %env LABEL_FILE=../resources/labels.txt
238 | ```
239 |
240 | 4. To run the application in sync mode, set the environment variable **%env FLAG=sync** in the first cell. By default, the application runs in async mode.
241 |
242 | 5. Copy the code from **smart_retail_analytics_jupyter.py**, paste it into the next cell and press **Shift+Enter**.
243 | 6. The output of the application will be streamed to Grafana. To configure the Grafana dashboard, follow the instructions in the next section.
244 |
245 | 7. Alternatively, the code can be run in the following way.
246 |
247 | i. Click on the smart_retail_analytics_jupyter.ipynb file in the Jupyter notebook window.
248 |
249 | ii. Click on the Kernel menu and then select **Restart & Run All** from the drop-down list.
250 |
251 | iii. Click on Restart and Run All Cells.
252 |
253 | 
254 |
255 | ## Running on different hardware
256 | The application can use a different hardware accelerator for each model. The user can specify the target device for each model using the environment variables FACE_DEVICE, MOOD_DEVICE, POSE_DEVICE, OBJ_DEVICE and PERSON_DEVICE. The target devices supported by the application are `CPU`, `GPU`, `MYRIAD` and `HDDL`.
257 |
258 | For example:
259 | To run the face detection model with FP16 and the emotions recognition model with FP32 on the GPU, the head pose estimation model on MYRIAD, and the person detection and mobilenet-ssd models on the CPU, export the environment variables given below in the first cell, then click on the Kernel menu and select **Restart & Run All** from the drop-down list to run the application.
260 |
261 | ```
262 | %env FACE_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml
263 | %env POSE_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001.xml
264 | %env MOOD_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml
265 | %env PERSON_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0002/FP32/person-detection-retail-0002.xml
266 | %env OBJ_MODEL=../resources/FP32/mobilenet-ssd.xml
267 | %env LABEL_FILE=../resources/labels.txt
268 | %env FLAG=async
269 | %env FACE_DEVICE=GPU
270 | %env MOOD_DEVICE=GPU
271 | %env POSE_DEVICE=MYRIAD
272 | %env PERSON_DEVICE=CPU
273 | %env OBJ_DEVICE=CPU
274 | ```
275 |
276 | 
277 |
278 | **Note:** The Intel® Neural Compute Stick and Intel® Movidius™ VPU can only run FP16 models. The models passed to the application must be of data type FP16.
279 |
280 | ### Run on the Intel® Movidius™ VPU
281 |
282 | To run the application on the Intel® Movidius™ VPU, configure the hddldaemon by following the steps below:
283 | * Open the hddl_service.config file using the command below:
284 | ```
285 | sudo vi ${HDDL_INSTALL_DIR}/config/hddl_service.config
286 | ```
287 | * Update **"device_snapshot_mode": "None"** to **"device_snapshot_mode": "full"**.
288 | * Update the Intel® Movidius™ VPU configuration for the graph tags:
289 | ```
290 | "graph_tag_map":{"tagFace":1,"tagPose":1,"tagMood":2,"tagMobile":2,"tagPerson":2}
291 | ```
292 | * Save and close the file.
293 |
294 | * Run hddldaemon.
295 | ```
296 | ${HDDL_INSTALL_DIR}/bin/hddldaemon
297 | ```
298 | To run the application on the Intel® Movidius™ VPU, export the environment variables given below in the first cell, then click on the Kernel menu and select **Restart & Run All** from the drop-down list.
299 | ```
300 | %env FACE_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml
301 | %env POSE_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001.xml
302 | %env MOOD_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/emotions-recognition-retail-0003/FP16/emotions-recognition-retail-0003.xml
303 | %env PERSON_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0002/FP16/person-detection-retail-0002.xml
304 | %env OBJ_MODEL=../resources/FP16/mobilenet-ssd.xml
305 | %env LABEL_FILE=../resources/labels.txt
306 | %env FLAG=async
307 | %env FACE_DEVICE=HDDL
308 | %env POSE_DEVICE=HDDL
309 | %env MOOD_DEVICE=HDDL
310 | %env OBJ_DEVICE=HDDL
311 | %env PERSON_DEVICE=HDDL
312 | ```
313 |
314 |
331 | ### Run with multiple devices
332 | To run the application with multiple devices, set the target device to **MULTI:device1,device2**, export the environment variables given below in the first cell, then click on the Kernel menu and select **Restart & Run All** from the drop-down list.
333 | For example: `MULTI:CPU,GPU,MYRIAD`
334 | ```
335 | %env FACE_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml
336 | %env POSE_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001.xml
337 | %env MOOD_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/emotions-recognition-retail-0003/FP16/emotions-recognition-retail-0003.xml
338 | %env PERSON_MODEL=/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/intel/person-detection-retail-0002/FP16/person-detection-retail-0002.xml
339 | %env OBJ_MODEL=../resources/FP16/mobilenet-ssd.xml
340 | %env LABEL_FILE=../resources/labels.txt
341 | %env FLAG=async
342 | %env FACE_DEVICE=MULTI:CPU,GPU,MYRIAD
343 | %env POSE_DEVICE=MULTI:CPU,GPU
344 | %env MOOD_DEVICE=MULTI:GPU,MYRIAD
345 | %env OBJ_DEVICE=MULTI:GPU,CPU,MYRIAD
346 | %env PERSON_DEVICE=MULTI:CPU,GPU,MYRIAD
347 | ```
348 |
349 | ### Visualize on Grafana
350 |
351 | 1. Open a new tab on the terminal and start the Grafana server using the following command:
352 | ```
353 | sudo service grafana-server start
354 | ```
355 |
356 | 2. In your browser, go to [localhost:3000](http://localhost:3000).
357 |
358 | 3. Log in with user as **admin** and password as **admin**.
359 |
360 | 4. Click on **Configuration**.
361 |
362 | 5. Select **“Data Sources”**.
363 |
364 | 6. Click on **“+ Add data source”** and provide the inputs below.
365 |
366 | - *Name*: Retail_Analytics
367 | - *Type*: InfluxDB
368 | - *URL*: http://localhost:8086
369 | - *Database*: Retail_Analytics
370 | - Click on “Save and Test”
371 |
372 | 
373 |
374 | 7. Click on **+** icon present on the left side of the browser, select **import**.
375 |
376 | 8. Click on **Upload.json File**.
377 |
378 | 9. Select the file name "retail-analytics.json" from the smart-retail-analytics-python directory.
379 |
380 | 10. Select "Retail_Analytics" in **Select an InfluxDB data source**.
381 |
382 | 
383 |
384 | 11. Click on **Import**.
385 |
--------------------------------------------------------------------------------
/Jupyter/smart_retail_analytics_jupyter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Copyright (c) 2018 Intel Corporation.
4 | Permission is hereby granted, free of charge, to any person obtaining
5 | a copy of this software and associated documentation files (the
6 | "Software"), to deal in the Software without restriction, including
7 | without limitation the rights to use, copy, modify, merge, publish,
8 | distribute, sublicense, and/or sell copies of the Software, and to
9 | permit persons to whom the Software is furnished to do so, subject to
10 | the following conditions:
11 | The above copyright notice and this permission notice shall be
12 | included in all copies or substantial portions of the Software.
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
14 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
15 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
16 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
17 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
18 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
19 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 | """
21 |
22 | import os
23 | import sys
24 | import math
25 | import time
26 | from collections import namedtuple
27 | from argparse import ArgumentParser
28 | import logging as log
29 | import numpy as np
30 | from inference import Network
31 | from influxdb import InfluxDBClient
32 | from flask import Flask, render_template, Response
33 | import cv2
34 | import json
35 |
36 | # Constants
37 | CONFIG_FILE = "../resources/config.json"
38 | MAX_FRAME_GONE = 3
39 | INTEREST_COUNT_TIME = 5
40 | SENTIMENT_LABEL = ['neutral', 'happy', 'sad', 'surprise', 'anger']
41 | IPADDRESS = "localhost"
42 | PORT = 8086
43 | DATABASE_NAME = "Retail_Analytics"
44 | CENTROID_DISTANCE = 150
45 |
46 | # Global variables
47 | check_feed_type = [False, False, False] # [shopper, traffic, shelf]
48 | centroids = []
49 | tracked_person = []
50 | person_id = 0
51 | interested = 0
52 | not_interested = 0
53 | db_client = None
54 | myriad_plugin = None
55 | Point = namedtuple("Point", "x,y")
56 | accepted_devices = ['CPU', 'GPU', 'MYRIAD', 'HETERO:FPGA,CPU', 'HDDL']
57 | is_async_mode = True
58 | template_dir = os.path.abspath('../templates')
59 |
60 | class Centroid:
61 | """
62 | Store centroid details of the face detected for tracking
63 | """
64 |
65 | def __init__(self, p_id, point, gone_count):
66 | self.id = p_id
67 | self.point = point
68 | self.gone_count = gone_count
69 |
70 |
71 | class Person:
72 | """
73 | Store the data of the people for tracking
74 | """
75 |
76 | def __init__(self, p_id, in_time):
77 | self.id = p_id
78 | self.counted = False
79 | self.gone = False
80 | self.in_time = in_time
81 | self.out_time = None
82 | self.looking = 0
83 | self.positive = 0
84 | self.negative = 0
85 | self.neutral = 0
86 | self.sentiment = ''
87 |
88 |
89 | class VideoCap:
90 | """
91 | Store the data and manage multiple input video feeds
92 | """
93 |
94 | def __init__(self, input_name, input_number, feed_type, labels=[]):
95 | self.vc = cv2.VideoCapture(input_name)
96 | self.input_number = input_number
97 | self.type = feed_type
98 | self.infer_network = None
99 | self.nchw = []
100 | self.utime = time.time()
101 | self.curr_req = 0
102 | self.next_req = 1
103 |
104 | if self.type == 'shopper':
105 | self.nchw_hp = []
106 | self.nchw_md = []
107 | self.thresh = 0.7
108 |
109 | if self.type == 'shelf' or self.type == 'traffic':
110 | self.thresh = 0.145
111 | self.labels = labels
112 | self.labels_map = []
113 | self.last_correct_count = [0] * len(self.labels)
114 | self.total_count = [0] * len(self.labels)
115 | self.current_count = [0] * len(self.labels)
116 | self.changed_count = [False] * len(self.labels)
117 | self.candidate_count = [0] * len(self.labels)
118 | self.candidate_confidence = [0] * len(self.labels)
119 | self.CONF_CANDIDATE_CONFIDENCE = 6
120 |
121 | if self.type == 'traffic':
122 | self.mog = cv2.createBackgroundSubtractorMOG2()
123 | self.CONF_CANDIDATE_CONFIDENCE = 3
124 | self.thresh = 0.45
125 |
126 |
127 | def parse_conf_file():
128 | """
129 | Parse the configuration file and store the data in VideoCap object
130 |
131 | :return video_caps: List of VideoCap object containing input stream data
132 | """
133 | global CONFIG_FILE
134 | global check_feed_type
135 |
136 | video_caps = []
137 |
138 | assert os.path.isfile(CONFIG_FILE), "{} file doesn't exist".format(CONFIG_FILE)
139 | config = json.loads(open(CONFIG_FILE).read())
140 | for idx, item in enumerate(config['inputs']):
141 | labels = []
142 | parse_video = item['video']
143 | input_number = idx + 1
144 | if 'type' in item.keys():
145 | feed_type = item['type']
146 | if len(feed_type) == 0:
147 | print("Ignoring video {}... Format error".format(parse_video))
148 | continue
149 | if feed_type == 'shelf':
150 | check_feed_type[2] = True
151 | if 'label' in item.keys():
152 | labels = [item['label']]
153 | if len(labels) == 0:
154 | print("Ignoring video {}... Format error".format(parse_video))
155 | continue
156 | else:
157 | print("Format error while reading labels for {}".format(feed_type))
158 | continue
159 | elif feed_type == 'traffic':
160 | check_feed_type[1] = True
161 | labels = ['person']
162 | elif feed_type == 'shopper':
163 | check_feed_type[0] = True
164 | if parse_video.isdigit():
165 | video_cap = VideoCap(int(parse_video), input_number, feed_type, labels)
166 | else:
167 | assert os.path.isfile(parse_video), "{} doesn't exist".format(parse_video)
168 | video_cap = VideoCap(parse_video, input_number, feed_type, labels)
169 | video_cap.input_name = parse_video
170 | video_caps.append(video_cap)
171 | else:
172 | print("Feed type not specified for ", parse_video)
173 |
174 | for video_cap in video_caps:
175 | assert video_cap.vc.isOpened(), "Could not open {} for reading".format(video_cap.input_name)
176 | video_cap.input_width = video_cap.vc.get(3)
177 | video_cap.input_height = video_cap.vc.get(4)
178 | if video_cap.type == 'traffic':
179 | video_cap.accumulated_frame = np.zeros(
180 | (int(video_cap.input_height), int(video_cap.input_width)), np.uint8)
181 |
182 | return video_caps
183 |
184 |
185 | def load_model_device(infer_network, model, device, in_size, out_size, num_requests, cpu_extension, tag):
186 | """
187 | Loads the networks
188 |
189 | :param infer_network: Object of the Network() class
190 | :param model: .xml file of pre trained model
191 | :param device: Target device
192 | :param in_size: Number of input layers
193 | :param out_size: Number of output layers
194 | :param num_requests: Index of Infer request value. Limited to device capabilities
195 | :param cpu_extension: extension for the CPU device
196 | :return: Shape of input layer
197 | """
198 | if 'MULTI' not in device and device not in accepted_devices:
199 | print("Unsupported device: " + device)
200 | sys.exit(1)
201 | elif 'MULTI' in device:
202 | target_devices = device.split(':')[1].split(',')
203 | for multi_device in target_devices:
204 | if multi_device not in accepted_devices:
205 | print("Unsupported device: " + device)
206 | sys.exit(1)
207 |
208 | global myriad_plugin
209 | if device == 'MYRIAD':
210 | if myriad_plugin is None:
211 | myriad_plugin, (nchw) = infer_network.load_model(model, device, in_size, out_size, num_requests)
212 | else:
213 | nchw = infer_network.load_model(model, device, in_size, out_size, num_requests, plugin=myriad_plugin)[1]
214 | else:
215 | nchw = infer_network.load_model(model, device, in_size, out_size, num_requests, cpu_extension, tag)[1]
216 |
217 | return nchw
218 |
219 |
220 | def load_models(video_caps):
221 | """
222 | Load the required models
223 |
224 | :param video_caps: List of VideoCap objects
225 | :return: None
226 | """
227 | global check_feed_type
228 | plugin = None
229 |
230 | face_device = os.environ['FACE_DEVICE'] if 'FACE_DEVICE' in os.environ.keys() else "CPU"
231 | mood_device = os.environ['MOOD_DEVICE'] if 'MOOD_DEVICE' in os.environ.keys() else "CPU"
232 | pose_device = os.environ['POSE_DEVICE'] if 'POSE_DEVICE' in os.environ.keys() else "CPU"
233 | obj_device = os.environ['OBJ_DEVICE'] if 'OBJ_DEVICE' in os.environ.keys() else "CPU"
234 | person_device = os.environ['PERSON_DEVICE'] if 'PERSON_DEVICE' in os.environ.keys() else "CPU"
235 |
236 | cpu_extension = os.environ['CPU_EXTENSION'] if 'CPU_EXTENSION' in os.environ.keys() else None
237 | face_model = os.environ['FACE_MODEL'] if 'FACE_MODEL' in os.environ.keys() else None
238 | pose_model = os.environ['POSE_MODEL'] if 'POSE_MODEL' in os.environ.keys() else None
239 | mood_model = os.environ['MOOD_MODEL'] if 'MOOD_MODEL' in os.environ.keys() else None
240 | obj_model = os.environ['OBJ_MODEL'] if 'OBJ_MODEL' in os.environ.keys() else None
241 | person_model = os.environ['PERSON_MODEL'] if 'PERSON_MODEL' in os.environ.keys() else None
242 |
243 | # Check if one of the feed types is "shopper". If yes, load the face, head pose and mood detection models
244 | if check_feed_type[0]:
245 | assert face_model, 'Please specify the path to face detection model using the environment variable FACE_MODEL'
246 | assert pose_model, 'Please specify the path to head pose model using the environment variable POSE_MODEL'
247 | assert mood_model, 'Please specify the path to mood detection model using the environment variable MOOD_MODEL'
248 |
249 | infer_network_face = Network()
250 | infer_network_pose = Network()
251 | infer_network_mood = Network()
252 |
253 | tag_face = {"VPU_HDDL_GRAPH_TAG":"tagFace"}
254 | tag_pose = {"VPU_HDDL_GRAPH_TAG":"tagPose"}
255 | tag_mood = {"VPU_HDDL_GRAPH_TAG":"tagMood"}
256 | nchw_fd = load_model_device(infer_network_face, face_model, face_device, 1, 1, 2, cpu_extension, tag_face)
257 | nchw_hp = load_model_device(infer_network_pose, pose_model, pose_device, 1, 3, 2, cpu_extension, tag_pose)
258 | nchw_md = load_model_device(infer_network_mood, mood_model, mood_device, 1, 1, 2, cpu_extension, tag_mood)
259 |
260 | # If one of the feed types is "shelf", load the mobilenet-ssd model; if one is "traffic", load the person detection model
261 | if check_feed_type[2]:
262 | infer_network = Network()
263 | tag_obj = {"VPU_HDDL_GRAPH_TAG":"tagMobile"}
264 | nchw = load_model_device(infer_network, obj_model, obj_device, 1, 1, 2, cpu_extension, tag_obj)
265 | if check_feed_type[1]:
266 | infer_network_person = Network()
267 | tag_person = {"VPU_HDDL_GRAPH_TAG":"tagPerson"}
268 | nchw_pr = load_model_device(infer_network_person, person_model, person_device, 2, 1, 2,
269 | cpu_extension, tag_person)
270 | for video_cap in video_caps:
271 | if video_cap.type == 'shopper':
272 | video_cap.infer_network = infer_network_face
273 | video_cap.infer_network_hp = infer_network_pose
274 | video_cap.infer_network_md = infer_network_mood
275 | video_cap.nchw.extend(nchw_fd)
276 | video_cap.nchw_hp.extend(nchw_hp)
277 | video_cap.nchw_md.extend(nchw_md)
278 |
279 | if video_cap.type == 'shelf':
280 | video_cap.infer_network = infer_network
281 | video_cap.nchw.extend(nchw)
282 | if video_cap.type == 'traffic':
283 | video_cap.infer_network = infer_network_person
284 | video_cap.nchw.extend(nchw_pr)
285 |
286 |
287 | def object_detection(video_cap, res):
288 | """
289 | Parse the inference result to get the detected object
290 |
291 | :param video_cap: VideoCap object of the frame on which object is detected
292 | :param res: Inference output
293 | :return obj_det: List of coordinates of bounding boxes of the objects detected
294 | """
295 | obj_det = []
296 |
297 | for obj in res[0][0]:
298 | label = int(obj[1]) - 1
299 |
300 | # Draw objects only when probability is more than specified threshold
301 | if obj[2] > video_cap.thresh:
302 | # If the feed type is traffic, look only for persons (label 0)
303 | if video_cap.type == 'traffic' and label == 0:
304 | video_cap.current_count[label] += 1
305 |
306 | # If the feed type is shelf, look only for the objects specified by the user
307 | if video_cap.type == 'shelf':
308 | if label not in video_cap.labels_map:
309 | continue
310 | label_idx = video_cap.labels_map.index(label)
311 | video_cap.current_count[label_idx] += 1
312 |
313 | if obj[3] < 0:
314 | obj[3] = 0
315 | if obj[4] < 0:
316 | obj[4] = 0
317 |
318 | xmin = int(obj[3] * video_cap.input_width)
319 | ymin = int(obj[4] * video_cap.input_height)
320 | xmax = int(obj[5] * video_cap.input_width)
321 | ymax = int(obj[6] * video_cap.input_height)
322 | obj_det.append([xmin, ymin, xmax, ymax])
323 |
324 | return obj_det
325 |
326 |
327 | def get_used_labels(video_caps):
328 | """
329 | Read the model's label file and get the position of labels required by the application
330 |
331 | :param video_caps: List of VideoCap objects
332 | :return labels: List of labels present in the label file
333 | """
334 | global check_feed_type
335 |
336 | if check_feed_type[1] is False and check_feed_type[2] is False:
337 | return
338 |
339 | label_file = os.environ['LABEL_FILE'] if 'LABEL_FILE' in os.environ.keys() else None
340 | assert label_file, "Please specify the path label file using the environmental variable LABEL_FILE"
341 | assert os.path.isfile(label_file), "{} file doesn't exist".format(label_file)
342 | with open(label_file, 'r') as label_fh:
343 | labels = [x.strip() for x in label_fh]
344 |
345 | assert labels != [], "No labels found in {} file".format(label_file)
346 | for video_cap in video_caps:
347 | if video_cap.type == 'shelf' or video_cap.type == 'traffic':
348 | for label in video_cap.labels:
349 | if label in labels:
350 | label_idx = labels.index(label)
351 | video_cap.labels_map.append(label_idx)
352 | else:
353 | video_cap.labels_map.append(False)
354 |
355 | return labels
356 |
357 |
358 | def process_output(video_cap):
359 | """
360 | Count the number of object detected
361 |
362 | :param video_cap: VideoCap object
363 | :return: None
364 | """
365 | for i in range(len(video_cap.labels)):
366 | if video_cap.candidate_count[i] == video_cap.current_count[i]:
367 | video_cap.candidate_confidence[i] += 1
368 | else:
369 | video_cap.candidate_confidence[i] = 0
370 | video_cap.candidate_count[i] = video_cap.current_count[i]
371 |
372 | if video_cap.candidate_confidence[i] == video_cap.CONF_CANDIDATE_CONFIDENCE:
373 | video_cap.candidate_confidence[i] = 0
374 | video_cap.changed_count[i] = True
375 | else:
376 | continue
377 | if video_cap.current_count[i] > video_cap.last_correct_count[i]:
378 | video_cap.total_count[i] += video_cap.current_count[i] - video_cap.last_correct_count[i]
379 |
380 | video_cap.last_correct_count[i] = video_cap.current_count[i]
381 |
382 |
383 | def remove_centroid(p_id):
384 | """
385 | Remove the centroid from the "centroids" list when the person is out of the frame and
386 | set the person.gone variable as true
387 |
388 | :param p_id: ID of the person whose centroid data has to be deleted
389 | :return: None
390 | """
391 | global centroids
392 | global tracked_person
393 |
394 | for idx, centroid in enumerate(centroids):
395 | if centroid.id == p_id:
396 | del centroids[idx]
397 | break
398 |
399 | if tracked_person[p_id]:
400 | tracked_person[p_id].gone = True
401 | tracked_person[p_id].out_time = time.time()
402 |
403 |
404 | def add_centroid(point):
405 | """
406 | Add the centroid of the object to the "centroids" list
407 |
408 | :param point: Centroid point to be added
409 | :return: None
410 | """
411 | global person_id
412 | global centroids
413 | global tracked_person
414 |
415 | centroid = Centroid(person_id, point, gone_count=0)
416 | person = Person(person_id, time.time())
417 | centroids.append(centroid)
418 | tracked_person.append(person)
419 | person_id += 1
420 |
421 |
422 | def closest_centroid(point):
423 | """
424 | Find the closest centroid
425 |
426 | :param point: Coordinate of the point for which the closest centroid point has to be detected
427 | :return p_idx: Id of the closest centroid
428 | dist: Distance of point from the closest centroid
429 | """
430 | global centroids
431 | p_idx = 0
432 | dist = sys.float_info.max
433 |
434 | for idx, centroid in enumerate(centroids):
435 | _point = centroid.point
436 | dx = point.x - _point.x
437 | dy = point.y - _point.y
438 | _dist = math.sqrt(dx * dx + dy * dy)
439 | if _dist < dist:
440 | dist = _dist
441 | p_idx = centroid.id
442 |
443 | return [p_idx, dist]
444 |
445 |
446 | def update_centroid(points, looking, sentiment, fps):
447 | """
448 | Update the centroid data in the centroids list and check whether the person is interested or not interested
449 |
450 | :param points: List of centroids of the faces detected
451 | :param looking: List of bool values indicating if the person is looking at the camera or not
452 | :param sentiment: List containing the mood of the people looking at the camera
453 | :param fps: FPS of the input stream
454 | :return: None
455 | """
456 | global MAX_FRAME_GONE
457 | global INTEREST_COUNT_TIME
458 | global interested
459 | global not_interested
460 | global centroids
461 | global tracked_person
462 |
463 | if len(points) == 0:
464 | for idx, centroid in enumerate(centroids):
465 | centroid.gone_count += 1
466 | if centroid.gone_count > MAX_FRAME_GONE:
467 | remove_centroid(centroid.id)
468 |
469 | if not centroids:
470 | for idx, point in enumerate(points):
471 | add_centroid(point)
472 | else:
473 | checked_points = len(points) * [None]
474 | checked_points_dist = len(points) * [None]
475 | for idx, point in enumerate(points):
476 | p_id, dist = closest_centroid(point)
477 | if dist > CENTROID_DISTANCE:
478 | continue
479 |
480 | if p_id in checked_points:
481 | p_idx = checked_points.index(p_id)
482 | if checked_points_dist[p_idx] > dist:
483 | checked_points[p_idx] = None
484 | checked_points_dist[p_idx] = None
485 |
486 | checked_points[idx] = p_id
487 | checked_points_dist[idx] = dist
488 |
489 | for centroid in centroids:
490 | if centroid.id in checked_points:
491 | p_idx = checked_points.index(centroid.id)
492 | centroid.point = points[p_idx]
493 | centroid.gone_count = 0
494 | else:
495 | centroid.gone_count += 1
496 | if centroid.gone_count > MAX_FRAME_GONE:
497 | remove_centroid(centroid.id)
498 |
499 | for idx in range(len(checked_points)):
500 | if checked_points[idx] is None:
501 | add_centroid(points[idx])
502 | else:
503 | if looking[idx] is True:
504 | tracked_person[checked_points[idx]].sentiment = sentiment[idx]
505 | tracked_person[checked_points[idx]].looking += 1
506 | if sentiment[idx] == "happy" or sentiment[idx] == "surprise":
507 | tracked_person[checked_points[idx]].positive += 1
508 | elif sentiment[idx] == 'sad' or sentiment[idx] == 'anger':
509 | tracked_person[checked_points[idx]].negative += 1
510 | elif sentiment[idx] == 'neutral':
511 | tracked_person[checked_points[idx]].neutral += 1
512 | else:
513 | tracked_person[checked_points[idx]].sentiment = "Not looking"
514 |
515 | for person in tracked_person:
516 | if person.counted is False:
517 | positive = person.positive + person.neutral
518 |
519 | # If the person is looking at the camera for specified time
520 | # and his mood is positive, increment the interested variable
521 | if (person.looking > fps * INTEREST_COUNT_TIME) and (positive > person.negative):
522 | interested += 1
523 | person.counted = True
524 |
525 | # If the person is gone out of the frame, increment the not_interested variable
526 | if person.gone is True:
527 | not_interested += 1
528 | person.counted = True
529 |
530 |
531 | def detect_head_pose_and_emotions(video_cap, object_det, cur_request_id_sh, next_request_id_sh):
532 | """
533 | Detect the head pose and emotions of the faces detected
534 |
535 | :param video_cap: VideoCap object
536 | :param object_det: List of faces detected in the frame
537 | :return: None
538 | """
539 |
540 | global SENTIMENT_LABEL
541 | global centroids
542 | global is_async_mode
543 |
544 | frame_centroids = []
545 | looking = []
546 | sentiment = []
547 |
548 | for face in object_det:
549 | xmin, ymin, xmax, ymax = face
550 |
551 | # Find the centroid of the face
552 | width = xmax - xmin
553 | height = ymax - ymin
554 | x = xmin + int(width / 2)
555 | y = ymin + int(height / 2)
556 | point = Point(x, y)
557 | frame_centroids.append(point)
558 |
559 | # Check the head pose
560 | if is_async_mode:
561 | head_pose = video_cap.next_frame[ymin:ymax, xmin:xmax]
562 | in_frame = cv2.resize(head_pose, (video_cap.nchw_hp[3], video_cap.nchw_hp[2]))
563 | in_frame = in_frame.transpose((2, 0, 1))
564 | in_frame = in_frame.reshape((video_cap.nchw_hp[0], video_cap.nchw_hp[1],
565 | video_cap.nchw_hp[2], video_cap.nchw_hp[3]))
566 |
567 | video_cap.infer_network_hp.exec_net(video_cap.next_req, in_frame)
568 | else:
569 | head_pose = video_cap.frame[ymin:ymax, xmin:xmax]
570 | in_frame = cv2.resize(head_pose, (video_cap.nchw_hp[3], video_cap.nchw_hp[2]))
571 | in_frame = in_frame.transpose((2, 0, 1))
572 | in_frame = in_frame.reshape((video_cap.nchw_hp[0], video_cap.nchw_hp[1],
573 | video_cap.nchw_hp[2], video_cap.nchw_hp[3]))
574 |
575 | video_cap.infer_network_hp.exec_net(video_cap.curr_req, in_frame)
576 |
577 | if video_cap.infer_network_hp.wait(video_cap.curr_req) == 0:
578 |
579 | # Parse head pose detection results
580 | angle_p_fc = video_cap.infer_network_hp.get_output(video_cap.curr_req, "angle_p_fc")
581 | angle_y_fc = video_cap.infer_network_hp.get_output(video_cap.curr_req, "angle_y_fc")
582 |
583 | # Check if the person is looking at the camera
584 | if (angle_y_fc > -22.5) & (angle_y_fc < 22.5) & (angle_p_fc > -22.5) & (angle_p_fc < 22.5):
585 | looking.append(True)
586 |
587 | # Find the emotions of the person
588 | in_frame = cv2.resize(head_pose, (video_cap.nchw_md[3], video_cap.nchw_md[2]))
589 | in_frame = in_frame.transpose((2, 0, 1))
590 | in_frame = in_frame.reshape((video_cap.nchw_md[0], video_cap.nchw_md[1],
591 | video_cap.nchw_md[2], video_cap.nchw_md[3]))
592 | video_cap.infer_network_md.exec_net(0, in_frame)
593 | video_cap.infer_network_md.wait(0)
594 | res = video_cap.infer_network_md.get_output(0)
595 | emotions = np.argmax(res)
596 | sentiment.append(SENTIMENT_LABEL[emotions])
597 | else:
598 | looking.append(False)
599 | sentiment.append(-1)
600 |
601 | if is_async_mode:
602 | video_cap.curr_req, video_cap.next_req = video_cap.next_req, video_cap.curr_req
603 | video_cap.frame = video_cap.next_frame
604 | update_centroid(frame_centroids, looking, sentiment, video_cap.vc.get(cv2.CAP_PROP_FPS))
605 | for idx, centroid in enumerate(centroids):
606 | cv2.rectangle(video_cap.frame, (centroid.point.x, centroid.point.y),
607 | (centroid.point.x + 1, centroid.point.y + 1), (0, 255, 0), 4, 16)
608 | cv2.putText(video_cap.frame, "person:{}".format(centroid.id), (centroid.point.x + 1, centroid.point.y - 5),
609 | cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
610 |
611 |
612 | def heatmap_generation(video_cap):
613 | """
614 | Generates the heatmap
615 |
616 | :param video_cap: VideoCap of input feed for which the heatmap has to be generated
617 | :return: None
618 | """
619 | # Convert to grayscale
620 | gray = cv2.cvtColor(video_cap.frame, cv2.COLOR_BGR2GRAY)
621 |
622 | # Remove the background
623 | fgbgmask = video_cap.mog.apply(gray)
624 |
625 | # Threshold the image
626 | thresh = 2
627 | max_value = 2
628 | threshold_frame = cv2.threshold(fgbgmask, thresh, max_value, cv2.THRESH_BINARY)[1]
629 |
630 | # Add threshold image to the accumulated image
631 | video_cap.accumulated_frame = cv2.add(threshold_frame, video_cap.accumulated_frame)
632 | colormap_frame = cv2.applyColorMap(video_cap.accumulated_frame, cv2.COLORMAP_HOT)
633 | video_cap.frame = cv2.addWeighted(video_cap.frame, 0.6, colormap_frame, 0.4, 0)
634 |
635 |
636 | def update_info_shopper(video_cap):
637 | """
638 | Send "shopper" data to InfluxDB
639 |
640 | :param video_cap: VideoCap object
641 | :return: None
642 | """
643 | global tracked_person
644 | global interested
645 | global not_interested
646 | global db_client
647 |
648 | json_body = [{
649 | "measurement": "{}_interest".format(video_cap.type),
650 | "fields": {
651 | "time": time.time(),
652 | "Interested": interested,
653 | "Not Interested": not_interested,
654 | "Total Count": len(tracked_person)
655 | }
656 | }]
657 | db_client.write_points(json_body)
658 | for person in tracked_person:
659 | if person.gone is False:
660 | tm = time.time() - person.in_time
661 | looking_time = person.looking / video_cap.vc.get(cv2.CAP_PROP_FPS)
662 | json_body = [{
663 | "measurement": "{}_duration".format(video_cap.type),
664 | "fields": {
665 | "person": person.id,
666 | "Looking time": looking_time,
667 | "Time in frame": tm,
668 | "Current Mood": person.sentiment
669 | }
670 | }]
671 | db_client.write_points(json_body)
672 |
673 |
674 | def update_info_object(labels, video_cap):
675 | """
676 | Send "traffic" and "shelf" data to InfluxDB
677 |
678 | :param labels: List of labels present in label file
679 | :param video_cap: VideoCap object
680 | :return: None
681 | """
682 | global db_client
683 |
684 | for idx, label in enumerate(video_cap.labels_map):
685 | json_body = [
686 | {"measurement": video_cap.type,
687 | "tags": {
688 | "object": labels[label],
689 | },
690 | "fields": {
691 | "time": time.time(),
692 | "Current Count": video_cap.current_count[idx],
693 | "Total Count": video_cap.total_count[idx],
694 | }
695 | }]
696 | db_client.write_points(json_body)
697 |
698 |
699 | def create_database():
700 | """
701 | Connect to InfluxDB and create the database
702 |
703 | :return: None
704 | """
705 | global db_client
706 | global IPADDRESS
707 | IPADDRESS = os.environ['DB_IPADDRESS'] if 'DB_IPADDRESS' in os.environ.keys() else "localhost"
708 | proxy = {"http": "http://{}:{}".format(IPADDRESS, PORT)}
709 | db_client = InfluxDBClient(host=IPADDRESS, port=PORT, proxies=proxy, database=DATABASE_NAME)
710 | db_client.create_database(DATABASE_NAME)
711 |
712 |
713 | def retail_analytics():
714 | """
715 | Detect objects on multiple input video feeds and process the output
716 |
717 | :return: None
718 | """
719 |
720 | global centroids
721 | global tracked_person
722 | global db_client
723 | global is_async_mode, cur_request_id, next_request_id
724 |
725 | objdetect = []
726 | log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
727 | logger = log.getLogger()
728 |
729 | video_caps = parse_conf_file()
730 | assert len(video_caps) != 0, "No input source given in Configuration file"
731 | flag = os.environ['FLAG'] if 'FLAG' in os.environ.keys() else "async"
732 | load_models(video_caps)
733 | labels = get_used_labels(video_caps)
734 | create_database()
735 |
736 | min_fps = min([i.vc.get(cv2.CAP_PROP_FPS) for i in video_caps])
737 | no_more_data = [False] * len(video_caps)
738 | frames = [None] * len(video_caps)
739 | start_time = time.time()
740 | if flag == "async":
741 | is_async_mode = True
742 | print('Application running in async mode')
743 | else:
744 | is_async_mode = False
745 | print('Application running in sync mode')
746 | cur_request_id_tr = 0
747 | next_request_id_tr = 1
748 | cur_request_id = 0
749 | next_request_id = 1
750 | cur_request_id_sf = 0
751 | next_request_id_sf = 1
752 | cur_request_id_sh = 0
753 | next_request_id_sh = 1
754 | input_blob = ["data", "im_info"]
755 | det_time = 0
756 | cur_request_id = 0
757 |
758 | # Main loop for object detection in multiple video streams
759 | while True:
760 | for idx, video_cap in enumerate(video_caps):
761 | vfps = int(round(video_cap.vc.get(cv2.CAP_PROP_FPS)))
762 | for i in range(0, int(round(vfps / min_fps))):
763 | if is_async_mode:
764 | ret, video_cap.next_frame = video_cap.vc.read()
765 | else:
766 | ret, video_cap.frame = video_cap.vc.read()
767 |
768 | # If no new frame or error in reading the frame, exit the loop
769 | if not ret:
770 | no_more_data[idx] = True
771 | break
772 |
773 | if video_cap.type == 'traffic' or video_cap.type == 'shelf':
774 | video_cap.current_count = [0] * len(video_cap.labels)
775 | video_cap.changed_count = [False] * len(video_cap.labels)
776 | inf_start = time.time()
777 | if is_async_mode:
778 | in_frame = cv2.resize(video_cap.next_frame, (video_cap.nchw[3], video_cap.nchw[2]))
779 | in_frame = in_frame.transpose((2, 0, 1))
780 | in_frame = in_frame.reshape((video_cap.nchw[0], video_cap.nchw[1], video_cap.nchw[2], video_cap.nchw[3]))
781 | if video_cap.type == 'traffic':
782 | video_cap.infer_network.exec_net(next_request_id_tr, in_frame, input_blob, video_cap.vc.get(3), video_cap.vc.get(4))
783 | cur_request_id = cur_request_id_tr
784 | elif video_cap.type == 'shelf':
785 | video_cap.infer_network.exec_net(next_request_id_sf, in_frame)
786 | cur_request_id = cur_request_id_sf
787 | else:
788 | video_cap.infer_network.exec_net(next_request_id_sh, in_frame)
789 | cur_request_id = cur_request_id_sh
790 | video_cap.frame = video_cap.next_frame
791 | else:
792 | in_frame = cv2.resize(video_cap.frame, (video_cap.nchw[3], video_cap.nchw[2]))
793 | in_frame = in_frame.transpose((2, 0, 1))
794 | in_frame = in_frame.reshape((video_cap.nchw[0], video_cap.nchw[1], video_cap.nchw[2], video_cap.nchw[3]))
795 | if video_cap.type == 'traffic':
796 | video_cap.infer_network.exec_net(cur_request_id_tr, in_frame, input_blob, video_cap.vc.get(3), video_cap.vc.get(4))
797 | elif video_cap.type == 'shelf':
798 | video_cap.infer_network.exec_net(cur_request_id_sf, in_frame)
799 | else:
800 | video_cap.infer_network.exec_net(cur_request_id_sh, in_frame)
801 | if video_cap.infer_network.wait(cur_request_id) == 0:
802 | inf_end = time.time()
803 | det_time = inf_end - inf_start
804 |
805 | # Pass the frame to the inference engine and get the results
806 | res = video_cap.infer_network.get_output(cur_request_id)
807 |
808 | # Process the result obtained from the inference engine
809 | object_det = object_detection(video_cap, res)
810 |
811 | # If the feed type is "traffic" or "shelf", check the current and total count of the object
812 | if video_cap.type == 'traffic' or video_cap.type == 'shelf':
813 | process_output(video_cap)
814 |
815 | # If feed type is "traffic", generate the heatmap
816 | if video_cap.type == 'traffic':
817 | heatmap_generation(video_cap)
818 |
819 | # Send the data to InfluxDB
820 | if time.time() >= video_cap.utime + 1:
821 | update_info_object(labels, video_cap)
822 | video_cap.utime = time.time()
823 |
824 | else:
825 | # Detect head pose and emotions of the faces detected
826 | detect_head_pose_and_emotions(video_cap, object_det, cur_request_id_sh, next_request_id_sh)
827 |
828 | # Send the data to InfluxDB
829 | if time.time() >= video_cap.utime + 1:
830 | update_info_shopper(video_cap)
831 | video_cap.utime = time.time()
832 |
833 | if is_async_mode:
834 | if video_cap.type == 'traffic':
835 | cur_request_id_tr, next_request_id_tr = next_request_id_tr, cur_request_id_tr
836 | elif video_cap.type == 'shopper':
837 | cur_request_id_sh, next_request_id_sh = next_request_id_sh, cur_request_id_sh
838 | else:
839 | cur_request_id_sf, next_request_id_sf = next_request_id_sf, cur_request_id_sf
840 |
841 | fps_time = time.time() - start_time
842 | fps_message = "FPS: {:.3f} fps".format(1 / fps_time)
843 | start_time = time.time()
844 | inf_time_message = "Inference time: N/A for async mode" if is_async_mode else\
845 | "Inference time: {:.3f} ms".format(det_time * 1000)
846 | cv2.putText(video_cap.frame, inf_time_message, (10, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5,
847 | (200, 10, 10), 1)
848 | cv2.putText(video_cap.frame, fps_message, (10, int(video_cap.input_height) - 10),
849 | cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
850 |
851 | # If no new frame, continue to the next input feed
852 | if no_more_data[idx] is True:
853 | continue
854 |
855 | # Print the results on the frame and stream it
856 | message = "Feed Type: {}".format(video_cap.type)
857 | cv2.putText(video_cap.frame, message, (10, 30),
858 | cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
859 |
860 | if video_cap.type == 'traffic' or video_cap.type == 'shelf':
861 | ht = 50
862 | for indx, label in enumerate(video_cap.labels_map):
863 | message = "{} -> Total Count: {}, Current Count: {}".format(labels[label],
864 | video_cap.total_count[indx],
865 | video_cap.current_count[indx])
866 | cv2.putText(video_cap.frame, message, (10, ht), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
867 | ht += 20
868 | else:
869 | message = "Face -> Total Count: {}, Current Count: {}".format(len(tracked_person), len(centroids))
870 | cv2.putText(video_cap.frame, message, (10, 50), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
871 | ht = 75
872 | for person in tracked_person:
873 | if person.gone is False:
874 | message = "Person {} is {}".format(person.id, person.sentiment)
875 | cv2.putText(video_cap.frame, message, (10, ht), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
876 | ht += 20
877 |
878 | frames[idx] = video_cap.frame
879 |
880 | # Resize the processed frames to stream on Grafana
881 | for idx, img in enumerate(frames):
882 | frames[idx] = cv2.resize(img, (480, 360))
883 |
884 | # Encode the frames into a memory buffer.
885 | ret, img = cv2.imencode('.jpg', np.hstack(frames))
886 | img = img.tobytes()
887 |
888 | # Yield the output frame to the server
889 | yield (b'--frame\r\n'
890 | b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n\r\n')
891 |
892 | # If no more frames, exit the loop
893 | if False not in no_more_data:
894 | break
895 |
896 |
897 | # Create object for Flask class
898 | app = Flask(__name__, template_folder=template_dir)
899 | app.logger.disabled = True
900 | log_ = log.getLogger('werkzeug')
901 | log_.disabled = True
902 |
903 |
904 | # Trigger the index() function on opening "0.0.0.0:5000/" URL
905 | @app.route('/')
906 | def index():
907 | """
908 | Trigger the index() function on opening "0.0.0.0:5000/" URL
909 | :return: html file
910 | """
911 | return render_template('index.html')
912 |
913 |
914 | # Trigger the video_feed() function on opening "0.0.0.0:5000/video_feed" URL
915 | @app.route('/video_feed')
916 | def video_feed():
917 | """
918 | Trigger the video_feed() function on opening "0.0.0.0:5000/video_feed" URL
919 | :return:
920 | """
921 | return Response(retail_analytics(), mimetype='multipart/x-mixed-replace; boundary=frame')
922 |
923 |
924 | if __name__ == '__main__':
925 | app.run(host='0.0.0.0')
926 |
927 |
--------------------------------------------------------------------------------
/application/smart_retail_analytics.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Copyright (c) 2018 Intel Corporation.
4 | Permission is hereby granted, free of charge, to any person obtaining
5 | a copy of this software and associated documentation files (the
6 | "Software"), to deal in the Software without restriction, including
7 | without limitation the rights to use, copy, modify, merge, publish,
8 | distribute, sublicense, and/or sell copies of the Software, and to
9 | permit persons to whom the Software is furnished to do so, subject to
10 | the following conditions:
11 | The above copyright notice and this permission notice shall be
12 | included in all copies or substantial portions of the Software.
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
14 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
15 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
16 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
17 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
18 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
19 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 | """
21 |
22 | import os
23 | import sys
24 | import math
25 | import time
26 | from collections import namedtuple
27 | from argparse import ArgumentParser
28 | import logging as log
29 | import numpy as np
30 | from inference import Network
31 | from influxdb import InfluxDBClient
32 | from flask import Flask, render_template, Response
33 | import cv2
34 | import json
35 |
36 | # Constants
37 | CONFIG_FILE = "../resources/config.json"
38 | MAX_FRAME_GONE = 3
39 | INTEREST_COUNT_TIME = 5
40 | SENTIMENT_LABEL = ['neutral', 'happy', 'sad', 'surprise', 'anger']
41 | IPADDRESS = "localhost"
42 | PORT = 8086
43 | DATABASE_NAME = "Retail_Analytics"
44 | CENTROID_DISTANCE = 150
45 |
46 | # Global variables
47 | check_feed_type = [False, False, False] # [shopper, traffic, shelf]
48 | centroids = []
49 | tracked_person = []
50 | person_id = 0
51 | interested = 0
52 | not_interested = 0
53 | db_client = None
54 | myriad_plugin = None
55 | Point = namedtuple("Point", "x,y")
56 | accepted_devices = ['CPU', 'GPU', 'MYRIAD', 'HETERO:FPGA,CPU', 'HDDL']
57 | is_async_mode = True
58 | template_dir = os.path.abspath('../templates')
59 |
60 |
61 | class Centroid:
62 | """
63 | Store centroid details of the face detected for tracking
64 | """
65 | def __init__(self, p_id, point, gone_count):
66 | self.id = p_id
67 | self.point = point
68 | self.gone_count = gone_count
69 |
70 |
71 | class Person:
72 | """
73 | Store the data of the people for tracking
74 | """
75 | def __init__(self, p_id, in_time):
76 | self.id = p_id
77 | self.counted = False
78 | self.gone = False
79 | self.in_time = in_time
80 | self.out_time = None
81 | self.looking = 0
82 | self.positive = 0
83 | self.negative = 0
84 | self.neutral = 0
85 | self.sentiment = ''
86 |
87 |
88 | class VideoCap:
89 | """
90 | Store the data and manage multiple input video feeds
91 | """
92 | def __init__(self, input_name, input_number, feed_type, labels=None):
93 | self.vc = cv2.VideoCapture(input_name)
94 | self.input_number = input_number
95 | self.type = feed_type
96 | self.infer_network = None
97 | self.utime = time.time()
98 | self.nchw = []
99 | self.curr_req = 0
100 | self.next_req = 1
101 | if self.type == 'shopper':
102 | self.nchw_hp = []
103 | self.nchw_md = []
104 | self.thresh = 0.7
105 |
106 | if self.type == 'shelf' or self.type == 'traffic':
107 | self.thresh = 0.145
108 | self.labels = labels
109 | self.labels_map = []
110 | self.last_correct_count = [0] * len(self.labels)
111 | self.total_count = [0] * len(self.labels)
112 | self.current_count = [0] * len(self.labels)
113 | self.changed_count = [False] * len(self.labels)
114 | self.candidate_count = [0] * len(self.labels)
115 | self.candidate_confidence = [0] * len(self.labels)
116 | self.CONF_CANDIDATE_CONFIDENCE = 6
117 | if self.type == 'traffic':
118 | self.nchw_pr = []
119 | self.mog = cv2.createBackgroundSubtractorMOG2()
120 | self.CONF_CANDIDATE_CONFIDENCE = 3
121 | self.thresh = 0.45
122 |
123 |
124 | def args_parser():
125 | """
126 | Parse command line arguments.
127 |
128 | :return: Command line arguments
129 | """
130 | parser = ArgumentParser()
131 | parser.add_argument("-fm", "--facemodel", help="Path to an .xml file with a pre-trained face detection model")
132 | parser.add_argument("-pm", "--posemodel", help="Path to an .xml file with a pre-trained model head pose model")
133 | parser.add_argument("-mm", "--moodmodel", help="Path to an .xml file with a pre-trained model "
134 | "mood detection model")
135 | parser.add_argument("-om", "--objmodel", help="Path to an .xml file with a pre-trained model"
136 | " object detection model")
137 | parser.add_argument("-pr", "--personmodel", help="Path to an .xml file with a pre-trained model "
138 | "person detection model")
139 | parser.add_argument("-l", "--cpu_extension", type=str, default=None,
140 | help="MKLDNN (CPU)-targeted custom layers. Absolute "
141 | "path to a shared library with the kernels impl.")
142 | parser.add_argument("-d_fm", "--facedevice", default="CPU", type=str,
143 | help="Specify the target device for face detection model to infer on; "
144 | "CPU, GPU, FPGA or MYRIAD is acceptable. To run with multiple devices use "
145 | "MULTI:,,etc. Looks" "for a suitable plugin for device specified"
146 | "(CPU by default)")
147 | parser.add_argument("-d_pm", "--posedevice", default="CPU", type=str,
148 | help="Specify the target device for head pose model to infer on; "
149 | "CPU, GPU, FPGA or MYRIAD is acceptable. To run with multiple devices use "
150 | "MULTI:,,etc. Looks" "for a suitable plugin for device specified"
151 | "(CPU by default)")
152 | parser.add_argument("-d_mm", "--mooddevice", default="CPU", type=str,
153 | help="Specify the target device for mood model to infer on; "
154 | "CPU, GPU, FPGA or MYRIAD is acceptable. To run with multiple devices use "
155 | "MULTI:,,etc. Looks" "for a suitable plugin for device specified"
156 | "(CPU by default)")
157 | parser.add_argument("-d_om", "--objectdevice", default="CPU", type=str,
158 | help="Specify the target device for object detection model to infer on; "
159 | "CPU, GPU, FPGA or MYRIAD is acceptable. To run with multiple devices use "
160 | "MULTI:,,etc. Looks" "for a suitable plugin for device specified"
161 | "(CPU by default)")
162 | parser.add_argument("-d_pd", "--persondevice", default="CPU", type=str,
163 | help="Specify the target device for person detection model to infer on; "
164 | "CPU, GPU, FPGA or MYRIAD is acceptable. To run with multiple devices use "
165 | "MULTI:,,etc. Looks" "for a suitable plugin for device specified"
166 | "(CPU by default)")
167 | parser.add_argument("-lb", "--labels", help="Labels mapping file", default=None, type=str)
168 | parser.add_argument("-f", "--flag", help="sync or async", default="async", type=str)
169 | parser.add_argument("-ip", "--db_ipaddress", help="InfluxDB container IP address", default="localhost")
170 | args = parser.parse_args()
171 | return args
172 |
173 |
174 | def check_args(args):
175 | """
176 | Validate the command line arguments
177 |
178 | :param args: Command line arguments
179 | :return: None
180 | """
181 | global check_feed_type
182 | global IPADDRESS
183 |
184 | if check_feed_type[0] is True:
185 | assert args.facemodel, 'Please specify the path to face detection model using the argument --facemodel or -fm'
186 | assert args.posemodel, 'Please specify the path to head pose model using the argument --posemodel or -pm'
187 | assert args.moodmodel, 'Please specify the path to mood detection model using the argument --moodmodel or -mm'
188 |
189 | if check_feed_type[1] is True:
190 | assert args.personmodel, 'Please specify the path to person detection model using the argument ' \
191 | '--personmodel or -pr'
192 |
193 | if check_feed_type[2] is True:
194 | assert args.objmodel, 'Please specify the path to object detection model using the argument --objmodel or -om'
195 | assert args.labels, 'Please specify the path to the label file using the argument --labels or -lb'
196 |
197 | IPADDRESS = args.db_ipaddress
198 |
199 |
200 | def parse_conf_file():
201 | """
202 | Parse the configuration file and store the data in VideoCap object
203 |
204 | :return video_caps: List of VideoCap object containing input stream data
205 | """
206 | global CONFIG_FILE
207 | global check_feed_type
208 |
209 | video_caps = []
210 |
211 | assert os.path.isfile(CONFIG_FILE), "{} file doesn't exist".format(CONFIG_FILE)
212 | config = json.loads(open(CONFIG_FILE).read())
213 | for idx, item in enumerate(config['inputs']):
214 | labels = []
215 | parse_video = item['video']
216 | input_number = idx + 1
217 | if 'type' in item.keys():
218 | feed_type = item['type']
219 | if len(feed_type) == 0:
220 | print("Ignoring video {}... Format error".format(parse_video))
221 | continue
222 | if feed_type == 'shelf':
223 | check_feed_type[2] = True
224 | if 'label' in item.keys():
225 | labels = [item['label']]
226 | if len(labels) == 0:
227 | print("Ignoring video {}... Format error".format(parse_video))
228 | continue
229 | else:
230 | print("Format error while reading labels for {}".format(feed_type))
231 | continue
232 | elif feed_type == 'traffic':
233 | check_feed_type[1] = True
234 | labels = ['person']
235 | elif feed_type == 'shopper':
236 | check_feed_type[0] = True
237 | if parse_video.isdigit():
238 | video_cap = VideoCap(int(parse_video), input_number, feed_type, labels)
239 | else:
240 | assert os.path.isfile(parse_video), "{} doesn't exist".format(parse_video)
241 | video_cap = VideoCap(parse_video, input_number, feed_type, labels)
242 | video_cap.input_name = parse_video
243 | video_caps.append(video_cap)
244 | else:
245 | print("Feed type not specified for ", parse_video)
246 |
247 | for video_cap in video_caps:
248 | assert video_cap.vc.isOpened(), "Could not open {} for reading".format(video_cap.input_name)
249 | video_cap.input_width = video_cap.vc.get(3)
250 | video_cap.input_height = video_cap.vc.get(4)
251 | if video_cap.type == 'traffic':
252 | video_cap.accumulated_frame = np.zeros(
253 | (int(video_cap.input_height), int(video_cap.input_width)), np.uint8)
254 |
255 | return video_caps
256 |
257 |
258 | def load_model_device(infer_network, model, device, in_size, out_size, num_requests, cpu_extension, tag):
259 | """
260 | Loads the networks
261 |
262 | :param infer_network: Object of the Network() class
263 | :param model: .xml file of the pre-trained model
264 | :param device: Target device
265 | :param in_size: Number of input layers
266 | :param out_size: Number of output layers
267 | :param num_requests: Number of inference requests. Limited to device capabilities
268 | :param cpu_extension: extension for the CPU device
269 | :return: Shape of input layer
270 | """
271 | if 'MULTI' not in device and device not in accepted_devices:
272 | print("Unsupported device: " + device)
273 | sys.exit(1)
274 | elif 'MULTI' in device:
275 | target_devices = device.split(':')[1].split(',')
276 | for multi_device in target_devices:
277 | if multi_device not in accepted_devices:
278 | print("Unsupported device: " + device)
279 | sys.exit(1)
280 |
281 | global myriad_plugin
282 | if device == 'MYRIAD':
283 | if myriad_plugin is None:
284 | myriad_plugin, (nchw) = infer_network.load_model(model, device, in_size, out_size, num_requests)
285 | else:
286 | nchw = infer_network.load_model(model, device, in_size, out_size, num_requests, plugin=myriad_plugin)[1]
287 | else:
288 | nchw = infer_network.load_model(model, device, in_size, out_size, num_requests, cpu_extension, tag)[1]
289 |
290 | return nchw
291 |
292 |
293 | def load_models(video_caps, args):
294 | """
295 | Load the required models
296 |
297 | :param video_caps: List of VideoCap objects
298 | :param args: Command line arguments
299 | :return: None
300 | """
301 | global check_feed_type
302 |
303 | # Check if one of the feed types is "shopper". If yes, load the face, head pose and mood detection models
304 | if check_feed_type[0]:
305 | infer_network_face = Network()
306 | infer_network_pose = Network()
307 | infer_network_mood = Network()
308 | tag_face = {"VPU_HDDL_GRAPH_TAG": "tagFace"}
309 | tag_pose = {"VPU_HDDL_GRAPH_TAG": "tagPose"}
310 | tag_mood = {"VPU_HDDL_GRAPH_TAG": "tagMood"}
311 | nchw_fd = load_model_device(infer_network_face, args.facemodel, args.facedevice, 1, 1, 2,
312 | args.cpu_extension, tag_face)
313 | nchw_hp = load_model_device(infer_network_pose, args.posemodel, args.posedevice, 1, 3, 2,
314 | args.cpu_extension, tag_pose)
315 | nchw_md = load_model_device(infer_network_mood, args.moodmodel, args.mooddevice, 1, 1, 2,
316 | args.cpu_extension, tag_mood)
317 |
318 | if check_feed_type[2]:
319 | infer_network = Network()
320 | tag_obj = {"VPU_HDDL_GRAPH_TAG": "tagMobile"}
321 | nchw = load_model_device(infer_network, args.objmodel, args.objectdevice, 1, 1, 2, args.cpu_extension, tag_obj)
322 |
323 | if check_feed_type[1]:
324 | infer_network_person = Network()
325 | tag_person = {"VPU_HDDL_GRAPH_TAG": "tagPerson"}
326 | nchw_pr = load_model_device(infer_network_person, args.personmodel, args.persondevice, 2, 1, 2,
327 | args.cpu_extension, tag_person)
328 |
329 | for video_cap in video_caps:
330 | if video_cap.type == 'shopper':
331 | video_cap.infer_network = infer_network_face
332 | video_cap.infer_network_hp = infer_network_pose
333 | video_cap.infer_network_md = infer_network_mood
334 | video_cap.nchw.extend(nchw_fd)
335 | video_cap.nchw_hp.extend(nchw_hp)
336 | video_cap.nchw_md.extend(nchw_md)
337 |
338 | if video_cap.type == 'shelf':
339 | video_cap.infer_network = infer_network
340 | video_cap.nchw.extend(nchw)
341 |
342 | if video_cap.type == 'traffic':
343 | video_cap.infer_network = infer_network_person
344 | video_cap.nchw.extend(nchw_pr)
345 |
346 |
347 | def object_detection(video_cap, res):
348 | """
349 | Parse the inference result to get the detected objects
350 |
351 | :param video_cap: VideoCap object of the frame on which the object is detected
352 | :param res: Inference output
353 | :return obj_det: List of coordinates of bounding boxes of the objects detected
354 | """
355 | obj_det = []
356 |
357 | for obj in res[0][0]:
358 | label = int(obj[1]) - 1
359 |
360 | # Consider only objects whose detection probability is above the specified threshold
361 | if obj[2] > video_cap.thresh:
362 | # If the feed type is traffic, count only the people detected
363 | if video_cap.type == 'traffic' and label == 0:
364 | video_cap.current_count[label] += 1
365 |
366 | # If the feed type is shelf, count only the objects specified by the user
367 | if video_cap.type == 'shelf':
368 | if label not in video_cap.labels_map:
369 | continue
370 | label_idx = video_cap.labels_map.index(label)
371 | video_cap.current_count[label_idx] += 1
372 |
373 | if obj[3] < 0:
374 | obj[3] = 0
375 | if obj[4] < 0:
376 | obj[4] = 0
377 |
378 | xmin = int(obj[3] * video_cap.input_width)
379 | ymin = int(obj[4] * video_cap.input_height)
380 | xmax = int(obj[5] * video_cap.input_width)
381 | ymax = int(obj[6] * video_cap.input_height)
382 | obj_det.append([xmin, ymin, xmax, ymax])
383 | return obj_det
384 |
385 |
386 | def get_used_labels(video_caps, args):
387 | """
388 | Read the model's label file and get the position of labels required by the application
389 |
390 | :param video_caps: List of VideoCap objects
391 | :param args: Command line arguments
392 | :return labels: List of labels present in the label file
393 | """
394 | global check_feed_type
395 |
396 | if check_feed_type[1] is False and check_feed_type[2] is False:
397 | return
398 |
399 | assert os.path.isfile(args.labels), "{} file doesn't exist".format(args.labels)
400 | with open(args.labels, 'r') as label_file:
401 | labels = [x.strip() for x in label_file]
402 |
403 | assert labels != [], "No labels found in {} file".format(args.labels)
404 | for video_cap in video_caps:
405 | if video_cap.type == 'shelf' or video_cap.type == 'traffic':
406 | for label in video_cap.labels:
407 | if label in labels:
408 | label_idx = labels.index(label)
409 | video_cap.labels_map.append(label_idx)
410 | else:
411 | video_cap.labels_map.append(False)
412 |
413 | return labels
414 |
415 |
416 | def process_output(video_cap):
417 | """
418 | Count the number of objects detected
419 |
420 | :param video_cap: VideoCap object
421 | :return: None
422 | """
423 | for i in range(len(video_cap.labels)):
424 | if video_cap.candidate_count[i] == video_cap.current_count[i]:
425 | video_cap.candidate_confidence[i] += 1
426 | else:
427 | video_cap.candidate_confidence[i] = 0
428 | video_cap.candidate_count[i] = video_cap.current_count[i]
429 |
430 | if video_cap.candidate_confidence[i] == video_cap.CONF_CANDIDATE_CONFIDENCE:
431 |
432 | video_cap.candidate_confidence[i] = 0
433 | video_cap.changed_count[i] = True
434 | else:
435 | continue
436 | if video_cap.current_count[i] > video_cap.last_correct_count[i]:
437 | video_cap.total_count[i] += video_cap.current_count[i] - video_cap.last_correct_count[i]
438 | video_cap.last_correct_count[i] = video_cap.current_count[i]
439 |
440 |
441 | def remove_centroid(p_id):
442 | """
443 | Remove the centroid from the "centroids" list when the person is out of the frame and
444 | set the person.gone variable as true
445 |
446 | :param p_id: ID of the person whose centroid data has to be deleted
447 | :return: None
448 | """
449 | global centroids
450 | global tracked_person
451 |
452 | for idx, centroid in enumerate(centroids):
453 | if centroid.id == p_id:
454 | del centroids[idx]
455 | break
456 |
457 | if tracked_person[p_id]:
458 | tracked_person[p_id].gone = True
459 | tracked_person[p_id].out_time = time.time()
460 |
461 |
462 | def add_centroid(point):
463 | """
464 | Add the centroid of the object to the "centroids" list
465 |
466 | :param point: Centroid point to be added
467 | :return: None
468 | """
469 | global person_id
470 | global centroids
471 | global tracked_person
472 |
473 | centroid = Centroid(person_id, point, gone_count=0)
474 | person = Person(person_id, time.time())
475 | centroids.append(centroid)
476 | tracked_person.append(person)
477 | person_id += 1
478 |
479 |
480 | def closest_centroid(point):
481 | """
482 | Find the closest centroid
483 |
484 | :param point: Coordinate of the point for which the closest centroid point has to be detected
485 | :return p_idx: Id of the closest centroid
486 | dist: Distance of point from the closest centroid
487 | """
488 | global centroids
489 | p_idx = 0
490 | dist = sys.float_info.max
491 |
492 | for idx, centroid in enumerate(centroids):
493 | _point = centroid.point
494 | dx = point.x - _point.x
495 | dy = point.y - _point.y
496 | _dist = math.sqrt(dx*dx + dy*dy)
497 | if _dist < dist:
498 | dist = _dist
499 | p_idx = centroid.id
500 |
501 | return [p_idx, dist]
502 |
503 |
504 | def update_centroid(points, looking, sentiment, fps):
505 | """
506 | Update the centroid data in the centroids list and check whether the person is interested or not interested
507 |
508 | :param points: List of centroids of the faces detected
509 | :param looking: List of bool values indicating if the person is looking at the camera or not
510 | :param sentiment: List containing the mood of the people looking at the camera
511 | :param fps: FPS of the input stream
512 | :return: None
513 | """
514 | global MAX_FRAME_GONE
515 | global INTEREST_COUNT_TIME
516 | global interested
517 | global not_interested
518 | global centroids
519 | global tracked_person
520 |
521 | if len(points) == 0:
522 | for idx, centroid in enumerate(centroids):
523 | centroid.gone_count += 1
524 | if centroid.gone_count > MAX_FRAME_GONE:
525 | remove_centroid(centroid.id)
526 |
527 | if not centroids:
528 | for idx, point in enumerate(points):
529 | add_centroid(point)
530 | else:
531 | checked_points = len(points) * [None]
532 | checked_points_dist = len(points) * [None]
533 | for idx, point in enumerate(points):
534 | p_id, dist = closest_centroid(point)
535 | if dist > CENTROID_DISTANCE:
536 | continue
537 |
538 | if p_id in checked_points:
539 | p_idx = checked_points.index(p_id)
540 | if checked_points_dist[p_idx] > dist:
541 | checked_points[p_idx] = None
542 | checked_points_dist[p_idx] = None
543 |
544 | checked_points[idx] = p_id
545 | checked_points_dist[idx] = dist
546 |
547 | for centroid in centroids:
548 | if centroid.id in checked_points:
549 | p_idx = checked_points.index(centroid.id)
550 | centroid.point = points[p_idx]
551 | centroid.gone_count = 0
552 | else:
553 | centroid.gone_count += 1
554 | if centroid.gone_count > MAX_FRAME_GONE:
555 | remove_centroid(centroid.id)
556 |
557 | for idx in range(len(checked_points)):
558 | if checked_points[idx] is None:
559 | add_centroid(points[idx])
560 | else:
561 | if looking[idx] is True:
562 | tracked_person[checked_points[idx]].sentiment = sentiment[idx]
563 | tracked_person[checked_points[idx]].looking += 1
564 | if sentiment[idx] == "happy" or sentiment[idx] == "surprise":
565 | tracked_person[checked_points[idx]].positive += 1
566 | elif sentiment[idx] == 'sad' or sentiment[idx] == 'anger':
567 | tracked_person[checked_points[idx]].negative += 1
568 | elif sentiment[idx] == 'neutral':
569 | tracked_person[checked_points[idx]].neutral += 1
570 | else:
571 | tracked_person[checked_points[idx]].sentiment = "Not looking"
572 |
573 | for person in tracked_person:
574 | if person.counted is False:
575 | positive = person.positive + person.neutral
576 |
577 | # If the person has been looking at the camera for the specified time
578 | # and their mood is positive, increment the interested variable
579 | if (person.looking > fps * INTEREST_COUNT_TIME) and (positive > person.negative):
580 | interested += 1
581 | person.counted = True
582 |
583 | # If the person has gone out of the frame, increment the not_interested variable
584 | if person.gone is True:
585 | not_interested += 1
586 | person.counted = True
587 |
588 |
589 | def detect_head_pose_and_emotions(video_cap, object_det):
590 | """
591 | Detect the head pose and emotions of the faces detected
592 |
593 | :param video_cap: VideoCap object
594 | :param object_det: List of faces detected in the frame
595 | :return: None
596 | """
597 |
598 | global SENTIMENT_LABEL
599 | global centroids
600 | global is_async_mode
601 |
602 | frame_centroids = []
603 | looking = []
604 | sentiment = []
605 |
606 | for face in object_det:
607 | xmin, ymin, xmax, ymax = face
608 |
609 | # Find the centroid of the face
610 | width = xmax - xmin
611 | height = ymax - ymin
612 | x = xmin + int(width / 2)
613 | y = ymin + int(height / 2)
614 | point = Point(x, y)
615 | frame_centroids.append(point)
616 |
617 | # Check the head pose
618 | if is_async_mode:
619 | head_pose = video_cap.next_frame[ymin:ymax, xmin:xmax]
620 | in_frame = cv2.resize(head_pose, (video_cap.nchw_hp[3], video_cap.nchw_hp[2]))
621 | in_frame = in_frame.transpose((2, 0, 1))
622 | in_frame = in_frame.reshape((video_cap.nchw_hp[0], video_cap.nchw_hp[1],
623 | video_cap.nchw_hp[2], video_cap.nchw_hp[3]))
624 |
625 | video_cap.infer_network_hp.exec_net(video_cap.next_req, in_frame)
626 | else:
627 | head_pose = video_cap.frame[ymin:ymax, xmin:xmax]
628 | in_frame = cv2.resize(head_pose, (video_cap.nchw_hp[3], video_cap.nchw_hp[2]))
629 | in_frame = in_frame.transpose((2, 0, 1))
630 | in_frame = in_frame.reshape((video_cap.nchw_hp[0], video_cap.nchw_hp[1],
631 | video_cap.nchw_hp[2], video_cap.nchw_hp[3]))
632 |
633 | video_cap.infer_network_hp.exec_net(video_cap.curr_req, in_frame)
634 | if video_cap.infer_network_hp.wait(video_cap.curr_req) == 0:
635 |
636 | # Parse head pose detection results
637 | angle_p_fc = video_cap.infer_network_hp.get_output(video_cap.curr_req, "angle_p_fc")
638 | angle_y_fc = video_cap.infer_network_hp.get_output(video_cap.curr_req, "angle_y_fc")
639 |
640 | # Check if the person is looking at the camera
641 | if (angle_y_fc > -22.5) & (angle_y_fc < 22.5) & (angle_p_fc > -22.5) & (angle_p_fc < 22.5):
642 | looking.append(True)
643 |
644 | # Find the emotions of the person
645 | in_frame = cv2.resize(head_pose, (video_cap.nchw_md[3], video_cap.nchw_md[2]))
646 | in_frame = in_frame.transpose((2, 0, 1))
647 | in_frame = in_frame.reshape((video_cap.nchw_md[0], video_cap.nchw_md[1],
648 | video_cap.nchw_md[2], video_cap.nchw_md[3]))
649 | if is_async_mode:
650 | video_cap.infer_network_md.exec_net(video_cap.next_req, in_frame)
651 | else:
652 | video_cap.infer_network_md.exec_net(video_cap.curr_req, in_frame)
653 | video_cap.infer_network_md.wait(video_cap.curr_req)
654 | res = video_cap.infer_network_md.get_output(video_cap.curr_req)
655 | emotions = np.argmax(res)
656 | sentiment.append(SENTIMENT_LABEL[emotions])
657 | else:
658 | looking.append(False)
659 | sentiment.append(-1)
660 | if is_async_mode:
661 | video_cap.curr_req, video_cap.next_req = video_cap.next_req, video_cap.curr_req
662 | video_cap.frame = video_cap.next_frame
663 | update_centroid(frame_centroids, looking, sentiment, video_cap.vc.get(cv2.CAP_PROP_FPS))
664 | for idx, centroid in enumerate(centroids):
665 | cv2.rectangle(video_cap.frame, (centroid.point.x, centroid.point.y),
666 | (centroid.point.x + 1, centroid.point.y + 1), (0, 255, 0), 4, 16)
667 | cv2.putText(video_cap.frame, "person:{}".format(centroid.id), (centroid.point.x + 1, centroid.point.y - 5),
668 | cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
669 |
670 |
671 | def heatmap_generation(video_cap):
672 | """
673 | Generates the heatmap
674 |
675 | :param video_cap: VideoCap of input feed for which the heatmap has to be generated
676 | :return: None
677 | """
678 | # Convert to grayscale
679 | gray = cv2.cvtColor(video_cap.frame, cv2.COLOR_BGR2GRAY)
680 |
681 | # Remove the background
682 | fgbgmask = video_cap.mog.apply(gray)
683 |
684 | # Threshold the image
685 | thresh = 2
686 | max_value = 2
687 | threshold_frame = cv2.threshold(fgbgmask, thresh, max_value, cv2.THRESH_BINARY)[1]
688 |
689 | # Add threshold image to the accumulated image
690 | video_cap.accumulated_frame = cv2.add(threshold_frame, video_cap.accumulated_frame)
691 | colormap_frame = cv2.applyColorMap(video_cap.accumulated_frame, cv2.COLORMAP_HOT)
692 | video_cap.frame = cv2.addWeighted(video_cap.frame, 0.6, colormap_frame, 0.4, 0)
693 |
694 |
695 | def update_info_shopper(video_cap):
696 | """
697 | Send "shopper" data to InfluxDB
698 |
699 | :param video_cap: VideoCap object of the input feed
700 | :return: None
701 | """
702 | global tracked_person
703 | global interested
704 | global not_interested
705 | global db_client
706 | json_body = [{"measurement": "{}_interest".format(video_cap.type),
707 | "fields": {
708 | "time": time.time(),
709 | "Interested": interested,
710 | "Not Interested": not_interested,
711 | "Total Count": len(tracked_person)
712 | }
713 | }]
714 | db_client.write_points(json_body)
715 | for person in tracked_person:
716 | if person.gone is False:
717 | tm = time.time() - person.in_time
718 | looking_time = person.looking/video_cap.vc.get(cv2.CAP_PROP_FPS)
719 | json_body = [{
720 | "measurement": "{}_duration".format(video_cap.type),
721 | "fields": {
722 | "person": person.id,
723 | "Looking time": looking_time,
724 | "Time in frame": tm,
725 | "Current Mood": person.sentiment
726 | }
727 | }]
728 | db_client.write_points(json_body)
729 |
730 |
731 | def update_info_object(labels, video_cap):
732 | """
733 | Send "traffic" and "shelf" data to InfluxDB
734 |
735 | :param labels: List of labels present in label file
736 | :param video_cap: VideoCap object
737 | :return: None
738 | """
739 | global db_client
740 | for idx, label in enumerate(video_cap.labels_map):
741 | json_body = [
742 | {
743 | "measurement": video_cap.type,
744 | "tags": {
745 | "object": labels[label],
746 | },
747 | "fields": {
748 | "time": time.time(),
749 | "Current Count": video_cap.current_count[idx],
750 | "Total Count": video_cap.total_count[idx],
751 | }
752 | }]
753 | db_client.write_points(json_body)
754 |
755 |
756 | def create_database():
757 | """
758 | Connect to InfluxDB and create the database
759 |
760 | :return: None
761 | """
762 | global db_client
763 |
764 | proxy = {"http": "http://{}:{}".format(IPADDRESS, PORT)}
765 | db_client = InfluxDBClient(host=IPADDRESS, port=PORT, proxies=proxy, database=DATABASE_NAME)
766 | db_client.create_database(DATABASE_NAME)
767 |
768 |
769 | def retail_analytics():
770 | """
771 | Detect objects on multiple input video feeds and process the output
772 |
773 | :return: None
774 | """
775 | global centroids
776 | global tracked_person
777 | global db_client
778 |
779 | log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
780 | video_caps = parse_conf_file()
781 | assert len(video_caps) != 0, "No input source given in Configuration file"
782 | args = args_parser()
783 | check_args(args)
784 | load_models(video_caps, args)
785 | labels = get_used_labels(video_caps, args)
786 | create_database()
787 | min_fps = min([i.vc.get(cv2.CAP_PROP_FPS) for i in video_caps])
788 | no_more_data = [False] * len(video_caps)
789 | frames = [None] * len(video_caps)
790 | start_time = time.time()
791 | global is_async_mode
792 | if args.flag == "async":
793 | is_async_mode = True
794 | print('Application running in async mode')
795 | else:
796 | is_async_mode = False
797 | print('Application running in sync mode')
798 | cur_request_id_tr = 0
799 | next_request_id_tr = 1
800 | cur_request_id_sf = 0
801 | next_request_id_sf = 1
802 | cur_request_id_sh = 0
803 | next_request_id_sh = 1
804 | det_time = 0
805 | input_blob = ["data", "im_info"]
806 | cur_request_id = 0
807 | # Main loop for object detection in multiple video streams
808 | while True:
809 | for idx, video_cap in enumerate(video_caps):
810 | vfps = int(round(video_cap.vc.get(cv2.CAP_PROP_FPS)))
811 | for i in range(0, int(round(vfps / min_fps))):
812 | if is_async_mode:
813 | ret, video_cap.next_frame = video_cap.vc.read()
814 | else:
815 | ret, video_cap.frame = video_cap.vc.read()
816 |
817 | # If no new frame or error in reading the frame, exit the loop
818 | if not ret:
819 | no_more_data[idx] = True
820 | break
821 |
822 | if video_cap.type == 'traffic' or video_cap.type == 'shelf':
823 | video_cap.current_count = [0] * len(video_cap.labels)
824 | video_cap.changed_count = [False] * len(video_cap.labels)
825 | inf_start = time.time()
826 | if is_async_mode:
827 | in_frame = cv2.resize(video_cap.next_frame, (video_cap.nchw[3], video_cap.nchw[2]))
828 | in_frame = in_frame.transpose((2, 0, 1))
829 | in_frame = in_frame.reshape((video_cap.nchw[0], video_cap.nchw[1],
830 | video_cap.nchw[2], video_cap.nchw[3]))
831 | if video_cap.type == 'traffic':
832 | video_cap.infer_network.exec_net(next_request_id_tr, in_frame,
833 | input_blob, video_cap.vc.get(3), video_cap.vc.get(4))
834 | cur_request_id = cur_request_id_tr
835 | elif video_cap.type == 'shelf':
836 | video_cap.infer_network.exec_net(next_request_id_sf, in_frame)
837 | cur_request_id = cur_request_id_sf
838 | else:
839 | video_cap.infer_network.exec_net(next_request_id_sh, in_frame)
840 | cur_request_id = cur_request_id_sh
841 | video_cap.frame = video_cap.next_frame
842 | else:
843 | in_frame = cv2.resize(video_cap.frame, (video_cap.nchw[3], video_cap.nchw[2]))
844 | in_frame = in_frame.transpose((2, 0, 1))
845 | in_frame = in_frame.reshape((video_cap.nchw[0], video_cap.nchw[1],
846 | video_cap.nchw[2], video_cap.nchw[3]))
847 | if video_cap.type == 'traffic':
848 | video_cap.infer_network.exec_net(cur_request_id_tr, in_frame, input_blob,
849 | video_cap.vc.get(3), video_cap.vc.get(4))
850 | elif video_cap.type == 'shelf':
851 | video_cap.infer_network.exec_net(cur_request_id_sf, in_frame)
852 | else:
853 | video_cap.infer_network.exec_net(cur_request_id_sh, in_frame)
854 | if video_cap.infer_network.wait(cur_request_id) == 0:
855 | inf_end = time.time()
856 | det_time = inf_end - inf_start
857 |
858 | # Get the inference results for the submitted frame
859 | res = video_cap.infer_network.get_output(cur_request_id)
860 |
861 | # Process the result obtained from the inference engine
862 | object_det = object_detection(video_cap, res)
863 |
864 | # If the feed type is "traffic" or "shelf", check the current and total count of the object
865 | if video_cap.type == 'traffic' or video_cap.type == 'shelf':
866 | process_output(video_cap)
867 |
868 | # If feed type is "traffic", generate the heatmap
869 | if video_cap.type == 'traffic':
870 | heatmap_generation(video_cap)
871 |
872 | # Send the data to InfluxDB
873 | if time.time() >= video_cap.utime + 1:
874 | update_info_object(labels, video_cap)
875 | video_cap.utime = time.time()
876 |
877 | else:
878 | # Detect head pose and emotions of the faces detected
879 | detect_head_pose_and_emotions(video_cap, object_det)
880 |
881 | # Send the data to InfluxDB
882 | if time.time() >= video_cap.utime + 1:
883 | update_info_shopper(video_cap)
884 | video_cap.utime = time.time()
885 | if is_async_mode:
886 | if video_cap.type == 'traffic':
887 | cur_request_id_tr, next_request_id_tr = next_request_id_tr, cur_request_id_tr
888 | elif video_cap.type == 'shopper':
889 | cur_request_id_sh, next_request_id_sh = next_request_id_sh, cur_request_id_sh
890 | else:
891 | cur_request_id_sf, next_request_id_sf = next_request_id_sf, cur_request_id_sf
892 |
893 | fps_time = time.time() - start_time
894 | fps_message = "FPS: {:.3f} fps".format(1 / fps_time)
895 | start_time = time.time()
896 | inf_time_message = "Inference time: N\A for async mode" if is_async_mode else\
897 | "Inference time: {:.3f} ms".format(det_time * 1000)
898 | cv2.putText(video_cap.frame, inf_time_message, (10, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5,
899 | (200, 10, 10), 1)
900 | cv2.putText(video_cap.frame, fps_message, (10, int(video_cap.input_height) - 10),
901 | cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
902 |
903 | # If no new frame, continue to the next input feed
904 | if no_more_data[idx] is True:
905 | continue
906 |
907 | # Print the results on the frame and stream it
908 | message = "Feed Type: {}".format(video_cap.type)
909 | cv2.putText(video_cap.frame, message, (10, 30),
910 | cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
911 |
912 | if video_cap.type == 'traffic' or video_cap.type == 'shelf':
913 | ht = 50
914 | for indx, label in enumerate(video_cap.labels_map):
915 | message = "{} -> Total Count: {}, Current Count: {}".format(labels[label],
916 | video_cap.total_count[indx],
917 | video_cap.current_count[indx])
918 | cv2.putText(video_cap.frame, message, (10, ht), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
919 | ht += 20
920 | else:
921 | message = "Face -> Total Count: {}, Current Count: {}".format(len(tracked_person), len(centroids))
922 | cv2.putText(video_cap.frame, message, (10, 50), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
923 | ht = 75
924 | for person in tracked_person:
925 | if person.gone is False:
926 | message = "Person {} is {}".format(person.id, person.sentiment)
927 | cv2.putText(video_cap.frame, message, (10, ht), cv2.FONT_HERSHEY_COMPLEX, 0.5,
928 | (200, 10, 10), 1)
929 | ht += 20
930 |
931 | frames[idx] = video_cap.frame
932 |
933 | # Resize the processed frames to stream on Grafana
934 | for idx, img in enumerate(frames):
935 | frames[idx] = cv2.resize(img, (480, 360))
936 |
937 | # Encode the frames into a memory buffer.
938 | ret, img = cv2.imencode('.jpg', np.hstack(frames))
939 | img = img.tobytes()
940 |
941 | # Yield the output frame to the server
942 | yield (b'--frame\r\n'
943 | b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n\r\n')
944 |
945 | # If no more frames, exit the loop
946 | if False not in no_more_data:
947 | break
948 |
949 |
950 | # Create object for Flask class
951 | app = Flask(__name__, template_folder=template_dir)
952 | app.logger.disabled = True
953 | log_ = log.getLogger('werkzeug')
954 | log_.disabled = True
955 |
956 | @app.route('/')
957 | def index():
958 | """
959 | Trigger the index() function on opening "0.0.0.0:5000/" URL
960 | :return: html file
961 | """
962 | return render_template('index.html')
963 |
964 |
965 | @app.route('/video_feed')
966 | def video_feed():
967 | """
968 | Trigger the video_feed() function on opening "0.0.0.0:5000/video_feed" URL
969 | :return: Response streaming the processed video frames
970 | """
971 | return Response(retail_analytics(), mimetype='multipart/x-mixed-replace; boundary=frame')
972 |
973 |
974 | if __name__ == '__main__':
975 | app.run(host='0.0.0.0')
976 |
--------------------------------------------------------------------------------
/retail-analytics.json:
--------------------------------------------------------------------------------
1 | {
2 | "__inputs": [
3 | {
4 | "name": "DS_RETAIL_ANALYTICS",
5 | "label": "Retail_Analytics",
6 | "description": "",
7 | "type": "datasource",
8 | "pluginId": "influxdb",
9 | "pluginName": "InfluxDB"
10 | }
11 | ],
12 | "__requires": [
13 | {
14 | "type": "grafana",
15 | "id": "grafana",
16 | "name": "Grafana",
17 | "version": "5.3.2"
18 | },
19 | {
20 | "type": "panel",
21 | "id": "graph",
22 | "name": "Graph",
23 | "version": "5.0.0"
24 | },
25 | {
26 | "type": "datasource",
27 | "id": "influxdb",
28 | "name": "InfluxDB",
29 | "version": "5.0.0"
30 | },
31 | {
32 | "type": "panel",
33 | "id": "ryantxu-ajax-panel",
34 | "name": "AJAX",
35 | "version": "0.0.6"
36 | },
37 | {
38 | "type": "panel",
39 | "id": "singlestat",
40 | "name": "Singlestat",
41 | "version": "5.0.0"
42 | },
43 | {
44 | "type": "panel",
45 | "id": "table",
46 | "name": "Table",
47 | "version": "5.0.0"
48 | }
49 | ],
50 | "annotations": {
51 | "list": [
52 | {
53 | "builtIn": 1,
54 | "datasource": "-- Grafana --",
55 | "enable": true,
56 | "hide": true,
57 | "iconColor": "rgba(0, 211, 255, 1)",
58 | "name": "Annotations & Alerts",
59 | "type": "dashboard"
60 | }
61 | ]
62 | },
63 | "editable": true,
64 | "gnetId": null,
65 | "graphTooltip": 0,
66 | "id": null,
67 | "links": [],
68 | "panels": [
69 | {
70 | "datasource": null,
71 | "gridPos": {
72 | "h": 11,
73 | "w": 24,
74 | "x": 0,
75 | "y": 0
76 | },
77 | "header_js": "{}",
78 | "id": 54,
79 | "links": [],
80 | "method": "iframe",
81 | "mode": "html",
82 | "params_js": "{\n\n}",
83 | "request": "http",
84 | "responseType": "text",
85 | "showTime": false,
86 | "showTimeFormat": "LTS",
87 | "showTimePrefix": null,
88 | "showTimeValue": "request",
89 | "skipSameURL": false,
90 | "targets": [
91 | {}
92 | ],
93 | "templateResponse": true,
94 | "title": "Live Video Stream",
95 | "type": "ryantxu-ajax-panel",
96 | "url": "http://127.0.0.1:5000",
97 | "withCredentials": false
98 | },
99 | {
100 | "collapsed": false,
101 | "gridPos": {
102 | "h": 1,
103 | "w": 24,
104 | "x": 0,
105 | "y": 11
106 | },
107 | "id": 36,
108 | "panels": [],
109 | "title": "Shopper",
110 | "type": "row"
111 | },
112 | {
113 | "aliasColors": {},
114 | "bars": true,
115 | "dashLength": 10,
116 | "dashes": false,
117 | "datasource": "${DS_RETAIL_ANALYTICS}",
118 | "decimals": 0,
119 | "fill": 1,
120 | "gridPos": {
121 | "h": 10,
122 | "w": 5,
123 | "x": 0,
124 | "y": 12
125 | },
126 | "id": 56,
127 | "legend": {
128 | "alignAsTable": true,
129 | "avg": false,
130 | "current": true,
131 | "hideEmpty": false,
132 | "hideZero": false,
133 | "max": false,
134 | "min": false,
135 | "rightSide": false,
136 | "show": true,
137 | "total": false,
138 | "values": true
139 | },
140 | "lines": false,
141 | "linewidth": 1,
142 | "links": [],
143 | "nullPointMode": "null",
144 | "percentage": false,
145 | "pointradius": 5,
146 | "points": false,
147 | "renderer": "flot",
148 | "seriesOverrides": [],
149 | "spaceLength": 10,
150 | "stack": false,
151 | "steppedLine": false,
152 | "targets": [
153 | {
154 | "alias": "Interested",
155 | "groupBy": [
156 | {
157 | "params": [
158 | "$__interval"
159 | ],
160 | "type": "time"
161 | }
162 | ],
163 | "measurement": "shopper_interest",
164 | "orderByTime": "ASC",
165 | "policy": "default",
166 | "refId": "A",
167 | "resultFormat": "time_series",
168 | "select": [
169 | [
170 | {
171 | "params": [
172 | "Interested"
173 | ],
174 | "type": "field"
175 | },
176 | {
177 | "params": [],
178 | "type": "last"
179 | }
180 | ]
181 | ],
182 | "tags": []
183 | },
184 | {
185 | "alias": "Not Interested",
186 | "groupBy": [
187 | {
188 | "params": [
189 | "1s"
190 | ],
191 | "type": "time"
192 | },
193 | {
194 | "params": [
195 | "null"
196 | ],
197 | "type": "fill"
198 | }
199 | ],
200 | "measurement": "shopper_interest",
201 | "orderByTime": "ASC",
202 | "policy": "default",
203 | "refId": "B",
204 | "resultFormat": "time_series",
205 | "select": [
206 | [
207 | {
208 | "params": [
209 | "Not Interested"
210 | ],
211 | "type": "field"
212 | },
213 | {
214 | "params": [],
215 | "type": "last"
216 | }
217 | ]
218 | ],
219 | "tags": []
220 | }
221 | ],
222 | "thresholds": [],
223 | "timeFrom": null,
224 | "timeShift": null,
225 | "title": "Interest",
226 | "tooltip": {
227 | "shared": false,
228 | "sort": 0,
229 | "value_type": "individual"
230 | },
231 | "type": "graph",
232 | "xaxis": {
233 | "buckets": null,
234 | "mode": "series",
235 | "name": null,
236 | "show": true,
237 | "values": [
238 | "current"
239 | ]
240 | },
241 | "yaxes": [
242 | {
243 | "decimals": 0,
244 | "format": "short",
245 | "label": "",
246 | "logBase": 1,
247 | "max": null,
248 | "min": "0",
249 | "show": true
250 | },
251 | {
252 | "format": "short",
253 | "label": null,
254 | "logBase": 1,
255 | "max": null,
256 | "min": null,
257 | "show": true
258 | }
259 | ],
260 | "yaxis": {
261 | "align": false,
262 | "alignLevel": null
263 | }
264 | },
265 | {
266 | "columns": [],
267 | "datasource": "${DS_RETAIL_ANALYTICS}",
268 | "fontSize": "100%",
269 | "gridPos": {
270 | "h": 10,
271 | "w": 13,
272 | "x": 5,
273 | "y": 12
274 | },
275 | "hideTimeOverride": false,
276 | "id": 58,
277 | "links": [],
278 | "pageSize": null,
279 | "scroll": false,
280 | "showHeader": true,
281 | "sort": {
282 | "col": 0,
283 | "desc": true
284 | },
285 | "styles": [
286 | {
287 | "alias": "Time",
288 | "dateFormat": "YYYY-MM-DD HH:mm:ss",
289 | "pattern": "Time",
290 | "type": "date"
291 | },
292 | {
293 | "alias": "",
294 | "colorMode": null,
295 | "colors": [
296 | "rgba(245, 54, 54, 0.9)",
297 | "rgba(237, 129, 40, 0.89)",
298 | "rgba(50, 172, 45, 0.97)"
299 | ],
300 | "dateFormat": "YYYY-MM-DD HH:mm:ss",
301 | "decimals": 2,
302 | "mappingType": 1,
303 | "pattern": "Looking Duration",
304 | "thresholds": [],
305 | "type": "number",
306 | "unit": "s"
307 | },
308 | {
309 | "alias": "",
310 | "colorMode": null,
311 | "colors": [
312 | "rgba(245, 54, 54, 0.9)",
313 | "rgba(237, 129, 40, 0.89)",
314 | "rgba(50, 172, 45, 0.97)"
315 | ],
316 | "dateFormat": "YYYY-MM-DD HH:mm:ss",
317 | "decimals": 0,
318 | "link": false,
319 | "mappingType": 1,
320 | "pattern": "Person",
321 | "thresholds": [],
322 | "type": "number",
323 | "unit": "short"
324 | },
325 | {
326 | "alias": "",
327 | "colorMode": null,
328 | "colors": [
329 | "rgba(245, 54, 54, 0.9)",
330 | "rgba(237, 129, 40, 0.89)",
331 | "rgba(50, 172, 45, 0.97)"
332 | ],
333 | "dateFormat": "YYYY-MM-DD HH:mm:ss",
334 | "decimals": 2,
335 | "mappingType": 1,
336 | "pattern": "Duration in Frame",
337 | "thresholds": [],
338 | "type": "number",
339 | "unit": "s"
340 | },
341 | {
342 | "alias": "",
343 | "colorMode": null,
344 | "colors": [
345 | "rgba(245, 54, 54, 0.9)",
346 | "rgba(237, 129, 40, 0.89)",
347 | "rgba(50, 172, 45, 0.97)"
348 | ],
349 | "decimals": 2,
350 | "pattern": "/.*/",
351 | "thresholds": [],
352 | "type": "number",
353 | "unit": "short"
354 | }
355 | ],
356 | "targets": [
357 | {
358 | "alias": "Person ID",
359 | "groupBy": [
360 | {
361 | "params": [
362 | "$__interval"
363 | ],
364 | "type": "time"
365 | },
366 | {
367 | "params": [
368 | "none"
369 | ],
370 | "type": "fill"
371 | }
372 | ],
373 | "measurement": "shopper_duration",
374 | "orderByTime": "ASC",
375 | "policy": "default",
376 | "refId": "B",
377 | "resultFormat": "time_series",
378 | "select": [
379 | [
380 | {
381 | "params": [
382 | "person"
383 | ],
384 | "type": "field"
385 | },
386 | {
387 | "params": [],
388 | "type": "distinct"
389 | }
390 | ]
391 | ],
392 | "tags": []
393 | },
394 | {
395 | "alias": "Looking Duration",
396 | "groupBy": [
397 | {
398 | "params": [
399 | "$__interval"
400 | ],
401 | "type": "time"
402 | },
403 | {
404 | "params": [
405 | "none"
406 | ],
407 | "type": "fill"
408 | }
409 | ],
410 | "measurement": "shopper_duration",
411 | "orderByTime": "ASC",
412 | "policy": "default",
413 | "refId": "A",
414 | "resultFormat": "time_series",
415 | "select": [
416 | [
417 | {
418 | "params": [
419 | "Looking time"
420 | ],
421 | "type": "field"
422 | },
423 | {
424 | "params": [],
425 | "type": "last"
426 | }
427 | ]
428 | ],
429 | "tags": []
430 | },
431 | {
432 | "alias": "Duration in Frame",
433 | "groupBy": [
434 | {
435 | "params": [
436 | "$__interval"
437 | ],
438 | "type": "time"
439 | },
440 | {
441 | "params": [
442 | "none"
443 | ],
444 | "type": "fill"
445 | }
446 | ],
447 | "measurement": "shopper_duration",
448 | "orderByTime": "ASC",
449 | "policy": "default",
450 | "refId": "C",
451 | "resultFormat": "time_series",
452 | "select": [
453 | [
454 | {
455 | "params": [
456 | "Time in frame"
457 | ],
458 | "type": "field"
459 | },
460 | {
461 | "params": [],
462 | "type": "last"
463 | }
464 | ]
465 | ],
466 | "tags": []
467 | },
468 | {
469 | "alias": "Current Mood",
470 | "groupBy": [
471 | {
472 | "params": [
473 | "$__interval"
474 | ],
475 | "type": "time"
476 | },
477 | {
478 | "params": [
479 | "none"
480 | ],
481 | "type": "fill"
482 | }
483 | ],
484 | "measurement": "shopper_duration",
485 | "orderByTime": "ASC",
486 | "policy": "default",
487 | "refId": "D",
488 | "resultFormat": "time_series",
489 | "select": [
490 | [
491 | {
492 | "params": [
493 | "Current Mood"
494 | ],
495 | "type": "field"
496 | },
497 | {
498 | "params": [],
499 | "type": "last"
500 | }
501 | ]
502 | ],
503 | "tags": []
504 | }
505 | ],
506 | "timeFrom": "2s",
507 | "timeShift": null,
508 | "title": "Duration",
509 | "transform": "timeseries_to_columns",
510 | "type": "table"
511 | },
512 | {
513 | "cacheTimeout": null,
514 | "colorBackground": false,
515 | "colorValue": false,
516 | "colors": [
517 | "#d44a3a",
518 | "rgba(237, 129, 40, 0.89)",
519 | "#299c46"
520 | ],
521 | "datasource": "${DS_RETAIL_ANALYTICS}",
522 | "format": "none",
523 | "gauge": {
524 | "maxValue": 100,
525 | "minValue": 0,
526 | "show": true,
527 | "thresholdLabels": false,
528 | "thresholdMarkers": true
529 | },
530 | "gridPos": {
531 | "h": 4,
532 | "w": 5,
533 | "x": 18,
534 | "y": 12
535 | },
536 | "id": 60,
537 | "interval": null,
538 | "links": [],
539 | "mappingType": 1,
540 | "mappingTypes": [
541 | {
542 | "name": "value to text",
543 | "value": 1
544 | },
545 | {
546 | "name": "range to text",
547 | "value": 2
548 | }
549 | ],
550 | "maxDataPoints": 100,
551 | "nullPointMode": "connected",
552 | "nullText": null,
553 | "postfix": "",
554 | "postfixFontSize": "50%",
555 | "prefix": "",
556 | "prefixFontSize": "50%",
557 | "rangeMaps": [
558 | {
559 | "from": "null",
560 | "text": "N/A",
561 | "to": "null"
562 | }
563 | ],
564 | "sparkline": {
565 | "fillColor": "rgba(31, 118, 189, 0.18)",
566 | "full": false,
567 | "lineColor": "rgb(31, 120, 193)",
568 | "show": false
569 | },
570 | "tableColumn": "",
571 | "targets": [
572 | {
573 | "groupBy": [
574 | {
575 | "params": [
576 | "$__interval"
577 | ],
578 | "type": "time"
579 | },
580 | {
581 | "params": [
582 | "null"
583 | ],
584 | "type": "fill"
585 | }
586 | ],
587 | "measurement": "shopper_interest",
588 | "orderByTime": "ASC",
589 | "policy": "default",
590 | "refId": "A",
591 | "resultFormat": "time_series",
592 | "select": [
593 | [
594 | {
595 | "params": [
596 | "Total Count"
597 | ],
598 | "type": "field"
599 | },
600 | {
601 | "params": [],
602 | "type": "last"
603 | }
604 | ]
605 | ],
606 | "tags": []
607 | }
608 | ],
609 | "thresholds": "",
610 | "title": "Total Count",
611 | "type": "singlestat",
612 | "valueFontSize": "100%",
613 | "valueMaps": [
614 | {
615 | "op": "=",
616 | "text": "N/A",
617 | "value": "null"
618 | }
619 | ],
620 | "valueName": "current"
621 | },
622 | {
623 | "cacheTimeout": null,
624 | "colorBackground": false,
625 | "colorValue": false,
626 | "colors": [
627 | "#d44a3a",
628 | "rgba(237, 129, 40, 0.89)",
629 | "#299c46"
630 | ],
631 | "datasource": "${DS_RETAIL_ANALYTICS}",
632 | "format": "none",
633 | "gauge": {
634 | "maxValue": 100,
635 | "minValue": 0,
636 | "show": true,
637 | "thresholdLabels": false,
638 | "thresholdMarkers": true
639 | },
640 | "gridPos": {
641 | "h": 3,
642 | "w": 5,
643 | "x": 18,
644 | "y": 16
645 | },
646 | "id": 61,
647 | "interval": null,
648 | "links": [],
649 | "mappingType": 1,
650 | "mappingTypes": [
651 | {
652 | "name": "value to text",
653 | "value": 1
654 | },
655 | {
656 | "name": "range to text",
657 | "value": 2
658 | }
659 | ],
660 | "maxDataPoints": 100,
661 | "nullPointMode": "connected",
662 | "nullText": null,
663 | "postfix": "",
664 | "postfixFontSize": "50%",
665 | "prefix": "",
666 | "prefixFontSize": "50%",
667 | "rangeMaps": [
668 | {
669 | "from": "null",
670 | "text": "N/A",
671 | "to": "null"
672 | }
673 | ],
674 | "sparkline": {
675 | "fillColor": "rgba(31, 118, 189, 0.18)",
676 | "full": false,
677 | "lineColor": "rgb(31, 120, 193)",
678 | "show": false
679 | },
680 | "tableColumn": "",
681 | "targets": [
682 | {
683 | "groupBy": [
684 | {
685 | "params": [
686 | "$__interval"
687 | ],
688 | "type": "time"
689 | },
690 | {
691 | "params": [
692 | "null"
693 | ],
694 | "type": "fill"
695 | }
696 | ],
697 | "measurement": "shopper_interest",
698 | "orderByTime": "ASC",
699 | "policy": "default",
700 | "refId": "A",
701 | "resultFormat": "time_series",
702 | "select": [
703 | [
704 | {
705 | "params": [
706 | "Interested"
707 | ],
708 | "type": "field"
709 | },
710 | {
711 | "params": [],
712 | "type": "last"
713 | }
714 | ]
715 | ],
716 | "tags": []
717 | }
718 | ],
719 | "thresholds": "",
720 | "title": "People Interested",
721 | "type": "singlestat",
722 | "valueFontSize": "150%",
723 | "valueMaps": [
724 | {
725 | "op": "=",
726 | "text": "N/A",
727 | "value": "null"
728 | }
729 | ],
730 | "valueName": "current"
731 | },
732 | {
733 | "cacheTimeout": null,
734 | "colorBackground": false,
735 | "colorValue": false,
736 | "colors": [
737 | "#d44a3a",
738 | "rgba(237, 129, 40, 0.89)",
739 | "#299c46"
740 | ],
741 | "datasource": "${DS_RETAIL_ANALYTICS}",
742 | "format": "none",
743 | "gauge": {
744 | "maxValue": 100,
745 | "minValue": 0,
746 | "show": true,
747 | "thresholdLabels": false,
748 | "thresholdMarkers": true
749 | },
750 | "gridPos": {
751 | "h": 3,
752 | "w": 5,
753 | "x": 18,
754 | "y": 19
755 | },
756 | "id": 62,
757 | "interval": null,
758 | "links": [],
759 | "mappingType": 1,
760 | "mappingTypes": [
761 | {
762 | "name": "value to text",
763 | "value": 1
764 | },
765 | {
766 | "name": "range to text",
767 | "value": 2
768 | }
769 | ],
770 | "maxDataPoints": 100,
771 | "nullPointMode": "connected",
772 | "nullText": null,
773 | "postfix": "",
774 | "postfixFontSize": "50%",
775 | "prefix": "",
776 | "prefixFontSize": "50%",
777 | "rangeMaps": [
778 | {
779 | "from": "null",
780 | "text": "N/A",
781 | "to": "null"
782 | }
783 | ],
784 | "sparkline": {
785 | "fillColor": "rgba(31, 118, 189, 0.18)",
786 | "full": false,
787 | "lineColor": "rgb(31, 120, 193)",
788 | "show": false
789 | },
790 | "tableColumn": "",
791 | "targets": [
792 | {
793 | "groupBy": [
794 | {
795 | "params": [
796 | "$__interval"
797 | ],
798 | "type": "time"
799 | },
800 | {
801 | "params": [
802 | "null"
803 | ],
804 | "type": "fill"
805 | }
806 | ],
807 | "measurement": "shopper_interest",
808 | "orderByTime": "ASC",
809 | "policy": "default",
810 | "refId": "A",
811 | "resultFormat": "time_series",
812 | "select": [
813 | [
814 | {
815 | "params": [
816 | "Not Interested"
817 | ],
818 | "type": "field"
819 | },
820 | {
821 | "params": [],
822 | "type": "last"
823 | }
824 | ]
825 | ],
826 | "tags": []
827 | }
828 | ],
829 | "thresholds": "",
830 | "title": "People Not Interested",
831 | "type": "singlestat",
832 | "valueFontSize": "120%",
833 | "valueMaps": [
834 | {
835 | "op": "=",
836 | "text": "N/A",
837 | "value": "null"
838 | }
839 | ],
840 | "valueName": "current"
841 | },
842 | {
843 | "collapsed": false,
844 | "gridPos": {
845 | "h": 1,
846 | "w": 24,
847 | "x": 0,
848 | "y": 22
849 | },
850 | "id": 30,
851 | "panels": [],
852 | "title": "Store traffic",
853 | "type": "row"
854 | },
855 | {
856 | "cacheTimeout": null,
857 | "colorBackground": false,
858 | "colorValue": false,
859 | "colors": [
860 | "#d44a3a",
861 | "rgba(237, 129, 40, 0.89)",
862 | "#629e51"
863 | ],
864 | "datasource": "${DS_RETAIL_ANALYTICS}",
865 | "decimals": 0,
866 | "format": "none",
867 | "gauge": {
868 | "maxValue": 100,
869 | "minValue": 0,
870 | "show": true,
871 | "thresholdLabels": false,
872 | "thresholdMarkers": true
873 | },
874 | "gridPos": {
875 | "h": 4,
876 | "w": 5,
877 | "x": 0,
878 | "y": 23
879 | },
880 | "id": 40,
881 | "interval": null,
882 | "links": [],
883 | "mappingType": 1,
884 | "mappingTypes": [
885 | {
886 | "name": "value to text",
887 | "value": 1
888 | },
889 | {
890 | "name": "range to text",
891 | "value": 2
892 | }
893 | ],
894 | "maxDataPoints": 100,
895 | "nullPointMode": "connected",
896 | "nullText": null,
897 | "postfix": "",
898 | "postfixFontSize": "50%",
899 | "prefix": "",
900 | "prefixFontSize": "50%",
901 | "rangeMaps": [
902 | {
903 | "from": "null",
904 | "text": "N/A",
905 | "to": "null"
906 | }
907 | ],
908 | "sparkline": {
909 | "fillColor": "rgba(31, 118, 189, 0.18)",
910 | "full": false,
911 | "lineColor": "rgb(31, 120, 193)",
912 | "show": false
913 | },
914 | "tableColumn": "last",
915 | "targets": [
916 | {
917 | "alias": "Total Count",
918 | "groupBy": [
919 | {
920 | "params": [
921 | "$__interval"
922 | ],
923 | "type": "time"
924 | },
925 | {
926 | "params": [
927 | "null"
928 | ],
929 | "type": "fill"
930 | }
931 | ],
932 | "hide": false,
933 | "measurement": "traffic",
934 | "orderByTime": "ASC",
935 | "policy": "default",
936 | "query": "SELECT distinct(\"Total Count\") FROM \"traffic\" WHERE $timeFilter GROUP BY time(10s)",
937 | "rawQuery": false,
938 | "refId": "A",
939 | "resultFormat": "time_series",
940 | "select": [
941 | [
942 | {
943 | "params": [
944 | "Total Count"
945 | ],
946 | "type": "field"
947 | },
948 | {
949 | "params": [],
950 | "type": "distinct"
951 | }
952 | ]
953 | ],
954 | "tags": []
955 | }
956 | ],
957 | "thresholds": "",
958 | "title": "Number of people visited",
959 | "type": "singlestat",
960 | "valueFontSize": "110%",
961 | "valueMaps": [
962 | {
963 | "op": "=",
964 | "text": "N/A",
965 | "value": "null"
966 | }
967 | ],
968 | "valueName": "current"
969 | },
970 | {
971 | "aliasColors": {},
972 | "bars": false,
973 | "dashLength": 10,
974 | "dashes": false,
975 | "datasource": "${DS_RETAIL_ANALYTICS}",
976 | "decimals": 0,
977 | "description": "",
978 | "fill": 1,
979 | "gridPos": {
980 | "h": 8,
981 | "w": 11,
982 | "x": 5,
983 | "y": 23
984 | },
985 | "id": 48,
986 | "legend": {
987 | "avg": false,
988 | "current": true,
989 | "max": false,
990 | "min": false,
991 | "show": true,
992 | "total": false,
993 | "values": true
994 | },
995 | "lines": true,
996 | "linewidth": 1,
997 | "links": [],
998 | "nullPointMode": "null",
999 | "percentage": false,
1000 | "pointradius": 5,
1001 | "points": false,
1002 | "renderer": "flot",
1003 | "seriesOverrides": [],
1004 | "spaceLength": 10,
1005 | "stack": false,
1006 | "steppedLine": false,
1007 | "targets": [
1008 | {
1009 | "alias": "Number of People",
1010 | "groupBy": [
1011 | {
1012 | "params": [
1013 | "1s"
1014 | ],
1015 | "type": "time"
1016 | },
1017 | {
1018 | "params": [
1019 | "none"
1020 | ],
1021 | "type": "fill"
1022 | }
1023 | ],
1024 | "measurement": "traffic",
1025 | "orderByTime": "ASC",
1026 | "policy": "default",
1027 | "refId": "A",
1028 | "resultFormat": "time_series",
1029 | "select": [
1030 | [
1031 | {
1032 | "params": [
1033 | "Current Count"
1034 | ],
1035 | "type": "field"
1036 | },
1037 | {
1038 | "params": [],
1039 | "type": "distinct"
1040 | }
1041 | ]
1042 | ],
1043 | "tags": []
1044 | }
1045 | ],
1046 | "thresholds": [],
1047 | "timeFrom": null,
1048 | "timeShift": null,
1049 | "title": "Human Traffic in Store",
1050 | "tooltip": {
1051 | "shared": true,
1052 | "sort": 0,
1053 | "value_type": "individual"
1054 | },
1055 | "type": "graph",
1056 | "xaxis": {
1057 | "buckets": null,
1058 | "mode": "time",
1059 | "name": null,
1060 | "show": true,
1061 | "values": []
1062 | },
1063 | "yaxes": [
1064 | {
1065 | "decimals": 0,
1066 | "format": "short",
1067 | "label": null,
1068 | "logBase": 1,
1069 | "max": null,
1070 | "min": null,
1071 | "show": true
1072 | },
1073 | {
1074 | "format": "short",
1075 | "label": null,
1076 | "logBase": 1,
1077 | "max": null,
1078 | "min": null,
1079 | "show": true
1080 | }
1081 | ],
1082 | "yaxis": {
1083 | "align": false,
1084 | "alignLevel": null
1085 | }
1086 | },
1087 | {
1088 | "columns": [],
1089 | "datasource": "${DS_RETAIL_ANALYTICS}",
1090 | "fontSize": "100%",
1091 | "gridPos": {
1092 | "h": 8,
1093 | "w": 8,
1094 | "x": 16,
1095 | "y": 23
1096 | },
1097 | "id": 44,
1098 | "links": [],
1099 | "pageSize": null,
1100 | "scroll": true,
1101 | "showHeader": true,
1102 | "sort": {
1103 | "col": 0,
1104 | "desc": true
1105 | },
1106 | "styles": [
1107 | {
1108 | "alias": "Time",
1109 | "dateFormat": "YYYY-MM-DD HH:mm:ss",
1110 | "pattern": "Time",
1111 | "type": "date"
1112 | },
1113 | {
1114 | "alias": "",
1115 | "colorMode": null,
1116 | "colors": [
1117 | "rgba(245, 54, 54, 0.9)",
1118 | "rgba(237, 129, 40, 0.89)",
1119 | "rgba(50, 172, 45, 0.97)"
1120 | ],
1121 | "dateFormat": "YYYY-MM-DD HH:mm:ss",
1122 | "decimals": 0,
1123 | "mappingType": 1,
1124 | "pattern": "No of people present",
1125 | "thresholds": [],
1126 | "type": "number",
1127 | "unit": "short"
1128 | },
1129 | {
1130 | "alias": "",
1131 | "colorMode": null,
1132 | "colors": [
1133 | "rgba(245, 54, 54, 0.9)",
1134 | "rgba(237, 129, 40, 0.89)",
1135 | "rgba(50, 172, 45, 0.97)"
1136 | ],
1137 | "dateFormat": "YYYY-MM-DD HH:mm:ss",
1138 | "decimals": 0,
1139 | "mappingType": 1,
1140 | "pattern": "Total No of people visited",
1141 | "thresholds": [],
1142 | "type": "number",
1143 | "unit": "short"
1144 | },
1145 | {
1146 | "alias": "",
1147 | "colorMode": null,
1148 | "colors": [
1149 | "rgba(245, 54, 54, 0.9)",
1150 | "rgba(237, 129, 40, 0.89)",
1151 | "rgba(50, 172, 45, 0.97)"
1152 | ],
1153 | "decimals": 2,
1154 | "pattern": "/.*/",
1155 | "thresholds": [],
1156 | "type": "number",
1157 | "unit": "short"
1158 | }
1159 | ],
1160 | "targets": [
1161 | {
1162 | "alias": "No of people present",
1163 | "groupBy": [
1164 | {
1165 | "params": [
1166 | "1s"
1167 | ],
1168 | "type": "time"
1169 | },
1170 | {
1171 | "params": [
1172 | "none"
1173 | ],
1174 | "type": "fill"
1175 | }
1176 | ],
1177 | "measurement": "traffic",
1178 | "orderByTime": "ASC",
1179 | "policy": "default",
1180 | "refId": "A",
1181 | "resultFormat": "time_series",
1182 | "select": [
1183 | [
1184 | {
1185 | "params": [
1186 | "Current Count"
1187 | ],
1188 | "type": "field"
1189 | },
1190 | {
1191 | "params": [],
1192 | "type": "distinct"
1193 | }
1194 | ]
1195 | ],
1196 | "tags": []
1197 | },
1198 | {
1199 | "alias": "Total No of people visited",
1200 | "groupBy": [
1201 | {
1202 | "params": [
1203 | "1s"
1204 | ],
1205 | "type": "time"
1206 | },
1207 | {
1208 | "params": [
1209 | "null"
1210 | ],
1211 | "type": "fill"
1212 | }
1213 | ],
1214 | "measurement": "traffic",
1215 | "orderByTime": "ASC",
1216 | "policy": "default",
1217 | "refId": "B",
1218 | "resultFormat": "time_series",
1219 | "select": [
1220 | [
1221 | {
1222 | "params": [
1223 | "Total Count"
1224 | ],
1225 | "type": "field"
1226 | },
1227 | {
1228 | "params": [],
1229 | "type": "distinct"
1230 | }
1231 | ]
1232 | ],
1233 | "tags": []
1234 | }
1235 | ],
1236 | "title": "People Count",
1237 | "transform": "timeseries_to_columns",
1238 | "type": "table"
1239 | },
1240 | {
1241 | "cacheTimeout": null,
1242 | "colorBackground": false,
1243 | "colorValue": false,
1244 | "colors": [
1245 | "#d44a3a",
1246 | "rgba(237, 129, 40, 0.89)",
1247 | "#629e51"
1248 | ],
1249 | "datasource": "${DS_RETAIL_ANALYTICS}",
1250 | "decimals": 0,
1251 | "format": "none",
1252 | "gauge": {
1253 | "maxValue": 100,
1254 | "minValue": 0,
1255 | "show": true,
1256 | "thresholdLabels": false,
1257 | "thresholdMarkers": true
1258 | },
1259 | "gridPos": {
1260 | "h": 4,
1261 | "w": 5,
1262 | "x": 0,
1263 | "y": 27
1264 | },
1265 | "id": 52,
1266 | "interval": null,
1267 | "links": [],
1268 | "mappingType": 1,
1269 | "mappingTypes": [
1270 | {
1271 | "name": "value to text",
1272 | "value": 1
1273 | },
1274 | {
1275 | "name": "range to text",
1276 | "value": 2
1277 | }
1278 | ],
1279 | "maxDataPoints": 100,
1280 | "nullPointMode": "connected",
1281 | "nullText": null,
1282 | "postfix": "",
1283 | "postfixFontSize": "50%",
1284 | "prefix": "",
1285 | "prefixFontSize": "50%",
1286 | "rangeMaps": [
1287 | {
1288 | "from": "null",
1289 | "text": "N/A",
1290 | "to": "null"
1291 | }
1292 | ],
1293 | "sparkline": {
1294 | "fillColor": "rgba(31, 118, 189, 0.18)",
1295 | "full": false,
1296 | "lineColor": "rgb(31, 120, 193)",
1297 | "show": false
1298 | },
1299 | "tableColumn": "last",
1300 | "targets": [
1301 | {
1302 | "alias": "Total Count",
1303 | "groupBy": [
1304 | {
1305 | "params": [
1306 | "1s"
1307 | ],
1308 | "type": "time"
1309 | },
1310 | {
1311 | "params": [
1312 | "null"
1313 | ],
1314 | "type": "fill"
1315 | }
1316 | ],
1317 | "hide": false,
1318 | "measurement": "traffic",
1319 | "orderByTime": "ASC",
1320 | "policy": "default",
1321 | "query": "SELECT distinct(\"Total Count\") FROM \"traffic\" WHERE $timeFilter GROUP BY time(10s)",
1322 | "rawQuery": false,
1323 | "refId": "A",
1324 | "resultFormat": "time_series",
1325 | "select": [
1326 | [
1327 | {
1328 | "params": [
1329 | "Current Count"
1330 | ],
1331 | "type": "field"
1332 | },
1333 | {
1334 | "params": [],
1335 | "type": "distinct"
1336 | }
1337 | ]
1338 | ],
1339 | "tags": []
1340 | }
1341 | ],
1342 | "thresholds": "",
1343 | "title": "People currently present",
1344 | "type": "singlestat",
1345 | "valueFontSize": "110%",
1346 | "valueMaps": [
1347 | {
1348 | "op": "=",
1349 | "text": "N/A",
1350 | "value": "null"
1351 | }
1352 | ],
1353 | "valueName": "current"
1354 | },
1355 | {
1356 | "collapsed": false,
1357 | "gridPos": {
1358 | "h": 1,
1359 | "w": 24,
1360 | "x": 0,
1361 | "y": 31
1362 | },
1363 | "id": 22,
1364 | "panels": [],
1365 | "title": "Shelf",
1366 | "type": "row"
1367 | },
1368 | {
1369 | "cacheTimeout": null,
1370 | "colorBackground": false,
1371 | "colorPrefix": false,
1372 | "colorValue": false,
1373 | "colors": [
1374 | "#d44a3a",
1375 | "rgba(237, 129, 40, 0.89)",
1376 | "#299c46"
1377 | ],
1378 | "datasource": "${DS_RETAIL_ANALYTICS}",
1379 | "format": "none",
1380 | "gauge": {
1381 | "maxValue": 10,
1382 | "minValue": 0,
1383 | "show": true,
1384 | "thresholdLabels": false,
1385 | "thresholdMarkers": true
1386 | },
1387 | "gridPos": {
1388 | "h": 7,
1389 | "w": 5,
1390 | "x": 0,
1391 | "y": 32
1392 | },
1393 | "id": 50,
1394 | "interval": null,
1395 | "links": [],
1396 | "mappingType": 1,
1397 | "mappingTypes": [
1398 | {
1399 | "name": "value to text",
1400 | "value": 1
1401 | },
1402 | {
1403 | "name": "range to text",
1404 | "value": 2
1405 | }
1406 | ],
1407 | "maxDataPoints": 100,
1408 | "nullPointMode": "connected",
1409 | "nullText": null,
1410 | "postfix": "",
1411 | "postfixFontSize": "50%",
1412 | "prefix": "",
1413 | "prefixFontSize": "50%",
1414 | "rangeMaps": [
1415 | {
1416 | "from": "null",
1417 | "text": "N/A",
1418 | "to": "null"
1419 | }
1420 | ],
1421 | "sparkline": {
1422 | "fillColor": "rgba(31, 118, 189, 0.18)",
1423 | "full": false,
1424 | "lineColor": "rgb(31, 120, 193)",
1425 | "show": false
1426 | },
1427 | "tableColumn": "",
1428 | "targets": [
1429 | {
1430 |         "alias": "Number of Bottles",
1431 | "groupBy": [
1432 | {
1433 | "params": [
1434 | "1s"
1435 | ],
1436 | "type": "time"
1437 | },
1438 | {
1439 | "params": [
1440 | "null"
1441 | ],
1442 | "type": "fill"
1443 | }
1444 | ],
1445 | "measurement": "shelf",
1446 | "orderByTime": "ASC",
1447 | "policy": "default",
1448 | "refId": "A",
1449 | "resultFormat": "time_series",
1450 | "select": [
1451 | [
1452 | {
1453 | "params": [
1454 | "Current Count"
1455 | ],
1456 | "type": "field"
1457 | },
1458 | {
1459 | "params": [],
1460 | "type": "last"
1461 | }
1462 | ]
1463 | ],
1464 | "tags": []
1465 | }
1466 | ],
1467 | "thresholds": "2",
1468 | "title": "Bottles",
1469 | "type": "singlestat",
1470 | "valueFontSize": "80%",
1471 | "valueMaps": [
1472 | {
1473 | "op": "=",
1474 | "text": "N/A",
1475 | "value": "null"
1476 | }
1477 | ],
1478 | "valueName": "current"
1479 | },
1480 | {
1481 | "columns": [],
1482 | "datasource": "${DS_RETAIL_ANALYTICS}",
1483 | "fontSize": "100%",
1484 | "gridPos": {
1485 | "h": 7,
1486 | "w": 11,
1487 | "x": 5,
1488 | "y": 32
1489 | },
1490 | "hideTimeOverride": true,
1491 | "id": 10,
1492 | "links": [],
1493 | "pageSize": null,
1494 | "scroll": true,
1495 | "showHeader": true,
1496 | "sort": {
1497 | "col": 0,
1498 | "desc": true
1499 | },
1500 | "styles": [
1501 | {
1502 | "alias": "Time",
1503 | "dateFormat": "YYYY-MM-DD HH:mm:ss",
1504 | "pattern": "Time",
1505 | "type": "date"
1506 | },
1507 | {
1508 | "alias": "Number of bottles",
1509 | "colorMode": null,
1510 | "colors": [
1511 | "rgba(245, 54, 54, 0.9)",
1512 | "rgba(237, 129, 40, 0.89)",
1513 | "rgba(50, 172, 45, 0.97)"
1514 | ],
1515 | "dateFormat": "YYYY-MM-DD HH:mm:ss",
1516 | "decimals": 0,
1517 | "mappingType": 1,
1518 | "pattern": "shelf.Current Count",
1519 | "thresholds": [],
1520 | "type": "number",
1521 | "unit": "short"
1522 | },
1523 | {
1524 | "alias": "",
1525 | "colorMode": null,
1526 | "colors": [
1527 | "rgba(245, 54, 54, 0.9)",
1528 | "rgba(237, 129, 40, 0.89)",
1529 | "rgba(50, 172, 45, 0.97)"
1530 | ],
1531 | "decimals": 2,
1532 | "pattern": "/.*/",
1533 | "thresholds": [],
1534 | "type": "number",
1535 | "unit": "short"
1536 | }
1537 | ],
1538 | "targets": [
1539 | {
1540 | "alias": "",
1541 | "groupBy": [],
1542 | "measurement": "shelf",
1543 | "orderByTime": "ASC",
1544 | "policy": "default",
1545 |         "query": "SELECT \"Current Count\" FROM \"shelf\" WHERE \"object\" = 'bottle'",
1546 | "rawQuery": false,
1547 | "refId": "A",
1548 | "resultFormat": "time_series",
1549 | "select": [
1550 | [
1551 | {
1552 | "params": [
1553 | "Current Count"
1554 | ],
1555 | "type": "field"
1556 | }
1557 | ]
1558 | ],
1559 | "tags": [
1560 | {
1561 | "key": "object",
1562 | "operator": "=",
1563 | "value": "bottle"
1564 | }
1565 | ]
1566 | }
1567 | ],
1568 | "timeFrom": "1h",
1569 |       "timeFrom": "1h",
1570 |       "title": "Information on objects",
1570 | "transform": "timeseries_to_columns",
1571 | "type": "table"
1572 | }
1573 | ],
1574 | "refresh": "5s",
1575 | "schemaVersion": 16,
1576 | "style": "dark",
1577 | "tags": [],
1578 | "templating": {
1579 | "list": []
1580 | },
1581 | "time": {
1582 | "from": "now-5m",
1583 | "to": "now"
1584 | },
1585 | "timepicker": {
1586 | "refresh_intervals": [
1587 | "5s",
1588 | "10s",
1589 | "30s",
1590 | "1m",
1591 | "5m",
1592 | "15m",
1593 | "30m",
1594 | "1h",
1595 | "2h",
1596 | "1d"
1597 | ],
1598 | "time_options": [
1599 | "5m",
1600 | "15m",
1601 | "1h",
1602 | "6h",
1603 | "12h",
1604 | "24h",
1605 | "2d",
1606 | "7d",
1607 | "30d"
1608 | ]
1609 | },
1610 | "timezone": "",
1611 | "title": "Retail Analytics",
1612 | "uid": "ASkE7Rqiz",
1613 | "version": 7
1614 | }
--------------------------------------------------------------------------------
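
For reference, the panels in this dashboard read two InfluxDB measurements: `traffic` (fields `Current Count` and `Total Count`) and `shelf` (field `Current Count`, tag `object`). The sketch below shows one possible producer that writes matching points with the `influxdb` Python client. It is not taken from this repository's application code; the host, port, database name, and the helper names (`report_traffic`, `report_shelf`) are placeholder assumptions, chosen only so the field and tag names line up with the panel definitions above.

```python
from influxdb import InfluxDBClient

# Placeholder connection details -- point these at the InfluxDB instance behind
# the ${DS_RETAIL_ANALYTICS} datasource; host/port/database are assumptions.
client = InfluxDBClient(host="localhost", port=8086, database="Retail_Analytics")


def report_traffic(current_count, total_count):
    """Write one 'traffic' point, read by the People Count table and the
    'People currently present' singlestat."""
    client.write_points([{
        "measurement": "traffic",
        "fields": {
            "Current Count": int(current_count),  # people in view right now
            "Total Count": int(total_count),      # cumulative visitor count
        },
    }])


def report_shelf(obj_label, current_count):
    """Write one 'shelf' point per detected object class; the objects table
    filters on the 'object' tag and the Bottles gauge takes last()."""
    client.write_points([{
        "measurement": "shelf",
        "tags": {"object": obj_label},            # e.g. "bottle"
        "fields": {"Current Count": int(current_count)},
    }])


if __name__ == "__main__":
    report_traffic(current_count=3, total_count=42)
    report_shelf("bottle", 5)
```

From these targets, Grafana's InfluxDB query builder generates InfluxQL such as `SELECT last("Current Count") FROM "shelf" WHERE $timeFilter GROUP BY time(1s) fill(null)` for the Bottles gauge, so the measurement, field, and tag names used by the writer must match the dashboard exactly.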