├── .dockerignore ├── .gitignore ├── CrowdCounter_Usage.mp4 ├── Dockerfile_torchserve ├── LICENSE ├── README.md ├── config.properties ├── data └── occupantLoadFactor.csv ├── docker-compose.yml ├── dockerd-entrypoint.sh ├── docs ├── Database and Swagger Details.md ├── Datasets.md └── Model Creation and Serving.md ├── image_examples ├── IMG_4.jpg ├── crowd_01.jpg ├── img_1_64.json ├── sparse_crowd_01.jpg └── test_image.json ├── images └── Swagger.PNG ├── requirements.txt ├── requirements_torchserve_env.txt ├── service ├── .dockerignore ├── Dockerfile ├── __pycache__ │ ├── config.cpython-36.pyc │ ├── models.cpython-36.pyc │ ├── occupancytype.cpython-36.pyc │ ├── snapshot.cpython-36.pyc │ └── video.cpython-36.pyc ├── app.py ├── config.py ├── crowd_counter.db ├── models.py ├── occupancytype.py ├── snapshot.py └── video.py ├── start_docker_services.sh ├── start_services_gpu.sh ├── stop_docker_services.sh ├── stop_services_gpu.sh ├── swagger.yaml ├── ui ├── .dockerignore ├── .gitignore ├── Dockerfile ├── README.md ├── package-lock.json ├── package.json ├── public │ ├── favicon.ico │ ├── index.html │ ├── logo192.png │ ├── logo512.png │ ├── manifest.json │ └── robots.txt └── src │ ├── App.css │ ├── App.js │ ├── App.test.js │ ├── National-Fire-Protection-Association-Logo1.png │ ├── components │ ├── Card.js │ ├── Counter.js │ ├── Data.js │ ├── Graph.js │ ├── Grid.js │ ├── Occupancy.js │ ├── Player.css │ ├── Player.js │ └── logo_fprf.png │ ├── index.css │ ├── index.js │ ├── logo_fprf.png │ ├── serviceWorker.js │ └── setupTests.js └── utils ├── Combine_CrowdDataset.ipynb ├── Convert_CANNET_Pytorch_TF.ipynb ├── Convert_SSDCNET_Pytorch_TRF.ipynb ├── Finetune-Keras-vgg-mayub.ipynb ├── Generate_tf_serving_models.ipynb ├── Network.zip ├── Network ├── SSDCNet.py ├── __init__.py ├── base_Network_module.py ├── class_func.py └── merge_func.py ├── Test_SSDCNet_models.ipynb ├── Test_heatmap_and_TRTServer.ipynb ├── crdcnt_handler_sha_gpu.py ├── crdcnt_handler_shb_gpu.py ├── crowdmodel_sha.py ├── crowdmodel_shb.py ├── model_simple.onnx ├── pic.png └── test_onnx.py /.dockerignore: -------------------------------------------------------------------------------- 1 | # Files to ignore while building Docker image for the app 2 | docs* 3 | image* 4 | logs* 5 | utils* 6 | ui/node_modules/* 7 | service/__pycache__/* -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Log files 2 | logs* 3 | serving_models* 4 | torchserve_model_store* -------------------------------------------------------------------------------- /CrowdCounter_Usage.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/CrowdCounter_Usage.mp4 -------------------------------------------------------------------------------- /Dockerfile_torchserve: -------------------------------------------------------------------------------- 1 | # syntax = docker/dockerfile:experimental 2 | # 3 | # This file can build images for cpu and gpu env. By default it builds image for CPU. 4 | # Use following option to build image for cuda/GPU: --build-arg BASE_IMAGE=nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 5 | # Here is complete command for GPU/cuda - 6 | # $ DOCKER_BUILDKIT=1 docker build --file Dockerfile --build-arg BASE_IMAGE=nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 -t torchserve:latest . 
7 | # 8 | # Following comments have been shamelessly copied from https://github.com/pytorch/pytorch/blob/master/Dockerfile 9 | # 10 | # NOTE: To build this you will need a docker version > 18.06 with 11 | # experimental enabled and DOCKER_BUILDKIT=1 12 | # 13 | # If you do not use buildkit you are not going to have a good time 14 | # 15 | # For reference: 16 | # https://docs.docker.com/develop/develop-images/build_enhancements/ 17 | 18 | 19 | ARG BASE_IMAGE=ubuntu:18.04 20 | 21 | FROM ${BASE_IMAGE} AS compile-image 22 | 23 | ENV PYTHONUNBUFFERED TRUE 24 | 25 | RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \ 26 | apt-get update && \ 27 | DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ 28 | ca-certificates \ 29 | g++ \ 30 | python3-dev \ 31 | python3-distutils \ 32 | python3-venv \ 33 | openjdk-11-jre-headless \ 34 | curl \ 35 | && rm -rf /var/lib/apt/lists/* \ 36 | && cd /tmp \ 37 | && curl -O https://bootstrap.pypa.io/get-pip.py \ 38 | && python3 get-pip.py 39 | 40 | RUN python3 -m venv /home/venv 41 | 42 | ENV PATH="/home/venv/bin:$PATH" 43 | 44 | RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 45 | RUN update-alternatives --install /usr/local/bin/pip pip /usr/local/bin/pip3 1 46 | 47 | # This is only useful for cuda env 48 | RUN export USE_CUDA=1 49 | 50 | RUN pip install --no-cache-dir torchserve torch-model-archiver torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html 51 | 52 | # Final image for productions 53 | FROM ${BASE_IMAGE} AS runtime-image 54 | 55 | ENV PYTHONUNBUFFERED TRUE 56 | 57 | RUN --mount=type=cache,target=/var/cache/apt \ 58 | apt-get update && \ 59 | DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ 60 | python3 \ 61 | python3-distutils \ 62 | openjdk-11-jre-headless \ 63 | && rm -rf /var/lib/apt/lists/* \ 64 | && cd /tmp 65 | 66 | COPY --from=compile-image /home/venv /home/venv 67 | 68 | ENV PATH="/home/venv/bin:$PATH" 69 | 70 | RUN useradd -m model-server \ 71 | && mkdir -p /home/model-server/tmp 72 | 73 | COPY dockerd-entrypoint.sh /usr/local/bin/dockerd-entrypoint.sh 74 | 75 | RUN chmod +x /usr/local/bin/dockerd-entrypoint.sh \ 76 | && chown -R model-server /home/model-server 77 | 78 | COPY config.properties /home/model-server/config.properties 79 | RUN mkdir /home/model-server/torchserve_model_store && chown -R model-server /home/model-server/torchserve_model_store 80 | COPY torchserve_model_store/* /home/model-server/torchserve_model_store/ 81 | 82 | EXPOSE 8443 8444 83 | 84 | USER model-server 85 | WORKDIR /home/model-server 86 | ENV TEMP=/home/model-server/tmp 87 | ENTRYPOINT ["/usr/local/bin/dockerd-entrypoint.sh"] 88 | CMD ["serve"] 89 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020, National Fire Protection Association (NFPA) 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. 
Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Data informed Crowd Detection and Management 2 | Analyzing large crowds from video feeds 3 | 4 | 5 | https://user-images.githubusercontent.com/7122670/136591602-720dd6a5-fca5-42e1-a089-8f75f92f178c.mp4 6 | 7 | Front End Created by Fred MacDonald 8 | 9 | ## Motivation 10 | Large crowds can present some of the most complex challenges faced by safety officers, code officials, and facility managers. 11 | Crowd management has been a long-standing life safety concern for both fire-related and non-fire emergencies in an array of venues, including sports facilities, concert halls, clubs, malls, and fairgrounds. 12 | Crowd dynamics can lead to trampling incidents, crowd crushes, and violence, and when these events combine with insufficient means of egress and ineffective crowd management, injuries and deaths can occur, sometimes in staggering numbers. 13 | 14 | But what if crowd managers and authorities having jurisdiction (AHJs) could evaluate crowd dynamics with a real-time, automated crowd monitoring system? 15 | What if they could use detailed, data-informed situational awareness to identify rapid changes in crowd density, movement, and other behaviors and neutralize potentially dangerous situations? 16 | 17 | This data-informed crowd management project aims to solve some of the above concerns. 18 | The goal of the project is to develop a open-source prototype tool that could be developed and integrated into a real-time situational platform designed to automatically monitor crowds in a specified area and compare the estimated crowd size against requirements specified life safety codes, such as NFPA 101. 19 | The vision is not intended to replace safety officials; instead, the tool can be used as part of the event planning process, and during live events to support crowd managers as they make timely, informed decisions. 20 | 21 | ## Methodological Approach 22 | 23 | The evolution of Neural Network (NN) architectures in the past couple of years has shown to be very promising in the field of computer vision use cases. 
24 | Specifically, variations of Convolutional Neural Networks (CNNs) have been used as state-of-the-art techniques for facial recognition, image classification/labeling, and more recently crowd detection. 25 | [More Details](https://www.mitpressjournals.org/doi/full/10.1162/neco_a_00990). 26 | 27 | Real-time crowd counting can be thought of as estimating the number of people in a series of still images or video frames. 28 | We take a two-step approach to this calculation. First, we classify the image of the crowd as falling into one of two categories: a sparse crowd or a dense crowd. We then apply a CNN model to count the number of people, with the specific model used having been fine-tuned to count either sparse or dense crowds. 29 | Although the first step in our process is optional, we have found the predicted count is much closer to ground truth when adding in this step. (TODO: Add test results section) 30 | 31 | Various open-source data repositories with labelled crowd images/videos have been used to train, validate and test this application. See [Datasets](docs/Datasets.md) for more details. 32 | 33 | For the first classification step, we use the [VGG16 model](https://arxiv.org/pdf/1409.1556.pdf) as the base and use [Transfer Learning](https://en.wikipedia.org/wiki/Transfer_learning) approaches to fine-tune the model on large crowd images. This is done by changing the last softmax layer to match the output categories. [More Details](https://machinelearningmastery.com/how-to-use-transfer-learning-when-developing-convolutional-neural-network-models/). For the second step of crowd counting, we use pretrained models from the [Congested Scene Recognition (CSRNet)](https://arxiv.org/pdf/1802.10062.pdf) family. CSRNet models are estimation models that aim to generate high-quality density maps by using [dilated convolutions](http://vladlen.info/papers/dilated-convolutions.pdf). They also use VGG16 as the base layers because of its strong transfer learning ability and the flexibility to modify the architecture without adding much complexity to training. 34 | 35 | In addition to the CSRNet models originally published in 2018, we also used a more recent crowd counting model, the [Supervised Spatial Divide and Conquer (SSDCNet) model](https://arxiv.org/pdf/2001.01886.pdf) (May 2020). Our initial analysis suggests that the SSDCNet models may yield slightly better performance on high-density crowd images. 36 | 37 | The prototype application is built using a simple Python backend and a ReactJS frontend. We chose these technologies because of model compatibility, fast prototyping and interactive visualizations. For serving the CNN models we use a combination of [TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving) and [TorchServe](https://github.com/pytorch/serve), as the classification/CSRNet models are in TensorFlow format and the SSDCNet models are in PyTorch format. 
For more details on modeling see [Training and Serving](docs/Model%20Creation%20and%20Serving.md) models and for backend setup see [Database Details](docs/Database%20and%20Swagger%20Details.md) 38 | 39 | 40 | 41 | ## Getting Started 42 | 43 | ### Option 1: Docker Deployment 44 | 45 | ```python 46 | # Create conda environment for the project 47 | conda create -n crowd_count_env pip python=3.6.9 48 | conda activate crowd_count_env 49 | ``` 50 | 51 | ```python 52 | # Clone the project 53 | git clone https://github.com/NFPA/Crowd_Detection.git 54 | ``` 55 | 56 | ```python 57 | # Install python packages 58 | cd Crowd_Detection 59 | pip install -r requirements.txt 60 | # Start the docker service 61 | bash start_docker_services.sh 62 | ``` 63 | The `start_docker_services.sh` scripts downloads all necessary models from an AWS S3 bucket to build the respective model containers. The script also runs the Flask and React application inside a docker container using `docker-compose`. 64 | 65 | ```python 66 | # To see all running containers 67 | docker container ps 68 | ``` 69 | 70 | Go to `:8080` to see the Application. 71 | 72 | ```python 73 | # Stopping docker containers 74 | bash stop_docker_services.sh 75 | ``` 76 | 77 | For backing up all application data, please use below container locations: 78 | 79 | Heatmaps location - `/usr/src/app/images/heatmaps` 80 | 81 | Snapshots location - `/usr/src/app/images/snapshots` 82 | 83 | Application Database location - `/usr/src/app/service/crowd_counter.db` 84 | 85 | See [Database and Swagger Details](docs/Database%20and%20Swagger%20Details.md) for more info. on `crowd_counter.db` 86 | 87 | ### Option 2: Local Deployment 88 | 89 | #### Clone Project 90 | 91 | ```python 92 | # Clone the project 93 | git clone https://github.com/NFPA/Crowd_Detection.git 94 | cd Crowd_Detection 95 | ``` 96 | 97 | #### Create Environments 98 | 99 | ```python 100 | # Create Torchserve environment 101 | conda create --name torchserve_gpu --file requirements_torchserve_env.txt 102 | # To avoid package dependency errors, the above requirements file has been created using command: 103 | # `conda list --explicit > requirements_torchserve_env.txt` 104 | 105 | # Create conda environment for the project 106 | conda create -n crowd_count_env pip python=3.6.9 107 | conda activate crowd_count_env 108 | ``` 109 | 110 | #### Download models 111 | 112 | ```python 113 | # Download TF Serving models from S3 114 | mkdir -p serving_models 115 | cd serving_models 116 | 117 | # Install AWS CLI from here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html 118 | aws s3 cp s3://crowd-counter-models/Serving . --recursive 119 | cd .. 120 | 121 | # Download Torchserve models from S3 122 | mkdir -p torchserve_model_store 123 | cd torchserve_model_store 124 | aws s3 cp s3://crowd-counter-models/torchserve_models . --recursive 125 | cd .. 126 | ``` 127 | 128 | #### Install and Start services 129 | 130 | ```python 131 | # Make dir's to store images and heatmaps 132 | mkdir -p ./images/heatmaps 133 | mkdir -p ./images/snapshots 134 | 135 | # Install project packages 136 | pip install -r requirements.txt 137 | 138 | # Start all local services 139 | bash start_services_gpu.sh 140 | ``` 141 | 142 | This should create database if it does not exists, start the model Inference servers and Flask server. Refer to log files for any errors in startup. 143 | 144 | Go to `:8080` to see the Application. 145 | 146 | 147 | #### Front-End Review 148 | 149 | Pre-Capture Controls 150 | 151 | 1. 
To run this application, please submit a URL pointing to a video resource in an open source format (.mp4, .mov). If one is not available we have provided a default video for testing purposes. Note that future work could straightforwardly extend this prototype to capture streaming videos as well. 152 | 153 | 2. Once the URL is entered, click 'Load' and your video url will appear in the 'Environment Details' section on the right. 154 | 155 | 3. After the URL is loaded, select the 'Occupancy Type' in the drop-down menu below the URL. The 'Occupancy Type' will appear in the 'Environment Details' section to the right. In addition to the Occupancy Type, the Occupancy Load values will appear below the Occupancy Type in the 'Environment Details' Section. 156 | 157 | 4. After selecting the 'Occupancy Type', determine the square footage of the given video feed and enter it in the Area section below 'Occupany Type'. Changing the area and occupancy type values will affect the output of the Occupancy Threshold, also in the 'Environment Details' section. Once the area is greater than zero, your video will load and a graph will appear to the right. 158 | 159 | 5. Select Metric or Imperial (English) units of measure from the radio button below Area. 160 | 161 | Video Capture Controls 162 | 163 | 1. Once all of the information above is entered, a user interface for the video player will show up with common controls like Play, Pause and Stop, but also Capture Frame. Capture Frame will capture the current video frame and send it to the back end deep learning models to count the crowd. Once the frame has been processed, a heatmap will appear below the player next to the capture and counts and average count from different models will appear on the graph. 164 | 165 | 2. There is another option to 'Auto Capture' which will send captures to the backend models on a user specified (default: 2 seconds) [1 second, 2 second, 5 second, 10 second, 30 second, 60 second] interval. Auto Capture will stop at the conclusion of the video or it can be stopped by clicking the Stop Capture button. 166 | 167 | ### Results: 168 | 169 | [TODO: Insert heatmaps and Images of various environments/videos] 170 | 171 | 172 | ## Project Specific Members 173 | 174 | **Victoria Hutchison** - Research Project Cordinator, Fire Protection Research Foundation, NFPA 175 | 176 | **Joe Gochal** - Project Director, Data Analytics, NFPA 177 | 178 | **Mohammed Ayub** - Lead Developer, Data Analytics, NFPA 179 | 180 | **Frederick MacDonald III** - FrontEnd Developer, Data Analytics, NFPA 181 | 182 | ## Acknowledgements (Other Panel Members) 183 | National Institute of Standards Technology (NIST) for providing the grant. 184 | 185 | ## References 186 | 187 | 1) Yuhong Li, Xiaofan Zhang, and Deming Chen. [CSRNet: Dilated Convolutional Neural Networks for Understanding the Highly Congested Scenes](https://arxiv.org/pdf/1802.10062.pdf). In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1091–1100, 2018 188 | 189 | 2) Haipeng Xiong, Hao Lu, Chengxin Liu, Liang Liu, Chunhua Shen, Zhiguo Cao. [From Open Set to Closed Set: Supervised Spatial Divide-and-Conquer for Object Counting](https://github.com/xhp-hust-2018-2011/SS-DCNet) 190 | 191 | 3) Xiong, Haipeng and Lu, Hao and Liu, Chengxin and Liang, Liu and Cao, Zhiguo and Shen, Chunhua. 
[From Open Set to Closed Set: Counting Objects by Spatial Divide-and-Conquer](https://arxiv.org/pdf/2001.01886.pdf) , in Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 8362-8371, 2019 192 | 193 | 3) UCF-QNRF: H. Idrees, M. Tayyab, K. Athrey, D. Zhang, S. Al-Maddeed, N. Rajpoot, M. Shah, [Composition Loss for Counting, Density Map Estimation and Localization in Dense Crowds](https://www.crcv.ucf.edu/papers/eccv2018/2324.pdf), in Proceedings of IEEE European Conference on Computer Vision (ECCV 2018), Munich, Germany, September 8-14, 2018 194 | 195 | 4) UCF-CC-50: Haroon Idrees, Imran Saleemi, Cody Seibert, Mubarak Shah, [Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images](https://www.crcv.ucf.edu/papers/cvpr2013/Counting_V3o.pdf), IEEE International Conference on Computer Vision and Pattern Recognition (CVPR), 2013 196 | 197 | 5) WorldExpo'10: Cong Zhang, Hongsheng Li, Xiaogang Wang, Xiaokang Yang; [Cross-scene Crowd Counting via Deep Convolutional Neural Networks](cv-foundation.org/openaccess/content_cvpr_2015/papers/Zhang_Cross-Scene_Crowd_Counting_2015_CVPR_paper.pdf)Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015, pp. 833-841 198 | 199 | ## License 200 | This project is published under the [BSD-3 license](https://github.com/mohammedayub44/ObjectDetection/blob/main/LICENSE). 201 | -------------------------------------------------------------------------------- /config.properties: -------------------------------------------------------------------------------- 1 | inference_address=http://0.0.0.0:8443 2 | management_address=http://0.0.0.0:8444 3 | # default_workers_per_model=1 4 | # number_of_gpu=0 5 | cors_allowed_origin=* 6 | cors_allowed_methods=GET, POST, PUT, OPTIONS 7 | cors_allowed_headers=X-Custom-Header 8 | model_store=/home/model-server/torchserve_model_store -------------------------------------------------------------------------------- /data/occupantLoadFactor.csv: -------------------------------------------------------------------------------- 1 | Use,ft2_person,m2_person 2 | Concentrated use: without fixed seating,7,0.65 3 | Less concentrated use: without fixed seating,15,1.4 4 | Bench-Type seating,0.666,0.45 5 | Kitchens,100,9.3 6 | Library stack areas,100,9.3 7 | Library reading rooms (net),50,4.6 8 | Swimming pools (water surface),50,4.6 9 | Swimming pool decks,30,2.8 10 | Exercise rooms w/equipment,50,4.6 11 | Exercise rooms w/o equipment,15,1.4 12 | Stages(net),15,1.4 13 | Lighting and access catwalks; galleries; gridirons (net),100,9.3 14 | Casinos and gaming areas,11,1 15 | Skating rinks,50,4.6 16 | Business Use,150,14 17 | Concentrated Business Use,50,4.6 18 | Airport traffic control tower observation levels,40,3.7 19 | Collaboration rooms/spaces ≤450 ft2 (41.8 m2) in area,30,2.8 20 | Collaboration rooms/spaces >450 ft2 (41.8 m2) in area,15,1.4 21 | Day-Care Use (net),35,3.3 22 | Detention and Correctional Use,120,11.1 23 | Educational Use Classrooms(net),20,1.9 24 | Shops/laboratories/vocational rooms(net),50,4.6 25 | Health Care UseInpatient treatmentdepartments,240,22.3 26 | Sleeping departments,120,11.1 27 | Ambulatory health care,150,14 28 | Industrial Use General and high hazard industrial,100,9.3 29 | Sales area on street floor,30,2.8 30 | Sales area on two or more street floors,40,3.7 31 | Sales area on floor below street floor,30,2.8 32 | Sales area on floors above street floor,60,5.6 33 | Floors (portions) used only for storage/receiving/shipping and not open to 
general public,300,27.9 34 | Hotels and dormitories,200,18.6 35 | Apartment buildings,200,18.6 36 | Board and care: large,200,18.6 37 | storage: mercantile occupancies,300,27.9 38 | storage: other than mercantile occupancies,500,46.5 39 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | backend: 5 | build: 6 | context: ./ 7 | dockerfile: ./service/Dockerfile 8 | network_mode: "host" 9 | expose: 10 | - 8081 11 | ports: 12 | - 8081:8081 13 | environment: 14 | - FLASK_DEBUG=1 15 | frontend: 16 | tty: true 17 | build: 18 | context: ./ 19 | dockerfile: ./ui/Dockerfile 20 | expose: 21 | - 8080 22 | ports: 23 | - 8080:8080 24 | volumes: 25 | - ./ui/public:/usr/src/app/public 26 | - ./ui/src:/usr/src/app/src 27 | links: 28 | - "backend:backend" -------------------------------------------------------------------------------- /dockerd-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [[ "$1" = "serve" ]]; then 5 | shift 1 6 | torchserve --start --ncs --ts-config /home/model-server/config.properties 7 | else 8 | eval "$@" 9 | fi 10 | 11 | # prevent docker exit 12 | tail -f /dev/null 13 | -------------------------------------------------------------------------------- /docs/Database and Swagger Details.md: -------------------------------------------------------------------------------- 1 | ## Technologies Used: 2 | 3 | [Flask-SQLAlchemy](https://flask-sqlalchemy.palletsprojects.com/en/2.x/) - It's a Flask wrapper around SQLAlchemy which is a popular Python SQL toolkit and Object Relational Mapping (ORM) wrappers for developing database operations. 4 | 5 | [Flask-Marshmallow](https://flask-marshmallow.readthedocs.io/en/latest/) - wrapper for Flask apps to do serialization/deserialization to display Python objects in JSON format (also used for Swagger UI). 6 | 7 | [Swagger UI](https://swagger.io/tools/swagger-ui/) - visualize and interact with your DB endpoints seemlessly. Makes backend implemenation easy and client-side consumption easy. 8 | 9 | [Connexion](https://connexion.readthedocs.io/en/latest/index.html) - built on top of Flask to handle connection requests from Swagger UI design to database endpoints. 10 | 11 | 12 | ## Backend Service Details: 13 | 14 | **`config.py`** - Defines the configuration required for the Flask-App. This includes initializations for `SQLAlchemy`, `Marshmallow` and `connexion` objects. 15 | 16 | **`models.py`** - Defines ORM of Table level classes and column level attributes for the components stored in DB. They are Video, Snapshot and OccupancyType. 17 | 18 | **`video.py`** - Describes the video functions that run against `Video` endpoints. Like `create`, `delete`, `update` etc. 19 | 20 | **`snapshot.py`** - Describes the snapshot functions that run against `Snapshot` endpoints. Like `get_weekly_data`, `get_daily_data` etc. 21 | 22 | **`occupancytype.py`** - Function to display all Occupancy types available. 23 | 24 | **`app.py`** - 25 | - It stiches all the above functionalities together into one driver application. 26 | - Its containes functionality for fethcing required video and snapshot information from the client. 27 | - Coordinating with `TensorFlow` and `Torchserve` model servers. 28 | - Creating all the necessary DB tables if not already present and load Occupancy data from static CSV file. 
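For illustration, the table creation and CSV seeding described in the last bullet looks roughly like this (a condensed sketch of the startup logic at the bottom of `service/app.py`; exact variable names may differ slightly):

```python
# Condensed sketch of the startup logic in service/app.py
import pandas as pd

import config
from config import db
from models import Occupancytype

# Create all the necessary DB tables (no-op if they already exist)
config.db.create_all()

# Seed the Occupancytype table from the static CSV only if it is empty
if db.session.query(Occupancytype).count() < 1:
    df = pd.read_csv('../data/occupantLoadFactor.csv')
    for _, row in df.iterrows():
        load = Occupancytype(use=row["Use"], load_ft2=row["ft2_person"], load_m2=row["m2_person"])
        db.session.add(load)
    db.session.commit()
```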
29 | 30 | ## Swagger Details 31 | 32 | The Swagger UI is driven by the `swagger.yaml` file present in the root folder. The connexion config in `config.py` picks up this file using `basedir` at startup. 33 | The UI is available at `http://:8081/api/ui/` after the application has started. 34 | 35 | It should look something like this (feel free to play around): 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /docs/Datasets.md: -------------------------------------------------------------------------------- 1 | # Dataset Descriptions 2 | 3 | A copy of all datasets used is stored in a public S3 repository for easy replication of the results and analysis. 4 | 5 | ## Training 6 | 7 | ### [UCF-QNRF, 2018](https://www.crcv.ucf.edu/data/ucf-qnrf/) 8 | The largest annotated crowd dataset to date, made available for crowd counting and localization techniques by the Center for Research in Computer Vision, University of Central Florida. 9 | 10 | Details: 11 | 12 | | Number of Images| Number of Annotations | Average Count | Maximum Count | Average Resolution | Average Density| 13 | |:---:|:---:|:---:|:---:|:---:|:---:| 14 | |1,535 (Train-1201 Test-334)| 1,251,642 | 815 | 12,865 | 2013 x 2902 | 1.12 x 10^-4 | 15 | 16 | *The average density, i.e., the number of people per pixel over all images, is also the lowest, signifying high-quality large images. 17 | 18 | - All images are taken from Flickr, the Web and Hajj footage (Research Paper Section 4 - Data Collection) and not from surveillance camera streams or simulated crowd scenes. Hence, it is very diverse in terms of perspective, image resolution, crowd density and the scenarios in which crowds exist. 19 | 20 | - The dataset contains buildings, vegetation, sky and roads as they are present in realistic scenarios captured in the wild. This makes the dataset more realistic as well as more difficult. 21 | 22 | - To reduce geographical bias, images have been taken from various countries. Refer to the dataset link for a geo-tagged map. 23 | 24 | ### [UCF-CC-50, 2013](https://www.crcv.ucf.edu/data/ucf-cc-50/) 25 | 26 | 50 high-density crowd images sourced from Flickr for research purposes by the Center for Research in Computer Vision, University of Central Florida. 27 | 28 | Details: 29 | 30 | | Number of Images| Number of Annotations | Average Count | Maximum Count | Average Resolution | Average Density| 31 | |:---:|:---:|:---:|:---:|:---:|:---:| 32 | |50| 63,974 | 1,279 | 4633 | 2101 x 2888| 2.02 x 10^-4 | 33 | 34 | 35 | 36 | ### [ShanghaiTech Dataset, 2016](https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Single-Image_Crowd_Counting_CVPR_2016_paper.pdf) 37 | 38 | [Part A](Add S3 Link) - Randomly crawled from the web. 39 | 40 | [Part B](Add S3 Link) - Taken from busy streets of metropolitan areas in Shanghai. 
41 | 42 | Details: 43 | 44 | | Dataset| Number of Images| Number of Annotations | Average Count | Maximum Count | Average Resolution | Average Density| 45 | |:---:|:---:|:---:|:---:|:---:|:---:|:---:| 46 | |Part A | 482 (1198 Total)| 241,677 (330,000 Total) | 501.4 | 3139 | 589 x 868 | 9.33 x 10^-4 | 47 | |Part B | 716 (1198 Total)| 88,488 (330,000 Total) | 123.6 | 578 | 768 × 1024| much larger | 48 | 49 | 50 | ### [WorldExpo'10 Dataset, 2015](https://www.cv-foundation.org/openaccess/content_cvpr_2015/html/Zhang_Cross-Scene_Crowd_Counting_2015_CVPR_paper.html) 51 | 52 | Details: 53 | 54 | | Number of Images| Number of Annotations | Average Count | Maximum Count | Average Resolution | Average Density| 55 | |:---:|:---:|:---:|:---:|:---:|:---:| 56 | |3980| 225,216 | 56 | 334 | 576 x 720| 1.36 x 10^-4 | 57 | 58 | - It includes 1,132 annotated video sequences captured by 108 surveillance cameras, all from the Shanghai 2010 WorldExpo (data shared for research purposes). 59 | - The data was primarily generated to show effective cross-scene counting using CNN models and density maps. 60 | 61 | 62 | ## Testing 63 | 64 | [Pedestrian Dynamics Data Archive](https://ped.fz-juelich.de/database/doku.php) 65 | 66 | 67 | -------------------------------------------------------------------------------- /docs/Model Creation and Serving.md: -------------------------------------------------------------------------------- 1 | # Classification 2 | 3 | ## Finetuning Classifier on Crowd Image Datasets 4 | - We extract image features from a trained VGG16 model using the simple script shown [here](https://www.pyimagesearch.com/2019/05/27/keras-feature-extraction-on-large-datasets-with-deep-learning/). 5 | 6 | ### 1) Build Dataset for Training Classifier 7 | - Concatenate data from ShanghaiTech and UCF-QNRF 2018. Refer to the [Combining Datasets](https://github.com/NFPA/Crowd_Detection/blob/development/utils/Combine_CrowdDataset.ipynb) notebook for more details. 8 | - We modify the config and run `build_dataset.py` from the PyImageSearch script above. This generates `train`, `test` and `validation` splits with `dense` and `sparse` class label folders in each. 9 | 10 | ### 2) Extract features for each Image 11 | - Run `extract_features.py` to extract a `25,088`-dimensional (`7*7*512` flattened) feature vector for each image using the pretrained VGG16 Keras model. 12 | - We write all extracted features to CSV files, namely `test.csv`, `train.csv` and `validate.csv`. 13 | 14 | ### 3) Train Classifier on extracted Image features 15 | - Modify and run `train.py` to train a simple multi-class logistic regression model on the generated features (CSV files). 16 | - The `SMOTE` library is used to handle class imbalance. 17 | - `GridSearch` is used to train the model with the best hyperparameters. 18 | - Outputs a serialized pickle model file `classify_model.pkl`. Refer to the [Combining Datasets](https://github.com/NFPA/Crowd_Detection/blob/development/utils/Combine_CrowdDataset.ipynb) notebook for a classification example. 19 | 20 | Optionally, we could have also used a Keras NN model with a classification head, as shown in [Finetuning VGG16 for Classification](https://github.com/NFPA/Crowd_Detection/blob/development/utils/Finetune-Keras-vgg-mayub.ipynb) (not used here). 21 | 22 | ## Serving the Classifier 23 | - The [JOBLIB](https://joblib.readthedocs.io/en/latest/) library is used to reconstruct the model and run inference. 
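For example, a minimal inference sketch with the serialized classifier (the dummy feature vector below is a placeholder; in the application the features are the flattened `block5_pool` output of the headless VGG16 model served by TF Serving):

```python
import joblib
import numpy as np

# Reconstruct the scikit-learn classifier produced by train.py
clf = joblib.load("serving_models/classify_model.pkl")

# Placeholder for a real (1, 7, 7, 512) block5_pool feature map from VGG16
feat = np.random.rand(1, 7, 7, 512).astype("float32")
features = feat.reshape((feat.shape[0], 7 * 7 * 512))  # flatten to (1, 25088)

label = clf.predict(features)[0]  # '0' = dense, '1' = sparse (as interpreted in app.py)
print("sparse" if str(label) == "1" else "dense")
```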
24 | 25 | # Count Prediction 26 | 27 | ## 1) Congested Scene Recognition (CSRNet) Model 28 | 29 | ### Downloading: 30 | 31 | [CSRNet](https://github.com/Neerajj9/CSRNet-keras) - TensorFlow implementation with training data and model. 32 | You can download the weights from this [release](https://github.com/ZhengPeng7/CSRNet-Keras/releases) or from our [S3 Bucket](https://crowd-counter-models.s3.us-east-2.amazonaws.com/CSRNet/CSRNet_models.zip). 33 | 34 | ### Serving: Notebook - [Converting to Tensorflow Serving Format](https://github.com/NFPA/Crowd_Detection/blob/development/utils/Generate_tf_serving_models.ipynb) 35 | - Shows how to convert CSRNet models (Part A and B) to TensorFlow Serving format. 36 | - We need to serve a VGG16 model with no head to extract features as inputs to `classify_model.pkl`. 37 | - Shows how to test a sample image against TensorFlow Serving. 38 | 39 | Refer to [`start_services_gpu.sh`](https://github.com/NFPA/Crowd_Detection/blob/development/start_services_gpu.sh) for steps to start the TF Serving Docker container. 40 | 41 | All the TensorFlow Serving models can be downloaded from our [S3 Bucket](https://crowd-counter-models.s3.us-east-2.amazonaws.com/Serving/tf_serving_models.zip). 42 | 43 | ## 2) Supervised Spatial Divide-and-Conquer (SSDCNet) Model 44 | 45 | ### Downloading: 46 | - Create a model folder at the project level to store all PyTorch models. 47 | - Download the model and weights from this [repo](https://github.com/xhp-hust-2018-2011/SS-DCNet) or from our [S3 Bucket](https://crowd-counter-models.s3.us-east-2.amazonaws.com/SSDCNet/SSDCNet_models.zip). 48 | 49 | ### Serving: [TorchServe Server](https://github.com/pytorch/serve) 50 | - TorchServe requires the model to be archived (into a `.mar` file) using the [`torch-model-archiver`](https://github.com/pytorch/serve/blob/master/model-archiver/README.md) utility. 51 | - The archiver utility requires: 52 | - Model Name - unique name used to connect and run inference 53 | - Model Architecture - model file describing the architectural details 54 | - Serialized File - .pt or .pth file containing the state_dict 55 | - Handler - file describing how inference is handled (needs separate tuning for batch mode) 56 | - Export Path - where the archived (`.mar`) file is written. Refer to the files in the [`utils`](https://github.com/NFPA/Crowd_Detection/tree/development/utils/ssdcnet_sha) folder to generate the `.mar` file. 57 | 58 | ```bash 59 | torch-model-archiver --model-name ssdcnet_sha_gpu_batch --version 1.0 \ 60 | --model-file ./serve/examples/crowdcount/ssdcnet_sha/crowdmodel.py \ 61 | --serialized-file ./serve/examples/crowdcount/ssdcnet_sha/ssdcnet_sha_best_epoch.pth \ 62 | --export-path torchserve_model_store \ 63 | --handler ./serve/examples/crowdcount/ssdcnet_sha/crdcnt_handler_gpu.py -f 64 | ``` 65 | 66 | Refer to [`start_services_gpu.sh`](https://github.com/NFPA/Crowd_Detection/blob/development/start_services_gpu.sh) for steps to start the torch inference server. 
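Once the server is up, predictions can be requested over plain HTTP, which is how the Flask backend calls it. A minimal sketch (the model name must match whatever was registered via the archiver, and the port follows `config.properties`):

```python
import requests

# Send raw image bytes to the TorchServe inference endpoint (port 8443 per config.properties)
with open("image_examples/crowd_01.jpg", "rb") as f:
    response = requests.post("http://0.0.0.0:8443/predictions/ssdcnet_sha_gpu_batch", data=f.read())

print("Predicted count:", response.text)  # the handler returns the count as plain text
```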
67 | 68 | All `.mar` serving models can be downloaded from our [S3 Bucket](https://crowd-counter-models.s3.us-east-2.amazonaws.com/Serving/torchserve_model_store.zip) 69 | -------------------------------------------------------------------------------- /image_examples/IMG_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/image_examples/IMG_4.jpg -------------------------------------------------------------------------------- /image_examples/crowd_01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/image_examples/crowd_01.jpg -------------------------------------------------------------------------------- /image_examples/sparse_crowd_01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/image_examples/sparse_crowd_01.jpg -------------------------------------------------------------------------------- /images/Swagger.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/images/Swagger.PNG -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.10.0 2 | astor==0.8.1 3 | attrs==20.1.0 4 | certifi==2020.6.20 5 | chardet==3.0.4 6 | click==7.1.2 7 | clickclick==1.2.2 8 | connexion==2.6.0 9 | cycler==0.10.0 10 | Flask==1.1.2 11 | Flask-Cors==3.0.8 12 | flask-marshmallow==0.11.0 13 | Flask-SQLAlchemy==2.4.1 14 | gast==0.2.2 15 | google-pasta==0.2.0 16 | grpcio==1.31.0 17 | h5py==2.10.0 18 | idna==2.8 19 | importlib-metadata==1.7.0 20 | inflection==0.5.1 21 | itsdangerous==1.1.0 22 | Jinja2==2.11.2 23 | joblib==0.14.1 24 | jsonschema==3.2.0 25 | Keras==2.2.4 26 | Keras-Applications==1.0.8 27 | Keras-Preprocessing==1.1.2 28 | kiwisolver==1.2.0 29 | Markdown==3.2.2 30 | MarkupSafe==1.1.1 31 | marshmallow==3.7.1 32 | marshmallow-sqlalchemy==0.22.3 33 | matplotlib==3.1.1 34 | numpy==1.19.1 35 | openapi-spec-validator==0.2.9 36 | opt-einsum==3.3.0 37 | pandas==0.25.3 38 | Pillow==7.1.2 39 | protobuf==3.13.0 40 | pyparsing==2.4.7 41 | pyrsistent==0.16.0 42 | python-dateutil==2.8.1 43 | pytz==2020.1 44 | PyYAML==5.3.1 45 | requests==2.22.0 46 | scipy==1.5.2 47 | six==1.15.0 48 | scikit-learn==0.22.1 49 | SQLAlchemy==1.3.19 50 | swagger-ui-bundle==0.0.6 51 | tensorboard==1.15.0 52 | tensorflow==1.15.0 53 | tensorflow-estimator==1.15.1 54 | tensorflow-serving-api==1.11.0 55 | termcolor==1.1.0 56 | urllib3==1.25.10 57 | Werkzeug==1.0.1 58 | wrapt==1.12.1 59 | zipp==3.1.0 60 | -------------------------------------------------------------------------------- /requirements_torchserve_env.txt: -------------------------------------------------------------------------------- 1 | # This file may be used to create an environment using: 2 | # $ conda create --name --file 3 | # platform: linux-64 4 | @EXPLICIT 5 | https://repo.anaconda.com/pkgs/main/linux-64/_libgcc_mutex-0.1-main.tar.bz2 6 | https://repo.anaconda.com/pkgs/main/linux-64/blas-1.0-mkl.tar.bz2 7 | 
https://repo.anaconda.com/pkgs/main/linux-64/ca-certificates-2020.1.1-0.tar.bz2 8 | https://repo.anaconda.com/pkgs/main/linux-64/ld_impl_linux-64-2.33.1-h53a641e_7.tar.bz2 9 | https://repo.anaconda.com/pkgs/main/linux-64/libgfortran-ng-7.3.0-hdf63c60_0.tar.bz2 10 | https://repo.anaconda.com/pkgs/main/linux-64/libstdcxx-ng-9.1.0-hdf63c60_0.tar.bz2 11 | https://repo.anaconda.com/pkgs/main/linux-64/cudatoolkit-10.1.243-h6bb024c_0.tar.bz2 12 | https://repo.anaconda.com/pkgs/main/linux-64/libgcc-ng-9.1.0-hdf63c60_0.tar.bz2 13 | https://repo.anaconda.com/pkgs/main/linux-64/jpeg-9b-h024ee3a_2.tar.bz2 14 | https://repo.anaconda.com/pkgs/main/linux-64/libffi-3.3-he6710b0_1.tar.bz2 15 | https://repo.anaconda.com/pkgs/main/linux-64/ncurses-6.2-he6710b0_1.tar.bz2 16 | https://repo.anaconda.com/pkgs/main/linux-64/openssl-1.1.1g-h7b6447c_0.tar.bz2 17 | https://repo.anaconda.com/pkgs/main/linux-64/xz-5.2.5-h7b6447c_0.tar.bz2 18 | https://repo.anaconda.com/pkgs/main/linux-64/zlib-1.2.11-h7b6447c_3.tar.bz2 19 | https://repo.anaconda.com/pkgs/main/linux-64/libedit-3.1.20181209-hc058e9b_0.tar.bz2 20 | https://repo.anaconda.com/pkgs/main/linux-64/libpng-1.6.37-hbc83047_0.tar.bz2 21 | https://repo.anaconda.com/pkgs/main/linux-64/readline-8.0-h7b6447c_0.tar.bz2 22 | https://repo.anaconda.com/pkgs/main/linux-64/tk-8.6.8-hbc83047_0.tar.bz2 23 | https://repo.anaconda.com/pkgs/main/linux-64/zstd-1.3.7-h0b5b093_0.tar.bz2 24 | https://repo.anaconda.com/pkgs/main/linux-64/freetype-2.9.1-h8a8886c_1.tar.bz2 25 | https://repo.anaconda.com/pkgs/main/linux-64/libtiff-4.1.0-h2733197_0.tar.bz2 26 | https://repo.anaconda.com/pkgs/main/linux-64/sqlite-3.31.1-h62c20be_1.tar.bz2 27 | https://repo.anaconda.com/pkgs/main/linux-64/python-3.8.3-hcff3b4d_0.tar.bz2 28 | https://repo.anaconda.com/pkgs/main/linux-64/certifi-2020.4.5.1-py38_0.tar.bz2 29 | https://repo.anaconda.com/pkgs/main/linux-64/chardet-3.0.4-py38_1003.tar.bz2 30 | https://repo.anaconda.com/pkgs/main/linux-64/future-0.18.2-py38_0.tar.bz2 31 | https://repo.anaconda.com/pkgs/main/noarch/idna-2.9-py_1.tar.bz2 32 | https://repo.anaconda.com/pkgs/main/linux-64/ninja-1.9.0-py38hfd86e86_0.tar.bz2 33 | https://repo.anaconda.com/pkgs/main/noarch/olefile-0.46-py_0.tar.bz2 34 | https://repo.anaconda.com/pkgs/main/linux-64/psutil-5.7.0-py38h7b6447c_0.tar.bz2 35 | https://repo.anaconda.com/pkgs/main/noarch/pycparser-2.20-py_0.tar.bz2 36 | https://repo.anaconda.com/pkgs/main/linux-64/pysocks-1.7.1-py38_0.tar.bz2 37 | https://repo.anaconda.com/pkgs/main/linux-64/six-1.14.0-py38_0.tar.bz2 38 | https://repo.anaconda.com/pkgs/main/noarch/tqdm-4.46.0-py_0.tar.bz2 39 | https://repo.anaconda.com/pkgs/main/linux-64/cffi-1.14.0-py38he30daa8_1.tar.bz2 40 | https://repo.anaconda.com/pkgs/main/linux-64/pillow-7.1.2-py38hb39fc2d_0.tar.bz2 41 | https://repo.anaconda.com/pkgs/main/linux-64/setuptools-46.4.0-py38_0.tar.bz2 42 | https://repo.anaconda.com/pkgs/main/linux-64/cryptography-2.9.2-py38h1ba5d50_0.tar.bz2 43 | https://conda.anaconda.org/pytorch/noarch/torch-model-archiver-0.0.1b20200409-py38_0.tar.bz2 44 | https://conda.anaconda.org/pytorch/noarch/torchserve-0.0.1b20200409-py38_0.tar.bz2 45 | https://repo.anaconda.com/pkgs/main/linux-64/wheel-0.34.2-py38_0.tar.bz2 46 | https://repo.anaconda.com/pkgs/main/linux-64/pip-20.0.2-py38_3.tar.bz2 47 | https://repo.anaconda.com/pkgs/main/linux-64/pyopenssl-19.1.0-py38_0.tar.bz2 48 | https://repo.anaconda.com/pkgs/main/linux-64/urllib3-1.25.8-py38_0.tar.bz2 49 | https://repo.anaconda.com/pkgs/main/linux-64/requests-2.23.0-py38_0.tar.bz2 50 | 
https://repo.anaconda.com/pkgs/main/linux-64/intel-openmp-2020.1-217.tar.bz2 51 | https://repo.anaconda.com/pkgs/main/linux-64/mkl-2020.1-217.tar.bz2 52 | https://repo.anaconda.com/pkgs/main/linux-64/mkl-service-2.3.0-py38he904b0f_0.tar.bz2 53 | https://repo.anaconda.com/pkgs/main/linux-64/numpy-base-1.18.1-py38hde5b4d6_1.tar.bz2 54 | https://repo.anaconda.com/pkgs/main/linux-64/mkl_fft-1.0.15-py38ha843d7b_0.tar.bz2 55 | https://repo.anaconda.com/pkgs/main/linux-64/mkl_random-1.1.0-py38h962f231_0.tar.bz2 56 | https://repo.anaconda.com/pkgs/main/linux-64/numpy-1.18.1-py38h4f9e942_0.tar.bz2 57 | https://conda.anaconda.org/pytorch/linux-64/pytorch-1.5.0-py3.8_cuda10.1.243_cudnn7.6.3_0.tar.bz2 58 | https://conda.anaconda.org/pytorch/noarch/torchtext-0.6.0-py_1.tar.bz2 59 | https://conda.anaconda.org/pytorch/linux-64/torchvision-0.6.0-py38_cu101.tar.bz2 60 | -------------------------------------------------------------------------------- /service/.dockerignore: -------------------------------------------------------------------------------- 1 | # Files to ignore while building Docker image for the app 2 | __pycache* 3 | crowd_counter.db -------------------------------------------------------------------------------- /service/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.6.9 2 | 3 | # RUN apt-get update && \ 4 | # apt-get install -y software-properties-common && \ 5 | # add-apt-repository ppa:deadsnakes/ppa && \ 6 | # apt-get update -y && \ 7 | # apt-get install -y build-essential python3.6 python3.6-dev python3-pip && \ 8 | # apt-get install -y git && \ 9 | # # update pip 10 | # python3.6 -m pip install pip --upgrade && \ 11 | # python3.6 -m pip install wheel && \ 12 | RUN mkdir -p /usr/src/app && \ 13 | mkdir -p /usr/src/app/images/heatmaps && \ 14 | mkdir -p /usr/src/app/images/snapshots && \ 15 | mkdir -p /usr/src/app/serving_models && \ 16 | mkdir -p /usr/src/app/data 17 | 18 | WORKDIR /usr/src/app 19 | 20 | COPY requirements.txt swagger.yaml /usr/src/app/ 21 | 22 | RUN pip install -r requirements.txt && \ 23 | chmod -R +x /usr/src/app 24 | 25 | COPY ./service/* /usr/src/app/service/ 26 | COPY ./serving_models/classify_model.pkl /usr/src/app/serving_models/classify_model.pkl 27 | COPY ./data/occupantLoadFactor.csv /usr/src/app/data/occupantLoadFactor.csv 28 | 29 | WORKDIR /usr/src/app/service/ 30 | 31 | ENTRYPOINT [ "python" ] 32 | 33 | CMD [ "app.py" ] -------------------------------------------------------------------------------- /service/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/service/__pycache__/config.cpython-36.pyc -------------------------------------------------------------------------------- /service/__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/service/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- /service/__pycache__/occupancytype.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/service/__pycache__/occupancytype.cpython-36.pyc 
-------------------------------------------------------------------------------- /service/__pycache__/snapshot.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/service/__pycache__/snapshot.cpython-36.pyc -------------------------------------------------------------------------------- /service/__pycache__/video.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/service/__pycache__/video.cpython-36.pyc -------------------------------------------------------------------------------- /service/app.py: -------------------------------------------------------------------------------- 1 | from tensorflow_serving.apis import prediction_service_pb2_grpc 2 | from keras.preprocessing.image import ImageDataGenerator 3 | from keras.models import load_model as keras_load_model 4 | from tensorflow.core.framework import types_pb2 5 | from tensorflow_serving.apis import predict_pb2 6 | from keras.models import model_from_json 7 | from flask import Flask,jsonify,request,Response 8 | import matplotlib.pyplot as plt 9 | from PIL import Image,ImageFile 10 | from datetime import datetime 11 | from keras import backend as K 12 | from tensorflow import keras 13 | from flask_cors import CORS,cross_origin 14 | from flask import session 15 | import tensorflow as tf 16 | import numpy as np 17 | import matplotlib.pyplot as plt 18 | import joblib 19 | import requests 20 | import logging 21 | import random 22 | import string 23 | import grpc 24 | import time 25 | import json 26 | import io 27 | import os 28 | import base64 29 | import config 30 | import pandas as pd 31 | from config import db 32 | from models import Video, Snapshot, Occupancytype 33 | 34 | # Get the application instance 35 | connex_app = config.connex_app 36 | 37 | # Read the swagger.yml file to configure the endpoints 38 | connex_app.add_api('swagger.yaml') 39 | 40 | # (not required) Route specific CORS added for endpoints below 41 | # CORS(connex_app.app) 42 | 43 | logging.getLogger('flask_cors').level = logging.DEBUG 44 | logging.basicConfig(level=logging.DEBUG) 45 | 46 | def prepare_image(img, im_type=None): 47 | if im_type=="classify": 48 | newsize = (224, 224) 49 | img = img.resize(newsize) 50 | #Function to load,normalize and return image 51 | im = np.array(img) 52 | im = im/255.0 53 | im[:,:,0]=(im[:,:,0]-0.485)/0.229 54 | im[:,:,1]=(im[:,:,1]-0.456)/0.224 55 | im[:,:,2]=(im[:,:,2]-0.406)/0.225 56 | im = np.expand_dims(im,axis = 0) 57 | print(str(im.shape)) 58 | return im 59 | 60 | def create_tf_prediction_request(): 61 | channel = grpc.insecure_channel("127.0.0.1:8500") 62 | stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) 63 | request = predict_pb2.PredictRequest() 64 | return stub,request 65 | 66 | 67 | @connex_app.route('/', methods=['GET',"POST","OPTIONS"]) 68 | @cross_origin() 69 | def home(): 70 | return '''

<h1>Crowd Detection API</h1>
<p>Endpoint /predict with Image parameter returns count and heatmap.</p>
''' 72 | 73 | @connex_app.route("/predict", methods=["POST","OPTIONS"]) 74 | @cross_origin() 75 | def predict(): 76 | # initialize the data dictionary that will be returned from the 77 | # view 78 | start = time.time() 79 | st, req = create_tf_prediction_request() 80 | data = {"success": False} 81 | # print(type(request.get_json(force=True))) 82 | # print(request.get_json(force=True)) 83 | 84 | # ensure an image was properly uploaded to our endpoint 85 | if request.method == "POST" or request.method == "OPTIONS": 86 | if request.get_json(force=True) is not None: #request.files.get("image"): 87 | # read the image in PIL format 88 | # image = request.files["image"].read() 89 | ImageFile.LOAD_TRUNCATED_IMAGES = True 90 | img_data = request.get_json(force=True)['data'] 91 | v_area = request.get_json(force=True)['metadata']['area'] 92 | v_url = request.get_json(force=True)['metadata']['vidUrl'] 93 | v_units = request.get_json(force=True)['metadata']['units'] 94 | v_occtype = request.get_json(force=True)['metadata']['occType'] 95 | v_duration = request.get_json(force=True)['metadata']['duration'] 96 | v_threshold = request.get_json(force=True)['metadata']['threshold'] 97 | 98 | if len(img_data) % 4: 99 | # not a multiple of 4, add padding: 100 | img_data += '=' * (4 - len(img_data) % 4) 101 | 102 | # image_datare.sub('^data:image/.+;base64,', '', data['img']).decode('base64') 103 | image = Image.open(io.BytesIO(base64.b64decode(img_data))) 104 | # print("Image:"+ str(image)) 105 | print(os.getcwd()) 106 | # Create video name 107 | # v_name = ''.join(random.choices(string.ascii_uppercase +string.digits, k = 5)) 108 | # v_name = v_url 109 | 110 | # Create Dynamic Path 111 | img_path = os.path.join(connex_app.app.config['SNAPSHOT_FOLDER'], 112 | ''.join(random.choices(string.ascii_uppercase +string.digits, k = 10))) 113 | 114 | print(img_path) 115 | 116 | hmap_path = os.path.join(connex_app.app.config['HEATMAP_FOLDER'], 117 | ''.join(random.choices(string.ascii_uppercase +string.digits, k = 10))) 118 | 119 | print(hmap_path) 120 | classify_model_path = os.path.join(connex_app.app.config['CLASSIFY_MODEL_FILE']) 121 | 122 | # Save the clicked image 123 | image.save(img_path+'.png', "PNG") 124 | 125 | # session["snapshot_data"] = img_data 126 | # preprocess the image and prepare it for classification 127 | c_image = prepare_image(image,im_type="classify") 128 | 129 | # Set session for Classify 130 | # K.set_session(c_session) 131 | # req.model_spec.name = "classify" 132 | # req.inputs["input_image"].CopyFrom(tf.make_tensor_proto(c_image, dtype=types_pb2.DT_FLOAT)) 133 | req.model_spec.name = "vgg16_nohead" 134 | req.inputs["input_1"].CopyFrom(tf.make_tensor_proto(c_image, dtype=types_pb2.DT_FLOAT)) 135 | print("extracting") 136 | response = st.Predict(req, timeout=60.0) 137 | feat = tf.make_ndarray(response.outputs['block5_pool']) 138 | features = feat.reshape((feat.shape[0], 7 * 7 * 512)) 139 | # predictions = tf.make_ndarray(response.outputs['dense_11/Softmax:0']) 140 | 141 | # Classify the Image 142 | # predictions = classify_model.predict(c_image) 143 | # predicted_classes = np.argmax(predictions,axis=1) 144 | print("loading") 145 | clf = joblib.load(classify_model_path) 146 | print("classify") 147 | class_label = clf.predict(features) 148 | 149 | # print("Predcited Class (0-dense, 1-sparse): Class "+str(predicted_classes[0])) 150 | print("Predcited Class (0-dense, 1-sparse): Class "+str(class_label)) 151 | 152 | temp_img_bytes = io.BytesIO() 153 | image.save(temp_img_bytes, format='JPEG') 154 | 
ssdcnet_img_data = temp_img_bytes.getvalue() 155 | 156 | image = prepare_image(image) 157 | 158 | print("adding video to db") 159 | # video = Video(url="fav_mixed_tape.avi") 160 | # Hardcoding random load id for now. TODO: Change this. 161 | # load_id = random.randint(1, 37) 162 | occ_load = Occupancytype.query.filter_by(use=v_occtype).one_or_none() 163 | print("Occupant Type found: "+ str(occ_load.use)+ " "+ str(occ_load.id) ) 164 | # occ_load = Occupancytype.query.filter_by(id=load_id).one_or_none() 165 | 166 | # video = Video(url="MNTFJ.avi",vid_load_ref=occ_load) 167 | video = Video.query.filter_by(url=v_url,area=v_area,vid_load_ref=occ_load).one_or_none() 168 | print(type(video)) 169 | print("old:" + str(video)) 170 | # Workaround to check for uniqueness in Video table 171 | if video: 172 | video.last_updated = datetime.utcnow() 173 | else: 174 | video = Video(url=v_url, area=v_area, units=v_units, threshold=v_threshold, duration=v_duration,vid_load_ref=occ_load) 175 | print("current:" + str(video)) 176 | db.session.add(video) 177 | db.session.commit() 178 | # insert_command = Video.__table__.insert( 179 | # prefixes=['OR IGNORE'], 180 | # values=dict(url='fav_mixed_tape.avi') 181 | # ) 182 | # db.session.execute(insert_command) 183 | # st2, req2 = create_tf_prediction_request() 184 | 185 | # Important to del the input dict from the previous request if you need to reuse 186 | # the same request multiple times. Otherwise it appends another inputs field which gives 187 | # input shape mismatch error. 188 | del req.inputs['input_1'] 189 | 190 | if class_label == '1': 191 | # classify the input image and then initialize the list 192 | # of predictions to return to the client 193 | req.model_spec.name = "sparse_crowd" 194 | req.inputs["input_image"].CopyFrom(tf.make_tensor_proto(image, dtype=types_pb2.DT_FLOAT)) 195 | p_stime = time.time() 196 | # K.set_session(session) 197 | # p_hmap = loaded_model.predict(image) 198 | response = st.Predict(req, timeout=60.0) 199 | p_etime = time.time() 200 | 201 | ssdcnet_sparse_url = 'http://0.0.0.0:8443/predictions/ssdcnet_shb_gpu_2' 202 | 203 | # Run SSDCNet model 204 | p_stime_ts = time.time() 205 | ssdcnet_res = requests.post(ssdcnet_sparse_url,data=ssdcnet_img_data) 206 | p_etime_ts = time.time() 207 | ssdcnet_count = ssdcnet_res.text 208 | ssd = int(float(ssdcnet_count)) 209 | 210 | # ssdcnet_count = '50' # Fix batching in Torchserve 211 | 212 | p_hmap = tf.make_ndarray(response.outputs['y_out/Relu:0']) 213 | 214 | data["predict_time_ms"] = str(round((p_etime - p_stime)*1000)) 215 | data["predict_time_ts_ms"] = str(round((p_etime_ts - p_stime_ts)*1000)) 216 | count = int(np.sum(p_hmap)) 217 | 218 | average = (ssd + count) / 2 219 | 220 | 221 | print(p_hmap.shape) 222 | 223 | p_hmap = p_hmap.reshape(p_hmap.shape[1],p_hmap.shape[2]) 224 | print(p_hmap.shape) 225 | fig = plt.figure(frameon=False) 226 | # fig.set_size_inches(p_hmap.shape[0],p_hmap.shape[1]) 227 | ax = plt.Axes(fig, [0., 0., 1., 1.]) 228 | ax.set_axis_off() 229 | fig.add_axes(ax) 230 | ax.imshow(p_hmap, aspect='auto') 231 | ib = io.BytesIO() 232 | fig.savefig(ib,bbox_inches='tight', pad_inches=0) 233 | fig.savefig(hmap_path+'.png', format='png', bbox_inches='tight', pad_inches=0) 234 | ib.seek(0) 235 | new_image_string = base64.b64encode(ib.getvalue()).decode("utf-8") 236 | print(str(len(new_image_string))) 237 | 238 | snapshot = Snapshot(snap=img_path+".png", 239 | heatmap=hmap_path+".png", 240 | pred_class = "sparse", 241 | pred_count = count, 242 | pred_count_ssdcnet=ssdcnet_count, 
243 | video_id_ref=video) 244 | db.session.add(snapshot) 245 | 246 | r = {"class": "Sparse","count": str(int(round(count))), "ssdcnet_count": str(ssdcnet_count), "predicted_heatmap": str(new_image_string), "average": str(average) } 247 | data["predictions"] = r 248 | 249 | print("Predicted Count: " + str(round(count))) 250 | print("Predicted SSDCNet Count: " + str(ssdcnet_count)) 251 | 252 | else: 253 | 254 | ssdcnet_dense_url = 'http://0.0.0.0:8443/predictions/ssdcnet_sha_gpu_2' 255 | req.model_spec.name = "dense_crowd" 256 | req.inputs["input_image"].CopyFrom(tf.make_tensor_proto(image, dtype=types_pb2.DT_FLOAT)) 257 | 258 | p_stime = time.time() 259 | response = st.Predict(req, timeout=60.0) 260 | p_etime = time.time() 261 | 262 | # Run SSDCNet model 263 | p_stime_ts = time.time() 264 | ssdcnet_res = requests.post(ssdcnet_dense_url,data=ssdcnet_img_data) 265 | p_etime_ts = time.time() 266 | ssdcnet_count = ssdcnet_res.text 267 | ssd = int(float(ssdcnet_count)) 268 | # ssdcnet_count = '50' # Fix Torchserve batching 269 | 270 | p_hmap = tf.make_ndarray(response.outputs['y_out/Relu:0']) 271 | data["predict_time_ms"] = str(round((p_etime - p_stime)*1000)) 272 | data["predict_time_ts_ms"] = str(round((p_etime_ts - p_stime_ts)*1000)) 273 | count = int(np.sum(p_hmap)) 274 | 275 | average = (ssd + count) / 2 276 | 277 | 278 | print(p_hmap.shape) 279 | p_hmap = p_hmap.reshape(p_hmap.shape[1],p_hmap.shape[2]) 280 | print(p_hmap.shape) 281 | fig = plt.figure(frameon=False) 282 | # fig.set_size_inches(p_hmap.shape[0],p_hmap.shape[1]) 283 | ax = plt.Axes(fig, [0., 0., 1., 1.]) 284 | ax.set_axis_off() 285 | fig.add_axes(ax) 286 | ax.imshow(p_hmap, aspect='auto') 287 | ib = io.BytesIO() 288 | fig.savefig(ib,bbox_inches='tight', pad_inches=0) 289 | fig.savefig(hmap_path+'.png', format='png', bbox_inches='tight', pad_inches=0) 290 | ib.seek(0) 291 | new_image_string = base64.b64encode(ib.getvalue()).decode("utf-8") 292 | print(str(len(new_image_string))) 293 | 294 | snapshot = Snapshot(snap=img_path+".png", 295 | heatmap=hmap_path+".png", 296 | pred_class = "dense", 297 | pred_count = int(count), 298 | pred_count_ssdcnet=ssdcnet_count, 299 | video_id_ref=video) 300 | db.session.add(snapshot) 301 | 302 | r = {"class": "Dense","count": str(round(count)), "ssdcnet_count": str(ssdcnet_count), "predicted_heatmap": str(new_image_string) , "average": str(average)} 303 | data["predictions"] = r 304 | 305 | print("Predicted Count: " + str(round(count))) 306 | print("Predicted SSDCNet Count: " + str(ssdcnet_count)) 307 | 308 | db.session.commit() 309 | 310 | data["success"] = True 311 | end = time.time() 312 | data["total_time_ms"] = str(round((end - start)*1000)) 313 | resp = Response(json.dumps(data)) 314 | 315 | return resp 316 | 317 | 318 | # Create all the necessary DB tables 319 | config.db.create_all() 320 | 321 | # Get Row count in Occupancy Table 322 | tot_rows = db.session.query(Occupancytype).count() 323 | 324 | # Insert rows from csv into Occupancy table 325 | if tot_rows < 1: 326 | file_name = '../data/occupantLoadFactor.csv' 327 | df = pd.read_csv(file_name) 328 | for _, row in df.iterrows(): 329 | load = Occupancytype(use=row["Use"], load_ft2=row["ft2_person"], load_m2=row["m2_person"]) 330 | db.session.add(load) 331 | db.session.commit() 332 | 333 | if __name__ == "__main__": 334 | connex_app.run(port=8081) -------------------------------------------------------------------------------- /service/config.py: -------------------------------------------------------------------------------- 1 | import 
os 2 | import connexion 3 | from flask_sqlalchemy import SQLAlchemy 4 | from flask_marshmallow import Marshmallow 5 | 6 | 7 | # Get Parent Directory 8 | basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) 9 | 10 | print("Basedir: " + basedir) 11 | 12 | # Create the connexion instance, it creates a Flask App in the background 13 | connex_app = connexion.FlaskApp(__name__, specification_dir=basedir) 14 | 15 | # Get underlying flask app 16 | app = connex_app.app 17 | 18 | # Set SQLAlchemy Specific Configs 19 | app.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245' 20 | app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///crowd_counter.db' 21 | app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = 'False' 22 | app.config['SNAPSHOT_FOLDER'] = '../images/snapshots' 23 | app.config['HEATMAP_FOLDER'] = '../images/heatmaps' 24 | app.config['CLASSIFY_MODEL_FILE'] = '../serving_models/classify_model.pkl' 25 | 26 | # Create a DB instance 27 | db= SQLAlchemy(app,session_options={"autoflush": False}) 28 | 29 | # Initialize Marshmallow 30 | ma = Marshmallow(app) 31 | -------------------------------------------------------------------------------- /service/crowd_counter.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/service/crowd_counter.db -------------------------------------------------------------------------------- /service/models.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from config import db, ma 3 | 4 | class Video(db.Model): 5 | id = db.Column(db.Integer, primary_key=True) 6 | url = db.Column(db.String(250), nullable=False) 7 | units = db.Column(db.String(20), nullable=True) 8 | area = db.Column(db.Numeric(10,4), nullable=False, default=0.0) 9 | duration = db.Column(db.Numeric(10,4), nullable=False, default=0.0) 10 | threshold = db.Column(db.Numeric(10,4), nullable=False, default=0.0) 11 | date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) 12 | load_id = db.Column(db.Integer, db.ForeignKey('occupancytype.id'), nullable=False) 13 | snapshot = db.relationship('Snapshot', backref='video_id_ref', lazy=True, cascade="all,delete-orphan",) 14 | last_updated = db.Column(db.DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow) 15 | __table_args__ = (db.UniqueConstraint('url', 'area', 'load_id', name='video_load_uc'),) 16 | 17 | def __repr__(self): 18 | return f"Video('{self.id}','{self.url}', '{self.area}', '{self.duration}', '{self.units}',\ 19 | '{self.threshold}', '{self.vid_load_ref}', '{self.date_created}', '{self.last_updated}' )" 20 | 21 | 22 | class Snapshot(db.Model): 23 | snap = db.Column(db.Text, nullable=False) 24 | id = db.Column(db.Integer, primary_key=True) 25 | heatmap = db.Column(db.Text, nullable=False) 26 | pred_count = db.Column(db.Float, nullable=False) 27 | pred_class = db.Column(db.String(10), nullable=False) 28 | pred_count_ssdcnet = db.Column(db.Float, nullable=False) 29 | date_clicked = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) 30 | video_id = db.Column(db.Integer, db.ForeignKey('video.id', ondelete='CASCADE'), nullable=False) 31 | 32 | def __repr__(self): 33 | return f"Snapshot('{self.id}', '{self.snap}', '{self.heatmap}', '{self.pred_class}', \ 34 | '{self.pred_count}', '{self.pred_count_ssdcnet}', '{self.date_clicked}')" 35 | 36 | 37 | class 
Occupancytype(db.Model): 38 | use = db.Column(db.Text, nullable=False) 39 | id = db.Column(db.Integer, primary_key=True) 40 | load_m2 = db.Column(db.Float, nullable=False) 41 | load_ft2 = db.Column(db.Float, nullable=False) 42 | vid = db.relationship('Video', backref='vid_load_ref', lazy=True,cascade="save-update,merge",) 43 | 44 | def __repr__(self): 45 | return f"Occupancytype('{self.id}', '{self.use}', '{self.load_ft2}', '{self.load_m2}')" 46 | 47 | 48 | class VideoSchema(ma.ModelSchema): 49 | class Meta: 50 | model = Video 51 | sqla_session = db.session 52 | 53 | 54 | class SnapshotSchema(ma.ModelSchema): 55 | class Meta: 56 | model = Snapshot 57 | sqla_session = db.session 58 | 59 | 60 | class OccupancytypeSchema(ma.ModelSchema): 61 | class Meta: 62 | model = Occupancytype 63 | sqla_session = db.session 64 | -------------------------------------------------------------------------------- /service/occupancytype.py: -------------------------------------------------------------------------------- 1 | from flask import make_response, abort 2 | from config import db 3 | import os 4 | import datetime 5 | from models import Occupancytype, OccupancytypeSchema 6 | 7 | 8 | def read_all(): 9 | """ 10 | This function responds to a request for /api/occupancytype/all 11 | with the complete list of occupancy load data from the database 12 | :return: json string of list of occupancy load info 13 | 14 | """ 15 | 16 | # Create the list of occupancy types from our data 17 | all_rows = Occupancytype.query.order_by(Occupancytype.id).all() 18 | 19 | # Serialize the data for the response 20 | occload_schema = OccupancytypeSchema(many=True) 21 | 22 | data = occload_schema.dump(all_rows) 23 | return data -------------------------------------------------------------------------------- /service/snapshot.py: -------------------------------------------------------------------------------- 1 | from flask import make_response, abort 2 | from config import db 3 | import os 4 | import datetime 5 | from models import Snapshot, SnapshotSchema 6 | 7 | 8 | def read_all(): 9 | """ 10 | This function responds to a request for /api/snapshot 11 | with the complete list of snapshots in the database 12 | :return: json string of list of snapshots 13 | 14 | """ 15 | 16 | # Create the list of snapshots from our data 17 | all_snapshots = Snapshot.query.order_by(Snapshot.id).all() 18 | 19 | # Serialize the data for the response 20 | snapshot_schema = SnapshotSchema(many=True) 21 | 22 | data = snapshot_schema.dump(all_snapshots) 23 | return data 24 | 25 | 26 | def read_all_from_video(video_id): 27 | """ 28 | This function responds to a request for /api/snapshot/{video_id} 29 | with all matching snapshots for a video 30 | 31 | :param video_id: Id of the video whose snapshots to find 32 | :return: matched Snapshot array of objects 33 | 34 | """ 35 | 36 | # Read snapshot from our data 37 | snapshot = Snapshot.query.filter(Snapshot.video_id == video_id).all() 38 | 39 | # if found 40 | if snapshot is not None: 41 | 42 | # Serialize the data for the response 43 | snapshot_schema = SnapshotSchema(many=True) 44 | data = snapshot_schema.dump(snapshot) 45 | return data 46 | 47 | else: 48 | # Snapshot not found 49 | abort( 50 | 404, 51 | "Snapshots not found for Video ID : {video_id}".format(video_id=video_id), 52 | ) 53 | 54 | def read_one_from_video(video_id,snapshot_id): 55 | """ 56 | This function responds to a request for /api/snapshot/{video_id}/{snapshot_id} 57 | with one matching snapshot for a video 58 | 59 | :param video_id: Id of video to find 60 | :param
snapshot_id: Id of snapshot to find 61 | :return: matched snapshot 62 | 63 | """ 64 | 65 | # Read snapshot from our data 66 | snapshot = Snapshot.query.filter(Snapshot.video_id == video_id,Snapshot.id == snapshot_id).one_or_none() 67 | 68 | # if found 69 | if snapshot is not None: 70 | 71 | # Serialize the date for the response 72 | snapshot_schema = SnapshotSchema() 73 | data = snapshot_schema.dump(snapshot) 74 | return data 75 | 76 | else: 77 | # Snapshot not found 78 | abort( 79 | 404, 80 | "Snapshot not found for video {video_id} and snapshot: {snapshot_id}".format(video_id=video_id,snapshot_id=snapshot_id), 81 | ) 82 | 83 | 84 | # def create(snapshot): 85 | # """ 86 | # This function creates a new snapshot in the database with 87 | # provide snapshot data. 88 | 89 | # :param snapshot: snapshot to create in snapshot structure 90 | # :return: 201 on success, 406 on snapshot exists 91 | 92 | # """ 93 | # vname = snapshot.get("video_name") 94 | 95 | # existing_video = ( 96 | # Snapshot.query.filter(Snapshot.video_name == vname) 97 | # .one_or_none() 98 | # ) 99 | 100 | # # Check to insert snapshot 101 | # if existing_video is None: 102 | 103 | # # Create a snapshot instance using the schema 104 | # schema = VideoSchema() 105 | # new_video = schema.load(snapshot, session=db.session) 106 | 107 | # # Add the snapshot to the database 108 | # db.session.add(new_video) 109 | # db.session.commit() 110 | 111 | # # Serialize and return the newly created snapshot in the response 112 | # data = schema.dump(new_video) 113 | 114 | # return data, 201 115 | 116 | # # Otherwise, snapshot exists already 117 | # else: 118 | # abort( 119 | # 409, 120 | # "Snapshot with {vname} exists already".format( 121 | # vname=vname 122 | # ), 123 | # ) 124 | 125 | 126 | def delete(video_id,snapshot_id): 127 | """ 128 | This function deletes a snapshot for a given video_id 129 | :param video_id: Id of the video 130 | :param snapshot_id: Id of the snapshot to delete 131 | :return: 200 on successful delete, 404 if not found 132 | 133 | """ 134 | # Get the snapshot requested 135 | snapshot = Snapshot.query.filter(Snapshot.video_id == video_id, Snapshot.id == snapshot_id).one_or_none() 136 | 137 | # Check if found 138 | if snapshot is not None: 139 | os.remove(os.path.join(snapshot.heatmap)) 140 | os.remove(os.path.join(snapshot.snap)) 141 | db.session.delete(snapshot) 142 | db.session.commit() 143 | return make_response( 144 | "Snapshot with video: {video_id} and snapshot: {snapshot_id} deleted".format(video_id=video_id, snapshot_id = snapshot_id), 200 145 | ) 146 | 147 | # Otherwise, nope, didn't find that snapshot 148 | else: 149 | abort( 150 | 404, 151 | "Snapshot not found for video: {video_id} and snapshot {snapshot_id} ".format(video_id=video_id, snapshot_id = snapshot_id), 152 | ) 153 | 154 | 155 | def update(video_id, snapshot_id, snapshot): 156 | """ 157 | This function updates a particular snapshot for a given video_id 158 | :param video_id: Id of the video 159 | :param snapshot_id: Id of the snapshot to update 160 | :param snapshot Snapshot instance to update 161 | :return: 200 on successful update, 404 if not found and 409 is already exists 162 | 163 | """ 164 | # Get the video_id to update 165 | update_vid = Snapshot.query.filter(Snapshot.video_id == video_id, Snapshot.id == snapshot_id).one_or_none() 166 | 167 | # If Id does not exist 168 | if update_vid is None: 169 | abort( 170 | 404, 171 | "Snapshot not found for video : {video_id} and snapshot : {snapshot_id}".format(video_id=video_id, snapshot_id = 
snapshot_id), 172 | ) 173 | 174 | # Check for duplicate creation of another snapshot already existing 175 | elif ( 176 | update_vid is not None and (update_vid.snap == snapshot.get("snap") or update_vid.heatmap == snapshot.get("heatmap")) and update_vid.date_clicked == snapshot.get("date_clicked") 177 | ): 178 | abort( 179 | 409, 180 | "Snapshot with same image or heatmap location {snap} / {heatmap} exists already".format( 181 | snap=update_vid.snap, heatmap=update_vid.heatmap 182 | ), 183 | ) 184 | 185 | # Now update! 186 | else: 187 | 188 | # turn the passed in snapshot into a db object 189 | schema = SnapshotSchema() 190 | update = schema.load(snapshot, session=db.session) 191 | 192 | # Set the id to the snapshot we want to update 193 | update.id = snapshot_id 194 | 195 | # merge the new object into the old and commit it to the db 196 | db.session.merge(update) 197 | db.session.commit() 198 | 199 | # return updated snapshot in the response 200 | data = schema.dump(update_vid) 201 | 202 | return data, 200 203 | 204 | def get_last_week(video_id): 205 | """ 206 | This function returns all snapshots of a video from the last one-week interval 207 | :param video_id: Id of the video 208 | :return: 200 with the list of matching snapshots 209 | 210 | """ 211 | current_time = datetime.datetime.utcnow() 212 | week_ago = current_time - datetime.timedelta(weeks=1) 213 | 214 | snapshot_schema = SnapshotSchema(many=True) 215 | snaps_within_last_week = Snapshot.query.filter(Snapshot.video_id == video_id, Snapshot.date_clicked > week_ago).all() 216 | data = snapshot_schema.dump(snaps_within_last_week) 217 | 218 | return data, 200 219 | 220 | 221 | def get_last_month(video_id): 222 | """ 223 | This function returns all snapshots of a video from the last one-month interval 224 | :param video_id: Id of the video 225 | :return: 200 with the list of matching snapshots 226 | 227 | """ 228 | current_time = datetime.datetime.utcnow() 229 | month_ago = current_time - datetime.timedelta(weeks=4) 230 | 231 | snapshot_schema = SnapshotSchema(many=True) 232 | snaps_within_last_month = Snapshot.query.filter(Snapshot.video_id == video_id, Snapshot.date_clicked > month_ago).all() 233 | data = snapshot_schema.dump(snaps_within_last_month) 234 | 235 | return data, 200 236 | 237 | def get_last_day(video_id): 238 | """ 239 | This function returns all snapshots of a video from the last one-day interval 240 | :param video_id: Id of the video 241 | :return: 200 with the list of matching snapshots 242 | 243 | """ 244 | current_time = datetime.datetime.utcnow() 245 | day_ago = current_time - datetime.timedelta(days=1) 246 | 247 | snapshot_schema = SnapshotSchema(many=True) 248 | snaps_within_last_day = Snapshot.query.filter(Snapshot.video_id == video_id, Snapshot.date_clicked > day_ago).all() 249 | data = snapshot_schema.dump(snaps_within_last_day) 250 | 251 | return data, 200 -------------------------------------------------------------------------------- /service/video.py: -------------------------------------------------------------------------------- 1 | import os 2 | from flask import make_response, abort 3 | from config import db 4 | from models import Video, VideoSchema, Occupancytype, Snapshot, SnapshotSchema 5 | from datetime import datetime 6 | 7 | def read_all(): 8 | """ 9 | This function responds to a request for /api/video 10 | with the complete list of videos in the database 11 | :return: json string of list of videos 12 | 13 | """ 14 | 15 | # Create the list of videos from our data 16 | all_videos =
Video.query.order_by(Video.url).all() 17 | 18 | # Serialize the data for the response 19 | video_schema = VideoSchema(many=True) 20 | 21 | data = video_schema.dump(all_videos) 22 | return data 23 | 24 | 25 | def read_one(video_id): 26 | """ 27 | This function responds to a request for /api/video/{video_id} 28 | with one matching video from the videos 29 | 30 | :param video_id: Id of video to find 31 | :return: matched video 32 | 33 | """ 34 | 35 | # Read video from our data 36 | video = Video.query.filter(Video.id == video_id).one_or_none() 37 | 38 | # if found 39 | if video is not None: 40 | 41 | # Serialize the data for the response 42 | video_schema = VideoSchema() 43 | data = video_schema.dump(video) 44 | return data 45 | 46 | else: 47 | # Video not found 48 | abort( 49 | 404, 50 | "Video not found for Id: {video_id}".format(video_id=video_id), 51 | ) 52 | 53 | 54 | def create(video): 55 | """ 56 | This function creates a new video in the database with 57 | the provided video data. 58 | 59 | :param video: video to create in video structure 60 | :return: 201 on success, 409 if the video already exists 61 | 62 | """ 63 | vname = video.get("url") 64 | 65 | existing_video = ( 66 | Video.query.filter(Video.url == vname) 67 | .one_or_none() 68 | ) 69 | 70 | # Check to insert video 71 | if existing_video is None: 72 | 73 | # Create a video instance using the schema 74 | schema = VideoSchema() 75 | new_video = schema.load(video, session=db.session) 76 | 77 | # Add the video to the database 78 | db.session.add(new_video) 79 | db.session.commit() 80 | 81 | # Serialize and return the newly created video in the response 82 | data = schema.dump(new_video) 83 | 84 | return data, 201 85 | 86 | # Otherwise, video exists already 87 | else: 88 | existing_video.last_updated = datetime.utcnow() 89 | # db.session.merge(update) 90 | db.session.commit() 91 | abort( 92 | 409, 93 | "Video with {vname} exists already".format( 94 | vname=vname 95 | ), 96 | ) 97 | 98 | 99 | def delete(video_id): 100 | """ 101 | This function deletes a video using the video_id 102 | :param video_id: Id of the video to delete 103 | :return: 200 on successful delete, 404 if not found 104 | 105 | """ 106 | # Get the video requested 107 | video = Video.query.filter(Video.id == video_id).one_or_none() 108 | 109 | # Check if found 110 | if video is not None: 111 | # Get all Snapshots related to the video, delete static files stored for a video 112 | all_snapshots = Snapshot.query.filter(Snapshot.video_id == video_id).all() 113 | snapshot_schema = SnapshotSchema(many=True) 114 | all_snaps = snapshot_schema.dump(all_snapshots) 115 | print(str(all_snaps)) 116 | for pic in all_snaps: 117 | os.remove(os.path.join(pic['heatmap'])) 118 | os.remove(os.path.join(pic['snap'])) 119 | db.session.delete(video) 120 | db.session.commit() 121 | return make_response( 122 | "Video with ID: {video_id} deleted".format(video_id=video_id), 200 123 | ) 124 | 125 | # Otherwise, nope, didn't find that video 126 | else: 127 | abort( 128 | 404, 129 | "Video not found for Id: {video_id}".format(video_id=video_id), 130 | ) 131 | 132 | 133 | def update(video_id, video): 134 | """ 135 | This function updates an existing video using the video_id 136 | :param video_id: Id of the video to update 137 | :return: 200 on successful update, 404 if not found 138 | 139 | """ 140 | # Get the video_id to update 141 | update_vid = Video.query.filter(Video.id == video_id).one_or_none() 142 | print("Update: "+ str(update_vid)) 143 | 144 | # Get details to check uniqueness 145 | vname = video.get("url") 146 |
load_id = video.get("vid_load_ref") 147 | occ_ref = Occupancytype.query.filter(Occupancytype.id == load_id).one_or_none() 148 | 149 | existing_video = ( 150 | Video.query.filter(Video.url == vname, Video.vid_load_ref == occ_ref).one_or_none() 151 | ) 152 | 153 | # If Id does not exist 154 | if update_vid is None: 155 | abort( 156 | 404, 157 | "Video not found for Id: {video_id}".format(video_id=video_id), 158 | ) 159 | 160 | # # Check for duplicate creation of another video already existing (not required) 161 | # elif ( 162 | # existing_video is not None 163 | # and existing_video.id != video_id 164 | # and existing_video.video_name == vname 165 | # and existing_video.vid_load_ref == load_id 166 | # ): 167 | # abort( 168 | # 409, 169 | # "Video with {vname} and {loadid} exists already".format( 170 | # vname=vname,loadid=load_id 171 | # ), 172 | # ) 173 | 174 | # Now update! 175 | else: 176 | 177 | # turn the passed in video into a db object 178 | schema = VideoSchema() 179 | update = schema.load(video, session=db.session) 180 | print("Update: "+ str(update)) 181 | 182 | # Set the id to the video we want to update 183 | update.id = update_vid.id 184 | 185 | # merge the new object into the old and commit it to the db 186 | db.session.merge(update) 187 | db.session.commit() 188 | 189 | # return updated video in the response 190 | data = schema.dump(update_vid) 191 | 192 | return data, 200 193 | -------------------------------------------------------------------------------- /start_docker_services.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TF_CONTAINER_NAME='crowd_counter_tf_gpu' 4 | TORCH_CONTAINER_NAME='torch_models' 5 | # Download TF Serving models from S3 6 | mkdir -p serving_models 7 | cd serving_models 8 | 9 | # Install AWS CLI from here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html 10 | aws s3 cp s3://crowd-counter-models/Serving . --recursive 11 | cd .. 12 | 13 | # Download Torchserve models from S3 14 | mkdir -p torchserve_model_store 15 | cd torchserve_model_store 16 | aws s3 cp s3://crowd-counter-models/torchserve_models . --recursive 17 | cd .. 18 | 19 | sleep 5 20 | 21 | # Build and/or Start Tensorflow Serving Docker Service 22 | echo "Checking existing containers..." 23 | if [ ! "$(docker ps -q -f name=$TF_CONTAINER_NAME)" ]; then 24 | if [ "$(docker ps -aq -f status=exited -f name=$TF_CONTAINER_NAME)" ]; then 25 | # start existing container 26 | echo "Starting existing container $TF_CONTAINER_NAME ..." 27 | docker container start $TF_CONTAINER_NAME 28 | # Clean-up and start new container 29 | else 30 | echo "Clearing old versions of $TF_CONTAINER_NAME ..." 31 | docker container stop $TF_CONTAINER_NAME 32 | docker container rm $TF_CONTAINER_NAME 33 | echo "Starting new container with name $TF_CONTAINER_NAME ..." 34 | nvidia-docker run -d --name crowd_counter_tf_gpu -p 8500:8500 --mount type=bind,source=/home/ubuntu/mayub/Github/Crowd_Detection/serving_models/,target=/models/serving_models -t tensorflow/serving:1.15.0-gpu --per_process_gpu_memory_fraction=0.40 --model_config_file=/models/serving_models/models.config 35 | fi 36 | fi 37 | 38 | # Build torchserve docker service 39 | if [ ! "$(docker ps -q -f name=$TORCH_CONTAINER_NAME)" ]; then 40 | if [ "$(docker ps -aq -f status=exited -f name=$TORCH_CONTAINER_NAME)" ]; then 41 | # start existing container 42 | echo "Starting existing container $TORCH_CONTAINER_NAME ..."
43 | docker container start $TORCH_CONTAINER_NAME 44 | # Clean-up and start new container 45 | else 46 | echo "Building torchserve service image... " 47 | DOCKER_BUILDKIT=1 docker build --file Dockerfile_torchserve --build-arg BASE_IMAGE=nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 -t torchserve:latest . 48 | # Run container 49 | echo "Starting container with name $TORCH_CONTAINER_NAME ... " 50 | docker run --rm -it -d --name torch_models --gpus all -p 8443:8443 -p 8444:8444 torchserve:latest 51 | sleep 5 52 | fi 53 | fi 54 | 55 | # Register the SHA Dense model on Torchserve 56 | echo "Registering Dense model on Torchserve port 8444 ..." 57 | curl -k -X POST "http://0.0.0.0:8444/models?model_name=ssdcnet_sha_gpu_2&url=ssdcnet_sha_gpu_batch.mar&batch_size=2&max_batch_delay=100&initial_workers=1&synchronous=true" 58 | 59 | # Register the SHB Sparse model on Torchserve 60 | echo "Registering Sparse model on Torchserve port 8444 ..." 61 | curl -k -X POST "http://0.0.0.0:8444/models?model_name=ssdcnet_shb_gpu_2&url=ssdcnet_shb_gpu_batch.mar&batch_size=2&max_batch_delay=100&initial_workers=1&synchronous=true" 62 | 63 | echo "Torchserve Status - " 64 | curl "http://0.0.0.0:8443/ping" 65 | echo "Torchserve Models - " 66 | curl "http://0.0.0.0:8444/models" 67 | sleep 2 68 | 69 | # back and front end app 70 | echo "Starting Flask Server on Port 8081 and React UI on Port 8080..." 71 | docker-compose up -d 72 | -------------------------------------------------------------------------------- /start_services_gpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CONTAINER_NAME='crowd_counter_tf_gpu' 4 | TF_MODEL_LOG_FILE='logs/tf_docker.txt' 5 | PYTORCH_MODEL_LOG_FILE='logs/torchserve_docker.txt' 6 | FLASK_SERVER_LOG_FILE='logs/flask_server.txt' 7 | REACT_SERVER_LOG_FILE='logs/react_server.txt' 8 | 9 | # Kill services if already running 10 | # UI port 11 | sudo kill -9 $(sudo lsof -t -i:8080) 12 | # Flaskserver port 13 | sudo kill -9 $(sudo lsof -t -i:8081) 14 | 15 | 16 | # Starting TF-Docker container 17 | if [ ! "$(docker ps -q -f name=$CONTAINER_NAME)" ]; then 18 | if [ "$(docker ps -aq -f status=exited -f name=$CONTAINER_NAME)" ]; then 19 | # start existing container 20 | echo "Starting existing container $CONTAINER_NAME ..." 21 | docker container start $CONTAINER_NAME 22 | # Clean-up and start new container 23 | else 24 | echo "Clearing old versions of $CONTAINER_NAME ..." 25 | docker container stop $CONTAINER_NAME 26 | docker container rm $CONTAINER_NAME 27 | echo "Starting new container with name $CONTAINER_NAME. Registering logs in file $TF_MODEL_LOG_FILE ..." 28 | nvidia-docker run -d --name crowd_counter_tf_gpu -p 8500:8500 --mount type=bind,source=./serving_models/,target=/models/serving_models -t tensorflow/serving:1.15.0-gpu --per_process_gpu_memory_fraction=0.40 --model_config_file=/models/serving_models/models.config > $TF_MODEL_LOG_FILE 29 | fi 30 | fi 31 | 32 | source activate torchserve_gpu 33 | 34 | # Start the Torchserve empty server 35 | echo "Starting Pytorch model server and storing log in $PYTORCH_MODEL_LOG_FILE ..." 36 | 37 | # export PYTHONPATH=${PYTHONPATH}:/home/ubuntu/mayub/Github/SSDCNet (Not required as Zip file is included in .mar ) 38 | torchserve --start --ncs --model-store torchserve_model_store > $PYTORCH_MODEL_LOG_FILE 39 | sleep 5 40 | 41 | # Register the SHA Dense model on Torchserve 42 | echo "Registering Dense model on Torchserve port 8444 ..." 
curl -k -X POST "http://0.0.0.0:8444/models?model_name=ssdcnet_sha_gpu_2&url=ssdcnet_sha_gpu_batch.mar&batch_size=2&max_batch_delay=100&initial_workers=1&synchronous=true" 44 | 45 | # Register the SHB Sparse model on Torchserve 46 | echo "Registering Sparse model on Torchserve port 8444 ..." 47 | curl -k -X POST "http://0.0.0.0:8444/models?model_name=ssdcnet_shb_gpu_2&url=ssdcnet_shb_gpu_batch.mar&batch_size=2&max_batch_delay=100&initial_workers=1&synchronous=true" 48 | 49 | # Change envs to tensorflow 50 | echo "Changing conda environments..." 51 | source deactivate 52 | source activate crowd_count_env 53 | 54 | # Change to Flask Server Dir 55 | cd ./service 56 | 57 | # Start Flask Server 58 | echo "Starting Flask Server on Port 8081... Check log file $FLASK_SERVER_LOG_FILE for more details." 59 | python app.py > ../$FLASK_SERVER_LOG_FILE 2>&1 & 60 | # BACK_PID=$! 61 | # wait $BACK_PID 62 | cd .. 63 | # Start UI 64 | 65 | cd ./ui 66 | echo "Starting React UI on Port 8080... Check log file $REACT_SERVER_LOG_FILE for more details. " 67 | npm start > ../$REACT_SERVER_LOG_FILE 2>&1 & 68 | -------------------------------------------------------------------------------- /stop_docker_services.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FRONT_END='crowd_detection_frontend_1' 4 | BACK_END='crowd_detection_backend_1' 5 | TORCH_MODELS='torch_models' 6 | TF_MODELS='crowd_counter_tf_gpu' 7 | 8 | echo "Stopping React docker named $FRONT_END ..." 9 | docker container stop $FRONT_END 10 | 11 | echo "Stopping Flask docker named $BACK_END ..." 12 | docker container stop $BACK_END 13 | 14 | echo "Stopping TF Serving docker named $TF_MODELS ..." 15 | docker container stop $TF_MODELS 16 | 17 | echo "Stopping Torchserve docker named $TORCH_MODELS ..." 18 | docker container stop $TORCH_MODELS -------------------------------------------------------------------------------- /stop_services_gpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CONTAINER_NAME='crowd_counter_tf_gpu' 4 | 5 | # Stop the Docker Models (TensorFlow and Torchserve) 6 | echo "Stopping TF Container $CONTAINER_NAME ..." 7 | docker container stop $CONTAINER_NAME 8 | echo "Stopping Torchserve ..." 9 | source activate torchserve_gpu 10 | torchserve --stop 11 | source deactivate 12 | 13 | # Stop/kill the services (Flask - 8081 and React - 8080) 14 | echo "Stopping React and Flask running on PORT 8080 and 8081 ..."
15 | sudo kill -9 $(sudo lsof -t -i:8080) 16 | sudo kill -9 $(sudo lsof -t -i:8081) 17 | 18 | -------------------------------------------------------------------------------- /swagger.yaml: -------------------------------------------------------------------------------- 1 | swagger: "2.0" 2 | info: 3 | description: This is the swagger file Crowd Counter that goes with Server Code 4 | version: "1.0.0" 5 | title: Swagger Rest Article 6 | consumes: 7 | - application/json 8 | produces: 9 | - application/json 10 | 11 | basePath: /api 12 | 13 | # Paths supported by the server application 14 | paths: 15 | /video: 16 | get: 17 | operationId: video.read_all 18 | tags: 19 | - Video 20 | summary: Read all videos present sorted by Name 21 | description: Read all videos present sorted by Name 22 | responses: 23 | 200: 24 | description: Successfully read video set operation 25 | schema: 26 | type: array 27 | items: 28 | properties: 29 | id: 30 | type: string 31 | description: Id of the video 32 | url: 33 | type: string 34 | description: URL of the video 35 | duration: 36 | type: string 37 | description: Total Duration of the video 38 | area: 39 | type: string 40 | description: In-scene area of the Video 41 | units: 42 | type: string 43 | description: metric for in-scene video area 44 | threshold: 45 | type: string 46 | description: NFPA Calculated max value for provided occupancy and video area 47 | date_created: 48 | type: string 49 | description: Creation timestamp of the Video 50 | last_updated: 51 | type: string 52 | description: Last Updated timestamp of the Video 53 | vid_load_ref: 54 | type: integer 55 | description: Occupancy type id linked to this video 56 | 57 | post: 58 | operationId: video.create 59 | tags: 60 | - Video 61 | summary: Create a video 62 | description: Create a new video 63 | parameters: 64 | - name: video 65 | in: body 66 | description: Video to create 67 | required: True 68 | schema: 69 | type: object 70 | required: 71 | - url 72 | - vid_load_ref 73 | properties: 74 | url: 75 | type: string 76 | description: URL of the video to create 77 | duration: 78 | type: string 79 | description: Total Duration of the video 80 | area: 81 | type: string 82 | description: In-scene area of the video 83 | units: 84 | type: string 85 | description: metric for in-scene video area 86 | threshold: 87 | type: string 88 | description: NFPA Calculated max value for provided occupancy and video area 89 | date_created: 90 | type: string 91 | description: Creation Timestamp of the video 92 | last_updated: 93 | type: string 94 | description: Last Updated timestamp of the Video 95 | vid_load_ref: 96 | type: integer 97 | description: Occupancy type id linked to this video 98 | responses: 99 | 201: 100 | description: Successfully created video 101 | schema: 102 | properties: 103 | id: 104 | type: string 105 | description: Id of the video 106 | url: 107 | type: string 108 | description: URL of the video 109 | duration: 110 | type: string 111 | description: Total Duration of the video 112 | area: 113 | type: string 114 | description: In-scene area of the video 115 | units: 116 | type: string 117 | description: metric for in-scene video area 118 | threshold: 119 | type: string 120 | description: NFPA Calculated max value for provided occupancy and video area 121 | date_created: 122 | type: string 123 | description: Creation Timestamp of the video 124 | last_updated: 125 | type: string 126 | description: Last Updated timestamp of the Video 127 | vid_load_ref: 128 | type: integer 129 | description: Occupancy 
type id linked to this video 130 | 131 | /video/{video_id}: 132 | get: 133 | operationId: video.read_one 134 | tags: 135 | - Video 136 | summary: Read details of one video 137 | description: Read details of one video 138 | parameters: 139 | - name: video_id 140 | in: path 141 | description: Id of the video to get fetch 142 | type: integer 143 | required: True 144 | responses: 145 | 200: 146 | description: Successfully read video from data 147 | schema: 148 | type: object 149 | properties: 150 | id: 151 | type: string 152 | description: Id of the video 153 | url: 154 | type: string 155 | description: URL of the video 156 | duration: 157 | type: string 158 | description: Total Duration of the video 159 | area: 160 | type: string 161 | description: In-scene area of the video 162 | units: 163 | type: string 164 | description: metric for in-scene video area 165 | threshold: 166 | type: string 167 | description: NFPA Calculated max value for provided occupancy and video area 168 | date_created: 169 | type: string 170 | description: Creation Timestamp of the video 171 | last_updated: 172 | type: string 173 | description: Last Updated timestamp of the Video 174 | vid_load_ref: 175 | type: integer 176 | description: Occupancy type id linked to this video 177 | put: 178 | operationId: video.update 179 | tags: 180 | - Video 181 | summary: Update a video 182 | description: Update a video 183 | parameters: 184 | - name: video_id 185 | in: path 186 | description: Id the video to update 187 | type: integer 188 | required: True 189 | - name: video 190 | in: body 191 | schema: 192 | type: object 193 | required: 194 | - id 195 | - url 196 | - vid_load_ref 197 | properties: 198 | id: 199 | type: integer 200 | description: ID of the video 201 | url: 202 | type: string 203 | description: URL of the video 204 | duration: 205 | type: string 206 | description: Total Duration of the video 207 | area: 208 | type: string 209 | description: In-scene area of the video 210 | units: 211 | type: string 212 | description: metric for in-scene video area 213 | threshold: 214 | type: string 215 | description: NFPA Calculated max value for provided occupancy and video area 216 | date_created: 217 | type: string 218 | description: Creation Timestamp of the video 219 | last_updated: 220 | type: string 221 | description: Last Updated timestamp of the Video 222 | vid_load_ref: 223 | type: integer 224 | description: Occupancy type id linked to this video 225 | responses: 226 | 200: 227 | description: Successfully updated video 228 | schema: 229 | properties: 230 | id: 231 | type: string 232 | description: Id of the video 233 | url: 234 | type: string 235 | description: URL of the video 236 | duration: 237 | type: string 238 | description: Total Duration of the video 239 | area: 240 | type: string 241 | description: In-scene area of the video 242 | units: 243 | type: string 244 | description: metric for in-scene video area 245 | threshold: 246 | type: string 247 | description: NFPA Calculated max value for provided occupancy and video area 248 | date_created: 249 | type: string 250 | description: Creation Timestamp of the video 251 | last_updated: 252 | type: string 253 | description: Last Updated timestamp of the Video 254 | vid_load_ref: 255 | type: integer 256 | description: Occupancy type id linked to this video 257 | 258 | delete: 259 | operationId: video.delete 260 | tags: 261 | - Video 262 | summary: Delete a video from the list 263 | description: Delete a video 264 | parameters: 265 | - name: video_id 266 | in: path 267 | 
type: integer 268 | description: Id of the video to delete 269 | required: true 270 | responses: 271 | 200: 272 | description: Successfully deleted a video 273 | 274 | # Snapshot related Endpoints 275 | /snapshot: 276 | get: 277 | operationId: snapshot.read_all 278 | tags: 279 | - Snapshot 280 | summary: Read all snapshots sorted by ID 281 | description: Read all snapshots sorted by ID 282 | responses: 283 | 200: 284 | description: Successfully read snapshot operation 285 | schema: 286 | type: array 287 | items: 288 | properties: 289 | id: 290 | type: string 291 | description: Id of the snapshot 292 | snap: 293 | type: string 294 | description: Static File Location of the snapshot in jpeg format 295 | heatmap: 296 | type: string 297 | description: Static File Location of the snapshot in jpeg format 298 | pred_class: 299 | type: string 300 | description: Predicted Class from the Classification model 301 | pred_count: 302 | type: string 303 | description: Predicted Count from the Crowd Detection model 304 | pred_count_ssdcnet: 305 | type: string 306 | description: Predicted Count from SSDCNet model 307 | date_clicked: 308 | type: string 309 | description: Timestamp of the snapshot clicked (Datetime format) 310 | video_id_ref: 311 | type: string 312 | description: Video ID liked to this snapshot 313 | 314 | # Reads all snapshots for a particular video 315 | /snapshot/{video_id}: 316 | get: 317 | operationId: snapshot.read_all_from_video 318 | tags: 319 | - Snapshot 320 | summary: Read all snapshots for a given video 321 | description: Read all snapshots for a given video 322 | parameters: 323 | - name: video_id 324 | in: path 325 | description: Id of the video to get snapshots 326 | type: integer 327 | required: True 328 | responses: 329 | 200: 330 | description: Successfully read snapshots for given ID. 331 | schema: 332 | type: array 333 | items: 334 | properties: 335 | id: 336 | type: string 337 | description: Id of the snapshot 338 | snap: 339 | type: string 340 | description: Static File Location of the snapshot in jpeg format 341 | heatmap: 342 | type: string 343 | description: Static File Location of the snapshot in jpeg format 344 | pred_class: 345 | type: string 346 | description: Predicted Class from the Classification model 347 | pred_count: 348 | type: string 349 | description: Predicted Count from the Crowd Detection model 350 | pred_count_ssdcnet: 351 | type: string 352 | description: Predicted Count from SSDCNet model 353 | date_clicked: 354 | type: string 355 | description: Timestamp of the snapshot clicked (Datetime format) 356 | video_id_ref: 357 | type: string 358 | description: Video ID liked to this snapshot 359 | 360 | # Reads or modifies one snapshot for a given video 361 | /snapshot/{video_id}/{snapshot_id}: 362 | get: 363 | operationId: snapshot.read_one_from_video 364 | tags: 365 | - Snapshot 366 | summary: Read one snapshot for a given video 367 | description: Read one snapshot for a given video 368 | parameters: 369 | - name: video_id 370 | in: path 371 | description: Id of the video 372 | type: integer 373 | required: True 374 | - name: snapshot_id 375 | in: path 376 | description: Id of the snapshot 377 | type: integer 378 | required: True 379 | responses: 380 | 200: 381 | description: Successfully read snapshot for given ID. 
382 | schema: 383 | type: object 384 | properties: 385 | id: 386 | type: string 387 | description: Id of the snapshot 388 | snap: 389 | type: string 390 | description: Static File Location of the snapshot in jpeg format 391 | heatmap: 392 | type: string 393 | description: Static File Location of the snapshot in jpeg format 394 | pred_class: 395 | type: string 396 | description: Predicted Class from the Classification model 397 | pred_count: 398 | type: string 399 | description: Predicted Count from the Crowd Detection model 400 | pred_count_ssdcnet: 401 | type: string 402 | description: Predicted Count from SSDCNet model 403 | date_clicked: 404 | type: string 405 | description: Timestamp of the snapshot clicked (Datetime format) 406 | video_id_ref: 407 | type: string 408 | description: Video ID liked to this snapshot 409 | put: 410 | operationId: snapshot.update 411 | tags: 412 | - Snapshot 413 | summary: Update a Snapshot instance 414 | description: Update a Snapshot instance 415 | parameters: 416 | - name: video_id 417 | in: path 418 | description: Id the video in reference 419 | type: integer 420 | required: True 421 | - name: snapshot_id 422 | in: path 423 | description: Id the snapshot to update 424 | type: integer 425 | required: True 426 | - name: snapshot 427 | in: body 428 | schema: 429 | type: object 430 | required: 431 | - id 432 | properties: 433 | id: 434 | type: string 435 | description: Id of the snapshot 436 | snap: 437 | type: string 438 | description: Static File Location of the snapshot in jpeg format 439 | heatmap: 440 | type: string 441 | description: Static File Location of the snapshot in jpeg format 442 | pred_class: 443 | type: string 444 | description: Predicted Class from the Classification model 445 | pred_count: 446 | type: string 447 | description: Predicted Count from the Crowd Detection model 448 | pred_count_ssdcnet: 449 | type: string 450 | description: Predicted Count from SSDCNet model 451 | date_clicked: 452 | type: string 453 | description: Timestamp of the snapshot clicked (Datetime format) 454 | responses: 455 | 200: 456 | description: Successfully updated video 457 | schema: 458 | properties: 459 | id: 460 | type: string 461 | description: Id of the snapshot 462 | snap: 463 | type: string 464 | description: Static File Location of the snapshot in jpeg format 465 | heatmap: 466 | type: string 467 | description: Static File Location of the snapshot in jpeg format 468 | pred_class: 469 | type: string 470 | description: Predicted Class from the Classification model 471 | pred_count: 472 | type: string 473 | description: Predicted Count from the Crowd Detection model 474 | pred_count_ssdcnet: 475 | type: string 476 | description: Predicted Count from SSDCNet model 477 | date_clicked: 478 | type: string 479 | description: Timestamp of the snapshot clicked (Datetime format) 480 | 481 | delete: 482 | operationId: snapshot.delete 483 | tags: 484 | - Snapshot 485 | summary: Delete a snapshot from the list 486 | description: Delete a snapshot from the list 487 | parameters: 488 | - name: video_id 489 | in: path 490 | type: integer 491 | description: Id of the video in reference 492 | required: true 493 | - name: snapshot_id 494 | in: path 495 | type: integer 496 | description: Id of the snapshot to delete 497 | required: true 498 | responses: 499 | 200: 500 | description: Successfully deleted a video 501 | 502 | # Get last week snapshots for a video 503 | /snapshot/get_last_week/{video_id}: 504 | get: 505 | operationId: snapshot.get_last_week 506 | tags: 507 | 
- Snapshot 508 | summary: Read all snapshots for one week interval 509 | description: Read all snapshots for one week interval 510 | parameters: 511 | - name: video_id 512 | in: path 513 | description: Id of the video to get snapshots 514 | type: integer 515 | required: True 516 | responses: 517 | 200: 518 | description: Successfully read snapshots for given ID. 519 | schema: 520 | type: array 521 | items: 522 | properties: 523 | id: 524 | type: string 525 | description: Id of the snapshot 526 | snap: 527 | type: string 528 | description: Static File Location of the snapshot in jpeg format 529 | heatmap: 530 | type: string 531 | description: Static File Location of the snapshot in jpeg format 532 | pred_class: 533 | type: string 534 | description: Predicted Class from the Classification model 535 | pred_count: 536 | type: string 537 | description: Predicted Count from the Crowd Detection model 538 | pred_count_ssdcnet: 539 | type: string 540 | description: Predicted Count from SSDCNet model 541 | date_clicked: 542 | type: string 543 | description: Timestamp of the snapshot clicked (Datetime format) 544 | video_id_ref: 545 | type: string 546 | description: Video ID liked to this snapshot 547 | 548 | # Get last month snapshots for a video 549 | /snapshot/get_last_month/{video_id}: 550 | get: 551 | operationId: snapshot.get_last_month 552 | tags: 553 | - Snapshot 554 | summary: Read all snapshots for one month interval 555 | description: Read all snapshots for one month interval 556 | parameters: 557 | - name: video_id 558 | in: path 559 | description: Id of the video to get snapshots 560 | type: integer 561 | required: True 562 | responses: 563 | 200: 564 | description: Successfully read snapshots for given ID. 565 | schema: 566 | type: array 567 | items: 568 | properties: 569 | id: 570 | type: string 571 | description: Id of the snapshot 572 | snap: 573 | type: string 574 | description: Static File Location of the snapshot in jpeg format 575 | heatmap: 576 | type: string 577 | description: Static File Location of the snapshot in jpeg format 578 | pred_class: 579 | type: string 580 | description: Predicted Class from the Classification model 581 | pred_count: 582 | type: string 583 | description: Predicted Count from the Crowd Detection model 584 | pred_count_ssdcnet: 585 | type: string 586 | description: Predicted Count from SSDCNet model 587 | date_clicked: 588 | type: string 589 | description: Timestamp of the snapshot clicked (Datetime format) 590 | video_id_ref: 591 | type: string 592 | description: Video ID liked to this snapshot 593 | 594 | # Get last day snapshots for a video 595 | /snapshot/get_last_day/{video_id}: 596 | get: 597 | operationId: snapshot.get_last_day 598 | tags: 599 | - Snapshot 600 | summary: Read all snapshots for one day interval 601 | description: Read all snapshots for one day interval 602 | parameters: 603 | - name: video_id 604 | in: path 605 | description: Id of the video to get snapshots 606 | type: integer 607 | required: True 608 | responses: 609 | 200: 610 | description: Successfully read snapshots for given ID. 
611 | schema: 612 | type: array 613 | items: 614 | properties: 615 | id: 616 | type: string 617 | description: Id of the snapshot 618 | snap: 619 | type: string 620 | description: Static File Location of the snapshot in jpeg format 621 | heatmap: 622 | type: string 623 | description: Static File Location of the snapshot in jpeg format 624 | pred_class: 625 | type: string 626 | description: Predicted Class from the Classification model 627 | pred_count: 628 | type: string 629 | description: Predicted Count from the Crowd Detection model 630 | pred_count_ssdcnet: 631 | type: string 632 | description: Predicted Count from SSDCNet model 633 | date_clicked: 634 | type: string 635 | description: Timestamp of the snapshot clicked (Datetime format) 636 | video_id_ref: 637 | type: string 638 | description: Video ID liked to this snapshot 639 | 640 | # Get all data for Occupancytype 641 | /occupancytype/all: 642 | get: 643 | operationId: occupancytype.read_all 644 | tags: 645 | - Occupancytype 646 | summary: Returns all data for occupancy loads 647 | description: Returns all data for occupancy loads 648 | responses: 649 | 200: 650 | description: Successfully returned occupancy info. 651 | schema: 652 | type: array 653 | items: 654 | properties: 655 | id: 656 | type: string 657 | description: Id of the Occupancy 658 | use: 659 | type: string 660 | description: Occupancy type 661 | load_ft2: 662 | type: string 663 | description: Load measure in square feet 664 | load_m2: 665 | type: string 666 | description: Load measure in square meter rounded to one decimal point 667 | -------------------------------------------------------------------------------- /ui/.dockerignore: -------------------------------------------------------------------------------- 1 | # Files to ignore while building Docker image for the app 2 | node_modules/* 3 | -------------------------------------------------------------------------------- /ui/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # production 12 | /build 13 | 14 | # misc 15 | .DS_Store 16 | .env.local 17 | .env.development.local 18 | .env.test.local 19 | .env.production.local 20 | 21 | npm-debug.log* 22 | yarn-debug.log* 23 | yarn-error.log* 24 | -------------------------------------------------------------------------------- /ui/Dockerfile: -------------------------------------------------------------------------------- 1 | # pull official base image 2 | FROM node:latest 3 | 4 | RUN mkdir -p /usr/src/app 5 | 6 | # set working directory 7 | WORKDIR /usr/src/app 8 | 9 | # install app dependencies 10 | COPY ./ui/package.json /usr/src/app/ 11 | 12 | RUN npm install 13 | 14 | # start app 15 | CMD ["npm", "start"] -------------------------------------------------------------------------------- /ui/README.md: -------------------------------------------------------------------------------- 1 | # Crowd_Detection 2 | Detecting large crowds with Videos and Camera feeds 3 | 4 | 5 | ## Running 6 | 7 | Start Flask Server: 8 | 9 | `python service\app.py ` 10 | 11 | Post image Request (separate terminal): 12 | 13 | `curl -X POST -F image=@"service/IMG_4.jpg" 'http://127.0.0.1:8080/predict'` 14 | 15 | Data Returned: 16 | ```javascript 17 | { predict_time_ms":"760","predictions":{"count":"154.0","heatmap":"[[[[0.0928445 ]\n [0.17732866]\n [0.17231198]\n ...\n [0.02326113]\n [0.04403671]\n [0.04160059]]\n\n [[0.06391969]\n [0.1233318 ]\n [0.1385351 ]\n ...\n [0.02002291]\n [0.05446133]\n [0.04844731]]\n\n [[0.07501812]\n [0.15517338]\n [0.17023663]\n ...\n [0.08882342]\n [0.05094147]\n [0.05723583]]\n\n ...\n\n [[0.00743099]\n [0.00695727]\n [0.01426582]\n ...\n [0.00754658]\n [0.00372858]\n [0.00793823]]\n\n [[0.01017613]\n [0.00693572]\n [0.01583 ]\n ...\n [0.00631961]\n [0.01035042]\n [0.01273389]]\n\n [[0.00763646]\n [0.00454946]\n [0.0060876 ]\n ...\n [0.00599788]\n [0.01181053]\n [0.01865886]]]]"},"success":true,"total_time_ms":"775"} 18 | -------------------------------------------------------------------------------- /ui/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "crowd-counter", 3 | "version": "0.1.0", 4 | "private": true, 5 | "dependencies": { 6 | "@material-ui/core": "^4.9.5", 7 | "@testing-library/jest-dom": "^4.2.4", 8 | "@testing-library/react": "^9.4.0", 9 | "@testing-library/user-event": "^7.2.1", 10 | "add": "^2.0.6", 11 | "axios": "^0.19.2", 12 | "base64-img": "^1.0.4", 13 | "bootstrap": "^4.4.1", 14 | "capture-video-frame": "^0.1.3", 15 | "chart.js": "^2.9.3", 16 | "d3": "^5.15.0", 17 | "d3-scale": "^3.2.1", 18 | "image-thumbnail": "^1.0.7", 19 | "node-fetch": "^2.6.0", 20 | "or": "^0.2.0", 21 | "react": "^16.13.1", 22 | "react-bootstrap": "^1.0.0-beta.16", 23 | "react-bootstrap-sidebar": "0.0.1", 24 | "react-chartjs-2": "^2.9.0", 25 | "react-dom": "^16.13.1", 26 | "react-image-gallery": "^1.0.5", 27 | "react-image-lightbox": "^5.1.1", 28 | "react-images": "^1.1.0", 29 | "react-native": "^0.62.2", 30 | "react-native-elements": "^1.2.7", 31 | "react-native-fs": "^2.16.2", 32 | "react-native-image-gallery": "^2.1.5", 33 | "react-photo-gallery": "^8.0.0", 34 | "react-player": "^1.14.2", 35 | "react-responsive-carousel": "^3.1.51", 36 | "react-scripts": "^3.4.0", 37 | "reactstrap": "^8.4.1", 38 | "save-file": "^2.3.1", 39 | "screenfull": "^5.0.1", 40 | "yarn": "^1.22.1" 41 | }, 42 | "scripts": { 43 | "start": "PORT=8080 react-scripts start", 44 | 
"build": "react-scripts build", 45 | "test": "react-scripts test", 46 | "eject": "react-scripts eject" 47 | }, 48 | "eslintConfig": { 49 | "extends": "react-app" 50 | }, 51 | "browserslist": { 52 | "production": [ 53 | ">0.2%", 54 | "not dead", 55 | "not op_mini all" 56 | ], 57 | "development": [ 58 | "last 1 chrome version", 59 | "last 1 firefox version", 60 | "last 1 safari version" 61 | ] 62 | }, 63 | "babel": { 64 | "presets": [ 65 | "@babel/preset-env", 66 | "@babel/preset-react" 67 | ], 68 | "plugins": [ 69 | "@babel/plugin-proposal-class-properties" 70 | ] 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /ui/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/ui/public/favicon.ico -------------------------------------------------------------------------------- /ui/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 12 | 13 | 14 | 15 | 19 | 20 | 29 | React App 30 | 31 | 32 | 33 |
34 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /ui/public/logo192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/ui/public/logo192.png -------------------------------------------------------------------------------- /ui/public/logo512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/ui/public/logo512.png -------------------------------------------------------------------------------- /ui/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "React App", 3 | "name": "Create React App Sample", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "64x64 32x32 24x24 16x16", 8 | "type": "image/x-icon" 9 | }, 10 | { 11 | "src": "logo192.png", 12 | "type": "image/png", 13 | "sizes": "192x192" 14 | }, 15 | { 16 | "src": "logo512.png", 17 | "type": "image/png", 18 | "sizes": "512x512" 19 | } 20 | ], 21 | "start_url": ".", 22 | "display": "standalone", 23 | "theme_color": "#000000", 24 | "background_color": "#ffffff" 25 | } 26 | -------------------------------------------------------------------------------- /ui/public/robots.txt: -------------------------------------------------------------------------------- 1 | # https://www.robotstxt.org/robotstxt.html 2 | User-agent: * 3 | -------------------------------------------------------------------------------- /ui/src/App.css: -------------------------------------------------------------------------------- 1 | .App { 2 | height: 100%; 3 | width: 100%; 4 | margin: 0; 5 | } 6 | 7 | .quadrant Paper { 8 | background-color: lightgray; 9 | } 10 | 11 | .content { 12 | margin: auto; 13 | margin-top: 20px; 14 | width: 75%; 15 | height: auto; 16 | 17 | text-align: center; 18 | border-color: black 19 | } 20 | 21 | .Button { 22 | float: right; 23 | vertical-align: top; 24 | } 25 | 26 | input[type=range] { 27 | -webkit-appearance: none; 28 | margin: 18px 0; 29 | width: 100%; 30 | } 31 | input[type=range]:focus { 32 | outline: none; 33 | } 34 | input[type=range]::-webkit-slider-runnable-track { 35 | width: 100%; 36 | height: 8.4px; 37 | cursor: pointer; 38 | animate: 0.2s; 39 | box-shadow: 1px 1px 1px #000000, 0px 0px 1px #0d0d0d; 40 | background: #3377FF; 41 | border-radius: 1.3px; 42 | border: 0.2px solid #010101; 43 | } 44 | input[type=range]::-webkit-slider-thumb { 45 | box-shadow: 1px 1px 1px #000000, 0px 0px 1px #0d0d0d; 46 | border: 1px solid #000000; 47 | height: 36px; 48 | width: 16px; 49 | border-radius: 3px; 50 | background: #ffffff; 51 | cursor: pointer; 52 | -webkit-appearance: none; 53 | margin-top: -14px; 54 | } 55 | input[type=range]:focus::-webkit-slider-runnable-track { 56 | background: #3377FF; 57 | } 58 | input[type=range]::-moz-range-track { 59 | width: 100%; 60 | height: 8.4px; 61 | cursor: pointer; 62 | animate: 0.2s; 63 | box-shadow: 1px 1px 1px #000000, 0px 0px 1px #0d0d0d; 64 | background: #3071a9; 65 | border-radius: 1.3px; 66 | border: 0.2px solid #010101; 67 | } 68 | input[type=range]::-moz-range-thumb { 69 | box-shadow: 1px 1px 1px #000000, 0px 0px 1px #0d0d0d; 70 | border: 1px solid #000000; 71 | height: 36px; 72 | width: 16px; 73 | border-radius: 3px; 74 | background: #ffffff; 75 | cursor: pointer; 76 | } 77 | 
input[type=range]::-ms-track { 78 | width: 100%; 79 | height: 8.4px; 80 | cursor: pointer; 81 | animate: 0.2s; 82 | background: transparent; 83 | border-color: transparent; 84 | border-width: 16px 0; 85 | color: transparent; 86 | } 87 | input[type=range]::-ms-fill-lower { 88 | background: #2a6495; 89 | border: 0.2px solid #010101; 90 | border-radius: 2.6px; 91 | box-shadow: 1px 1px 1px #000000, 0px 0px 1px #0d0d0d; 92 | } 93 | input[type=range]::-ms-fill-upper { 94 | background: #FFE033; 95 | border: 0.2px solid #010101; 96 | border-radius: 2.6px; 97 | box-shadow: 1px 1px 1px #000000, 0px 0px 1px #0d0d0d; 98 | } 99 | input[type=range]::-ms-thumb { 100 | box-shadow: 1px 1px 1px #000000, 0px 0px 1px #0d0d0d; 101 | border: 1px solid #000000; 102 | height: 36px; 103 | width: 16px; 104 | border-radius: 3px; 105 | background: #ffffff; 106 | cursor: pointer; 107 | } 108 | input[type=range]:focus::-ms-fill-lower { 109 | background: #FFE033; 110 | } 111 | input[type=range]:focus::-ms-fill-upper { 112 | background: #FFE033; 113 | } 114 | 115 | body { 116 | padding: 30px; 117 | } 118 | 119 | .themed-container { 120 | border: 1px solid; 121 | border-radius: 8px; 122 | margin:10px; 123 | float:left; 124 | } 125 | 126 | img { 127 | padding: 8px; 128 | } -------------------------------------------------------------------------------- /ui/src/App.js: -------------------------------------------------------------------------------- 1 | 2 | import React, { Component } from 'react'; 3 | import logo from './logo_fprf.png'; 4 | import './App.css'; 5 | import Player from './components/Player' 6 | 7 | 8 | function App() { 9 | return ( 10 |
11 | 12 | 13 |
); 14 | } 15 | 16 | export default App; -------------------------------------------------------------------------------- /ui/src/App.test.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { render } from '@testing-library/react'; 3 | import App from './App'; 4 | 5 | test('renders learn react link', () => { 6 | const { getByText } = render(); 7 | const linkElement = getByText(/learn react/i); 8 | expect(linkElement).toBeInTheDocument(); 9 | }); 10 | -------------------------------------------------------------------------------- /ui/src/National-Fire-Protection-Association-Logo1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/ui/src/National-Fire-Protection-Association-Logo1.png -------------------------------------------------------------------------------- /ui/src/components/Card.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { withStyles } from '@material-ui/core/styles'; 3 | import Paper from '@material-ui/core/Button'; 4 | 5 | const StyledPaper = withStyles({ 6 | root: { 7 | background: 'linear-gradient(45deg, #FE6B8B 30%, #FF8E53 90%)', 8 | borderRadius: 3, 9 | border: 0, 10 | color: 'white', 11 | height: 48, 12 | padding: '0 30px', 13 | boxShadow: '0 3px 5px 2px rgba(255, 105, 135, .3)', 14 | }, 15 | label: { 16 | textTransform: 'capitalize', 17 | }, 18 | })(Paper); 19 | 20 | export default function ClassesShorthand() { 21 | return classes shorthand; 22 | } -------------------------------------------------------------------------------- /ui/src/components/Counter.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react' 2 | import { makeStyles, Paper, Grid, Container, TextField} from '@material-ui/core'; 3 | 4 | class Counter extends Component{ 5 | constructor(props){ 6 | super(props) 7 | } 8 | 9 | //On the change event for the select box pass the selected value back to the parent 10 | 11 | 12 | render(){ 13 | let csrCount = this.props.csrCount; 14 | let ssdcCount = this.props.ssdcCount; 15 | 16 | return ( 17 |
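// Display the latest CSRNet and SSDCNet counts received as props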

18 | 19 | 20 |

Current CSRNet Count: {csrCount}

21 |

Current SSDCNet Count: {ssdcCount}

22 |
23 | ) 24 | } 25 | } 26 | 27 | export default Counter; -------------------------------------------------------------------------------- /ui/src/components/Data.js: -------------------------------------------------------------------------------- 1 | export const occupancyData = [{ 2 | "Use": "Concentrated use: without fixed seating", 3 | "ft2": 7, 4 | "m2": 0.65 5 | }, 6 | { 7 | "Use": "Less concentrated use: without fixed seating", 8 | "ft2": 15, 9 | "m2": 1.4 10 | }, 11 | { 12 | "Use": "Bench-Type seating", 13 | "ft2": 0.666, 14 | "m2": 0.45 15 | }, 16 | { 17 | "Use": "Kitchens", 18 | "ft2": 100, 19 | "m2": 9.3 20 | }, 21 | { 22 | "Use": "Library stack areas", 23 | "ft2": 100, 24 | "m2": 9.3 25 | }, 26 | { 27 | "Use": "Library reading rooms (net)", 28 | "ft2": 50, 29 | "m2": 4.6 30 | }, 31 | { 32 | "Use": "Swimming pools (water surface)", 33 | "ft2": 50, 34 | "m2": 4.6 35 | }, 36 | { 37 | "Use": "Swimming pool decks", 38 | "ft2": 30, 39 | "m2": 2.8 40 | }, 41 | { 42 | "Use": "Exercise rooms w/equipment", 43 | "ft2": 50, 44 | "m2": 4.6 45 | }, 46 | { 47 | "Use": "Exercise rooms w/o equipment", 48 | "ft2": 15, 49 | "m2": 1.4 50 | }, 51 | { 52 | "Use": "Stages(net)", 53 | "ft2": 15, 54 | "m2": 1.4 55 | }, 56 | { 57 | "Use": "Lighting and access catwalks; galleries; gridirons (net)", 58 | "ft2": 100, 59 | "m2": 9.3 60 | }, 61 | { 62 | "Use": "Casinos and gaming areas", 63 | "ft2": 11, 64 | "m2": 1 65 | }, 66 | { 67 | "Use": "Skating rinks", 68 | "ft2": 50, 69 | "m2": 4.6 70 | }, 71 | { 72 | "Use": "Business Use", 73 | "ft2": 150, 74 | "m2": 14 75 | }, 76 | { 77 | "Use": "Concentrated Business Use", 78 | "ft2": 50, 79 | "m2": 4.6 80 | }, 81 | { 82 | "Use": "Airport traffic control tower observation levels", 83 | "ft2": 40, 84 | "m2": 3.7 85 | }, 86 | { 87 | "Use": "Collaboration rooms/spaces ≤450 ft2 (41.8 m2) in area", 88 | "ft2": 30, 89 | "m2": 2.8 90 | }, 91 | { 92 | "Use": "Collaboration rooms/spaces >450 ft2 (41.8 m2) in area", 93 | "ft2": 15, 94 | "m2": 1.4 95 | }, 96 | { 97 | "Use": "Day-Care Use (net)", 98 | "ft2": 35, 99 | "m2": 3.3 100 | }, 101 | { 102 | "Use": "Detention and Correctional Use", 103 | "ft2": 120, 104 | "m2": 11.1 105 | }, 106 | { 107 | "Use": "Educational Use Classrooms(net)", 108 | "ft2": 20, 109 | "m2": 1.9 110 | }, 111 | { 112 | "Use": "Shops/laboratories/vocational rooms(net)", 113 | "ft2": 50, 114 | "m2": 4.6 115 | }, 116 | { 117 | "Use": "Health Care UseInpatient treatmentdepartments", 118 | "ft2": 240, 119 | "m2": 22.3 120 | }, 121 | { 122 | "Use": "Sleeping departments", 123 | "ft2": 120, 124 | "m2": 11.1 125 | }, 126 | { 127 | "Use": "Ambulatory health care", 128 | "ft2": 150, 129 | "m2": 14 130 | }, 131 | { 132 | "Use": "Industrial Use General and high hazard industrial", 133 | "ft2": 100, 134 | "m2": 9.3 135 | }, 136 | { 137 | "Use": "Sales area on street floor", 138 | "ft2": 30, 139 | "m2": 2.8 140 | }, 141 | { 142 | "Use": "Sales area on two or more street floors", 143 | "ft2": 40, 144 | "m2": 3.7 145 | }, 146 | { 147 | "Use": "Sales area on floor below street floor", 148 | "ft2": 30, 149 | "m2": 2.8 150 | }, 151 | { 152 | "Use": "Sales area on floors above street floor", 153 | "ft2": 60, 154 | "m2": 5.6 155 | }, 156 | { 157 | "Use": "Floors (portions) used only for storage/receiving/shipping and not open to general public", 158 | "ft2": 300, 159 | "m2": 27.9 160 | }, 161 | { 162 | "Use": "Hotels and dormitories", 163 | "ft2": 200, 164 | "m2": 18.6 165 | }, 166 | { 167 | "Use": "Apartment buildings", 168 | "ft2": 200, 169 | "m2": 18.6 170 | }, 171 | { 172 
| "Use": "Board and care: large", 173 | "ft2": 200, 174 | "m2": 18.6 175 | }, 176 | { 177 | "Use": "storage: mercantile occupancies", 178 | "ft2": 300, 179 | "m2": 27.9 180 | }, 181 | { 182 | "Use": "storage: other than mercantile occupancies", 183 | "ft2": 500, 184 | "m2": 46.5 185 | } 186 | ]; 187 | 188 | export const colourOptions = [ 189 | { value: 'ocean', label: 'Ocean', color: '#00B8D9', isFixed: true }, 190 | { value: 'blue', label: 'Blue', color: '#0052CC', isDisabled: true }, 191 | { value: 'purple', label: 'Purple', color: '#5243AA' }, 192 | { value: 'red', label: 'Red', color: '#FF5630', isFixed: true }, 193 | { value: 'orange', label: 'Orange', color: '#FF8B00' }, 194 | { value: 'yellow', label: 'Yellow', color: '#FFC400' }, 195 | { value: 'green', label: 'Green', color: '#36B37E' }, 196 | { value: 'forest', label: 'Forest', color: '#00875A' }, 197 | { value: 'slate', label: 'Slate', color: '#253858' }, 198 | { value: 'silver', label: 'Silver', color: '#666666' }, 199 | ]; 200 | export default { 201 | occupancyData,colourOptions 202 | } -------------------------------------------------------------------------------- /ui/src/components/Graph.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react' 2 | import Chart from "chart.js"; 3 | //import classes from "./LineGraph.module.css"; 4 | let myLineChart; 5 | 6 | //--Chart Style Options--// 7 | Chart.defaults.global.defaultFontFamily = "'PT Sans', sans-serif" 8 | Chart.defaults.global.legend.display = false; 9 | //--Chart Style Options--// 10 | 11 | export default class LineGraph extends Component { 12 | chartRef = React.createRef(); 13 | 14 | componentDidMount() { 15 | this.buildChart(); 16 | } 17 | 18 | componentDidUpdate() { 19 | this.buildChart(); 20 | } 21 | 22 | buildChart = () => { 23 | const myChartRef = this.chartRef.current.getContext("2d"); 24 | const { data, ssdcnet,average, threshold, labels } = this.props; 25 | 26 | if (typeof myLineChart !== "undefined") myLineChart.destroy(); 27 | 28 | myLineChart = new Chart(myChartRef, { 29 | type: "line", 30 | data: { 31 | //Bring in data 32 | labels: labels, 33 | datasets: [ 34 | { 35 | label: "CSRNet Count", 36 | data: data, 37 | fill: false, 38 | type: 'line', 39 | borderColor: "#3377FF" 40 | }, { 41 | label: "SSDC Count", 42 | data: ssdcnet, 43 | type: 'line', 44 | fill: false, 45 | borderColor: "#FFE033" 46 | },{ 47 | label: "Average Count", 48 | data: average, 49 | type: 'line', 50 | fill: false, 51 | borderColor: "#4CBB17" 52 | },{ 53 | label: "Occupancy Threshold", 54 | data: threshold, 55 | type: 'line', 56 | fill: false, 57 | borderColor: "#FF0000" 58 | } 59 | 60 | 61 | ] 62 | }, 63 | options: { 64 | scales:{ 65 | yAxes: [{scaleLabel: 66 | { 67 | display:true, 68 | labelString: 'Count Persons' 69 | }}], 70 | xAxes:[{scaleLabel: 71 | { 72 | display:true, 73 | labelString: 'Elapsed Time' 74 | }}] 75 | }, 76 | showLines: true, 77 | animation:{ 78 | duration: 0 79 | }, 80 | legend: { 81 | display: true, 82 | position: 'right', 83 | labels: { 84 | fontColor: "#000080", 85 | } 86 | }} 87 | }); 88 | 89 | } 90 | 91 | render() { 92 | 93 | return ( 94 |
95 | 99 |
100 | ) 101 | } 102 | } -------------------------------------------------------------------------------- /ui/src/components/Grid.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { makeStyles } from '@material-ui/core/styles'; 3 | import Paper from '@material-ui/core/Paper'; 4 | import Grid from '@material-ui/core/Grid'; 5 | 6 | const useStyles = makeStyles(theme => ({ 7 | root: { 8 | flexGrow: 1, 9 | }, 10 | paper: { 11 | padding: theme.spacing(2), 12 | textAlign: 'center', 13 | color: theme.palette.text.secondary, 14 | }, 15 | })); 16 | 17 | export default function AutoGrid() {turn ( 18 |
19 | 20 | 21 | 22 | 23 | 24 | xs 25 | 26 | 27 | xs 28 | 29 | 30 | 31 | 32 | xs 33 | 34 | 35 | xs=6 36 | 37 | 38 | xs 39 | 40 | 41 |
42 | ); 43 | } -------------------------------------------------------------------------------- /ui/src/components/Occupancy.js: -------------------------------------------------------------------------------- 1 | import React, {Component} from 'react'; 2 | import {Form, Label, Control} from 'react-bootstrap'; 3 | 4 | class DynamicSelect extends Component{ 5 | constructor(props){ 6 | super(props) 7 | } 8 | 9 | //On the change event for the select box pass the selected value back to the parent 10 | handleChange = (event) => 11 | { 12 | let selectedValue = event.target.value; 13 | this.props.onSelectChange(selectedValue); 14 | } 15 | 16 | render(){ 17 | let occupancyData = this.props.arrayOfData; 18 | let options = occupancyData.map((data) => 19 | 25 | ); 26 | 27 | return ( 28 | 29 |
Select Occupancy Type
30 | 31 | 32 | {options} 33 | 34 |
35 | ) 36 | } 37 | } 38 | 39 | export default DynamicSelect; -------------------------------------------------------------------------------- /ui/src/components/Player.css: -------------------------------------------------------------------------------- 1 | 2 | 3 | #logo { 4 | float: right; 5 | width: 360px; 6 | height: 100px; 7 | margin-bottom: 20px; 8 | text-align: center; 9 | } 10 | 11 | .header h1 { 12 | text-align: center; 13 | position: relative; 14 | top: 20px; 15 | background: #3377FF; 16 | background-color: #3377FF; 17 | left: 50%; 18 | 19 | } 20 | 21 | h2 { 22 | text-align: center 23 | } 24 | 25 | .title { 26 | background-color: #3377FF; 27 | color: #FFE033; 28 | padding: 10px; 29 | margin-bottom: 25px; 30 | } 31 | .quadrant { 32 | width: 100%; 33 | color: burlywood; 34 | } 35 | .navbar{ 36 | background-color: #3377FF; 37 | color: #FFE033; 38 | } 39 | .content { 40 | width: 75% 41 | } 42 | 43 | .header { 44 | position: relative; 45 | 46 | 47 | } 48 | 49 | 50 | input[type=text] { 51 | width: 25%; 52 | } 53 | 54 | Button { 55 | padding: 1rem 1.5rem; 56 | } 57 | 58 | 59 | .btn-control { 60 | background-color: #3377FF; 61 | color: #FFE033; 62 | } 63 | 64 | .btn-lg { 65 | padding: 1rem 1.5rem; 66 | font-size: 1.5rem; 67 | } 68 | 69 | 70 | 71 | .themed-container { 72 | display: flex; 73 | background-color:rgb(251, 255, 200); 74 | justify-content: center; 75 | 76 | } 77 | 78 | .results { 79 | padding: 5px; 80 | background-color: yellow 81 | } 82 | 83 | #content { 84 | justify-content: left; 85 | } -------------------------------------------------------------------------------- /ui/src/components/Player.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import ReactPlayer from 'react-player' 3 | import captureVideoFrame from 'capture-video-frame' 4 | import { findDOMNode } from 'react-dom' 5 | import styles from './Player.css'; 6 | import LineGraph from './Graph' 7 | import occupancyData from './Data' 8 | import DynamicSelect from './Occupancy' 9 | import Counter from './Counter' 10 | import {Row, Button, Form, FormControl, InputGroup, Navbar, Nav} from 'react-bootstrap'; 11 | import screenfull from 'screenfull' 12 | import { makeStyles, Paper, Grid, Container, TextField} from '@material-ui/core'; 13 | import logo from './logo_fprf.png' 14 | 15 | 16 | const axios = require('axios').default; 17 | const occData = occupancyData.occupancyData 18 | console.log(occData) 19 | const base64Header = 'data:image/gif;base64,'; 20 | const classes = makeStyles(theme => ({ 21 | root: { 22 | flexGrow: 1 23 | } 24 | })); 25 | 26 | export default class Player extends React.Component { 27 | constructor (props) { 28 | super(props) 29 | this.state = { 30 | url: "https://crowd-counter-test-vids.s3.us-east-2.amazonaws.com/IMG_1852.MOV", 31 | playing: false, 32 | image: null, 33 | frames: [], 34 | counts: [], 35 | played: 0, 36 | duration: 0, 37 | captureTime: [], 38 | captureInterval: 2000, 39 | isLoading: false, 40 | result: 0, 41 | heatmaps: [], 42 | heatmap: 'R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=', 43 | crowdclass: "", 44 | selectedValue: 'Nothing selected', 45 | area: 0, 46 | unit: 'm2', 47 | occLoadFactor:0, 48 | threshold: 0, 49 | thresholds:[], 50 | ssdcnet:[], 51 | averages: [], 52 | ip: window.location.hostname 53 | } 54 | //this.handleChange = this.handleChange.bind(this); 55 | this.captureVideo = this.captureVideo.bind(this); 56 | this.handleChange = this.handleChange.bind(this); 57 | this.handleSubmit = 
this.handleSubmit.bind(this); 58 | this.forceUpdateHandler = this.forceUpdateHandler.bind(this); 59 | this.autoCapture = this.autoCapture.bind(this); 60 | this.stopAutoCapture = this.stopAutoCapture.bind(this); 61 | this.handleSelectChange = this.handleSelectChange.bind(this); 62 | this.onChange = this.onChange.bind(this) 63 | } 64 | 65 | forceUpdateHandler(){ 66 | this.forceUpdate(); 67 | }; 68 | 69 | onChange(e){ 70 | const re = /^[0-9\b]+$/; 71 | if (e.target.value === '' || re.test(e.target.value)) { 72 | this.setState({area: e.target.value}) 73 | }; 74 | 75 | } 76 | 77 | setUnits(event) { 78 | this.setState({unit: event.target.value}); 79 | console.log(event.target.value); 80 | } 81 | 82 | handleChange(event) { 83 | this.setState({value: event.target.value}); 84 | } 85 | 86 | handleSelectChange = (selectedValue) => { 87 | this.setState({ 88 | selectedValue: selectedValue 89 | }); 90 | } 91 | 92 | handleSubmit(event) { 93 | event.preventDefault(); 94 | console.log('A video was submitted: ' + this.state.value); 95 | 96 | } 97 | 98 | handleSeekMouseDown = e => { 99 | this.setState({ seeking: true }) 100 | } 101 | 102 | handleSeekChange = e => { 103 | this.setState({ played: parseFloat(e.target.value) }) 104 | } 105 | 106 | handleSeekMouseUp = e => { 107 | this.setState({ seeking: false }) 108 | this.player.seekTo(parseFloat(e.target.value)) 109 | } 110 | 111 | handleProgress = state => { 112 | console.log('onProgress', state) 113 | // We only want to update time slider if we are not currently seeking 114 | if (!this.state.seeking) { 115 | this.setState(state) 116 | } 117 | } 118 | 119 | handleEnded = () => { 120 | console.log('onEnded') 121 | this.setState({ playing: false }); 122 | if (this.state.intervalId){ 123 | clearInterval(this.state.intervalId); 124 | } 125 | } 126 | 127 | handleDuration = (duration) => { 128 | console.log('onDuration', duration) 129 | this.setState({ duration }) 130 | } 131 | 132 | handleCaptureInterval(event) { 133 | 134 | this.setState({captureInterval: event.target.value}); 135 | console.log("2", this.state.captureInterval) 136 | } 137 | 138 | 139 | captureVideo() { 140 | const frame = captureVideoFrame(this.player.getInternalPlayer()); 141 | console.log('captured frame', frame); 142 | this.setState({ image: frame.dataUri }); 143 | let image_data = frame.dataUri; 144 | this.state.frames.push(image_data); 145 | let times = this.state.duration * this.state.played; 146 | let ip = ''.concat('http://', this.state.ip, ':8081/predict'); 147 | console.log(ip); 148 | this.state.captureTime.push(times.toFixed(2)); 149 | 150 | 151 | const imgData = frame.dataUri; 152 | 153 | let metadata = { vidUrl: this.state.url, occType: this.state.selectedValue, area: this.state.area, 154 | units: this.state.unit, duration: this.state.duration, threshold: this.state.threshold } 155 | var data = imgData.split(","); 156 | var payload = { 157 | header: data[0], 158 | data: data[1], 159 | metadata: metadata 160 | }; 161 | console.log('sending to server', payload); 162 | 163 | axios.post(ip, payload) 164 | .then((res) => { 165 | console.log("RESPONSE RECEIVED: ", res); 166 | this.setState({result:res.data.predictions.count}); 167 | this.setState({heatmap:res.data.predictions.predicted_heatmap}); 168 | this.setState({crowdclass:res.data.predictions.class}); 169 | this.state.averages.push(res.data.predictions.average) 170 | this.state.counts.push(this.state.result); 171 | this.state.ssdcnet.push(res.data.predictions.ssdcnet_count); 172 | 
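// The predicted heatmap arrives as a raw base64 string; prefixing it with
// base64Header ('data:image/gif;base64,') turns it into a data URI that can
// be used directly as an image source in the Captures list below.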
this.state.heatmaps.push(base64Header+res.data.predictions.predicted_heatmap); 173 | this.state.thresholds.push(this.state.threshold) 174 | this.forceUpdateHandler(); 175 | console.log('heatmaps', this.state.heatmaps); 176 | console.log(this.state.counts); 177 | }) 178 | .catch((err) => { 179 | console.log("AXIOS ERROR: ", err); 180 | }); 181 | } 182 | 183 | autoCapture() { 184 | let intervalId = setInterval(this.captureVideo, this.state.captureInterval); 185 | this.setState({ intervalId: intervalId }); 186 | } 187 | 188 | stopAutoCapture() { 189 | clearInterval(this.state.intervalId); 190 | } 191 | 192 | handleClickFullscreen = () => { 193 | screenfull.request(findDOMNode(this.player)); 194 | } 195 | 196 | ref = player => { 197 | this.player = player; 198 | } 199 | 200 | handleCancelClick = (event) => { 201 | this.setState({ result: "" }); 202 | this.setState({ image: null }); 203 | this.setState({ crowdclass: "" }); 204 | this.setState({ heatmap: ""}); 205 | } 206 | 207 | render() { 208 | //const isLoading = this.state.isLoading; 209 | const { played } = this.state; 210 | let captures = this.state.frames; 211 | const result = occData.find( element => element.Use===this.state.selectedValue ); 212 | const area = this.state.area; 213 | let threshold = 0; 214 | let thresholds = []; 215 | 216 | if (result && this.state.unit === 'm2') { 217 | threshold = this.state.area / result.m2; 218 | } 219 | 220 | if (result && this.state.unit === 'ft2') { 221 | threshold = this.state.area / result.ft2; 222 | } 223 | 224 | if (captures){ 225 | var i; 226 | for (i = 0; i < captures.length; i++) { 227 | thresholds.push(threshold); 228 | } 229 | console.log(captures.length); 230 | console.log(threshold) 231 | console.log(thresholds) 232 | } 233 | return ( 234 | 235 |
236 | 237 | 238 | 239 | 240 | {' '} 247 |

Crowd Counting Application

248 |
249 | 250 | 251 | 252 | 253 |

Controls

254 |

Enter a custom video URL:
255 |
256 | { this.urlInput = input }} type='text' 258 | placeholder="Enter a video URL" 259 | aria-label="Video URL" 260 | aria-describedby="basic-addon2" 261 | onSubmit={this.handleSubmit}/> 262 |
263 |
264 | 265 |
266 | 267 | 268 |
Enter the area of the video feed:
269 | 277 | 278 | 279 |
Units of Measure:
280 |
281 | Metric (m²) 282 | Imperial (ft²)
283 |
284 | 285 |
286 |
287 | 288 |

Environment Details

289 | { 290 | this.state.url !== undefined && 291 |
292 | Video Source URL: {this.state.url} 293 |
294 | }{"\n"} 295 | {
296 | Camera Feed Area (m²/ft²): {this.state.area + ' '}{this.state.unit} 297 |
}{"\n"} 298 | 299 |
300 | Selected Occupancy Type: {this.state.selectedValue}
301 | { 302 | result !== undefined && 303 |
304 | Occupancy Loads (per person): {result.ft2} Ft²; {result.m2} m² 305 |
306 | } 307 | {"\n"} 308 | { 309 | result !== undefined && 310 |
Occupancy Threshold: {threshold.toFixed(0)}
311 | }
312 |
313 | { 314 | result !== undefined && area !== 0 && 315 | 316 | 317 |

Video Feed

318 | 319 | 320 | { this.player = player }} 322 | url={this.state.url} 323 | playing={this.state.playing} 324 | onSeek={e => console.log('onSeek', e)} 325 | onProgress={this.handleProgress} 326 | onDuration={this.handleDuration} 327 | onEnded={this.handleEnded} 328 | width='100%' 329 | height='100%' 330 | config={{ file: { attributes: { 331 | crossorigin: 'anonymous' 332 | }}}} 333 | /> 334 | 341 | 342 | 343 | 344 | 345 | 346 | 347 |

348 | 359 |
360 | 361 | 362 | 363 | 364 | 365 |
366 |
} 367 | { 368 | result !== undefined && area !== 0 && 369 | 370 |

Model Prediction Count Over Time

371 | 374 |
} 375 | 376 | 377 | { captures.length > 0 && 378 |

Captures

379 | {captures.length > 0 && 380 | 381 | this.state.frames.map((item,index) => ( 382 | 383 |
384 |

Elapsed Time: {this.state.captureTime[index]} (in seconds)
385 | CSRNet Count: {this.state.counts[index]}
386 | SSDCNet Count: {this.state.ssdcnet[index]}
387 | Average Count: {this.state.averages[index]}

388 |
389 | 390 | capture 391 | heatmap
392 | ))} 393 |
394 | } 395 |
396 | 397 |
398 |
399 | 400 | ); 401 | } 402 | } 403 | -------------------------------------------------------------------------------- /ui/src/components/logo_fprf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/ui/src/components/logo_fprf.png -------------------------------------------------------------------------------- /ui/src/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin: 0; 3 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 4 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', 5 | sans-serif; 6 | -webkit-font-smoothing: antialiased; 7 | -moz-osx-font-smoothing: grayscale; 8 | } 9 | 10 | code { 11 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', 12 | monospace; 13 | } 14 | -------------------------------------------------------------------------------- /ui/src/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom'; 3 | import './index.css'; 4 | import App from './App'; 5 | import * as serviceWorker from './serviceWorker'; 6 | 7 | ReactDOM.render(, document.getElementById('root')); 8 | 9 | // If you want your app to work offline and load faster, you can change 10 | // unregister() to register() below. Note this comes with some pitfalls. 11 | // Learn more about service workers: https://bit.ly/CRA-PWA 12 | serviceWorker.unregister(); 13 | -------------------------------------------------------------------------------- /ui/src/logo_fprf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/ui/src/logo_fprf.png -------------------------------------------------------------------------------- /ui/src/serviceWorker.js: -------------------------------------------------------------------------------- 1 | // This optional code is used to register a service worker. 2 | // register() is not called by default. 3 | 4 | // This lets the app load faster on subsequent visits in production, and gives 5 | // it offline capabilities. However, it also means that developers (and users) 6 | // will only see deployed updates on subsequent visits to a page, after all the 7 | // existing tabs open on the page have been closed, since previously cached 8 | // resources are updated in the background. 9 | 10 | // To learn more about the benefits of this model and instructions on how to 11 | // opt-in, read https://bit.ly/CRA-PWA 12 | 13 | const isLocalhost = Boolean( 14 | window.location.hostname === 'localhost' || 15 | // [::1] is the IPv6 localhost address. 16 | window.location.hostname === '[::1]' || 17 | // 127.0.0.0/8 are considered localhost for IPv4. 18 | window.location.hostname.match( 19 | /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/ 20 | ) 21 | ); 22 | 23 | export function register(config) { 24 | if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) { 25 | // The URL constructor is available in all browsers that support SW. 26 | const publicUrl = new URL(process.env.PUBLIC_URL, window.location.href); 27 | if (publicUrl.origin !== window.location.origin) { 28 | // Our service worker won't work if PUBLIC_URL is on a different origin 29 | // from what our page is served on. 
This might happen if a CDN is used to 30 | // serve assets; see https://github.com/facebook/create-react-app/issues/2374 31 | return; 32 | } 33 | 34 | window.addEventListener('load', () => { 35 | const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`; 36 | 37 | if (isLocalhost) { 38 | // This is running on localhost. Let's check if a service worker still exists or not. 39 | checkValidServiceWorker(swUrl, config); 40 | 41 | // Add some additional logging to localhost, pointing developers to the 42 | // service worker/PWA documentation. 43 | navigator.serviceWorker.ready.then(() => { 44 | console.log( 45 | 'This web app is being served cache-first by a service ' + 46 | 'worker. To learn more, visit https://bit.ly/CRA-PWA' 47 | ); 48 | }); 49 | } else { 50 | // Is not localhost. Just register service worker 51 | registerValidSW(swUrl, config); 52 | } 53 | }); 54 | } 55 | } 56 | 57 | function registerValidSW(swUrl, config) { 58 | navigator.serviceWorker 59 | .register(swUrl) 60 | .then(registration => { 61 | registration.onupdatefound = () => { 62 | const installingWorker = registration.installing; 63 | if (installingWorker == null) { 64 | return; 65 | } 66 | installingWorker.onstatechange = () => { 67 | if (installingWorker.state === 'installed') { 68 | if (navigator.serviceWorker.controller) { 69 | // At this point, the updated precached content has been fetched, 70 | // but the previous service worker will still serve the older 71 | // content until all client tabs are closed. 72 | console.log( 73 | 'New content is available and will be used when all ' + 74 | 'tabs for this page are closed. See https://bit.ly/CRA-PWA.' 75 | ); 76 | 77 | // Execute callback 78 | if (config && config.onUpdate) { 79 | config.onUpdate(registration); 80 | } 81 | } else { 82 | // At this point, everything has been precached. 83 | // It's the perfect time to display a 84 | // "Content is cached for offline use." message. 85 | console.log('Content is cached for offline use.'); 86 | 87 | // Execute callback 88 | if (config && config.onSuccess) { 89 | config.onSuccess(registration); 90 | } 91 | } 92 | } 93 | }; 94 | }; 95 | }) 96 | .catch(error => { 97 | console.error('Error during service worker registration:', error); 98 | }); 99 | } 100 | 101 | function checkValidServiceWorker(swUrl, config) { 102 | // Check if the service worker can be found. If it can't reload the page. 103 | fetch(swUrl, { 104 | headers: { 'Service-Worker': 'script' } 105 | }) 106 | .then(response => { 107 | // Ensure service worker exists, and that we really are getting a JS file. 108 | const contentType = response.headers.get('content-type'); 109 | if ( 110 | response.status === 404 || 111 | (contentType != null && contentType.indexOf('javascript') === -1) 112 | ) { 113 | // No service worker found. Probably a different app. Reload the page. 114 | navigator.serviceWorker.ready.then(registration => { 115 | registration.unregister().then(() => { 116 | window.location.reload(); 117 | }); 118 | }); 119 | } else { 120 | // Service worker found. Proceed as normal. 121 | registerValidSW(swUrl, config); 122 | } 123 | }) 124 | .catch(() => { 125 | console.log( 126 | 'No internet connection found. App is running in offline mode.' 
127 | ); 128 | }); 129 | } 130 | 131 | export function unregister() { 132 | if ('serviceWorker' in navigator) { 133 | navigator.serviceWorker.ready.then(registration => { 134 | registration.unregister(); 135 | }); 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /ui/src/setupTests.js: -------------------------------------------------------------------------------- 1 | // jest-dom adds custom jest matchers for asserting on DOM nodes. 2 | // allows you to do things like: 3 | // expect(element).toHaveTextContent(/react/i) 4 | // learn more: https://github.com/testing-library/jest-dom 5 | import '@testing-library/jest-dom/extend-expect'; 6 | -------------------------------------------------------------------------------- /utils/Convert_CANNET_Pytorch_TF.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Context aware Neural network Model\n", 8 | "\n", 9 | "## Convert PyTorch Models to TensorFlow \n" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 2, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "from torch.utils.data import Dataset, DataLoader\n", 19 | "from torchvision import transforms\n", 20 | "import matplotlib.pyplot as plt\n", 21 | "import torch.optim as optim\n", 22 | "import tensorflow as tf\n", 23 | "import torch.nn as nn\n", 24 | "import numpy as np\n", 25 | "import torch\n", 26 | "import onnx\n", 27 | "import time\n", 28 | "import os\n", 29 | "import sys\n", 30 | "import cv2" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 1, 36 | "metadata": {}, 37 | "outputs": [ 38 | { 39 | "name": "stderr", 40 | "output_type": "stream", 41 | "text": [ 42 | "/home/ubuntu/mayub/Github/onnx-tensorflow/onnx_tf/common/__init__.py:89: UserWarning: onnx_tf.common.get_outputs_names is deprecated. It will be removed in future release. 
Use TensorflowGraph.get_outputs_names instead.\n", 43 | " warnings.warn(message)\n" 44 | ] 45 | } 46 | ], 47 | "source": [ 48 | "from onnx_tf.backend import prepare" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "sys.path.insert(1, '/home/ubuntu/mayub/Github/Context-Aware_Crowd_Counting-pytorch/')\n", 58 | "import cannet as cann_model" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": null, 64 | "metadata": {}, 65 | "outputs": [], 66 | "source": [ 67 | "MODEL_PARAM_PATH = '/home/ubuntu/mayub/Github/Context-Aware_Crowd_Counting-pytorch/cvpr2019_CAN_SHHA_353.pth'" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "metadata": { 74 | "scrolled": true 75 | }, 76 | "outputs": [], 77 | "source": [ 78 | "model= cann_model.CANNet()\n", 79 | "device=torch.device(\"cpu\")\n", 80 | "model.load_state_dict(torch.load(MODEL_PARAM_PATH, map_location=torch.device('cpu')))\n", 81 | "model.to(device)\n", 82 | "model.eval()\n", 83 | "print(model)" 84 | ] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "metadata": {}, 89 | "source": [ 90 | "### Preprocess the Image for CANNet model (context aware model)" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": null, 96 | "metadata": {}, 97 | "outputs": [], 98 | "source": [ 99 | "def read_and_convert_img(image_path):\n", 100 | " img=plt.imread(image_path)/255\n", 101 | " print(len(img.shape))\n", 102 | " if len(img.shape)==2:\n", 103 | " # expand grayscale image to three channel.\n", 104 | " img=img[:,:,np.newaxis]\n", 105 | " img=np.concatenate((img,img,img),2)\n", 106 | " print(img.shape[0])\n", 107 | " print(img.shape[1])\n", 108 | " ds_rows=int(img.shape[0]//8) # Downsampling to match model size\n", 109 | " ds_cols=int(img.shape[1]//8)\n", 110 | " print(ds_rows)\n", 111 | " print(ds_cols)\n", 112 | " img = cv2.resize(img,(ds_cols*8,ds_rows*8))\n", 113 | " print(img.shape)\n", 114 | " img=img.transpose((2,0,1)) # convert to order (channel,rows,cols)\n", 115 | " img_tensor=torch.tensor(img,dtype=torch.float)\n", 116 | " img_tensor=transforms.functional.normalize(img_tensor,mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])\n", 117 | " img_tensor=img_tensor.view(1,img_tensor.shape[0],img_tensor.shape[1],img_tensor.shape[2])\n", 118 | " print(img.shape)\n", 119 | " print(img_tensor.shape)\n", 120 | " #img_tensor = np.expand_dims(img_tensor,axis = 0)\n", 121 | " return img_tensor" 122 | ] 123 | }, 124 | { 125 | "cell_type": "markdown", 126 | "metadata": {}, 127 | "source": [ 128 | "### Check Input image dimentions " 129 | ] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": null, 134 | "metadata": {}, 135 | "outputs": [], 136 | "source": [ 137 | "image_input= read_and_convert_img('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/data/shanghaitech/part_A_final/test_data/images/IMG_1.jpg')" 138 | ] 139 | }, 140 | { 141 | "cell_type": "markdown", 142 | "metadata": {}, 143 | "source": [ 144 | "### Run the Prediction on sample image to test model" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": null, 150 | "metadata": { 151 | "scrolled": true 152 | }, 153 | "outputs": [], 154 | "source": [ 155 | "image_input=image_input.to(device)\n", 156 | "et_dmap=model(image_input)\n", 157 | "print(et_dmap.data.sum())" 158 | ] 159 | }, 160 | { 161 | "cell_type": "markdown", 162 | "metadata": {}, 163 | "source": [ 164 | "### Export the model in ONNX format" 165 | ] 166 
| }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": null, 170 | "metadata": {}, 171 | "outputs": [], 172 | "source": [ 173 | "# Export to ONNX format\n", 174 | "torch.onnx.export(model, image_input, './model_simple.onnx', input_names=['image_input'], output_names=['image_output'],operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)" 175 | ] 176 | }, 177 | { 178 | "cell_type": "markdown", 179 | "metadata": {}, 180 | "source": [ 181 | "### Prepare the model for TensorFlow export" 182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": 4, 187 | "metadata": {}, 188 | "outputs": [], 189 | "source": [ 190 | "# Load ONNX model and convert to TensorFlow format\n", 191 | "model_onnx = onnx.load('./model_simple.onnx')\n", 192 | "\n", 193 | "# tf_rep = prepare(model_onnx)\n", 194 | "\n", 195 | "# Export model as .pb file\n", 196 | "#tf_rep.export_graph('./model_simple.pb')" 197 | ] 198 | }, 199 | { 200 | "cell_type": "code", 201 | "execution_count": null, 202 | "metadata": {}, 203 | "outputs": [], 204 | "source": [ 205 | "tf_rep = prepare(model_onnx,device='CPU')" 206 | ] 207 | }, 208 | { 209 | "cell_type": "markdown", 210 | "metadata": {}, 211 | "source": [ 212 | "## FAILS !! Aten op is not implemented in ONNX " 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": 3, 218 | "metadata": {}, 219 | "outputs": [ 220 | { 221 | "ename": "AttributeError", 222 | "evalue": "'function' object has no attribute 'device'", 223 | "output_type": "error", 224 | "traceback": [ 225 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 226 | "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", 227 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mprepare\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", 228 | "\u001b[0;31mAttributeError\u001b[0m: 'function' object has no attribute 'device'" 229 | ] 230 | } 231 | ], 232 | "source": [ 233 | "prepare()" 234 | ] 235 | }, 236 | { 237 | "cell_type": "code", 238 | "execution_count": null, 239 | "metadata": {}, 240 | "outputs": [], 241 | "source": [] 242 | } 243 | ], 244 | "metadata": { 245 | "kernelspec": { 246 | "display_name": "Environment (conda_crowd_detection_mayub)", 247 | "language": "python", 248 | "name": "conda_crowd_detection_mayub" 249 | }, 250 | "language_info": { 251 | "codemirror_mode": { 252 | "name": "ipython", 253 | "version": 3 254 | }, 255 | "file_extension": ".py", 256 | "mimetype": "text/x-python", 257 | "name": "python", 258 | "nbconvert_exporter": "python", 259 | "pygments_lexer": "ipython3", 260 | "version": "3.6.9" 261 | } 262 | }, 263 | "nbformat": 4, 264 | "nbformat_minor": 2 265 | } 266 | -------------------------------------------------------------------------------- /utils/Network.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/utils/Network.zip -------------------------------------------------------------------------------- /utils/Network/SSDCNet.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | from torchvision import models 4 | 5 | import torch.nn.functional as F 6 | import math 7 | 8 | try: 9 | from class_func import Class2Count 10 | from merge_func import 
count_merge_low2high_batch 11 | from base_Network_module import up,up_res 12 | except: 13 | from Network.class_func import Class2Count 14 | from Network.merge_func import count_merge_low2high_batch 15 | from Network.base_Network_module import up,up_res 16 | # ============================================================================ 17 | # 1.base module 18 | # ============================================================================ 19 | 20 | def Gauss_initialize_weights(net): 21 | for m in net.modules(): 22 | if isinstance(m, nn.Conv2d): 23 | nn.init.normal_(m.weight, std=0.01) 24 | if m.bias is not None: 25 | nn.init.constant_(m.bias, 0) 26 | elif isinstance(m, nn.BatchNorm2d): 27 | nn.init.constant_(m.weight, 1) 28 | nn.init.constant_(m.bias, 0) 29 | 30 | 31 | # --1.1 32 | def make_layers(cfg, in_channels = 3,batch_norm=False,dilation = False): 33 | if dilation: 34 | d_rate = 2 35 | else: 36 | d_rate = 1 37 | layers = [] 38 | for v in cfg: 39 | if v == 'M': 40 | layers += [nn.MaxPool2d(kernel_size=2, stride=2)] 41 | else: 42 | conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=d_rate,dilation = d_rate) 43 | if batch_norm: 44 | layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] 45 | else: 46 | layers += [conv2d, nn.ReLU(inplace=True)] 47 | in_channels = v 48 | return nn.Sequential(*layers) 49 | 50 | # --define base Netweork module 51 | class VGG16_frontend(nn.Module): 52 | def __init__(self,block_num=5,decode_num=0,load_weights=True,bn=False,IF_freeze_bn=False): 53 | super(VGG16_frontend,self).__init__() 54 | self.block_num = block_num 55 | self.load_weights = load_weights 56 | self.bn = bn 57 | self.IF_freeze_bn = IF_freeze_bn 58 | self.decode_num = decode_num 59 | 60 | block_dict = [[64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 'M'],\ 61 | [512, 512, 512,'M'], [512, 512, 512,'M']] 62 | 63 | self.frontend_feat = [] 64 | for i in range(block_num): 65 | self.frontend_feat += block_dict[i] 66 | 67 | if self.bn: 68 | self.features = make_layers(self.frontend_feat, batch_norm=True) 69 | else: 70 | self.features = make_layers(self.frontend_feat, batch_norm=False) 71 | 72 | 73 | if self.load_weights: 74 | if self.bn: 75 | pretrained_model = models.vgg16_bn(pretrained = True) 76 | else: 77 | pretrained_model = models.vgg16(pretrained = True) 78 | pretrained_dict = pretrained_model.state_dict() 79 | model_dict = self.state_dict() 80 | # filter out unnecessary keys 81 | pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} 82 | # overwrite entries in the existing state dict 83 | model_dict.update(pretrained_dict) 84 | # load the new state dict 85 | self.load_state_dict(model_dict) 86 | 87 | if IF_freeze_bn: 88 | self.freeze_bn() 89 | 90 | def forward(self,x): 91 | if self.bn: 92 | x = self.features[ 0:7](x) 93 | print(type(x)) 94 | print(x.shape) 95 | conv1_feat =x if self.decode_num>=4 else [] 96 | x = self.features[ 7:14](x) 97 | conv2_feat =x if self.decode_num>=3 else [] 98 | x = self.features[ 14:24](x) 99 | conv3_feat =x if self.decode_num>=2 else [] 100 | x = self.features[ 24:34](x) 101 | conv4_feat =x if self.decode_num>=1 else [] 102 | x = self.features[ 34:44](x) 103 | conv5_feat =x 104 | else: 105 | x = self.features[ 0: 5](x) 106 | conv1_feat =x if self.decode_num>=4 else [] 107 | x = self.features[ 5:10](x) 108 | conv2_feat =x if self.decode_num>=3 else [] 109 | x = self.features[ 10:17](x) 110 | conv3_feat =x if self.decode_num>=2 else [] 111 | x = self.features[ 17:24](x) 112 | conv4_feat =x if self.decode_num>=1 else [] 113 | x = 
self.features[ 24:31](x) 114 | conv5_feat =x 115 | 116 | feature_map = {'conv1':conv1_feat,'conv2': conv2_feat,\ 117 | 'conv3':conv3_feat,'conv4': conv4_feat, 'conv5': conv5_feat} 118 | 119 | # feature_map = [conv1_feat, conv2_feat, conv3_feat, conv4_feat, conv5_feat] 120 | 121 | return feature_map 122 | 123 | 124 | def freeze_bn(self): 125 | for m in self.modules(): 126 | if isinstance(m, nn.BatchNorm2d): 127 | m.eval() 128 | 129 | 130 | 131 | class SSDCNet_classify(nn.Module): 132 | def __init__(self, class_num,label_indice,div_times=2,\ 133 | frontend_name='VGG16',block_num=5,\ 134 | IF_pre_bn=True,IF_freeze_bn=False,load_weights=False,\ 135 | psize=64,pstride=64,parse_method ='maxp',den_for='p'): 136 | super(SSDCNet_classify, self).__init__() 137 | 138 | # example = torch.rand(1, 3, 768, 1024) 139 | # fr_end = VGG16_frontend(5,4,True,False,False) 140 | # init parameters 141 | self.label_indice = label_indice # this should be tensor 142 | self.class_num = len(self.label_indice)+1 143 | self.div_times = div_times 144 | self.frontend_name = frontend_name 145 | self.block_num = block_num 146 | 147 | self.IF_pre_bn = IF_pre_bn 148 | self.IF_freeze_bn = IF_freeze_bn 149 | self.load_weights = load_weights 150 | self.psize,self.pstride = psize,pstride 151 | self.parse_method = parse_method 152 | self.den_for = den_for 153 | 154 | # first, make frontend 155 | if self.frontend_name == 'VGG16': 156 | self.front_end = VGG16_frontend(block_num=self.block_num,decode_num=self.div_times, 157 | load_weights=self.load_weights,bn=self.IF_pre_bn,IF_freeze_bn=self.IF_freeze_bn) 158 | # self.front_end = torch.jit.trace(VGG16_frontend(block_num=self.block_num,decode_num=4, 159 | # load_weights=self.load_weights,bn=self.IF_pre_bn,IF_freeze_bn=self.IF_freeze_bn), x) 160 | # self.front_end = torch.jit.trace(fr_end) 161 | self.back_end_up = dict() 162 | # use light wight Refinet upsample 163 | up_in_ch = [512,512,256,128,64] 164 | up_out_ch = [256,256,128,64] 165 | cat_in_ch = [(256+512),(256+256)] 166 | cat_out_ch = [512,512] 167 | 168 | self.back_end_up = dict() 169 | back_end_up = [] 170 | for i in range(self.div_times): 171 | back_end_up.append( up(up_in_ch[i],up_out_ch[i],\ 172 | cat_in_ch[i],cat_out_ch[i]) ) 173 | 174 | if self.div_times>0: 175 | self.back_end_up = nn.Sequential(*back_end_up) 176 | 177 | # make backend pre 178 | self.back_end_cls = torch.nn.Sequential( 179 | torch.nn.AvgPool2d((2,2),stride=2), 180 | torch.nn.Conv2d(512, 512, (1, 1) ), 181 | torch.nn.ReLU(), 182 | torch.nn.Conv2d(512, class_num, (1, 1) ) ) 183 | 184 | self.back_end_lw_fc = torch.nn.Sequential( 185 | torch.nn.AvgPool2d((2,2),stride=2), 186 | torch.nn.Conv2d(512, 512, (1, 1) ), 187 | torch.nn.ReLU(), 188 | torch.nn.Conv2d(512, 1, (1, 1) ) ) 189 | 190 | # 2019/09/12 add density map predictor 191 | # 2x larger density map than class map 192 | 193 | self.back_end_denisty = torch.nn.Sequential( 194 | torch.nn.Conv2d(512, 512, (1, 1) ), 195 | torch.nn.ReLU(), 196 | torch.nn.Conv2d(512, 1, (1, 1) ) ) 197 | 198 | 199 | Gauss_initialize_weights(self.back_end_up) 200 | Gauss_initialize_weights(self.back_end_cls) 201 | Gauss_initialize_weights(self.back_end_lw_fc) 202 | Gauss_initialize_weights(self.back_end_denisty) 203 | 204 | 205 | 206 | def forward(self,x): 207 | x = self.front_end(x) 208 | # return { 'conv1':x[0] ,'conv2': x[1],\ 209 | # 'conv3':x[2] ,'conv4': x[3], 'conv5': x[4] } 210 | return x 211 | 212 | # @torch.jit.export 213 | def resample(self,feature_map): 214 | 215 | low_feat = feature_map['conv5'] 216 | 217 | div_res = 
dict() 218 | div_cls_name = 'cls' + str(0) 219 | new_conv_reg = self.back_end_cls(low_feat) 220 | div_res[div_cls_name] = new_conv_reg 221 | 222 | 223 | for i in range(self.div_times): 224 | # low feat to create density maps 225 | feat_h,feat_w = low_feat.size()[-2:] 226 | tmp_density = self.back_end_denisty(low_feat) 227 | tmp_density = F.unfold(tmp_density,kernel_size = 2,stride=2) 228 | # tmp_density = F.unfold(low_feat.sum(dim=1,keepdim=True),kernel_size = 2,stride=2) 229 | tmp_density = F.softmax(tmp_density,dim=1) 230 | tmp_density = F.fold(tmp_density,(feat_h,feat_w),kernel_size=2,stride=2) 231 | tmp_density_name = 'den' + str(i) 232 | div_res[tmp_density_name] = tmp_density 233 | 234 | high_feat_name = 'conv'+str(4-i) 235 | high_feat = feature_map[high_feat_name] 236 | low_feat = self.back_end_up[i](low_feat,high_feat) 237 | 238 | 239 | # div45: Upsample and get weight 240 | new_conv_w = self.back_end_lw_fc(low_feat) 241 | # new_conv4_w = F.sigmoid(new_conv4_w) 242 | new_conv_w = torch.sigmoid(new_conv_w) 243 | new_conv_reg = self.back_end_cls(low_feat) 244 | 245 | del feature_map[high_feat_name] 246 | 247 | div_w_name = 'w' + str(i+1) 248 | div_cls_name = 'cls' + str(i+1) 249 | 250 | div_res[div_w_name] = new_conv_w 251 | div_res[div_cls_name] = new_conv_reg 252 | 253 | del feature_map 254 | return div_res 255 | 256 | # @torch.jit.export 257 | def parse_merge(self,div_res): 258 | res = dict() 259 | # class2count 260 | for cidx in range(self.div_times+1): 261 | tname = 'c' + str(cidx) 262 | 263 | if self.parse_method == 'maxp': 264 | div_res['cls'+str(cidx)] = div_res['cls'+str(cidx)].max(dim=1,keepdim=True)[1] 265 | res[tname] = Class2Count(div_res['cls'+str(cidx)],self.label_indice) 266 | elif self.parse_method == 'mulp': 267 | div_res['cls'+str(cidx)] = F.softmax(div_res['cls'+str(cidx)],dim=1) 268 | res[tname] = Class2Count_mul(div_res['cls'+str(cidx)],self.label_indice) 269 | 270 | # merge_div_res 271 | # res['c0'] is the parse result 272 | res['div0'] = res['c0'] 273 | for divt in range(1,self.div_times+1): 274 | den_name = 'den' + str(divt-1) 275 | tden = div_res[den_name] 276 | 277 | tname = 'div' + str(divt) 278 | tchigh = res['c' + str(divt)] 279 | tclow = res['div' + str(int(divt-1))] 280 | 281 | IF_p = True if self.den_for == 'p' else False 282 | tclow = count_merge_low2high_batch(tclow,tden,IF_avg=False,IF_p=IF_p) 283 | tw = div_res['w'+str(divt)] 284 | res[tname] = (1-tw)*tclow + tw*tchigh 285 | 286 | # save div map 287 | res['w'+str(divt)] = tw 288 | 289 | del div_res 290 | return res 291 | 292 | 293 | 294 | 295 | 296 | -------------------------------------------------------------------------------- /utils/Network/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammedayub44/ObjectDetection/6d151e417ff9322b6be5722b40bc4a209282d13d/utils/Network/__init__.py -------------------------------------------------------------------------------- /utils/Network/base_Network_module.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import math 5 | 6 | # --1.2.1 7 | class one_conv(nn.Module): 8 | def __init__(self, in_ch, out_ch, normaliz=False): 9 | super(one_conv, self).__init__() 10 | 11 | ops = [] 12 | ops += [nn.Conv2d(in_ch, out_ch, 3, padding=1)] 13 | # ops += [nn.Dropout(p=0.1)] 14 | if normaliz: 15 | ops += [nn.BatchNorm2d(out_ch)] 16 | ops += [nn.ReLU(inplace=True)] 17 | 18 | 
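# Pack the collected Conv2d (+ optional BatchNorm2d) and ReLU ops into a single sequential block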
self.conv = nn.Sequential(*ops) 19 | 20 | def forward(self, x): 21 | x = self.conv(x) 22 | return x 23 | 24 | # --1.2.2 25 | class double_conv(nn.Module): 26 | def __init__(self, in_ch, out_ch, normaliz=False): 27 | super(double_conv, self).__init__() 28 | 29 | ops = [] 30 | ops += [nn.Conv2d(in_ch, out_ch, 3, padding=1)] 31 | # ops += [nn.Dropout(p=0.1)] 32 | if normaliz: 33 | ops += [nn.BatchNorm2d(out_ch)] 34 | ops += [nn.ReLU(inplace=True)] 35 | ops += [nn.Conv2d(out_ch, out_ch, 3, padding=1)] 36 | # ops += [nn.Dropout(p=0.1)] 37 | if normaliz: 38 | ops += [nn.BatchNorm2d(out_ch)] 39 | ops += [nn.ReLU(inplace=True)] 40 | 41 | self.conv = nn.Sequential(*ops) 42 | 43 | def forward(self, x): 44 | x = self.conv(x) 45 | return x 46 | 47 | # --1.2.3 48 | class three_conv(nn.Module): 49 | def __init__(self, in_ch, out_ch, normaliz=False): 50 | super(three_conv, self).__init__() 51 | 52 | ops = [] 53 | ops += [nn.Conv2d(in_ch, out_ch, 3, padding=1)] 54 | # ops += [nn.Dropout(p=0.1)] 55 | if normaliz: 56 | ops += [nn.BatchNorm2d(out_ch)] 57 | ops += [nn.ReLU(inplace=True)] 58 | 59 | ops += [nn.Conv2d(out_ch, out_ch, 3, padding=1)] 60 | # ops += [nn.Dropout(p=0.1)] 61 | if normaliz: 62 | ops += [nn.BatchNorm2d(out_ch)] 63 | ops += [nn.ReLU(inplace=True)] 64 | 65 | ops += [nn.Conv2d(out_ch, out_ch, 3, padding=1)] 66 | # ops += [nn.Dropout(p=0.1)] 67 | if normaliz: 68 | ops += [nn.BatchNorm2d(out_ch)] 69 | ops += [nn.ReLU(inplace=True)] 70 | 71 | self.conv = nn.Sequential(*ops) 72 | 73 | def forward(self, x): 74 | x = self.conv(x) 75 | return x 76 | 77 | 78 | class resconv2(nn.Module): 79 | def __init__(self,in_ch,out_ch,ksize=3,kstride=1,kpad=1): 80 | super(resconv2,self).__init__() 81 | self.conv1 = nn.Conv2d(in_ch,out_ch,ksize,stride=kstride,padding=kpad) 82 | self.conv2 = nn.Conv2d(out_ch,out_ch,ksize,stride=kstride,padding=kpad) 83 | if in_ch != out_ch: 84 | self.red = nn.Conv2d(in_ch,out_ch,(1,1),stride=1,padding=0) 85 | else: 86 | self.red = None 87 | 88 | def forward(self,x): 89 | rx = self.conv1(x) 90 | rx = F.relu(rx) 91 | rx= self.conv2(rx) 92 | rx = F.relu(rx) 93 | 94 | if self.red!=None: 95 | x = self.red(x)+rx 96 | else: 97 | x = x + rx 98 | return rx 99 | 100 | class up_res(nn.Module): 101 | def __init__(self, up_in_ch, up_out_ch,cat_in_ch, cat_out_ch,if_convt=False): 102 | super(up_res, self).__init__() 103 | self.if_convt = if_convt 104 | if self.if_convt: 105 | self.up = nn.ConvTranspose2d(up_in_ch,up_out_ch, 2, stride=2) 106 | else: 107 | self.up = nn.Upsample(scale_factor=2, 108 | mode='bilinear', 109 | align_corners=False) 110 | 111 | self.conv1 = nn.Conv2d(up_in_ch,up_out_ch,(3,3)) 112 | 113 | self.conv2 = resconv2(cat_in_ch,cat_out_ch) 114 | 115 | def forward(self, x1, x2): 116 | 117 | if self.if_convt: 118 | x1 = self.up(x1) 119 | else: 120 | x1 = self.up(x1) 121 | x1 = self.conv1(x1) 122 | 123 | diffY = x2.size()[2] - x1.size()[2] 124 | diffX = x2.size()[3] - x1.size()[3] 125 | #pad to make up for the loss when downsampling 126 | x1 = F.pad(x1, (diffX // 2, int(math.ceil(diffX / 2.0)), 127 | diffY // 2, int(math.ceil(diffY / 2.0))))#3//2=1,3/2=1.5 128 | x = torch.cat([x2, x1], dim=1) 129 | del x2,x1 130 | x = self.conv2(x) 131 | return x 132 | 133 | 134 | # --1.3.1 135 | class up(nn.Module): 136 | def __init__(self, up_in_ch, up_out_ch,cat_in_ch, cat_out_ch,if_convt=False): 137 | super(up, self).__init__() 138 | self.if_convt = if_convt 139 | if self.if_convt: 140 | self.up = nn.ConvTranspose2d(up_in_ch,up_out_ch, 2, stride=2) 141 | else: 142 | self.up = 
nn.Upsample(scale_factor=2, 143 | mode='bilinear', 144 | align_corners=False) 145 | self.conv1 = one_conv(up_in_ch,up_out_ch) 146 | 147 | self.conv2 = double_conv(cat_in_ch, cat_out_ch) 148 | 149 | def forward(self, x1, x2): 150 | 151 | if self.if_convt: 152 | x1 = self.up(x1) 153 | else: 154 | x1 = self.up(x1) 155 | x1 = self.conv1(x1) 156 | 157 | diffY = x2.size()[2] - x1.size()[2] 158 | diffX = x2.size()[3] - x1.size()[3] 159 | #pad to make up for the loss when downsampling 160 | x1 = F.pad(x1, (diffX // 2, int(math.ceil(diffX / 2.0)), 161 | diffY // 2, int(math.ceil(diffY / 2.0))))#3//2=1,3/2=1.5 162 | x = torch.cat([x2, x1], dim=1) 163 | del x2,x1 164 | x = self.conv2(x) 165 | return x 166 | 167 | # --1.3.2 168 | class upcat(nn.Module): 169 | def __init__(self, up_in_ch, up_out_ch,if_convt=False): 170 | super(upcat, self).__init__() 171 | self.if_convt = if_convt 172 | if self.if_convt: 173 | self.up = nn.ConvTranspose2d(up_in_ch, up_out_ch, 2, stride=2) 174 | else: 175 | self.up = nn.Upsample(scale_factor=2, 176 | mode='bilinear', 177 | align_corners=False) 178 | self.conv1 = one_conv(up_in_ch,up_out_ch) 179 | 180 | def forward(self, x1, x2): 181 | 182 | if self.if_convt: 183 | x1 = self.up(x1) 184 | else: 185 | x1 = self.up(x1) 186 | x1 = self.conv1(x1) 187 | 188 | diffY = x2.size()[2] - x1.size()[2] 189 | diffX = x2.size()[3] - x1.size()[3] 190 | #pad to make up for the loss when downsampling 191 | x1 = F.pad(x1, (diffX // 2, int(math.ceil(diffX / 2.0)), 192 | diffY // 2, int(math.ceil(diffY / 2.0))))#3//2=1,3/2=1.5 193 | x = torch.cat([x2, x1], dim=1) 194 | del x2,x1 195 | 196 | return x 197 | 198 | # --1.4 199 | def change_padding(net,del_or_add='del',pad_size=(1,1)): 200 | for m in net.modules(): 201 | if isinstance(m,nn.Conv2d): 202 | m.padding = (0,0) if del_or_add =='del' else pad_size 203 | 204 | return net 205 | 206 | # --1.5 can only compute linear 207 | def compute_rf(net): 208 | rf_size,rf_pad,rf_stride = 1,0,1 209 | for m in net.modules(): 210 | if isinstance(m,(nn.Conv2d,nn.MaxPool2d)): 211 | tmp_kernel_size = m.kernel_size[0] if isinstance(m.kernel_size,(tuple,list)) else m.kernel_size 212 | tmp_padding = m.padding[0] if isinstance(m.padding,(tuple,list)) else m.padding 213 | tmp_stride = m.stride[0] if isinstance(m.stride,(tuple,list)) else m.stride 214 | 215 | # rf_pad relates with the last layer's rf_stride 216 | rf_pad += tmp_padding*rf_stride 217 | # rf_size relates with the last layers's rf_stride 218 | rf_size += (tmp_kernel_size-1)*rf_stride 219 | rf_stride *= tmp_stride 220 | 221 | return {'rf_size':rf_size,'rf_pad':rf_pad,'rf_stride':rf_stride} -------------------------------------------------------------------------------- /utils/Network/class_func.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | import numpy as np 7 | 8 | # Func1: change density map into count map 9 | # density map: batch size * 1 * w * h 10 | def get_local_count(density_map,psize,pstride): 11 | IF_gpu = torch.cuda.is_available() # if gpu, return gpu 12 | IF_ret_gpu = (density_map.device.type == 'cuda') 13 | psize,pstride = int(psize),int(pstride) 14 | density_map = density_map.cpu().type(torch.float32) 15 | conv_kernel = torch.ones(1,1,psize,psize,dtype = torch.float32) 16 | if IF_gpu: 17 | density_map,conv_kernel = density_map.cuda(),conv_kernel.cuda() 18 | 19 | count_map = F.conv2d(density_map,conv_kernel,stride=pstride) 20 | if not 
IF_ret_gpu: 21 | count_map = count_map.cpu() 22 | 23 | return count_map 24 | 25 | 26 | # Func2: convert count to class (0->c-1) 27 | def Count2Class(count_map,label_indice): 28 | if isinstance(label_indice,np.ndarray): 29 | label_indice = torch.from_numpy(label_indice) 30 | IF_gpu = torch.cuda.is_available() 31 | IF_ret_gpu = (count_map.device.type == 'cuda') 32 | label_indice = label_indice.cpu().type(torch.float32) 33 | cls_num = len(label_indice)+1 34 | cls_map = torch.zeros(count_map.size()).type(torch.LongTensor) 35 | if IF_gpu: 36 | count_map,label_indice,cls_map = count_map.cuda(),label_indice.cuda(),cls_map.cuda() 37 | 38 | for i in range(cls_num-1): 39 | if IF_gpu: 40 | cls_map = cls_map + (count_map >= label_indice[i]).cpu().type(torch.LongTensor).cuda() 41 | else: 42 | cls_map = cls_map + (count_map >= label_indice[i]).cpu().type(torch.LongTensor) 43 | if not IF_ret_gpu: 44 | cls_map = cls_map.cpu() 45 | return cls_map 46 | 47 | 48 | # Func3: convert class (0->c-1) to count number 49 | def Class2Count(pre_cls,label_indice): 50 | ''' 51 | # --Input: 52 | # 1.pre_cls is class label range in [0,1,2,...,C-1] 53 | # 2.label_indice not include 0 but the other points 54 | # --Output: 55 | # 1.count value, the same size as pre_cls 56 | ''' 57 | if isinstance(label_indice,np.ndarray): 58 | label_indice = torch.from_numpy(label_indice) 59 | label_indice = label_indice.squeeze() 60 | IF_gpu = torch.cuda.is_available() 61 | IF_ret_gpu = (pre_cls.device.type == 'cuda') 62 | 63 | # tranform interval to count value map 64 | label2count = [0.0] 65 | for (i,item) in enumerate(label_indice): 66 | if i