├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── custom_parsers └── nvds_customparser_centerface │ ├── Makefile │ ├── README │ └── nvdsinfer_custombboxparser_centerface.cpp ├── ds-centerface ├── models ├── Centerface │ ├── centerface_config.txt │ └── labels.txt └── Trackers │ ├── DCF │ ├── ds_tracker_config.txt │ └── tracker_config.yml │ └── KLT │ └── ds_tracker_config.txt ├── resources └── centerface.gif └── src ├── deepstream_centerface_app.cpp └── deepstream_centerface_app.hpp /.gitignore: -------------------------------------------------------------------------------- 1 | ds-centerface 2 | 3 | # Prerequisites 4 | *.d 5 | 6 | # Compiled Object files 7 | *.slo 8 | *.lo 9 | *.o 10 | *.obj 11 | 12 | # Precompiled Headers 13 | *.gch 14 | *.pch 15 | 16 | # Compiled Dynamic libraries 17 | *.so 18 | *.dylib 19 | *.dll 20 | 21 | # Fortran module files 22 | *.mod 23 | *.smod 24 | 25 | # Compiled Static libraries 26 | *.lai 27 | *.la 28 | *.a 29 | *.lib 30 | 31 | # Executables 32 | *.exe 33 | *.out 34 | *.app 35 | 36 | # Models 37 | *.onnx 38 | *.engine 39 | 40 | # Input source file 41 | inputsources.txt 42 | ds-centernet 43 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Akash James 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | APP:= ds-centerface 2 | 3 | CXX:= g++ -std=c++17 4 | 5 | TARGET_DEVICE = $(shell g++ -dumpmachine | cut -f1 -d -) 6 | 7 | NVDS_VERSION:=5.1 8 | 9 | LIB_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib/ 10 | 11 | ifeq ($(TARGET_DEVICE),aarch64) 12 | CFLAGS:= -DPLATFORM_TEGRA 13 | endif 14 | 15 | SRCS+= $(wildcard src/*.cpp) 16 | 17 | INCS:= $(wildcard src/*.hpp) 18 | 19 | PKGS:= gstreamer-1.0 opencv4 20 | 21 | OBJS:= $(SRCS:.cpp=.o) 22 | 23 | CFLAGS+= -I/opt/nvidia/deepstream/deepstream-5.1/sources/includes \ 24 | -DDS_VERSION_MINOR=1 -DDS_VERSION_MAJOR=5 25 | 26 | CFLAGS+= `pkg-config --cflags $(PKGS)` 27 | 28 | LIBS:= `pkg-config --libs $(PKGS)` 29 | 30 | LIBS+= -L$(LIB_INSTALL_DIR) -L/usr/local/cuda/lib64 -lcudart \ 31 | -lnvdsgst_meta -lnvds_meta -lnvdsgst_helper -lm -lrt \ 32 | -Wl,-rpath,$(LIB_INSTALL_DIR) 33 | 34 | LIBS+= -pthread -O3 -Ofast 35 | 36 | LIBS+= -lcurl -lgnutls -luuid -lnvbufsurface -lnvbufsurftransform 37 | 38 | LIBS+= -lboost_system -lz -lssl -lboost_program_options \ 39 | -lboost_filesystem -lboost_date_time -lboost_context -lboost_coroutine -lboost_chrono \ 40 | -lboost_log -lboost_thread -lboost_log_setup -lboost_regex -lboost_atomic 41 | 42 | all: app objdets 43 | 44 | objdets: centerface 45 | app: $(APP) 46 | 47 | %.o: %.cpp $(INCS) Makefile 48 | $(CXX) -c -o $@ $(CFLAGS) $< 49 | 50 | $(APP): $(OBJS) Makefile 51 | $(CXX) -o $(APP) $(OBJS) $(LIBS) 52 | 53 | centerface: 54 | cd custom_parsers/nvds_customparser_centerface && $(MAKE) 55 | 56 | clean: 57 | rm -rf $(OBJS) $(APP) 58 | cd custom_parsers/nvds_customparser_centerface && $(MAKE) clean -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deepstream 5.1 Centerface App 2 | 3 | This Deepstream application showcases Centerface running at high FPS throughput! 4 | 5 | [![FPS](resources/centerface.gif)](https://www.linkedin.com/posts/akashjames_facedetection-computervision-gpu-activity-6803721379087818752-yYz4) 6 | 7 | P.S - Click the gif to watch the entire video! 8 | 9 | ## Index 10 | 11 | 1. [Deepstream Setup](#Deepstream-Setup) 12 | 1. [Install System Dependencies](#Install-System-Dependencies) 13 | 2. [Install Deepstream](#Install-Deepstream) 14 | 2. [Running the Application](#Running-the-Application) 15 | 1. [Clone the repository](#Cloning-the-repository) 16 | 2. [Download the weights file](#download-the-weights-file) 17 | 3. [Build the application](#build-the-application) 18 | 4. [Run with different input sources](#Run-with-different-input-sources) 19 | 3. [Citations](#citations) 20 | 21 | ## Deepstream Setup 22 | 23 | This post assumes you have a fully functional Jetson device. If not, you can refer the documentation [here](https://docs.nvidia.com/jetson/jetpack/install-jetpack/index.html). 24 | 25 | ### 1. Install System Dependencies 26 | 27 | ```sh 28 | sudo apt install \ 29 | libssl1.0.0 \ 30 | libgstreamer1.0-0 \ 31 | gstreamer1.0-tools \ 32 | gstreamer1.0-plugins-good \ 33 | gstreamer1.0-plugins-bad \ 34 | gstreamer1.0-plugins-ugly \ 35 | gstreamer1.0-libav \ 36 | libgstrtspserver-1.0-0 \ 37 | libjansson4=2.11-1 38 | ``` 39 | 40 | ### 2. 
Install Deepstream 41 | 42 | Download the DeepStream 5.1 Jetson Debian package `deepstream-5.1_5.1.0-1_arm64.deb` to the Jetson device from [here](https://developer.nvidia.com/deepstream-getting-started). Then enter the command: 43 | 44 | ```sh 45 | sudo apt install ./deepstream-5.1_5.1.0-1_arm64.deb 46 | ``` 47 | 48 | For more information, see the DeepStream Getting Started page [here](https://docs.nvidia.com/metropolis/deepstream/dev-guide/index.html). 49 | 50 | ## Running the Application 51 | 52 | ### 1. Clone the repository 53 | 54 | This is a straightforward step; however, if you are new to git, I recommend glancing through the steps. 55 | 56 | First, install git: 57 | 58 | ```sh 59 | sudo apt install git 60 | ``` 61 | 62 | Next, clone the repository: 63 | 64 | ```sh 65 | # Using HTTPS 66 | git clone https://github.com/aj-ames/Centerface-Deepstream.git 67 | # Using SSH 68 | git clone git@github.com:aj-ames/Centerface-Deepstream.git 69 | ``` 70 | 71 | ### 2. Download the weights file 72 | 73 | Download the weights file from Google Drive and place it in the `models/Centerface` directory. 74 | 75 | * [Centerface 1280x736](https://drive.google.com/file/d/1n5o_uJlinJ6FNv9r8FKSo2MpA9B1UNpG/view?usp=sharing) 76 | * [Centerface 960x544](https://drive.google.com/file/d/1znEKZV4-XXSgCP6_7qYIeFDHjAslo1sa/view?usp=sharing) 77 | 78 | ### 3. Build the application 79 | 80 | Build the application by running the following command: 81 | 82 | ```sh 83 | make clean && make -j$(nproc) 84 | ``` 85 | 86 | This will generate the binary called `ds-centerface`. This is a one-time step; you need to rebuild only when you make source-code changes. 87 | 88 | ### 4. Run with different input sources 89 | 90 | Next, create a file called `inputsources.txt` and paste the paths of video files or RTSP URLs, one per line. 91 | 92 | ```sh 93 | file:///home/astr1x/Videos/sample.mp4 94 | rtsp://admin:admin%40123@192.168.1.1:554/stream 95 | ``` 96 | 97 | Now, run the application with the following command: 98 | 99 | ```sh 100 | ./ds-centerface 101 | ``` 102 | 103 | ## Citations 104 | 105 | * [AlexeyAB/darknet](https://github.com/AlexeyAB/darknet) -------------------------------------------------------------------------------- /custom_parsers/nvds_customparser_centerface/Makefile: -------------------------------------------------------------------------------- 1 | CXX:= g++ -std=c++17 2 | 3 | CFLAGS:= -Wall -O3 -shared -fPIC 4 | 5 | CFLAGS+= -I/opt/nvidia/deepstream/deepstream-5.1/sources/includes \ 6 | -I/usr/local/cuda/include 7 | 8 | LIBS:= -lnvinfer -lnvparsers 9 | 10 | LFLAGS:= -Wl,--start-group $(LIBS) -Wl,--end-group 11 | 12 | SRCFILES:= nvdsinfer_custombboxparser_centerface.cpp 13 | TARGET_LIB:= libnvds_infercustomparser_centerface.so 14 | 15 | all: $(TARGET_LIB) 16 | 17 | $(TARGET_LIB) : $(SRCFILES) 18 | $(CXX) -o $@ $^ $(CFLAGS) $(LFLAGS) 19 | 20 | clean: 21 | rm -rf $(TARGET_LIB) 22 | -------------------------------------------------------------------------------- /custom_parsers/nvds_customparser_centerface/README: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # NVIDIA Corporation and its licensors retain all intellectual property 5 | # and proprietary rights in and to this software, related documentation 6 | # and any modifications thereto.
Any use, reproduction, disclosure or 7 | # distribution of this software and related documentation without an express 8 | # license agreement from NVIDIA Corporation is strictly prohibited. 9 | # 10 | ################################################################################ 11 | 12 | Refer to the DeepStream SDK documentation for a description of the library. 13 | 14 | -------------------------------------------------------------------------------- 15 | Pre-requisites: 16 | - TensorRT 5.0 17 | 18 | -------------------------------------------------------------------------------- 19 | Compile the library using: 20 | make 21 | 22 | -------------------------------------------------------------------------------- 23 | This source has been written to parse the output layers of the resnet10 detector 24 | and the resnet18 vehicle type classifier model provided with the SDK. To use this 25 | library for bounding box / classifier output parsing instead of the inbuilt 26 | parsing function, modify the following parameters in [property] section of 27 | primary/secondary vehicle type infer configuration file (config_infer_primary.txt/ 28 | config_infer_secondary_vehicletypes.txt) provided with the SDK: 29 | 30 | # For resnet10 detector 31 | parse-bbox-func-name=NvDsInferParseCustomResnet 32 | custom-lib-path=/path/to/this/directory/libnvds_infercustomparser.so 33 | 34 | # For resnet18 vehicle type classifier 35 | parse-classifier-func-name=NvDsInferClassiferParseCustomSoftmax 36 | custom-lib-path=/path/to/this/directory/libnvds_infercustomparser.so 37 | -------------------------------------------------------------------------------- /custom_parsers/nvds_customparser_centerface/nvdsinfer_custombboxparser_centerface.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "nvdsinfer_custom_impl.h" 9 | 10 | float HEIGHT = 544.0; 11 | float WIDTH = 960.0; 12 | 13 | float PROB_THRESHOLD = 0.2; 14 | float NMS_THRESHOLD = 0.45; 15 | 16 | typedef struct { 17 | float score; 18 | float x1; 19 | float x2; 20 | float y1; 21 | float y2; 22 | float landmarks[10]; 23 | } Faces; 24 | 25 | std::vector>> 26 | reshape(float *buffer, int channels, int height, int width) { 27 | std::vector>> reshaped; 28 | for (int c = 0; c < channels; c++) { 29 | std::vector> channel_vector; 30 | for (int h = 0; h < height; h++) { 31 | std::vector width_vector; 32 | for(int w = 0; w < width; w++) { 33 | width_vector.emplace_back(buffer[(c * height + h) * width + w]); 34 | } 35 | channel_vector.emplace_back(width_vector); 36 | } 37 | reshaped.emplace_back(channel_vector); 38 | } 39 | return reshaped; 40 | } 41 | 42 | std::vector> 43 | getFilteredCoordinates(std::vector>heatmap, 44 | int hHeight, int hWidth, float probthreshold) { 45 | std::vector> filteredHeatmap; 46 | for(int h = 0; h < hHeight; h++) { 47 | for(int w = 0; w < hWidth; w++) { 48 | if (heatmap[h][w] >= probthreshold) { 49 | std::arraycoords = {h, w}; 50 | filteredHeatmap.emplace_back(coords); 51 | } 52 | } 53 | } 54 | return filteredHeatmap; 55 | } 56 | 57 | std::vector 58 | nms (std::vector& input, float nmsthreshold) { 59 | std::vector faceData; 60 | std::sort(input.begin(), input.end(), 61 | [](const Faces&a, const Faces& b) 62 | { 63 | return a.score > b.score; 64 | }); 65 | 66 | int box_num = input.size(); 67 | std::vector merged(box_num, 0); 68 | 69 | for(int i = 0; i < box_num; i++) { 70 | if(merged[i]) { 71 | continue; 72 | } 73 | 74 | 
faceData.emplace_back(input[i]); 75 | 76 | float h0 = input[i].y2 - input[i].y1 + 1; 77 | float w0 = input[i].x2 - input[i].x1 + 1; 78 | 79 | float area0 = h0 * w0; 80 | 81 | for(int j = i + 1; j < box_num; j++) { 82 | if(merged[j]) { 83 | continue; 84 | } 85 | 86 | float inner_x0 = std::max(input[i].x1, input[j].x1); 87 | float inner_y0 = std::max(input[i].y1, input[j].y1); 88 | 89 | float inner_x1 = std::min(input[i].x2, input[j].x2); 90 | float inner_y1 = std::min(input[i].y2, input[j].y2); 91 | 92 | float inner_h = inner_y1 - inner_y0 + 1; 93 | float inner_w = inner_x1 - inner_x0 + 1; 94 | 95 | if (inner_h <= 0 || inner_w <= 0) 96 | continue; 97 | 98 | float inner_area = inner_h * inner_w; 99 | 100 | float h1 = input[j].y2 - input[j].y1 + 1; 101 | float w1 = input[j].x2 - input[j].x1 + 1; 102 | 103 | float area1 = h1 * w1; 104 | 105 | float score; 106 | 107 | score = inner_area / (area0 + area1 - inner_area); 108 | 109 | if (score >= nmsthreshold) 110 | merged[j] = true; 111 | } 112 | } 113 | return faceData; 114 | } 115 | 116 | void 117 | squareBox(std::vector& faces) { 118 | float w=0, h=0, maxSize=0; 119 | float cenx, ceny; 120 | for (unsigned int i = 0; i < faces.size(); i++) { 121 | w = faces[i].x2 - faces[i].x1; 122 | h = faces[i].y2 - faces[i].y1; 123 | 124 | maxSize = std::max(w, h); 125 | cenx = faces[i].x1 + w / 2; 126 | ceny = faces[i].y1 + h / 2; 127 | 128 | faces[i].x1 = std::max(cenx - maxSize / 2, 0.f); 129 | faces[i].y1 = std::max(ceny - maxSize/ 2, 0.f); 130 | faces[i].x2 = std::min(cenx + maxSize / 2, WIDTH - 1.f); 131 | faces[i].y2 = std::min(ceny + maxSize / 2, HEIGHT - 1.f); 132 | } 133 | } 134 | 135 | /* C-linkage to prevent name-mangling */ 136 | extern "C" 137 | bool NvDsInferParseCustomCenterFace (std::vector const &outputLayersInfo, 138 | NvDsInferNetworkInfo const &networkInfo, 139 | NvDsInferParseDetectionParams const &detectionParams, 140 | std::vector &objectList); 141 | 142 | extern "C" 143 | bool NvDsInferParseCustomCenterFace (std::vector const &outputLayersInfo, 144 | NvDsInferNetworkInfo const &networkInfo, 145 | NvDsInferParseDetectionParams const &detectionParams, 146 | std::vector &objectList) { 147 | static NvDsInferDimsCHW HeatmapDims; 148 | static NvDsInferDimsCHW ScaleDims; 149 | static NvDsInferDimsCHW OffsetDims; 150 | static NvDsInferDimsCHW LandmarksDims; 151 | 152 | static int HeatmapLayerIndex = -1; 153 | static int ScaleLayerIndex = -1; 154 | static int OffsetLayerIndex = -1; 155 | static int LandmarksLayerIndex = -1; 156 | static bool classMismatchWarn = false; 157 | int numClassesToParse; 158 | 159 | // Find Heatmap Layer 160 | if (HeatmapLayerIndex == -1) { 161 | for (unsigned int i = 0; i < outputLayersInfo.size(); i++) { 162 | if (strcmp(outputLayersInfo[i].layerName, "537") == 0) { 163 | HeatmapLayerIndex = i; 164 | getDimsCHWFromDims(HeatmapDims, outputLayersInfo[i].dims); 165 | break; 166 | } 167 | } 168 | if (HeatmapLayerIndex == -1) { 169 | std::cerr << "Could not find heatmap layer buffer while parsing" << std::endl; 170 | return false; 171 | } 172 | } 173 | 174 | // Find Scale Layer 175 | if (ScaleLayerIndex == -1) { 176 | for (unsigned int i = 0; i < outputLayersInfo.size(); i++) { 177 | if (strcmp(outputLayersInfo[i].layerName, "538") == 0) { 178 | ScaleLayerIndex = i; 179 | getDimsCHWFromDims(ScaleDims, outputLayersInfo[i].dims); 180 | break; 181 | } 182 | } 183 | if (ScaleLayerIndex == -1) { 184 | std::cerr << "Could not find scale layer buffer while parsing" << std::endl; 185 | return false; 186 | } 187 | } 188 | 189 | 
// Find Offset Layer 190 | if (OffsetLayerIndex == -1) { 191 | for (unsigned int i = 0; i < outputLayersInfo.size(); i++) { 192 | if (strcmp(outputLayersInfo[i].layerName, "539") == 0) { 193 | OffsetLayerIndex = i; 194 | getDimsCHWFromDims(OffsetDims, outputLayersInfo[i].dims); 195 | break; 196 | } 197 | } 198 | if (OffsetLayerIndex == -1) { 199 | std::cerr << "Could not find offset layer buffer while parsing" << std::endl; 200 | return false; 201 | } 202 | } 203 | 204 | // Find Landmarks Layer 205 | if (LandmarksLayerIndex == -1) { 206 | for (unsigned int i = 0; i < outputLayersInfo.size(); i++) { 207 | if (strcmp(outputLayersInfo[i].layerName, "540") == 0) { 208 | LandmarksLayerIndex = i; 209 | getDimsCHWFromDims(LandmarksDims, outputLayersInfo[i].dims); 210 | break; 211 | } 212 | } 213 | if (LandmarksLayerIndex == -1) { 214 | std::cerr << "Could not find landmarks layer buffer while parsing" << std::endl; 215 | return false; 216 | } 217 | } 218 | 219 | /* Warn in case of mismatch in number of classes */ 220 | if (!classMismatchWarn) { 221 | if (HeatmapDims.c != detectionParams.numClassesConfigured) { 222 | std::cerr << "WARNING: Num classes mismatch. Configured:" << 223 | detectionParams.numClassesConfigured << ", detected by network: " << 224 | HeatmapDims.c << std::endl; 225 | } 226 | classMismatchWarn = true; 227 | } 228 | 229 | // Calculate the number of classes to parse 230 | numClassesToParse = std::min(HeatmapDims.c, detectionParams.numClassesConfigured); 231 | // Access all buffers 232 | float *outputHeatmapBuf = (float *) outputLayersInfo[HeatmapLayerIndex].buffer; 233 | int heatmapC = HeatmapDims.c; 234 | int heatmapH = HeatmapDims.h; 235 | int heatmapW = HeatmapDims.w; 236 | 237 | std::vector>> reshapedHeatmapBuf = 238 | reshape(outputHeatmapBuf, heatmapC, heatmapH, heatmapW); 239 | 240 | float *outputScaleBuf = (float *) outputLayersInfo[ScaleLayerIndex].buffer; 241 | int scaleC = ScaleDims.c; 242 | int scaleH = ScaleDims.h; 243 | int scaleW = ScaleDims.w; 244 | std::vector>> reshapedScaleBuf = 245 | reshape(outputScaleBuf, scaleC, scaleH, scaleW); 246 | 247 | float *outputOffsetBuf = (float *) outputLayersInfo[OffsetLayerIndex].buffer; 248 | int offsetC = OffsetDims.c; 249 | int offsetH = OffsetDims.h; 250 | int offsetW = OffsetDims.w; 251 | std::vector>> reshapedOffsetBuf = 252 | reshape(outputOffsetBuf, offsetC, offsetH, offsetW); 253 | 254 | float *outputLandmarksBuf = (float *) outputLayersInfo[LandmarksLayerIndex].buffer; 255 | int landmarksC = LandmarksDims.c; 256 | int landmarksH = LandmarksDims.h; 257 | int landmarksW = LandmarksDims.w; 258 | std::vector>> reshapedLandmarksBuf = 259 | reshape(outputLandmarksBuf, landmarksC, landmarksH, landmarksW); 260 | 261 | // Squeeze Heatmap Buffer 262 | std::vector> squeezedHeatmap = 263 | reshapedHeatmapBuf[0]; 264 | 265 | // Divide and squeeze Scales Buffer 266 | std::vector>> split_scale0( 267 | reshapedScaleBuf.begin(), 268 | reshapedScaleBuf.begin() + reshapedScaleBuf.size() / 2); 269 | std::vector>> split_scale1( 270 | reshapedScaleBuf.begin() + reshapedScaleBuf.size() / 2, 271 | reshapedScaleBuf.end()); 272 | 273 | std::vector> scale0 = split_scale0[0]; 274 | std::vector> scale1 = split_scale1[0]; 275 | 276 | // Divide and squeeze Offset Buffer 277 | std::vector>> split_offset0( 278 | reshapedOffsetBuf.begin(), 279 | reshapedOffsetBuf.begin() + reshapedOffsetBuf.size() / 2); 280 | std::vector>> split_offset1( 281 | reshapedOffsetBuf.begin() + reshapedOffsetBuf.size() / 2, 282 | reshapedOffsetBuf.end()); 283 | 
std::vector> offset0 = split_offset0[0]; 284 | std::vector> offset1 = split_offset1[0]; 285 | 286 | // Get coordinates above threshold 287 | std::vector> filteredHeatmapCoords = 288 | getFilteredCoordinates(squeezedHeatmap, heatmapH, heatmapW, PROB_THRESHOLD); 289 | 290 | if(filteredHeatmapCoords.size() <= 0) { 291 | return true; 292 | } 293 | 294 | // Generate coordinates and landmarks 295 | std::vector rawFaces; 296 | for(unsigned int i = 0; i < filteredHeatmapCoords.size(); i++) { 297 | std::array coords = filteredHeatmapCoords[i]; 298 | float s0 = std::exp(scale0[coords[0]][coords[1]]) * 4; 299 | float s1 = std::exp(scale1[coords[0]][coords[1]]) * 4; 300 | float o0 = offset0[coords[0]][coords[1]]; 301 | float o1 = offset1[coords[0]][coords[1]]; 302 | float score = squeezedHeatmap[coords[0]][coords[1]]; 303 | 304 | float x1 = 0.0, y1 = 0.0, 305 | x2 = 0.0, y2 = 0.0; 306 | x1 = std::max(0.0, (coords[1] + o1 + 0.5) * 4 - s1 / 2); 307 | y1 = std::max(0.0,(coords[0] + o0 + 0.5) * 4 - s0 / 2); 308 | x1 = std::min(x1, WIDTH); 309 | y1 = std::min(y1, HEIGHT); 310 | x2 = std::min(x1 + s1, WIDTH); 311 | y2 = std::min(y1 + s0, HEIGHT); 312 | 313 | Faces rawFace; 314 | rawFace.score = score; 315 | rawFace.x1 = x1; 316 | rawFace.y1 = y1; 317 | rawFace.x2 = x2; 318 | rawFace.y2 = y2; 319 | 320 | for(int j = 0; j < 5; j++) { 321 | rawFace.landmarks[2*j] = 322 | reshapedLandmarksBuf[j * 2 + 1][coords[0]][coords[1]] * s1 + x1; 323 | rawFace.landmarks[2*j+1] = 324 | reshapedLandmarksBuf[j * 2][coords[0]][coords[1]] * s0 + y1; 325 | } 326 | rawFaces.emplace_back(rawFace); 327 | } 328 | 329 | // NMS 330 | std::vector faceData = 331 | nms(rawFaces, NMS_THRESHOLD); 332 | 333 | // Square Boxes 334 | // squareBox(faceData); 335 | 336 | // Pass values to DS 337 | for (int c = 0; c < numClassesToParse; c++) { 338 | for(unsigned int i = 0; i < faceData.size(); i++) { 339 | NvDsInferObjectDetectionInfo object; 340 | int x = 0, y = 0, w = 0, h = 0; 341 | x = faceData[i].x1; 342 | y = faceData[i].y1; 343 | w = faceData[i].x2 - faceData[i].x1; 344 | h = faceData[i].y2 - faceData[i].y1; 345 | 346 | object.classId = c; 347 | object.detectionConfidence = faceData[i].score; 348 | object.left = x; 349 | object.top = y; 350 | object.width = w; 351 | object.height = h; 352 | objectList.emplace_back(object); 353 | } 354 | } 355 | return true; 356 | } 357 | 358 | /* Check that the custom function has been defined correctly */ 359 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomCenterFace); 360 | -------------------------------------------------------------------------------- /ds-centerface: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kingardor/Centerface-Deepstream/117e94ba4e7e87d72d0deb97b0b717997338fbfb/ds-centerface -------------------------------------------------------------------------------- /models/Centerface/centerface_config.txt: -------------------------------------------------------------------------------- 1 | # Following properties are mandatory when engine files are not specified: 2 | # int8-calib-file(Only in INT8) 3 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 4 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 5 | # ONNX: onnx-file 6 | # 7 | # Mandatory properties for detectors: 8 | # parse-func, num-detected-classes, 9 | # custom-lib-path (when parse-func=0 i.e. 
custom), 10 | # parse-bbox-func-name (when parse-func=0) 11 | # 12 | # Optional properties for detectors: 13 | # enable-dbscan(Default=false), interval(Primary mode only, Default=0) 14 | # 15 | # Mandatory properties for classifiers: 16 | # classifier-threshold, is-classifier 17 | # 18 | # Optional properties for classifiers: 19 | # classifier-async-mode(Secondary mode only, Default=false) 20 | # 21 | # Optional properties in secondary mode: 22 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 23 | # input-object-min-width, input-object-min-height, input-object-max-width, 24 | # input-object-max-height 25 | # 26 | # Following properties are always recommended: 27 | # batch-size(Default=1) 28 | # 29 | # Other optional properties: 30 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 31 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 32 | # mean-file, gie-unique-id(Default=0), offsets, gie-mode (Default=1 i.e. primary), 33 | # custom-lib-path, network-mode(Default=0 i.e FP32) 34 | # 35 | # The values in the config file are overridden by values set through GObject 36 | # properties. 37 | 38 | 39 | [property] 40 | gpu-id=0 41 | net-scale-factor=1.0 42 | model-color-format=0 43 | batch-size=1 44 | gie-unique-id=1 45 | onnx-file=centerface_544x960.onnx 46 | labelfile-path=labels.txt 47 | output-blob-names=537;538;539;540 48 | 49 | process-mode=1 50 | ## 0=FP32, 1=INT8, 2=FP16 mode 51 | network-mode=2 52 | num-detected-classes=1 53 | interval=0 54 | maintain-aspect-ratio=0 55 | parse-bbox-func-name=NvDsInferParseCustomCenterFace 56 | custom-lib-path=../../custom_parsers/nvds_customparser_centerface/libnvds_infercustomparser_centerface.so 57 | 58 | [class-attrs-all] 59 | pre-cluster-threshold=0.3 60 | -------------------------------------------------------------------------------- /models/Centerface/labels.txt: -------------------------------------------------------------------------------- 1 | Face 2 | -------------------------------------------------------------------------------- /models/Trackers/DCF/ds_tracker_config.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | # Mandatory properties for the tracker: 24 | # tracker-width: needs to be multiple of 32 for NvDCF 25 | # tracker-height: needs to be multiple of 32 for NvDCF 26 | # gpu-id 27 | # ll-lib-file: path to low-level tracker lib 28 | # ll-config-file: required for NvDCF, optional for KLT and IOU 29 | # 30 | [tracker] 31 | tracker-width=640 32 | tracker-height=384 33 | gpu-id=0 34 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_mot_klt.so 35 | ll-lib-file=/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_nvdcf.so 36 | ll-config-file=tracker_config.yml 37 | enable-batch-process=1 38 | -------------------------------------------------------------------------------- /models/Trackers/DCF/tracker_config.yml: -------------------------------------------------------------------------------- 1 | %YAML:1.0 2 | ################################################################################ 3 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | ################################################################################ 23 | 24 | NvDCF: 25 | # [General] 26 | useUniqueID: 0 # Use 64-bit long Unique ID when assignining tracker ID. Default is [true] 27 | maxTargetsPerStream: 15 # Max number of targets to track per stream. Recommended to set >10. Note: this value should account for the targets being tracked in shadow mode as well. Max value depends on the GPU memory capacity 28 | 29 | # [Feature Extraction] 30 | useColorNames: 1 # Use ColorNames feature 31 | useHog: 0 # Use Histogram-of-Oriented-Gradient (HOG) feature 32 | useHighPrecisionFeature: 1 # Use high-precision in feature extraction. Default is [true] 33 | 34 | # [DCF] 35 | filterLr: 0.15 # learning rate for DCF filter in exponential moving average. Valid Range: [0.0, 1.0] 36 | filterChannelWeightsLr: 0.22 # learning rate for the channel weights among feature channels. Valid Range: [0.0, 1.0] 37 | gaussianSigma: 0.75 # Standard deviation for Gaussian for desired response when creating DCF filter [pixels] 38 | featureImgSizeLevel: 3 # Size of a feature image. Valid range: {1, 2, 3, 4, 5}, from the smallest to the largest 39 | SearchRegionPaddingScale: 1 # Search region size. Determines how large the search region should be scaled from the target bbox. 
Valid range: {1, 2, 3}, from the smallest to the largest 40 | 41 | # [MOT] [False Alarm Handling] 42 | maxShadowTrackingAge: 30 # Max length of shadow tracking (the shadow tracking age is incremented when (1) there's detector input yet no match or (2) tracker confidence is lower than minTrackerConfidence). Once reached, the tracker will be terminated. 43 | probationAge: 3 # Once the tracker age (incremented at every frame) reaches this, the tracker is considered to be valid 44 | earlyTerminationAge: 1 # Early termination age (in terms of shadow tracking age) during the probation period. If reached during the probation period, the tracker will be terminated prematurely. 45 | 46 | # [Tracker Creation Policy] [Target Candidacy] 47 | minDetectorConfidence: -1 # If the confidence of a detector bbox is lower than this, then it won't be considered for tracking 48 | minTrackerConfidence: 0.7 # If the confidence of an object tracker is lower than this on the fly, then it will be tracked in shadow mode. Valid Range: [0.0, 1.0] 49 | minTargetBboxSize: 10 # If the width or height of the bbox size gets smaller than this threshold, the target will be terminated. 50 | minDetectorBboxVisibilityTobeTracked: 0.0 # If the detector-provided bbox's visibility (i.e., IOU with image) is lower than this, it won't be considered. 51 | minVisibiilty4Tracking: 0.0 # If the visibility of the tracked object (i.e., IOU with image) is lower than this, it will be terminated immediately, assuming it is going out of scene. 52 | 53 | # [Tracker Termination Policy] 54 | targetDuplicateRunInterval: 5 # The interval in which the duplicate target detection removal is carried out. A Negative value indicates indefinite interval. Unit: [frames] 55 | minIou4TargetDuplicate: 0.9 # If the IOU of two target bboxes are higher than this, the newer target tracker will be terminated. 56 | 57 | # [Data Association] Matching method 58 | useGlobalMatching: 0 # If true, enable a global matching algorithm (i.e., Hungarian method). Otherwise, a greedy algorithm wll be used. 
59 | usePersistentThreads: 0 # If true, create data association threads once and re-use them 60 | 61 | # [Data Association] Thresholds in matching scores to be considered as a valid candidate for matching 62 | minMatchingScore4Overall: 0.0 # Min total score 63 | minMatchingScore4SizeSimilarity: 0.5 # Min bbox size similarity score 64 | minMatchingScore4Iou: 0.1 # Min IOU score 65 | minMatchingScore4VisualSimilarity: 0.2 # Min visual similarity score 66 | 67 | # [Data Association] Weights for each matching score term 68 | matchingScoreWeight4VisualSimilarity: 0.8 # Weight for the visual similarity (in terms of correlation response ratio) 69 | matchingScoreWeight4SizeSimilarity: 0.0 # Weight for the Size-similarity score 70 | matchingScoreWeight4Iou: 0.1 # Weight for the IOU score 71 | matchingScoreWeight4Age: 0.1 # Weight for the tracker age 72 | 73 | # [State Estimator] 74 | useTrackSmoothing: 1 # Use a state estimator 75 | stateEstimatorType: 1 # The type of state estimator among { moving_avg:1, kalman_filter:2 } 76 | 77 | # [State Estimator] [MovingAvgEstimator] 78 | trackExponentialSmoothingLr_loc: 0.5 # Learning rate for new location 79 | trackExponentialSmoothingLr_scale: 0.3 # Learning rate for new scale 80 | trackExponentialSmoothingLr_velocity: 0.05 # Learning rate for new velocity 81 | 82 | # [State Estimator] [Kalman Filter] 83 | kfProcessNoiseVar4Loc: 0.1 # Process noise variance for location in Kalman filter 84 | kfProcessNoiseVar4Scale: 0.04 # Process noise variance for scale in Kalman filter 85 | kfProcessNoiseVar4Vel: 0.04 # Process noise variance for velocity in Kalman filter 86 | kfMeasurementNoiseVar4Trk: 9 # Measurement noise variance for tracker's detection in Kalman filter 87 | kfMeasurementNoiseVar4Det: 9 # Measurement noise variance for detector's detection in Kalman filter 88 | 89 | # [Past-frame Data] 90 | useBufferedOutput: 0 # Enable storing of past-frame data in a buffer and report it back 91 | 92 | # [Instance-awareness] 93 | useInstanceAwareness: 0 # Use instance-awareness for multi-object tracking 94 | lambda_ia: 2 # Regularlization factor for each instance 95 | maxInstanceNum_ia: 4 # The number of nearby object instances to use for instance-awareness 96 | 97 | -------------------------------------------------------------------------------- /models/Trackers/KLT/ds_tracker_config.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | # Mandatory properties for the tracker: 24 | # tracker-width: needs to be multiple of 32 for NvDCF 25 | # tracker-height: needs to be multiple of 32 for NvDCF 26 | # gpu-id 27 | # ll-lib-file: path to low-level tracker lib 28 | # ll-config-file: required for NvDCF, optional for KLT and IOU 29 | # 30 | [tracker] 31 | tracker-width=640 32 | tracker-height=384 33 | gpu-id=0 34 | ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_mot_klt.so 35 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so 36 | #ll-config-file=tracker_config.yml 37 | enable-batch-process=1 -------------------------------------------------------------------------------- /resources/centerface.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kingardor/Centerface-Deepstream/117e94ba4e7e87d72d0deb97b0b717997338fbfb/resources/centerface.gif -------------------------------------------------------------------------------- /src/deepstream_centerface_app.cpp: -------------------------------------------------------------------------------- 1 | #include "deepstream_centerface_app.hpp" 2 | 3 | namespace CenterFace { 4 | void 5 | Nexus::update_fps(gint id) { 6 | 7 | gdouble current_fps = duration_cast(system_clock::now() - fps[id].fps_timer).count(); 8 | current_fps /= 1000; 9 | current_fps = 1 / current_fps; 10 | fps[id].rolling_fps = (gint)(fps[id].rolling_fps * 0.7 + current_fps * 0.3); 11 | auto timer = duration_cast(system_clock::now() - fps[id].display_timer).count(); 12 | if (timer > PERF_INTERVAL) { 13 | fps[id].display_fps = fps[id].rolling_fps; 14 | fps[id].display_timer = system_clock::now(); 15 | } 16 | fps[id].fps_timer = system_clock::now(); 17 | } 18 | 19 | int 20 | Nexus::create_input_sources(gpointer pipe, gpointer mux, guint num_sources) { 21 | 22 | GstElement *pipeline = (GstElement *)pipe; 23 | GstElement *streammux = (GstElement *)mux; 24 | 25 | std::ifstream infile(SOURCE_PATH); 26 | std::string source; 27 | 28 | if (infile.is_open()) { 29 | while (getline(infile, source)) { 30 | GstPad *sinkpad, *srcpad; 31 | GstElement *source_bin = NULL; 32 | gchar pad_name[16] = {}; 33 | 34 | source_bin = create_source_bin(num_sources, (gchar *)source.c_str()); 35 | 36 | if (!source_bin) { 37 | g_printerr("Failed to create source bin. Exiting.\n"); 38 | return -1; 39 | } 40 | 41 | gst_bin_add(GST_BIN(pipeline), source_bin); 42 | 43 | g_snprintf(pad_name, 15, "sink_%u", num_sources); 44 | sinkpad = gst_element_get_request_pad(streammux, pad_name); 45 | if (!sinkpad) { 46 | g_printerr("Streammux request sink pad failed. Exiting.\n"); 47 | return -1; 48 | } 49 | 50 | srcpad = gst_element_get_static_pad(source_bin, "src"); 51 | if (!srcpad) { 52 | g_printerr("Failed to get src pad of source bin. Exiting.\n"); 53 | return -1; 54 | } 55 | 56 | if (gst_pad_link(srcpad, sinkpad) != GST_PAD_LINK_OK) { 57 | g_printerr("Failed to link source bin to stream muxer. 
Exiting.\n"); 58 | return -1; 59 | } 60 | 61 | gst_object_unref(srcpad); 62 | gst_object_unref(sinkpad); 63 | num_sources++; 64 | } 65 | } 66 | infile.close(); 67 | return num_sources; 68 | } 69 | 70 | void 71 | Nexus::changeBBoxColor(gpointer obj_meta_data, int has_bg_color, float red, float green, 72 | float blue, float alpha) { 73 | 74 | NvDsObjectMeta *obj_meta = (NvDsObjectMeta *)obj_meta_data; 75 | #ifndef PLATFORM_TEGRA 76 | obj_meta->rect_params.has_bg_color = has_bg_color; 77 | obj_meta->rect_params.bg_color.red = red; 78 | obj_meta->rect_params.bg_color.green = green; 79 | obj_meta->rect_params.bg_color.blue = blue; 80 | obj_meta->rect_params.bg_color.alpha = alpha; 81 | #endif 82 | obj_meta->rect_params.border_color.red = red; 83 | obj_meta->rect_params.border_color.green = green; 84 | obj_meta->rect_params.border_color.blue = blue; 85 | obj_meta->rect_params.border_color.alpha = alpha; 86 | obj_meta->text_params.font_params.font_size = 14; 87 | } 88 | 89 | void 90 | Nexus::addDisplayMeta(gpointer batch_meta_data, gpointer frame_meta_data) { 91 | 92 | NvDsBatchMeta *batch_meta = (NvDsBatchMeta *)batch_meta_data; 93 | NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)frame_meta_data; 94 | 95 | // To access the data that will be used to draw 96 | NvDsDisplayMeta *display_meta = NULL; 97 | NvOSD_TextParams *txt_params = NULL; 98 | NvOSD_LineParams *line_params = NULL; 99 | 100 | int offset = 0; 101 | display_meta = nvds_acquire_display_meta_from_pool(batch_meta); 102 | txt_params = display_meta->text_params; 103 | line_params = display_meta->line_params; 104 | display_meta->num_labels = 1; 105 | 106 | // if (txt_params->display_text) 107 | // g_free (txt_params->display_text); 108 | txt_params->display_text = (char *)g_malloc0(MAX_DISPLAY_LEN); 109 | 110 | update_fps(frame_meta->source_id); 111 | 112 | offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Source: %d | FPS: %d | ", 113 | frame_meta->source_id, fps[frame_meta->source_id].display_fps); 114 | 115 | /* Now set the offsets where the string should appear */ 116 | txt_params->x_offset = 10; 117 | txt_params->y_offset = 12; 118 | 119 | /* Font , font-color and font-size */ 120 | txt_params->font_params.font_name = (char *)"Serif"; 121 | txt_params->font_params.font_size = 14; 122 | txt_params->font_params.font_color.red = 1.0; 123 | txt_params->font_params.font_color.green = 1.0; 124 | txt_params->font_params.font_color.blue = 1.0; 125 | txt_params->font_params.font_color.alpha = 1.0; 126 | 127 | /* Text background color */ 128 | txt_params->set_bg_clr = 1; 129 | txt_params->text_bg_clr.red = 0.0; 130 | txt_params->text_bg_clr.green = 0.0; 131 | txt_params->text_bg_clr.blue = 0.0; 132 | txt_params->text_bg_clr.alpha = 1.0; 133 | 134 | nvds_add_display_meta_to_frame(frame_meta, display_meta); 135 | } 136 | 137 | GstPadProbeReturn 138 | Nexus::tiler_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, 139 | gpointer u_data) { 140 | GstBuffer *buf = (GstBuffer *)info->data; 141 | 142 | // To access the entire batch data 143 | NvDsBatchMeta *batch_meta = NULL; 144 | 145 | NvDsObjectMeta *obj_meta = NULL; 146 | NvDsFrameMeta *frame_meta = NULL; 147 | 148 | // To access the frame 149 | NvBufSurface *surface = NULL; 150 | // TO generate message meta 151 | NvDsEventMsgMeta *msg_meta = NULL; 152 | 153 | NvDsMetaList *l_frame = NULL; 154 | NvDsMetaList *l_obj = NULL; 155 | 156 | // Get original raw data 157 | GstMapInfo in_map_info; 158 | char *src_data = NULL; 159 | 160 | if (!gst_buffer_map(buf, &in_map_info, GST_MAP_READ)) 
{ 161 | g_print("Error: Failed to map gst buffer\n"); 162 | gst_buffer_unmap(buf, &in_map_info); 163 | return GST_PAD_PROBE_OK; 164 | } 165 | 166 | batch_meta = gst_buffer_get_nvds_batch_meta(buf); 167 | 168 | if (!batch_meta) { 169 | return GST_PAD_PROBE_OK; 170 | } 171 | 172 | for (l_frame = batch_meta->frame_meta_list; l_frame != NULL; 173 | l_frame = l_frame->next) { 174 | frame_meta = (NvDsFrameMeta *)(l_frame->data); 175 | 176 | if (frame_meta == NULL) { 177 | // Ignore Null frame meta. 178 | continue; 179 | } 180 | 181 | guint person_count = 0; 182 | 183 | for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; 184 | l_obj = l_obj->next) { 185 | 186 | obj_meta = (NvDsObjectMeta *)(l_obj->data); 187 | 188 | if (obj_meta == NULL) { 189 | // Ignore Null object. 190 | continue; 191 | } 192 | 193 | gint class_index = obj_meta->class_id; 194 | 195 | if(class_index == FACE) { 196 | changeBBoxColor(obj_meta, 1, 0.0, 1.0, 0.0, 0.25); 197 | } 198 | 199 | } 200 | // Add Information to every stream 201 | addDisplayMeta(batch_meta, frame_meta); 202 | } 203 | gst_buffer_unmap(buf, &in_map_info); 204 | return GST_PAD_PROBE_OK; 205 | } 206 | 207 | gboolean 208 | Nexus::bus_call(GstBus *bus, GstMessage *msg, gpointer data) { 209 | GMainLoop *loop = (GMainLoop *)data; 210 | switch (GST_MESSAGE_TYPE(msg)) { 211 | case GST_MESSAGE_EOS: 212 | g_print("End of stream\n"); 213 | g_main_loop_quit(loop); 214 | break; 215 | case GST_MESSAGE_WARNING: { 216 | gchar *debug; 217 | GError *error; 218 | gst_message_parse_warning(msg, &error, &debug); 219 | g_printerr("WARNING from element %s: %s\n", 220 | GST_OBJECT_NAME(msg->src), error->message); 221 | g_free(debug); 222 | g_printerr("Warning: %s\n", error->message); 223 | g_error_free(error); 224 | break; 225 | } 226 | case GST_MESSAGE_ERROR: { 227 | gchar *debug; 228 | GError *error; 229 | gst_message_parse_error(msg, &error, &debug); 230 | g_printerr("ERROR from element %s: %s\n", 231 | GST_OBJECT_NAME(msg->src), error->message); 232 | if (debug) 233 | g_printerr("Error details: %s\n", debug); 234 | g_free(debug); 235 | g_error_free(error); 236 | g_main_loop_quit(loop); 237 | break; 238 | } 239 | #ifndef PLATFORM_TEGRA 240 | case GST_MESSAGE_ELEMENT: { 241 | if (gst_nvmessage_is_stream_eos(msg)) { 242 | guint stream_id; 243 | if (gst_nvmessage_parse_stream_eos(msg, &stream_id)) { 244 | g_print("Got EOS from stream %d\n", stream_id); 245 | } 246 | } 247 | break; 248 | } 249 | #endif 250 | default: 251 | break; 252 | } 253 | return TRUE; 254 | } 255 | 256 | void 257 | Nexus::cb_newpad(GstElement *decodebin, GstPad *decoder_src_pad, gpointer data) { 258 | g_print("In cb_newpad\n"); 259 | GstCaps *caps = gst_pad_get_current_caps(decoder_src_pad); 260 | const GstStructure *str = gst_caps_get_structure(caps, 0); 261 | const gchar *name = gst_structure_get_name(str); 262 | GstElement *source_bin = (GstElement *)data; 263 | GstCapsFeatures *features = gst_caps_get_features(caps, 0); 264 | 265 | /* Need to check if the pad created by the decodebin is for video and not 266 | * audio. */ 267 | if (!strncmp(name, "video", 5)) { 268 | /* Link the decodebin pad only if decodebin has picked nvidia 269 | * decoder plugin nvdec_*. We do this by checking if the pad caps contain 270 | * NVMM memory features. 
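 * (NVMM buffers are hardware NvBufSurface buffers in NVIDIA device memory rather than plain system memory; a pad produced by a software decoder would not carry this caps feature and cannot be linked directly into this pipeline.)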
*/ 271 | if (gst_caps_features_contains(features, GST_CAPS_FEATURES_NVMM)) { 272 | /* Get the source bin ghost pad */ 273 | GstPad *bin_ghost_pad = gst_element_get_static_pad(source_bin, "src"); 274 | if (!gst_ghost_pad_set_target(GST_GHOST_PAD(bin_ghost_pad), 275 | decoder_src_pad)) { 276 | g_printerr("Failed to link decoder src pad to source bin ghost pad\n"); 277 | } 278 | gst_object_unref(bin_ghost_pad); 279 | } 280 | else { 281 | g_printerr("Error: Decodebin did not pick nvidia decoder plugin.\n"); 282 | } 283 | } 284 | } 285 | 286 | void 287 | Nexus::decodebin_child_added(GstChildProxy *child_proxy, GObject *object, gchar *name, gpointer user_data) { 288 | g_print("Decodebin child added: %s\n", name); 289 | if (g_strrstr(name, "decodebin") == name) { 290 | g_signal_connect(G_OBJECT(object), "child-added", 291 | G_CALLBACK(decodebin_child_added), user_data); 292 | } 293 | if (g_strstr_len(name, -1, "nvv4l2decoder") == name) { 294 | g_print("Seting bufapi_version\n"); 295 | g_object_set(object, "bufapi-version", TRUE, NULL); 296 | } 297 | } 298 | 299 | GstElement * 300 | Nexus::create_source_bin(guint index, gchar *uri) { 301 | GstElement *bin = NULL, *uri_decode_bin = NULL; 302 | gchar bin_name[16] = {}; 303 | 304 | g_snprintf(bin_name, 15, "source-bin-%02d", index); 305 | /* Create a source GstBin to abstract this bin's content from the rest of the 306 | * pipeline */ 307 | bin = gst_bin_new(bin_name); 308 | 309 | /* Source element for reading from the uri. 310 | * We will use decodebin and let it figure out the container format of the 311 | * stream and the codec and plug the appropriate demux and decode plugins. */ 312 | uri_decode_bin = gst_element_factory_make("uridecodebin", "uri-decode-bin"); 313 | 314 | if (!bin || !uri_decode_bin) { 315 | g_printerr("One element in source bin could not be created.\n"); 316 | return NULL; 317 | } 318 | 319 | /* We set the input uri to the source element */ 320 | g_object_set(G_OBJECT(uri_decode_bin), "uri", uri, NULL); 321 | 322 | /* Connect to the "pad-added" signal of the decodebin which generates a 323 | * callback once a new pad for raw data has beed created by the decodebin */ 324 | g_signal_connect(G_OBJECT(uri_decode_bin), "pad-added", 325 | G_CALLBACK(cb_newpad), bin); 326 | g_signal_connect(G_OBJECT(uri_decode_bin), "child-added", 327 | G_CALLBACK(decodebin_child_added), bin); 328 | 329 | gst_bin_add(GST_BIN(bin), uri_decode_bin); 330 | 331 | /* We need to create a ghost pad for the source bin which will act as a proxy 332 | * for the video decoder src pad. The ghost pad will not have a target right 333 | * now. Once the decode bin creates the video decoder and generates the 334 | * cb_newpad callback, we will set the ghost pad target to the video decoder 335 | * src pad. */ 336 | if (!gst_element_add_pad(bin, gst_ghost_pad_new_no_target("src", 337 | GST_PAD_SRC))) { 338 | g_printerr("Failed to add ghost pad in source bin\n"); 339 | return NULL; 340 | } 341 | return bin; 342 | } 343 | 344 | gchar * 345 | Nexus::get_absolute_file_path(gchar *cfg_file_path, gchar *file_path) { 346 | gchar abs_cfg_path[PATH_MAX + 1]; 347 | gchar *abs_file_path; 348 | gchar *delim; 349 | 350 | if (file_path && file_path[0] == '/') { 351 | return file_path; 352 | } 353 | 354 | if (!realpath(cfg_file_path, abs_cfg_path)) { 355 | g_free(file_path); 356 | return NULL; 357 | } 358 | 359 | // Return absolute path of config file if file_path is NULL. 
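// Note: this helper takes ownership of file_path: it is released with g_free() and a newly allocated string is returned, except when file_path is already absolute, in which case it is returned unchanged.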
360 | if (!file_path) { 361 | abs_file_path = g_strdup(abs_cfg_path); 362 | return abs_file_path; 363 | } 364 | 365 | delim = g_strrstr(abs_cfg_path, "/"); 366 | *(delim + 1) = '\0'; 367 | 368 | abs_file_path = g_strconcat(abs_cfg_path, file_path, NULL); 369 | g_free(file_path); 370 | 371 | return abs_file_path; 372 | } 373 | 374 | gboolean 375 | Nexus::set_tracker_properties(GstElement *nvtracker) { 376 | gboolean ret = FALSE; 377 | GError *error = NULL; 378 | gchar **keys = NULL; 379 | gchar **key = NULL; 380 | GKeyFile *key_file = g_key_file_new(); 381 | 382 | if (!g_key_file_load_from_file(key_file, TRACKER_CONFIG_FILE, G_KEY_FILE_NONE, 383 | &error)) { 384 | g_printerr("Failed to load config file: %s\n", error->message); 385 | return FALSE; 386 | } 387 | 388 | keys = g_key_file_get_keys(key_file, CONFIG_GROUP_TRACKER, NULL, &error); 389 | CHECK_ERROR(error); 390 | 391 | for (key = keys; *key; key++) { 392 | if (!g_strcmp0(*key, CONFIG_GROUP_TRACKER_WIDTH)) { 393 | gint width = 394 | g_key_file_get_integer(key_file, CONFIG_GROUP_TRACKER, 395 | CONFIG_GROUP_TRACKER_WIDTH, &error); 396 | CHECK_ERROR(error); 397 | g_object_set(G_OBJECT(nvtracker), "tracker-width", width, NULL); 398 | } 399 | else if (!g_strcmp0(*key, CONFIG_GROUP_TRACKER_HEIGHT)) { 400 | gint height = 401 | g_key_file_get_integer(key_file, CONFIG_GROUP_TRACKER, 402 | CONFIG_GROUP_TRACKER_HEIGHT, &error); 403 | CHECK_ERROR(error); 404 | g_object_set(G_OBJECT(nvtracker), "tracker-height", height, NULL); 405 | } 406 | else if (!g_strcmp0(*key, CONFIG_GPU_ID)) { 407 | guint gpu_id = 408 | g_key_file_get_integer(key_file, CONFIG_GROUP_TRACKER, 409 | CONFIG_GPU_ID, &error); 410 | CHECK_ERROR(error); 411 | g_object_set(G_OBJECT(nvtracker), "gpu_id", gpu_id, NULL); 412 | } 413 | else if (!g_strcmp0(*key, CONFIG_GROUP_TRACKER_LL_CONFIG_FILE)) { 414 | char *ll_config_file = get_absolute_file_path(TRACKER_CONFIG_FILE, 415 | g_key_file_get_string(key_file, 416 | CONFIG_GROUP_TRACKER, 417 | CONFIG_GROUP_TRACKER_LL_CONFIG_FILE, &error)); 418 | CHECK_ERROR(error); 419 | g_object_set(G_OBJECT(nvtracker), "ll-config-file", ll_config_file, NULL); 420 | } 421 | else if (!g_strcmp0(*key, CONFIG_GROUP_TRACKER_LL_LIB_FILE)) { 422 | char *ll_lib_file = get_absolute_file_path(TRACKER_CONFIG_FILE, 423 | g_key_file_get_string(key_file, 424 | CONFIG_GROUP_TRACKER, 425 | CONFIG_GROUP_TRACKER_LL_LIB_FILE, &error)); 426 | CHECK_ERROR(error); 427 | g_object_set(G_OBJECT(nvtracker), "ll-lib-file", ll_lib_file, NULL); 428 | } 429 | else if (!g_strcmp0(*key, CONFIG_GROUP_TRACKER_ENABLE_BATCH_PROCESS)) { 430 | gboolean enable_batch_process = 431 | g_key_file_get_integer(key_file, CONFIG_GROUP_TRACKER, 432 | CONFIG_GROUP_TRACKER_ENABLE_BATCH_PROCESS, &error); 433 | CHECK_ERROR(error); 434 | g_object_set(G_OBJECT(nvtracker), "enable_batch_process", 435 | enable_batch_process, NULL); 436 | } 437 | else { 438 | g_printerr("Unknown key '%s' for group [%s]", *key, 439 | CONFIG_GROUP_TRACKER); 440 | } 441 | } 442 | 443 | ret = TRUE; 444 | 445 | done: 446 | if (error) { 447 | g_error_free(error); 448 | } 449 | if (keys) { 450 | g_strfreev(keys); 451 | } 452 | if (!ret) { 453 | g_printerr("%s failed", __func__); 454 | } 455 | return ret; 456 | } 457 | 458 | int 459 | Nexus::configure_element_properties(int num_sources, GstElement *streammux, GstElement *pgie_centerface_detector, 460 | GstElement *nvtracker, GstElement *sink, GstElement *tiler) { 461 | 462 | guint tiler_rows, tiler_columns; 463 | 464 | g_object_set(G_OBJECT(streammux), "width", MUXER_OUTPUT_WIDTH, 
465 | "height", MUXER_OUTPUT_HEIGHT, "batch-size", num_sources, 466 | "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, 467 | "live-source", TRUE, NULL); 468 | 469 | // Set all important properties of pgie_centerface_detector 470 | g_object_set(G_OBJECT(pgie_centerface_detector), 471 | "config-file-path", PGIE_CENTERFACE_DETECTOR_CONFIG_FILE_PATH, NULL); 472 | 473 | int batch_size = 0; 474 | 475 | if(num_sources <= 4) { 476 | batch_size = num_sources; 477 | } 478 | else { 479 | batch_size = 4; 480 | } 481 | 482 | // Override batch-size of pgie_centerface_detector 483 | g_object_set(G_OBJECT(pgie_centerface_detector), "batch-size", batch_size, NULL); 484 | 485 | // Check if Engine Exists 486 | if (boost::filesystem::exists(boost::filesystem::path( 487 | CenterFace::Nexus::PGIE_CENTERFACE_ENGINE_PATH))) { 488 | 489 | g_object_set(G_OBJECT(pgie_centerface_detector), 490 | "model-engine-file", CenterFace::Nexus::PGIE_CENTERFACE_ENGINE_PATH.c_str(), NULL); 491 | } 492 | else { 493 | cout << str(boost::format("CENTERFACE Engine for batch-size: %d and compute-mode: %s not found.") 494 | % batch_size % COMPUTE_MODE) << endl; 495 | return EXIT_FAILURE; 496 | } 497 | 498 | // Set necessary properties of the tracker element 499 | if (!CenterFace::Nexus::set_tracker_properties(nvtracker)) { 500 | g_printerr("Failed to set tracker properties. Exiting.\n"); 501 | return -1; 502 | } 503 | 504 | g_object_set(G_OBJECT(sink), 505 | "sync", FALSE, NULL); 506 | 507 | tiler_rows = (guint)sqrt(num_sources); 508 | tiler_columns = (guint)ceil(1.0 * num_sources / tiler_rows); 509 | 510 | // Tiler Properties 511 | g_object_set(G_OBJECT(tiler), "rows", tiler_rows, "columns", tiler_columns, 512 | "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL); 513 | return EXIT_SUCCESS; 514 | } 515 | 516 | void Nexus::setPaths(guint batch_size) { 517 | // Config Paths 518 | PGIE_CENTERFACE_DETECTOR_CONFIG_FILE_PATH = 519 | strdup("models/Centerface/centerface_config.txt"); 520 | 521 | TRACKER_CONFIG_FILE = 522 | strdup("models/Trackers/DCF/ds_tracker_config.txt"); 523 | 524 | // Engine Paths 525 | PGIE_CENTERFACE_ENGINE_PATH = 526 | str(boost::format("models/Centerface/centerface_544x960.onnx_b%d_gpu0_%s.engine") % batch_size % COMPUTE_MODE); 527 | } 528 | } 529 | 530 | int main(int argc, char *argv[]) { 531 | 532 | CenterFace::Nexus nexus; 533 | GMainLoop *loop = NULL; 534 | GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, 535 | *pgie_centerface_detector = NULL, *nvtracker = NULL, 536 | *nvvidconv = NULL, *caps_filter = NULL, *dsexample = NULL, 537 | *nvosd = NULL, *tiler = NULL; 538 | 539 | #ifdef PLATFORM_TEGRA 540 | GstElement *transform = NULL; 541 | #endif 542 | 543 | GstBus *bus = NULL; 544 | guint bus_watch_id = 0; 545 | GstPad *tiler_src_pad = NULL; 546 | guint i; 547 | 548 | // Standard GStreamer initialization 549 | gst_init(&argc, &argv); 550 | loop = g_main_loop_new(NULL, FALSE); 551 | 552 | GOptionContext *ctx = NULL; 553 | GOptionGroup *group = NULL; 554 | GError *error = NULL; 555 | 556 | ctx = g_option_context_new("Nexus DeepStream App"); 557 | group = g_option_group_new("Nexus", NULL, NULL, NULL, NULL); 558 | g_option_group_add_entries(group, nexus.entries); 559 | 560 | g_option_context_set_main_group(ctx, group); 561 | g_option_context_add_group(ctx, gst_init_get_option_group()); 562 | 563 | if (!g_option_context_parse(ctx, &argc, &argv, &error)) { 564 | g_option_context_free(ctx); 565 | g_printerr("%s", error->message); 566 | return -1; 567 | } 568 | g_option_context_free(ctx); 569 
| 570 | /* Create gstreamer elements */ 571 | // Create Pipeline element to connect all elements 572 | pipeline = gst_pipeline_new("centerface-pipeline"); 573 | 574 | // Stream Multiplexer for input 575 | streammux = gst_element_factory_make("nvstreammux", "stream-muxer"); 576 | 577 | if (!pipeline || !streammux) { 578 | g_printerr("One element could not be created. Exiting.\n"); 579 | return -1; 580 | } 581 | gst_bin_add(GST_BIN(pipeline), streammux); 582 | 583 | gint sources = nexus.create_input_sources(pipeline, streammux, num_sources); 584 | if (sources == -1) { 585 | return -1; 586 | } 587 | else { 588 | num_sources = sources; 589 | } 590 | 591 | int batch_size = 0; 592 | if(num_sources <= 4) { 593 | batch_size = num_sources; 594 | } 595 | else { 596 | batch_size = 4; 597 | } 598 | nexus.setPaths(batch_size); 599 | 600 | // Primary GPU Inference Engine 601 | pgie_centerface_detector = gst_element_factory_make("nvinfer", "primary-centerface-nvinference-engine"); 602 | 603 | if (!pgie_centerface_detector) { 604 | g_printerr("PGIE CENTERFACE Detector could not be created.\n"); 605 | return -1; 606 | } 607 | 608 | // Initialize Tracker 609 | nvtracker = gst_element_factory_make("nvtracker", "tracker"); 610 | 611 | if (!nvtracker ) { 612 | g_printerr("NVTRACKER could not be created.\n"); 613 | return -1; 614 | } 615 | 616 | // Use convertor to convert from NV12 to RGBA as required by nvosd 617 | nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter"); 618 | 619 | if (!nvvidconv) { 620 | g_printerr("NVVIDCONV could not be created.\n"); 621 | return -1; 622 | } 623 | 624 | caps_filter = gst_element_factory_make ("capsfilter", NULL); 625 | 626 | dsexample = gst_element_factory_make ("dsexample", "example-plugin"); 627 | 628 | // Create OSD to draw on the converted RGBA buffer 629 | nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay"); 630 | 631 | if (!nvosd) { 632 | g_printerr("NVOSD could not be created.\n"); 633 | return -1; 634 | } 635 | 636 | /* Redner OSD Output */ 637 | #ifdef PLATFORM_TEGRA 638 | transform = gst_element_factory_make("nvegltransform", "nvegl-transform"); 639 | #endif 640 | 641 | if (nexus.display_off) { 642 | sink = gst_element_factory_make("fakesink", "nvvideo-renderer"); 643 | } 644 | else { 645 | sink = gst_element_factory_make("nveglglessink", "nvvideo-renderer"); 646 | } 647 | 648 | // Compose all the sources into one 2D tiled window 649 | tiler = gst_element_factory_make("nvmultistreamtiler", "nvtiler"); 650 | 651 | if (!tiler) { 652 | g_printerr("SINK could not be created.\n"); 653 | return -1; 654 | } 655 | 656 | if (!sink) { 657 | g_printerr("SINK could not be created.\n"); 658 | return -1; 659 | } 660 | 661 | #ifdef PLATFORM_TEGRA 662 | if (!transform) { 663 | g_printerr("Tegra element TRANSFORM could not be created. 
Exiting.\n"); 664 | return -1; 665 | } 666 | #endif 667 | 668 | int fail_safe = nexus.configure_element_properties(num_sources, streammux, pgie_centerface_detector, 669 | nvtracker, sink, tiler); 670 | 671 | if(fail_safe == -1) { 672 | return -1; 673 | } 674 | 675 | #ifndef PLATFORM_TEGRA 676 | /* Set properties of the nvvideoconvert element 677 | * requires unified cuda memory for opencv blurring on CPU 678 | */ 679 | g_object_set (G_OBJECT (nvvidconv), "nvbuf-memory-type", 1, NULL); 680 | #endif 681 | 682 | /* Set properties of the caps_filter element */ 683 | GstCaps *caps = 684 | gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "RGBA", 685 | NULL); 686 | GstCapsFeatures *feature = gst_caps_features_new (GST_CAPS_FEATURES_NVMM, NULL); 687 | gst_caps_set_features (caps, 0, feature); 688 | 689 | g_object_set (G_OBJECT (caps_filter), "caps", caps, NULL); 690 | 691 | /* Set properties of the dsexample element */ 692 | g_object_set (G_OBJECT (dsexample), "full-frame", FALSE, NULL); 693 | g_object_set (G_OBJECT (dsexample), "blur-objects", TRUE, NULL); 694 | 695 | // Message Handler 696 | bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline)); 697 | bus_watch_id = gst_bus_add_watch(bus, nexus.bus_call, loop); 698 | gst_object_unref(bus); 699 | 700 | /* Set up the pipeline */ 701 | #ifdef PLATFORM_TEGRA 702 | if (!nexus.display_off) { 703 | gst_bin_add_many(GST_BIN(pipeline), 704 | pgie_centerface_detector, nvtracker, tiler, nvvidconv, nvosd, transform, sink, NULL); 705 | 706 | if (!gst_element_link_many(streammux, pgie_centerface_detector, nvtracker, 707 | tiler, nvvidconv, nvosd, transform, sink, NULL)) { 708 | g_printerr("Elements could not be linked. Exiting.\n"); 709 | return -1; 710 | } 711 | } 712 | else { 713 | gst_bin_add_many(GST_BIN(pipeline), 714 | pgie_centerface_detector, nvtracker, tiler, nvvidconv, nvosd, sink, NULL); 715 | 716 | if (!gst_element_link_many(streammux, pgie_centerface_detector, nvtracker, 717 | tiler, nvvidconv, nvosd, sink, NULL)) { 718 | g_printerr("Elements could not be linked. Exiting.\n"); 719 | return -1; 720 | } 721 | } 722 | 723 | #else 724 | gst_bin_add_many(GST_BIN(pipeline), 725 | pgie_centerface_detector, nvtracker, tiler, nvvidconv, caps_filter, dsexample, nvosd, sink, NULL); 726 | 727 | if (!gst_element_link_many(streammux, pgie_centerface_detector, nvtracker, 728 | tiler, nvvidconv, caps_filter, dsexample, nvosd, sink, NULL)) { 729 | g_printerr("Elements could not be linked. Exiting.\n"); 730 | return -1; 731 | } 732 | #endif 733 | 734 | /* Lets add probe to get informed of the meta data generated, we add probe to 735 | * the sink pad of the osd element, since by that time, the buffer would have 736 | * had got all the metadata. 
737 |     tiler_src_pad = gst_element_get_static_pad(tiler, "sink");
738 |     if (!tiler_src_pad) {
739 |         g_print("Unable to get sink pad\n");
740 |     }
741 |     else {
742 |         gst_pad_add_probe(tiler_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
743 |                           nexus.tiler_src_pad_buffer_probe, NULL, NULL);
744 |     }
745 |     /* Set the pipeline to "playing" state */
746 |     cout << "Now playing:" << endl;
747 |     std::ifstream infile(SOURCE_PATH);
748 |     std::string source;
749 |     if (infile.is_open()) {
750 |         while (getline(infile, source)) {
751 |             cout << source << endl;
752 |         }
753 |     }
754 |     infile.close();
755 | 
756 |     gst_element_set_state(pipeline, GST_STATE_PLAYING);
757 | 
758 |     /* Wait till pipeline encounters an error or EOS */
759 |     g_print("Running...\n");
760 |     g_main_loop_run(loop);
761 | 
762 |     /* Out of the main loop, clean up nicely */
763 |     g_print("Returned, stopping playback\n");
764 |     gst_element_set_state(pipeline, GST_STATE_NULL);
765 |     g_print("Deleting pipeline\n");
766 |     gst_object_unref(GST_OBJECT(pipeline));
767 |     g_source_remove(bus_watch_id);
768 |     g_main_loop_unref(loop);
769 |     return 0;
770 | }
--------------------------------------------------------------------------------
/src/deepstream_centerface_app.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | 
5 | #include "gstnvdsmeta.h"
6 | #include "nvdsmeta_schema.h"
7 | #include "nvbufsurface.h"
8 | // #include "gstnvstreammeta.h"
9 | #ifndef PLATFORM_TEGRA
10 | #include "gst-nvmessage.h"
11 | #endif
12 | 
13 | #include
14 | #include
15 | 
16 | // OpenCV for image operations
17 | #include
18 | 
19 | // Asynchronous calls and timers
20 | #include
21 | #include
22 | #include
23 | #include
24 | #include
25 | #include
26 | #include
27 | #include
28 | 
29 | #include
30 | #include
31 | #include
32 | 
33 | #include
34 | 
35 | #include
36 | #include
37 | 
38 | using namespace std;
39 | using namespace std::chrono;
40 | using namespace cv;
41 | 
42 | #define SOURCE_PATH "inputsources.txt"
43 | 
44 | #define PERF_INTERVAL 2
45 | 
46 | #define MAX_DISPLAY_LEN 64
47 | 
48 | // Network Compute Mode
49 | #define COMPUTE_MODE "fp16"
50 | 
51 | #define MAX_TRACKING_ID_LEN 16
52 | 
53 | // Muxer Resolution
54 | #define MUXER_OUTPUT_WIDTH 1920
55 | #define MUXER_OUTPUT_HEIGHT 1080
56 | 
57 | /* Muxer batch formation timeout, e.g. 40 msec. Should ideally be set
58 |  * based on the fastest source's framerate. */
59 | #define MUXER_BATCH_TIMEOUT_USEC 4000000
60 | 
61 | // Tiles Resolution
62 | #define TILED_OUTPUT_WIDTH 1920
63 | #define TILED_OUTPUT_HEIGHT 1080
64 | 
65 | /* NVIDIA Decoder source pad memory feature. This feature signifies that source
66 |  * pads having this capability will push GstBuffers containing cuda buffers. */
67 | #define GST_CAPS_FEATURES_NVMM "memory:NVMM"
68 | 
69 | #define CHECK_ERROR(error) \
70 |     if (error) { \
71 |         g_printerr ("Error while parsing config file: %s\n", error->message); \
72 |         goto done; \
73 |     }
74 | 
75 | #define CONFIG_GROUP_TRACKER "tracker"
76 | #define CONFIG_GROUP_TRACKER_WIDTH "tracker-width"
77 | #define CONFIG_GROUP_TRACKER_HEIGHT "tracker-height"
78 | #define CONFIG_GROUP_TRACKER_LL_CONFIG_FILE "ll-config-file"
79 | #define CONFIG_GROUP_TRACKER_LL_LIB_FILE "ll-lib-file"
80 | #define CONFIG_GROUP_TRACKER_ENABLE_BATCH_PROCESS "enable-batch-process"
81 | #define CONFIG_GPU_ID "gpu-id"
82 | 
83 | enum PGIE_CLASS {FACE = 0};
84 | 
85 | enum GIE_UID {FACE_DETECTOR = 1};
86 | 
87 | int num_sources = 0;
88 | 
89 | namespace CenterFace{
90 | class Nexus {
91 | private:
92 |     gchar pgie_centerface_classes_str[1][10] = {
93 |         "Face"
94 |     };
95 | 
96 |     struct fps_calculator {
97 |         system_clock::time_point fps_timer;
98 |         system_clock::time_point display_timer;
99 |         gint rolling_fps;
100 |         gint display_fps;
101 |     };
102 | 
103 |     inline static fps_calculator fps[16];
104 | 
105 |     inline static char *PGIE_CENTERFACE_DETECTOR_CONFIG_FILE_PATH;
106 | 
107 |     inline static char *TRACKER_CONFIG_FILE;
108 | 
109 | public:
110 |     // To save the frames
111 |     gint frame_number;
112 | 
113 |     gboolean display_off;
114 | 
115 |     GOptionEntry entries[2] = {
116 |         {"no-display", 0, 0, G_OPTION_ARG_NONE, &display_off, "Disable display", NULL},
117 |         {NULL}
118 |     };
119 | 
120 |     std::string PGIE_CENTERFACE_ENGINE_PATH;
121 | 
122 |     static void
123 |     update_fps (gint id);
124 | 
125 |     static int
126 |     create_input_sources (gpointer pipe, gpointer mux, guint num_sources);
127 | 
128 |     size_t
129 |     WriteCallback (char *contents, size_t size, size_t nmemb, void *userp);
130 | 
131 |     static void
132 |     changeBBoxColor (gpointer obj_meta_data, int has_bg_color, float red, float green,
133 |                      float blue, float alpha);
134 | 
135 |     static void
136 |     addDisplayMeta (gpointer batch_meta_data, gpointer frame_meta_data);
137 | 
138 |     static GstPadProbeReturn
139 |     tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
140 |                                 gpointer u_data);
141 | 
142 |     static gboolean
143 |     bus_call (GstBus * bus, GstMessage * msg, gpointer data);
144 | 
145 |     static void
146 |     cb_newpad (GstElement * decodebin, GstPad * decoder_src_pad, gpointer data);
147 | 
148 |     static void
149 |     decodebin_child_added (GstChildProxy *child_proxy, GObject *object,
150 |                            gchar *name, gpointer user_data);
151 | 
152 |     static GstElement *
153 |     create_source_bin (guint index, gchar * uri);
154 | 
155 |     static gchar *
156 |     get_absolute_file_path (gchar *cfg_file_path, gchar *file_path);
157 | 
158 |     static gboolean
159 |     set_tracker_properties (GstElement *nvtracker);
160 | 
161 |     int
162 |     configure_element_properties(int num_sources, GstElement *streammux, GstElement *pgie_centerface_detector,
163 |                                  GstElement *nvtracker, GstElement *sink, GstElement *tiler);
164 | 
165 |     void
166 |     setPaths(guint num_sources);
167 | 
168 |     Nexus() {
169 | 
170 |         int counter;
171 |         for (counter = 0; counter < 16; counter++) {
172 |             fps[counter].fps_timer = system_clock::now();
173 |             fps[counter].display_timer = system_clock::now();
174 |             fps[counter].rolling_fps = 0;
175 |             fps[counter].display_fps = 0;
176 |         }
177 | 
178 |         display_off = false;
179 |         frame_number = 0;
180 |     }
181 | 
182 |     ~Nexus() {}
183 | };
184 | }
--------------------------------------------------------------------------------