├── data_tools ├── __init__.py ├── plot_images.py └── coco_tools.py ├── open_images ├── __init__.py ├── unzip_open_images.sh ├── download_open_images.sh └── open_image_to_json.py ├── odtk_models ├── redaction_labels.txt └── README.md ├── configs ├── README.md ├── odtk_model_config_int8.txt ├── odtk_model_config_fp16.txt ├── test_source1_int8.txt ├── test_source1_fp16.txt ├── test_source8_fp16.txt ├── test_source8_int8.txt ├── test_source4_fp16.txt └── test_source4_int8.txt ├── LICENSE.md ├── TRAINING_README.md ├── Makefile ├── .gitignore ├── INT8-README.md ├── src ├── deepstream_app.h ├── deepstream_app_config_parser.c ├── deepstream_app_main.c └── deepstream_redaction_app.c ├── DATA_README.md └── README.md /data_tools/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /open_images/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /odtk_models/redaction_labels.txt: -------------------------------------------------------------------------------- 1 | Face 2 | -------------------------------------------------------------------------------- /odtk_models/README.md: -------------------------------------------------------------------------------- 1 | Please place your own TensorRT engine files (*.plan) in this folder. 
2 | 3 | -------------------------------------------------------------------------------- /open_images/unzip_open_images.sh: -------------------------------------------------------------------------------- 1 | echo "Unzipping test data" 2 | unzip -q test.zip 3 | echo "Unzipping test challenge data" 4 | unzip -q test_challenge.zip 5 | 6 | echo "Unzipping validation data" 7 | unzip -q validation.zip 8 | 9 | echo "Unzipping train data" 10 | unzip -q train_00.zip 11 | unzip -q train_01.zip 12 | unzip -q train_02.zip 13 | unzip -q train_03.zip 14 | unzip -q train_04.zip 15 | unzip -q train_05.zip 16 | unzip -q train_06.zip 17 | unzip -q train_07.zip 18 | unzip -q train_08.zip 19 | 20 | mkdir zips 21 | mv *.zip zips/ -------------------------------------------------------------------------------- /configs/README.md: -------------------------------------------------------------------------------- 1 | ## Config files 2 | In this directory, we have eight config files. 3 | 4 | 5 | * `test_source1_fp16.txt`, `test_source1_int8.txt` are for running the app with one input source. The current source in these files is a live camera input. 6 | 7 | 8 | * `test_source4_fp16.txt`, `test_source4_int8.txt` are for running the app with four input sources. The current sources in these files are 4 different local mp4 files. 9 | 10 | 11 | * `test_source8_fp16.txt`, `test_source8_int8.txt` are for running the app with eight input sources. The current sources in these files is a single local mp4 files streaming eight times. 12 | 13 | 14 | * `odtk_model_config_fp16.txt`, `odtk_model_config_int8.txt` are config files to specify parameters about the model that is represented in TRT engine plan format. Note that running the app with these 2 config files as command line input will results in DeepStream errors. 15 | 16 | For how to config different parameters, please see section **Config files** in the [README](../README.md) in the root folder. 
-------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /TRAINING_README.md: -------------------------------------------------------------------------------- 1 | # Training 2 | 3 | These instructions show how to use the NVIDIA PyTorch implementation of RetinaNet to train a face detection model, 4 | which we can then use of redact faces in video streams. 5 | 6 | ## Training setup 7 | 8 | ```bash 9 | DATA_DIR=/ 10 | WORKING_DIR=/ 11 | docker run -it --gpus all --rm --ipc=host -v$DATA_DIR:/data -v$WORKING_DIR:/src -w/src nvcr.io/nvidian/pytorch:19.10-py3 12 | ``` 13 | 14 | Install `retinanet` from the NVIDIA implementation repository. 
15 | 16 | ```bash 17 | git clone https://github.com/NVIDIA/retinanet-examples.git 18 | cd retinanet-examples 19 | git checkout 19.10 20 | pip install -e . 21 | ``` 22 | 23 | ## Train 24 | 25 | We assume that your data has been pre-processed, as described in the [data README](DATA_README.md). An example training 26 | command is shown below. 27 | 28 | ```bash 29 | retinanet train redaction.pth --backbone ResNet18FPN --fine-tune retinanet_rn18fpn.pth --classes 1 \ 30 | --lr 0.0001 --batch 80 \ 31 | --images /data/open_images/train_faces --annotations /data/open_images/train_faces.json \ 32 | --val-images /data/open_images/validation --val-annotations /data/open_images/val_faces.json \ 33 | --val-iters 5000 --max-size 880 --iters 50000 --milestones 30000 40000 34 | ``` 35 | 36 | -------------------------------------------------------------------------------- /data_tools/plot_images.py: -------------------------------------------------------------------------------- 1 | from PIL import Image, ImageDraw, ImageFont 2 | import os 3 | from data_tools.coco_tools import read_json 4 | 5 | def draw_boxes(image_dir, output_dir, anns): 6 | """ 7 | Plot GT boxes 8 | """ 9 | 10 | # Read annotations 11 | annotations=read_json(anns) 12 | 13 | # Get info we need 14 | catid2name = {} 15 | for cat in annotations['categories']: 16 | catid2name[cat['id']] = cat['name'] 17 | 18 | 19 | imageid2filename = {} 20 | for ann in annotations['images']: 21 | imageid2filename[ann['id']] = ann['file_name'] 22 | 23 | imageid2annboxes = {} # [(bbox, catid)] 24 | for ann in annotations['annotations']: 25 | this_entry = (ann['bbox'], ann['category_id']) 26 | if ann['image_id'] not in imageid2annboxes: 27 | imageid2annboxes[ann['image_id']] = [this_entry] 28 | else: 29 | imageid2annboxes[ann['image_id']].append(this_entry) 30 | 31 | # Now work through image IDs (imageid2filename.keys()) and create images. 
32 | for done, imageid in enumerate(imageid2filename): 33 | filepath = os.path.join(image_dir, imageid2filename[imageid]) 34 | image = Image.open(filepath).convert("RGB") 35 | # image = Image.open(filepath) 36 | draw = ImageDraw.Draw(image) 37 | # Add GT bounding boxes. 38 | anns = [] 39 | try: 40 | anns = imageid2annboxes[imageid] 41 | except KeyError: 42 | pass 43 | for bbox, catid in anns: 44 | [xmin, ymin, w, h] = bbox 45 | xmax = int(round(xmin + w)) 46 | ymax = int(round(ymin + h)) 47 | draw.line([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)], 48 | fill=(53, 111, 19), width=4) 49 | draw.text((xmin + 3, ymin - 18), catid2name[catid], (53, 111, 19)) 50 | 51 | 52 | filename_root, filename_ext = os.path.splitext(imageid2filename[imageid]) 53 | output_path = os.path.join(output_dir, filename_root + "_detections" + filename_ext) 54 | image.save(output_path) 55 | if (done + 1) % 25 == 0 and done > 0: 56 | print(" Saved {} of {} images.".format(done + 1, len(imageid2filename)),) 57 | print(" Saved {} of {} images.".format(done + 1, len(imageid2filename)), ) 58 | 59 | 60 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | APP:= deepstream-redaction-app 24 | 25 | TARGET_DEVICE = $(shell gcc -dumpmachine | cut -f1 -d -) 26 | 27 | NVDS_VERSION:=4.0 28 | 29 | LIB_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib/ 30 | 31 | ifeq ($(TARGET_DEVICE),aarch64) 32 | CFLAGS:= -DPLATFORM_TEGRA 33 | endif 34 | 35 | SRCS:= $(wildcard src/*.c) 36 | SRCS+= $(wildcard ../apps-common/src/*.c) 37 | 38 | INCS:= $(wildcard src/*.h) 39 | 40 | PKGS:= gstreamer-1.0 gstreamer-video-1.0 x11 41 | 42 | OBJS:= $(SRCS:.c=.o) 43 | 44 | CFLAGS+= -I../apps-common/includes -I../../includes -DDS_VERSION_MINOR=0 -DDS_VERSION_MAJOR=4 45 | 46 | LIBS+= -L$(LIB_INSTALL_DIR) -lnvdsgst_meta -lnvds_meta -lnvdsgst_helper -lnvds_utils -lm \ 47 | -lgstrtspserver-1.0 -Wl,-rpath,$(LIB_INSTALL_DIR) 48 | 49 | CFLAGS+= `pkg-config --cflags $(PKGS)` 50 | 51 | LIBS+= `pkg-config --libs $(PKGS)` 52 | 53 | all: $(APP) 54 | 55 | %.o: %.c $(INCS) Makefile 56 | $(CC) -c -o $@ $(CFLAGS) $< 57 | 58 | $(APP): $(OBJS) Makefile 59 | $(CC) -o $(APP) $(OBJS) $(LIBS) 60 | 61 | clean: 62 | rm -rf $(OBJS) $(APP) 63 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # JMS added 132 | .idea/ -------------------------------------------------------------------------------- /INT8-README.md: -------------------------------------------------------------------------------- 1 | # Export an ONNX file into TensorRT in INT8 precision 2 | 3 | During the first export, we need to use some images to produce a calibration table. To do this we need to modify the sample 4 | [export.cpp file](https://github.com/NVIDIA/retinanet-examples/blob/master/extras/cppapi/export.cpp) provided by RetinanNet. 5 | 6 | ### Code changes 7 | 8 | Replace this code bloc... 9 | ``` 10 | const vector calibration_files; 11 | string model_name = ""; 12 | string calibration_table = argc == 4 ? string(argv[3]) : ""; 13 | ``` 14 | ...with specific images of your choosing. These calibration images should represent the distribution of your image space. 15 | 16 | ``` 17 | vector calibration_files; 18 | calibration_files.push_back("path-to/your-image-1"); 19 | calibration_files.push_back("path-to/your-image-2"); 20 | calibration_files.push_back("path-to/your-image-3"); 21 | ... 
(the list goes on) 22 | calibration_files.push_back("path-to/your-image-n"); 23 | 24 | string model_name = "ResNet34FPN"; //because we are using ResNet34 backbone 25 | string calibration_table = ""; 26 | ``` 27 | 28 | When exporting a model from ONNX to TensorRT INT8, the `batch` parameter in `export.cpp` should be smaller than the total 29 | number of images `n`. Once you have a calibration table, you don't have to make it again on the same device, 30 | even if you're exporting to an engine of a different batch size, provided that the batch size remains smaller 31 | than the number of calibration images `n`. 32 | 33 | ### Create the calibration table 34 | 35 | The command for the first export should be: 36 | ``` 37 | ./export model.onnx engine.plan int8calibrationtablename 38 | ``` 39 | This command will take several minutes to run. After the first export, we should have a calibration table with a name 40 | similar to `Int8CalibrationTable_ResNet34FPN512x864_20`. 41 | We now pass the calibration table name to the command line. 42 | 43 | ### Using the table for future exports. 44 | Before the next exports, we will want to change our export.cpp code back to 45 | ``` 46 | const vector calibration_files; 47 | string model_name = ""; 48 | string calibration_table = argc == 4 ? string(argv[3]) : ""; 49 | ``` 50 | 51 | For each engine, if we intend to have a different batch size, we will also need to remember to modify the line 52 | ```int batch = 1;``` 53 | and re-run `make` to obtain a new `export` executable. 54 | 55 | 56 | Now we just pass in the actual generated calibration table name. Example command: 57 | 58 | ``` 59 | ./export model.onnx engine.plan Int8CalibrationTable_ResNet34FPN512x864_20 60 | ``` 61 | 62 | 63 | -------------------------------------------------------------------------------- /configs/odtk_model_config_int8.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2019 NVIDIA Corporation. 
All rights reserved. 2 | # NVIDIA Corporation and its licensors retain all intellectual property 3 | # and proprietary rights in and to this software, related documentation 4 | # and any modifications thereto. Any use, reproduction, disclosure or 5 | # distribution of this software and related documentation without an express 6 | # license agreement from NVIDIA Corporation is strictly prohibited. 7 | 8 | # Following properties are mandatory when engine files are not specified: 9 | # int8-calib-file(Only in INT8) 10 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 11 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 12 | # ONNX: onnx-file 13 | # 14 | # Mandatory properties for detectors: 15 | # parse-func, num-detected-classes, 16 | # custom-lib-path (when parse-func=0 i.e. custom), 17 | # parse-bbox-func-name (when parse-func=0) 18 | # 19 | # Optional properties for detectors: 20 | # enable-dbscan(Default=false), interval(Primary mode only, Default=0) 21 | # 22 | # Mandatory properties for classifiers: 23 | # classifier-threshold, is-classifier 24 | # 25 | # Optional properties for classifiers: 26 | # classifier-async-mode(Secondary mode only, Default=false) 27 | # 28 | # Optional properties in secondary mode: 29 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 30 | # input-object-min-width, input-object-min-height, input-object-max-width, 31 | # input-object-max-height 32 | # 33 | # Following properties are always recommended: 34 | # batch-size(Default=1) 35 | # 36 | # Other optional properties: 37 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 38 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 39 | # mean-file, gie-unique-id(Default=0), offsets, gie-mode (Default=1 i.e. 
primary), 40 | # custom-lib-path, network-mode(Default=0 i.e FP32) 41 | # 42 | # The values in the config file are overridden by values set through GObject 43 | # properties. 44 | 45 | [property] 46 | gpu-id=0 47 | net-scale-factor=0.017352074 48 | offsets=123.675;116.28;103.53 49 | 50 | ## 0=FP32, 1=INT8, 2=FP16 mode 51 | network-mode=1 52 | num-detected-classes=1 53 | gie-unique-id=1 54 | is-classifier=0 55 | parse-bbox-func-name=NvDsInferParseRetinaNet 56 | custom-lib-path=/path-to/retinanet-examples/extras/deepstream/deepstream-sample/build/libnvdsparsebbox_retinanet.so 57 | #enable-dbscan=1 58 | 59 | 60 | [class-attrs-all] 61 | threshold=0.2 62 | group-threshold=0 63 | ## Set eps=0.7 and minBoxes for enable-dbscan=1 64 | #eps=0.2 65 | #minBoxes=3 66 | #roi-top-offset=0 67 | #roi-bottom-offset=0 68 | detected-min-w=4 69 | detected-min-h=4 70 | #detected-max-w=800 71 | #detected-max-h=600 72 | 73 | 74 | -------------------------------------------------------------------------------- /configs/odtk_model_config_fp16.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2019 NVIDIA Corporation. All rights reserved. 2 | # NVIDIA Corporation and its licensors retain all intellectual property 3 | # and proprietary rights in and to this software, related documentation 4 | # and any modifications thereto. Any use, reproduction, disclosure or 5 | # distribution of this software and related documentation without an express 6 | # license agreement from NVIDIA Corporation is strictly prohibited. 7 | 8 | # Following properties are mandatory when engine files are not specified: 9 | # int8-calib-file(Only in INT8) 10 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 11 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 12 | # ONNX: onnx-file 13 | # 14 | # Mandatory properties for detectors: 15 | # parse-func, num-detected-classes, 16 | # custom-lib-path (when parse-func=0 i.e. 
custom), 17 | # parse-bbox-func-name (when parse-func=0) 18 | # 19 | # Optional properties for detectors: 20 | # enable-dbscan(Default=false), interval(Primary mode only, Default=0) 21 | # 22 | # Mandatory properties for classifiers: 23 | # classifier-threshold, is-classifier 24 | # 25 | # Optional properties for classifiers: 26 | # classifier-async-mode(Secondary mode only, Default=false) 27 | # 28 | # Optional properties in secondary mode: 29 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 30 | # input-object-min-width, input-object-min-height, input-object-max-width, 31 | # input-object-max-height 32 | # 33 | # Following properties are always recommended: 34 | # batch-size(Default=1) 35 | # 36 | # Other optional properties: 37 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 38 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 39 | # mean-file, gie-unique-id(Default=0), offsets, gie-mode (Default=1 i.e. primary), 40 | # custom-lib-path, network-mode(Default=0 i.e FP32) 41 | # 42 | # The values in the config file are overridden by values set through GObject 43 | # properties. 
44 | 45 | [property] 46 | gpu-id=0 47 | net-scale-factor=0.017352074 48 | offsets=123.675;116.28;103.53 49 | 50 | ## 0=FP32, 1=INT8, 2=FP16 mode 51 | network-mode=2 52 | num-detected-classes=1 53 | interval=0 54 | gie-unique-id=1 55 | is-classifier=0 56 | parse-bbox-func-name=NvDsInferParseRetinaNet 57 | custom-lib-path=/path-to/retinanet-examples/extras/deepstream/deepstream-sample/build/libnvdsparsebbox_retinanet.so 58 | #enable-dbscan=1 59 | 60 | 61 | [class-attrs-all] 62 | threshold=0.2 63 | group-threshold=0 64 | ## Set eps=0.7 and minBoxes for enable-dbscan=1 65 | #eps=0.2 66 | #minBoxes=3 67 | #roi-top-offset=0 68 | #roi-bottom-offset=0 69 | detected-min-w=4 70 | detected-min-h=4 71 | #detected-max-w=0 72 | #detected-max-h=0 73 | 74 | 75 | -------------------------------------------------------------------------------- /open_images/download_open_images.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/train_00.zip 4 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/train_01.zip 5 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/train_02.zip 6 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/train_03.zip 7 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/train_04.zip 8 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/train_05.zip 9 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/train_06.zip 10 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/train_07.zip 11 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/train_08.zip 12 | 13 | wget 
https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/validation.zip 14 | wget https://storage.googleapis.com/openimages/v5/validation-annotations-bbox.csv 15 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/validation-images.csv 16 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/train-annotations-bbox.csv 17 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/train-images-boxable.csv 18 | wget https://storage.googleapis.com/openimages/v5/test-annotations-bbox.csv 19 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/test-images.csv 20 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/test_challenge.zip 21 | wget https://datasets.figure-eight.com/figure_eight_datasets/open-images/zip_files_copy/test.zip 22 | wget https://storage.googleapis.com/openimages/v5/class-descriptions-boxable.csv 23 | wget https://storage.googleapis.com/openimages/2019_01/challenge-2018-relationships-description.csv 24 | wget https://storage.googleapis.com/openimages/2019_01/challenge-2018-attributes-description.csv 25 | wget https://storage.googleapis.com/openimages/2019_01/challenge-2018-relationship-triplets.csv 26 | wget https://storage.googleapis.com/openimages/v5/train-annotations-human-imagelabels-boxable.csv 27 | wget https://storage.googleapis.com/openimages/v5/validation-annotations-human-imagelabels-boxable.csv 28 | wget https://storage.googleapis.com/openimages/v5/test-annotations-human-imagelabels-boxable.csv 29 | 30 | wget https://storage.googleapis.com/openimages/2018_04/train/train-images-boxable-with-rotation.csv 31 | wget https://storage.googleapis.com/openimages/2018_04/validation/validation-images-with-rotation.csv 32 | wget https://storage.googleapis.com/openimages/2018_04/test/test-images-with-rotation.csv -------------------------------------------------------------------------------- /configs/test_source1_int8.txt: 
-------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 2 | # 3 | # NVIDIA Corporation and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA Corporation is strictly prohibited. 8 | 9 | [application] 10 | enable-perf-measurement=1 11 | perf-measurement-interval-sec=5 12 | #gie-kitti-output-dir=streamscl 13 | 14 | [tiled-display] 15 | enable=1 16 | rows=1 17 | columns=1 18 | width=1280 19 | height=720 20 | 21 | [source0] 22 | enable=1 23 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 24 | type=1 25 | camera-width=1280 26 | camera-height=720 27 | camera-fps-n=30 28 | camera-fps-d=1 29 | camera-v4l2-dev-node=0 30 | 31 | 32 | [source1] 33 | enable=0 34 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 35 | type=2 36 | num-sources=1 37 | uri=file:///home/path-to/your-file.mp4 38 | gpu-id=0 39 | 40 | 41 | 42 | [sink0] 43 | enable=1 44 | #Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming 5=Overlay 45 | type=5 46 | sync=0 47 | display-id=0 48 | offset-x=0 49 | offset-y=0 50 | width=0 51 | height=0 52 | overlay-id=1 53 | source-id=0 54 | 55 | [sink1] 56 | enable=1 57 | type=3 58 | #1=mp4 2=mkv 59 | container=1 60 | #1=h264 2=h265 3=mpeg4 61 | codec=1 62 | sync=1 63 | bitrate=8000000 64 | output-file=out.mp4 65 | source-id=0 66 | 67 | [osd] 68 | enable=1 69 | #border-width=2 70 | #text-size=15 71 | #text-color=1;1;1;1; 72 | #text-bg-color=0.3;0.3;0.3;1 73 | #font=Serif 74 | #show-clock=0 75 | #clock-x-offset=800 76 | #clock-y-offset=820 77 | #clock-text-size=12 78 | #clock-color=1;0;0;0 79 | #gpu-id=0 80 | #border-width=0 81 | #nvbuf-memory-type=0 82 | #process-mode=1 83 | 84 | [streammux] 85 | ##Boolean property to inform muxer that sources 
are live 86 | live-source=1 87 | batch-size=1 88 | ##time out in usec, to wait after the first buffer is available 89 | ##to push the batch even if the complete batch is not formed 90 | batched-push-timeout=40000 91 | ## Set muxer output width and height 92 | width=1280 93 | height=720 94 | 95 | 96 | # config-file property is mandatory for any gie section. 97 | # Other properties are optional and if set will override the properties set in 98 | # the infer config file. 99 | 100 | [primary-gie] 101 | enable=1 102 | model-engine-file=../odtk_models/your-int8-engine.plan 103 | #property 104 | batch-size=1 105 | #Required by the app for OSD, not a plugin property 106 | #bbox-border-color0=1;0;0;1 107 | #bbox-border-color1=0;1;1;1 108 | #bbox-border-color2=0;0;1;1 109 | #bbox-border-color3=0;1;0;1 110 | interval=0 111 | #Required by the app for SGIE, when used along with config-file property 112 | gie-unique-id=1 113 | config-file=odtk_model_config_int8.txt 114 | 115 | 116 | [tracker] 117 | enable=1 118 | tracker-width=640 119 | tracker-height=384 120 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_iou.so 121 | ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_klt.so 122 | #ll-config-file required for IOU only 123 | #ll-config-file=iou_config.txt 124 | gpu-id=0 125 | 126 | [tests] 127 | file-loop=0 128 | 129 | -------------------------------------------------------------------------------- /configs/test_source1_fp16.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 2 | # 3 | # NVIDIA Corporation and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. 
Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA Corporation is strictly prohibited. 8 | 9 | [application] 10 | enable-perf-measurement=1 11 | perf-measurement-interval-sec=5 12 | #gie-kitti-output-dir=streamscl 13 | 14 | [tiled-display] 15 | enable=1 16 | rows=1 17 | columns=1 18 | width=1280 19 | height=720 20 | 21 | [source0] 22 | enable=1 23 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 24 | type=1 25 | camera-width=1280 26 | camera-height=720 27 | camera-fps-n=30 28 | camera-fps-d=1 29 | camera-v4l2-dev-node=0 30 | 31 | 32 | [source1] 33 | enable=0 34 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 35 | type=2 36 | num-sources=1 37 | uri=file:///home/path-to/your-file.mp4 38 | gpu-id=0 39 | 40 | 41 | [sink0] 42 | enable=1 43 | #Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming 5=Overlay 44 | type=5 45 | sync=0 46 | display-id=0 47 | offset-x=0 48 | offset-y=0 49 | width=0 50 | height=0 51 | overlay-id=1 52 | source-id=0 53 | 54 | [sink1] 55 | enable=1 56 | #Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming 5=Overlay 57 | type=3 58 | #1=mp4 2=mkv 59 | container=1 60 | #codec - 1=h264 2=h265 3=mpeg4 61 | codec=1 62 | sync=1 63 | bitrate=8000000 64 | output-file=out.mp4 65 | source-id=0 66 | 67 | 68 | [osd] 69 | enable=1 70 | #border-width=2 71 | #text-size=15 72 | #text-color=1;1;1;1; 73 | #text-bg-color=0.3;0.3;0.3;1 74 | #font=Serif 75 | #show-clock=0 76 | #clock-x-offset=800 77 | #clock-y-offset=820 78 | #clock-text-size=12 79 | #clock-color=1;0;0;0 80 | #gpu-id=0 81 | #border-width=0 82 | #nvbuf-memory-type=0 83 | #process-mode=1 84 | 85 | [streammux] 86 | ##Boolean property to inform muxer that sources are live 87 | live-source=1 88 | batch-size=1 89 | ##time out in usec, to wait after the first buffer is available 90 | ##to push the batch even if the complete batch is not formed 91 | batched-push-timeout=40000 92 | ## Set muxer output width and height 93 | 
width=1280 94 | height=720 95 | 96 | 97 | # config-file property is mandatory for any gie section. 98 | # Other properties are optional and if set will override the properties set in 99 | # the infer config file. 100 | 101 | [primary-gie] 102 | enable=1 103 | model-engine-file=../odtk_models/your-fp16-engine.plan 104 | #Required to display the PGIE labels, should be added even when using config-file 105 | #property 106 | batch-size=1 107 | #Required by the app for OSD, not a plugin property 108 | #bbox-border-color0=1;0;0;1 109 | #bbox-border-color1=0;1;1;1 110 | #bbox-border-color2=0;0;1;1 111 | #bbox-border-color3=0;1;0;1 112 | interval=0 113 | #Required by the app for SGIE, when used along with config-file property 114 | gie-unique-id=1 115 | config-file=odtk_model_config_fp16.txt 116 | 117 | 118 | [tracker] 119 | enable=1 120 | tracker-width=640 121 | tracker-height=384 122 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_iou.so 123 | ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_klt.so 124 | #ll-config-file required for IOU only 125 | #ll-config-file=iou_config.txt 126 | gpu-id=0 127 | 128 | [tests] 129 | file-loop=0 130 | 131 | -------------------------------------------------------------------------------- /configs/test_source8_fp16.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2019 NVIDIA Corporation. All rights reserved. 2 | # 3 | # NVIDIA Corporation and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA Corporation is strictly prohibited. 
8 | 9 | [application] 10 | enable-perf-measurement=1 11 | perf-measurement-interval-sec=5 12 | #gie-kitti-output-dir=streamscl 13 | 14 | [tiled-display] 15 | enable=1 16 | rows=4 17 | columns=2 18 | width=1280 19 | height=1440 20 | gpu-id=0 21 | #(0): nvbuf-mem-default - Default memory allocated, specific to particular platform 22 | #(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory applicable for Tesla 23 | #(2): nvbuf-mem-cuda-device - Allocate Device cuda memory applicable for Tesla 24 | #(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory applicable for Tesla 25 | #(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson 26 | nvbuf-memory-type=0 27 | 28 | [source0] 29 | enable=1 30 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 31 | type=3 32 | uri=file:///path-to/your-file.mp4 33 | num-sources=8 34 | #drop-frame-interval=2 35 | gpu-id=0 36 | # (0): memtype_device - Memory type Device 37 | # (1): memtype_pinned - Memory type Host Pinned 38 | # (2): memtype_unified - Memory type Unified 39 | cudadec-memtype=0 40 | 41 | 42 | 43 | [sink0] 44 | enable=1 45 | #Type - 1=FakeSink 2=EglSink 3=File 46 | type=2 47 | sync=1 48 | source-id=0 49 | gpu-id=0 50 | nvbuf-memory-type=0 51 | 52 | [sink1] 53 | enable=1 54 | type=3 55 | #1=mp4 2=mkv 56 | container=1 57 | #1=h264 2=h265 58 | codec=1 59 | sync=1 60 | bitrate=8000000 61 | output-file=out.mp4 62 | source-id=0 63 | 64 | 65 | [osd] 66 | enable=1 67 | 68 | [streammux] 69 | gpu-id=0 70 | ##Boolean property to inform muxer that sources are live 71 | live-source=0 72 | batch-size=8 73 | ##time out in usec, to wait after the first buffer is available 74 | ##to push the batch even if the complete batch is not formed 75 | batched-push-timeout=40000 76 | ## Set muxer output width and height 77 | width=1920 78 | height=2160 79 | ##Enable to maintain aspect ratio wrt source, and allow black borders, works 80 | ##along with width, height properties 81 | enable-padding=0 82 | 
nvbuf-memory-type=0 83 | 84 | # config-file property is mandatory for any gie section. 85 | # Other properties are optional and if set will override the properties set in 86 | # the infer config file. 87 | 88 | [primary-gie] 89 | enable=1 90 | gpu-id=0 91 | model-engine-file=../odtk_models/your-fp16-engine-with-batchsize-8.plan 92 | batch-size=8 93 | interval=2 94 | gie-unique-id=1 95 | nvbuf-memory-type=0 96 | 97 | config-file=odtk_model_config_fp16.txt 98 | 99 | [tracker] 100 | enable=1 101 | tracker-width=640 102 | tracker-height=384 103 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_iou.so 104 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_nvdcf.so 105 | ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_klt.so 106 | #ll-config-file required for DCF/IOU only 107 | #ll-config-file=tracker_config.yml 108 | #ll-config-file=iou_config.txt 109 | gpu-id=0 110 | #enable-batch-process applicable to DCF only 111 | enable-batch-process=1 112 | 113 | [tests] 114 | file-loop=0 115 | -------------------------------------------------------------------------------- /configs/test_source8_int8.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2019 NVIDIA Corporation. All rights reserved. 2 | # 3 | # NVIDIA Corporation and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA Corporation is strictly prohibited. 
8 | 9 | [application] 10 | enable-perf-measurement=1 11 | perf-measurement-interval-sec=5 12 | #gie-kitti-output-dir=streamscl 13 | 14 | [tiled-display] 15 | enable=1 16 | rows=4 17 | columns=2 18 | width=1280 19 | height=1440 20 | gpu-id=0 21 | #(0): nvbuf-mem-default - Default memory allocated, specific to particular platform 22 | #(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory applicable for Tesla 23 | #(2): nvbuf-mem-cuda-device - Allocate Device cuda memory applicable for Tesla 24 | #(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory applicable for Tesla 25 | #(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson 26 | nvbuf-memory-type=0 27 | 28 | [source0] 29 | enable=1 30 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 31 | type=3 32 | uri=file:///path-to/your-file.mp4 33 | num-sources=8 34 | gpu-id=0 35 | # (0): memtype_device - Memory type Device 36 | # (1): memtype_pinned - Memory type Host Pinned 37 | # (2): memtype_unified - Memory type Unified 38 | cudadec-memtype=0 39 | 40 | 41 | [sink0] 42 | enable=1 43 | #Type - 1=FakeSink 2=EglSink 3=File 44 | type=2 45 | sync=1 46 | source-id=0 47 | gpu-id=0 48 | nvbuf-memory-type=0 49 | 50 | [sink1] 51 | enable=1 52 | type=3 53 | #1=mp4 2=mkv 54 | container=1 55 | #1=h264 2=h265 56 | codec=1 57 | sync=1 58 | bitrate=8000000 59 | output-file=out.mp4 60 | source-id=0 61 | 62 | 63 | [osd] 64 | enable=1 65 | 66 | [streammux] 67 | gpu-id=0 68 | ##Boolean property to inform muxer that sources are live 69 | live-source=0 70 | batch-size=8 71 | ##time out in usec, to wait after the first buffer is available 72 | ##to push the batch even if the complete batch is not formed 73 | batched-push-timeout=40000 74 | ## Set muxer output width and height 75 | width=1920 76 | height=2160 77 | ##Enable to maintain aspect ratio wrt source, and allow black borders, works 78 | ##along with width, height properties 79 | enable-padding=0 80 | nvbuf-memory-type=0 81 | 82 | # config-file 
property is mandatory for any gie section. 83 | # Other properties are optional and if set will override the properties set in 84 | # the infer config file. 85 | [primary-gie] 86 | enable=1 87 | gpu-id=0 88 | model-engine-file=../odtk_models/your-int8-engine-with-batchsize-8.plan 89 | batch-size=8 90 | #Required by the app for OSD, not a plugin property 91 | #bbox-border-color0=1;0;0;1 92 | #bbox-border-color1=0;1;1;1 93 | #bbox-border-color2=0;0;1;1 94 | #bbox-border-color3=0;1;0;1 95 | interval=2 96 | gie-unique-id=1 97 | nvbuf-memory-type=0 98 | config-file=odtk_model_config_int8.txt 99 | 100 | [tracker] 101 | enable=1 102 | tracker-width=640 103 | tracker-height=368 104 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_iou.so 105 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_nvdcf.so 106 | ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_klt.so 107 | #ll-config-file required for DCF/IOU only 108 | #ll-config-file=tracker_config.yml 109 | #ll-config-file=iou_config.txt 110 | gpu-id=0 111 | #enable-batch-process applicable to DCF only 112 | enable-batch-process=1 113 | 114 | [tests] 115 | file-loop=0 116 | -------------------------------------------------------------------------------- /configs/test_source4_fp16.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2019 NVIDIA Corporation. All rights reserved. 2 | # 3 | # NVIDIA Corporation and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA Corporation is strictly prohibited. 
8 | 9 | [application] 10 | enable-perf-measurement=1 11 | perf-measurement-interval-sec=5 12 | #gie-kitti-output-dir=streamscl 13 | 14 | [tiled-display] 15 | enable=1 16 | rows=2 17 | columns=2 18 | width=1280 19 | height=720 20 | gpu-id=0 21 | #(0): nvbuf-mem-default - Default memory allocated, specific to particular platform 22 | #(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory applicable for Tesla 23 | #(2): nvbuf-mem-cuda-device - Allocate Device cuda memory applicable for Tesla 24 | #(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory applicable for Tesla 25 | #(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson 26 | nvbuf-memory-type=0 27 | 28 | [source0] 29 | enable=1 30 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 31 | type=3 32 | uri=file:///path-to/your-file.mp4 33 | num-sources=1 34 | #drop-frame-interval=2 35 | gpu-id=0 36 | # (0): memtype_device - Memory type Device 37 | # (1): memtype_pinned - Memory type Host Pinned 38 | # (2): memtype_unified - Memory type Unified 39 | cudadec-memtype=0 40 | 41 | [source1] 42 | enable=1 43 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 44 | type=3 45 | uri=file:///path-to/your-file.mp4 46 | num-sources=1 47 | #drop-frame-interval=2 48 | gpu-id=0 49 | cudadec-memtype=0 50 | 51 | [source2] 52 | enable=1 53 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 54 | type=3 55 | uri=file:///path-to/your-file.mp4 56 | num-sources=1 57 | #drop-frame-interval=2 58 | gpu-id=0 59 | cudadec-memtype=0 60 | 61 | [source3] 62 | enable=1 63 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 64 | type=3 65 | uri=file:///path-to/your-file.mp4 66 | num-sources=1 67 | #drop-frame-interval=2 68 | gpu-id=0 69 | cudadec-memtype=0 70 | 71 | 72 | [sink0] 73 | enable=1 74 | #Type - 1=FakeSink 2=EglSink 3=File 75 | type=2 76 | sync=1 77 | source-id=0 78 | gpu-id=0 79 | nvbuf-memory-type=0 80 | 81 | [sink1] 82 | enable=1 83 | type=3 84 | #1=mp4 2=mkv 85 | container=1 86 | #1=h264 2=h265 87 | codec=1 
88 | sync=1 89 | #iframeinterval=10 90 | bitrate=8000000 91 | output-file=out.mp4 92 | source-id=0 93 | 94 | [osd] 95 | enable=1 96 | #gpu-id=0 97 | #border-width=1 98 | #text-size=15 99 | #text-color=1;1;1;1; 100 | #text-bg-color=0.3;0.3;0.3;1 101 | #font=Serif 102 | #show-clock=0 103 | #clock-x-offset=800 104 | #clock-y-offset=820 105 | #clock-text-size=12 106 | #clock-color=1;0;0;0 107 | #nvbuf-memory-type=0 108 | 109 | [streammux] 110 | gpu-id=0 111 | ##Boolean property to inform muxer that sources are live 112 | live-source=0 113 | batch-size=4 114 | ##time out in usec, to wait after the first buffer is available 115 | ##to push the batch even if the complete batch is not formed 116 | batched-push-timeout=40000 117 | ## Set muxer output width and height 118 | width=1280 119 | height=720 120 | ##Enable to maintain aspect ratio wrt source, and allow black borders, works 121 | ##along with width, height properties 122 | enable-padding=0 123 | nvbuf-memory-type=0 124 | 125 | # config-file property is mandatory for any gie section. 126 | # Other properties are optional and if set will override the properties set in 127 | # the infer config file. 
128 | [primary-gie] 129 | enable=1 130 | gpu-id=0 131 | model-engine-file=../odtk_models/your-fp16-engine-with-batchsize-4.plan 132 | batch-size=4 133 | #Required by the app for OSD, not a plugin property 134 | #bbox-border-color0=1;0;0;1 135 | #bbox-border-color1=0;1;1;1 136 | #bbox-border-color2=0;0;1;1 137 | #bbox-border-color3=0;1;0;1 138 | interval=2 139 | gie-unique-id=1 140 | nvbuf-memory-type=0 141 | config-file=odtk_model_config_fp16.txt 142 | 143 | [tracker] 144 | enable=1 145 | tracker-width=640 146 | tracker-height=384 147 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_iou.so 148 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_nvdcf.so 149 | ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_klt.so 150 | #ll-config-file required for DCF/IOU only 151 | #ll-config-file=tracker_config.yml 152 | #ll-config-file=iou_config.txt 153 | gpu-id=0 154 | #enable-batch-process applicable to DCF only 155 | enable-batch-process=1 156 | 157 | [tests] 158 | file-loop=0 159 | -------------------------------------------------------------------------------- /configs/test_source4_int8.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2019 NVIDIA Corporation. All rights reserved. 2 | # 3 | # NVIDIA Corporation and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA Corporation is strictly prohibited. 
8 | 9 | [application] 10 | enable-perf-measurement=1 11 | perf-measurement-interval-sec=5 12 | #gie-kitti-output-dir=streamscl 13 | 14 | [tiled-display] 15 | enable=1 16 | rows=2 17 | columns=2 18 | width=1280 19 | height=720 20 | gpu-id=0 21 | nvbuf-memory-type=0 22 | #(0): nvbuf-mem-default - Default memory allocated, specific to particular platform 23 | #(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory applicable for Tesla 24 | #(2): nvbuf-mem-cuda-device - Allocate Device cuda memory applicable for Tesla 25 | #(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory applicable for Tesla 26 | #(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson 27 | 28 | [source0] 29 | enable=1 30 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 31 | type=3 32 | uri=file:///path-to/your-file.mp4 33 | num-sources=1 34 | #drop-frame-interval=2 35 | gpu-id=0 36 | # (0): memtype_device - Memory type Device 37 | # (1): memtype_pinned - Memory type Host Pinned 38 | # (2): memtype_unified - Memory type Unified 39 | cudadec-memtype=0 40 | 41 | [source1] 42 | enable=1 43 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 44 | type=3 45 | uri=file:///path-to/your-file.mp4 46 | num-sources=1 47 | #drop-frame-interval=2 48 | gpu-id=0 49 | cudadec-memtype=0 50 | 51 | [source2] 52 | enable=1 53 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 54 | type=3 55 | uri=file:///path-to/your-file.mp4 56 | num-sources=1 57 | #drop-frame-interval=2 58 | gpu-id=0 59 | cudadec-memtype=0 60 | 61 | [source3] 62 | enable=1 63 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 64 | type=3 65 | uri=file:///path-to/your-file.mp4 66 | num-sources=1 67 | #drop-frame-interval=2 68 | gpu-id=0 69 | cudadec-memtype=0 70 | 71 | 72 | [sink0] 73 | enable=1 74 | #Type - 1=FakeSink 2=EglSink 3=File 75 | type=2 76 | sync=1 77 | source-id=0 78 | gpu-id=0 79 | nvbuf-memory-type=0 80 | 81 | [sink1] 82 | enable=1 83 | type=3 84 | #1=mp4 2=mkv 85 | container=1 86 | #1=h264 2=h265 87 | codec=1 
88 | sync=1 89 | #iframeinterval=10 90 | bitrate=8000000 91 | output-file=out.mp4 92 | source-id=0 93 | 94 | 95 | [osd] 96 | enable=1 97 | #gpu-id=0 98 | #border-width=1 99 | #text-size=15 100 | #text-color=1;1;1;1; 101 | #text-bg-color=0.3;0.3;0.3;1 102 | #font=Serif 103 | #show-clock=0 104 | #clock-x-offset=800 105 | #clock-y-offset=820 106 | #clock-text-size=12 107 | #clock-color=1;0;0;0 108 | #nvbuf-memory-type=0 109 | 110 | [streammux] 111 | gpu-id=0 112 | ##Boolean property to inform muxer that sources are live 113 | live-source=0 114 | batch-size=4 115 | ##time out in usec, to wait after the first buffer is available 116 | ##to push the batch even if the complete batch is not formed 117 | batched-push-timeout=40000 118 | ## Set muxer output width and height 119 | width=1280 120 | height=720 121 | ##Enable to maintain aspect ratio wrt source, and allow black borders, works 122 | ##along with width, height properties 123 | enable-padding=0 124 | nvbuf-memory-type=0 125 | 126 | # config-file property is mandatory for any gie section. 127 | # Other properties are optional and if set will override the properties set in 128 | # the infer config file. 
129 | [primary-gie] 130 | enable=1 131 | gpu-id=0 132 | model-engine-file=../odtk_models/your-int8-engine-with-batchsize-4.plan 133 | batch-size=4 134 | #Required by the app for OSD, not a plugin property 135 | #bbox-border-color0=1;0;0;1 136 | #bbox-border-color1=0;1;1;1 137 | #bbox-border-color2=0;0;1;1 138 | #bbox-border-color3=0;1;0;1 139 | interval=2 140 | gie-unique-id=1 141 | nvbuf-memory-type=0 142 | config-file=odtk_model_config_int8.txt 143 | 144 | [tracker] 145 | enable=1 146 | tracker-width=640 147 | tracker-height=384 148 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_iou.so 149 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_nvdcf.so 150 | ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_klt.so 151 | #ll-config-file required for DCF/IOU only 152 | #ll-config-file=tracker_config.yml 153 | #ll-config-file=iou_config.txt 154 | gpu-id=0 155 | #enable-batch-process applicable to DCF only 156 | enable-batch-process=1 157 | 158 | [tests] 159 | file-loop=0 160 | -------------------------------------------------------------------------------- /src/deepstream_app.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 
13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __NVGSTDS_APP_H__ 24 | #define __NVGSTDS_APP_H__ 25 | 26 | #ifdef __cplusplus 27 | extern "C" 28 | { 29 | #endif 30 | 31 | #include 32 | #include 33 | 34 | #include "deepstream_app_version.h" 35 | #include "deepstream_common.h" 36 | #include "deepstream_config.h" 37 | #include "deepstream_osd.h" 38 | #include "deepstream_perf.h" 39 | #include "deepstream_primary_gie.h" 40 | #include "deepstream_sinks.h" 41 | #include "deepstream_sources.h" 42 | #include "deepstream_streammux.h" 43 | #include "deepstream_tiled_display.h" 44 | #include "deepstream_dsexample.h" 45 | #include "deepstream_tracker.h" 46 | #include "deepstream_secondary_gie.h" 47 | 48 | typedef struct _AppCtx AppCtx; 49 | 50 | typedef void (*bbox_generated_callback) (AppCtx *appCtx, GstBuffer *buf, 51 | NvDsBatchMeta *batch_meta, guint index); 52 | typedef gboolean (*overlay_graphics_callback) (AppCtx *appCtx, GstBuffer *buf, 53 | NvDsBatchMeta *batch_meta, guint index); 54 | 55 | 56 | typedef struct 57 | { 58 | guint index; 59 | gulong all_bbox_buffer_probe_id; 60 | gulong primary_bbox_buffer_probe_id; 61 | gulong fps_buffer_probe_id; 62 | GstElement *bin; 63 | NvDsPrimaryGieBin primary_gie_bin; 64 | NvDsOSDBin osd_bin; 65 | NvDsSecondaryGieBin secondary_gie_bin; 66 | NvDsTrackerBin tracker_bin; 67 | NvDsSinkBin sink_bin; 68 | NvDsDsExampleBin dsexample_bin; 69 | AppCtx *appCtx; 70 | } NvDsInstanceBin; 71 | 72 | typedef struct 73 | { 74 | gulong 
primary_bbox_buffer_probe_id; 75 | guint bus_id; 76 | GstElement *pipeline; 77 | NvDsSrcParentBin multi_src_bin; 78 | NvDsInstanceBin instance_bins[MAX_SOURCE_BINS]; 79 | NvDsInstanceBin common_elements; 80 | NvDsTiledDisplayBin tiled_display_bin; 81 | GstElement *demuxer; 82 | NvDsDsExampleBin dsexample_bin; 83 | AppCtx *appCtx; 84 | } NvDsPipeline; 85 | 86 | typedef struct 87 | { 88 | gboolean enable_perf_measurement; 89 | gint file_loop; 90 | guint num_source_sub_bins; 91 | guint num_secondary_gie_sub_bins; 92 | guint num_sink_sub_bins; 93 | guint perf_measurement_interval_sec; 94 | gchar *bbox_dir_path; 95 | gchar *kitti_track_dir_path; 96 | 97 | NvDsSourceConfig multi_source_config[MAX_SOURCE_BINS]; 98 | NvDsStreammuxConfig streammux_config; 99 | NvDsOSDConfig osd_config; 100 | NvDsGieConfig primary_gie_config; 101 | NvDsTrackerConfig tracker_config; 102 | NvDsGieConfig secondary_gie_sub_bin_config[MAX_SECONDARY_GIE_BINS]; 103 | NvDsSinkSubBinConfig sink_bin_sub_bin_config[MAX_SINK_BINS]; 104 | NvDsTiledDisplayConfig tiled_display_config; 105 | NvDsDsExampleConfig dsexample_config; 106 | } NvDsConfig; 107 | 108 | typedef struct 109 | { 110 | gulong frame_num; 111 | } NvDsInstanceData; 112 | 113 | struct _AppCtx 114 | { 115 | gboolean version; 116 | gboolean cintr; 117 | gboolean show_bbox_text; 118 | gboolean seeking; 119 | gboolean quit; 120 | gint person_class_id; 121 | gint car_class_id; 122 | gint return_value; 123 | guint index; 124 | 125 | GMutex app_lock; 126 | GCond app_cond; 127 | 128 | NvDsPipeline pipeline; 129 | NvDsConfig config; 130 | NvDsInstanceData instance_data[MAX_SOURCE_BINS]; 131 | NvDsAppPerfStructInt perf_struct; 132 | bbox_generated_callback primary_bbox_generated_cb; 133 | bbox_generated_callback all_bbox_generated_cb; 134 | overlay_graphics_callback overlay_graphics_cb; 135 | NvDsFrameLatencyInfo *latency_info; 136 | GMutex latency_lock; 137 | }; 138 | 139 | gboolean create_pipeline (AppCtx * appCtx, 140 | bbox_generated_callback 
primary_bbox_generated_cb, 141 | bbox_generated_callback all_bbox_generated_cb, 142 | perf_callback perf_cb, 143 | overlay_graphics_callback overlay_graphics_cb); 144 | 145 | gboolean pause_pipeline (AppCtx * appCtx); 146 | gboolean resume_pipeline (AppCtx * appCtx); 147 | gboolean seek_pipeline (AppCtx * appCtx, glong milliseconds, gboolean seek_is_relative); 148 | 149 | void toggle_show_bbox_text (AppCtx * appCtx); 150 | 151 | void destroy_pipeline (AppCtx * appCtx); 152 | void restart_pipeline (AppCtx * appCtx); 153 | 154 | 155 | /** 156 | * Function to read properties from configuration file. 157 | * 158 | * @param[in] config pointer to @ref NvDsConfig 159 | * @param[in] cfg_file_path path of configuration file. 160 | * 161 | * @return true if parsed successfully. 162 | */ 163 | gboolean 164 | parse_config_file (NvDsConfig * config, gchar * cfg_file_path); 165 | 166 | #ifdef __cplusplus 167 | } 168 | #endif 169 | 170 | #endif 171 | -------------------------------------------------------------------------------- /DATA_README.md: -------------------------------------------------------------------------------- 1 | # Data preparation 2 | 3 | These instructions show how to convert [Open Images v5](https://storage.googleapis.com/openimages/web/index.html) 4 | annotations into a COCO format dataset that we can use for face detection. 5 | 6 | ## Enter container 7 | 8 | We will work in a PyTorch container, which we download from the [NVIDIA GPU Cloud](https://ngc.nvidia.com). 9 | If you don't have one, sign up for a free account and create an API KEY. 10 | 11 | Login to `nvcr.io` 12 | ```bash 13 | docker login nvcr.io 14 | ``` 15 | 16 | Now we can enter the container 17 | 18 | ```bash 19 | DATA_DIR=/ 20 | WORKING_DIR=/ 21 | docker run -it --gpus all --rm --ipc=host -v$DATA_DIR:/data -v$WORKING_DIR:/src -w/src nvcr.io/nvidian/pytorch:19.09-py3 22 | ``` 23 | 24 | ## Download Open Images 25 | 26 | Download the dataset by running the download script from the data directory. 
27 | ```bash 28 | cd /data/open_images 29 | bash /src/open_images/download_open_images.sh 30 | bash /src/open_images/unzip_open_images.sh 31 | ``` 32 | 33 | Your `/data` directory should look like this: 34 | 35 | ``` 36 | >> du -sh * 37 | 10G challenge2018 38 | 4.0K challenge-2018-attributes-description.csv 39 | 4.0K challenge-2018-relationships-description.csv 40 | 12K challenge-2018-relationship-triplets.csv 41 | 12K class-descriptions-boxable.csv 42 | 37G test 43 | 74M test-annotations-bbox.csv 44 | 31M test-annotations-human-imagelabels-boxable.csv 45 | 15M test-images.csv 46 | 44M test-images-with-rotation.csv 47 | 60G train_00 48 | 60G train_01 49 | 60G train_02 50 | 60G train_03 51 | 60G train_04 52 | 59G train_05 53 | 59G train_06 54 | 60G train_07 55 | 43G train_08 56 | 1.2G train-annotations-bbox.csv 57 | 360M train-annotations-human-imagelabels-boxable.csv 58 | 207M train-images-boxable.csv 59 | 609M train-images-boxable-with-rotation.csv 60 | 13G validation 61 | 24M validation-annotations-bbox.csv 62 | 11M validation-annotations-human-imagelabels-boxable.csv 63 | 5.2M validation-images.csv 64 | 15M validation-images-with-rotation.csv 65 | 570G zips 66 | ``` 67 | 68 | ## Parse validation data 69 | We want to produce a `.json` file that contains all the images from some classes, and a subset of images from the other classes. 70 | 71 | Working in the `/src` directory, we start by defining the Open Images validation images and annotation files, and the location of our output data. 72 | ```python 73 | images_dir = '/data/open_images/validation' 74 | annotation_csv = '/data/open_images/validation-annotations-bbox.csv' 75 | category_csv = '/data/open_images/class-descriptions-boxable.csv' 76 | output_json = '/data/open_images/val_faces.json' 77 | 78 | # Now we read the Open Images categories and parse our data. 
79 | 80 | import open_images.open_image_to_json as oij 81 | from data_tools.coco_tools import write_json 82 | catmid2name = oij.read_catMIDtoname(category_csv) 83 | oidata = oij.parse_open_images(annotation_csv) # This is a representation of our dataset. 84 | 85 | # We only want images that contain the 'Human Face' class, so we run a function that removes all other images. 86 | 87 | set1 = oij.reduce_data(oidata, catmid2name, keep_classes=['Human face']) 88 | 89 | # Finally we convert this data to COCO format, using this as an opportunity to exclude any annotations 90 | # that are smaller than min_ann_size when the input images are resized to maxdim 880 (max_size), and save to a file. 91 | 92 | cocodata = oij.openimages2coco(set1, catmid2name, images_dir, 93 | desc="Open Image validation data, set 1.", 94 | output_class_ids={'Human face': 1}, 95 | max_size=880, min_ann_size=(1,1), 96 | min_ratio=2.0) 97 | write_json(cocodata, output_json) 98 | 99 | ``` 100 | 101 | ## Parse training data 102 | Following the same process, we can produce a training dataset. 103 | 104 | ```python 105 | import open_images.open_image_to_json as oij 106 | from data_tools.coco_tools import write_json 107 | # Define paths 108 | images_dir = ['/data/open_images/train_0%i'%oo for oo in range(9)] # There are nine image directories. 
109 | annotation_csv = '/data/open_images/train-annotations-bbox.csv' 110 | category_csv = '/data/open_images/class-descriptions-boxable.csv' 111 | output_json = '/data/open_images/train_faces.json' 112 | 113 | # Read the category names 114 | catmid2name = oij.read_catMIDtoname(category_csv) 115 | # Parse the annotations 116 | oidata = oij.parse_open_images(annotation_csv) 117 | 118 | # Keep only human faces 119 | trainset1 = oij.reduce_data(oidata, catmid2name, keep_classes=['Human face']) 120 | cocodata = oij.openimages2coco(trainset1, catmid2name, images_dir, desc="Open Image train data, set 1.", 121 | output_class_ids={'Human face': 1}, 122 | max_size=880, min_ann_size=(1,1), 123 | min_ratio=2.0) 124 | write_json(cocodata, output_json) 125 | 126 | ``` 127 | 128 | ## Copy images in our dataset 129 | 130 | Copy images that are in our dataset, from the Open Images directories to a new directory. 131 | 132 | 133 | ```python 134 | import open_images.open_image_to_json as oij 135 | oij.copy_images('/data/open_images/val_faces.json', 136 | '/data/open_images/validation', '/data/open_images/val_faces') 137 | images_dir = ['/data/open_images/train_0%i'%oo for oo in range(9)] # There are nine image directories. 138 | oij.copy_images('/data/open_images/train_faces.json', images_dir, 139 | '/data/open_images/train_faces') 140 | ``` 141 | 142 | ## Plot ground truth 143 | 144 | As a quick sanity check, let's plot some of our training set. 
145 | 146 | ```python 147 | from data_tools.plot_images import draw_boxes 148 | image_dir = '/data/open_images/train_faces' 149 | anns = '/data/open_images/train_faces.json' 150 | output_dir = '/data/open_images/gt_plot_train_faces' 151 | draw_boxes(image_dir, output_dir, anns) 152 | ``` -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------ 2 | # This sample application is no longer maintained 3 | # ------------------------------------------------------ 4 | 5 | # Face Redaction with DeepStream 6 | 7 | This sample shows how to train and deploy a deep learning model for the real time redaction of faces from video streams using the NVIDIA DeepStream SDK. 8 | 9 | ## Blog posts 10 | There are two blog posts that accompany this repo: 11 | - [Building a Real-time Redaction App Using NVIDIA DeepStream, Part 1: Training](https://devblogs.nvidia.com/real-time-redaction-app-nvidia-deepstream-part-1-training/) 12 | - [Building a Real-time Redaction App Using NVIDIA DeepStream, Part 2: Deployment](https://devblogs.nvidia.com/real-time-redaction-app-nvidia-deepstream-part-2-deployment/) 13 | 14 | ## Getting Started Guide 15 | 16 | ### Data preparation 17 | 18 | The [data README](DATA_README.md) explains how we used python to convert 19 | [Open Images v5](https://storage.googleapis.com/openimages/web/index.html) annotations into COCO format annotations. 20 | 21 | ### Training using RetinaNet 22 | 23 | The [training README](TRAINING_README.md) shows how to train, evaluate and export a model using the [NVIDIA PyTorch implementation of RetinaNet](https://github.com/NVIDIA/retinanet-examples). 24 | 25 | ### Redaction app setup 26 | 27 | #### Jetpack compatibility 28 | 29 | Please note that the master branch has been designed for running DeepStream 4.0 in Jetpack 4.2. 
For running in DeepStream 4.0.2 in Jetpack 4.3, please switch over to the Jetpack4.3 branch. 30 | 31 | #### DeepStream 32 | Before we start with our redaction app, please make sure that you're able to successfully run the DeepStream sample apps. 33 | From the DeepStream `samples` folder, you can call any of the config files (`source*.txt`) in `configs/deepstream-app/`. 34 | ``` 35 | deepstream-app -c configs/deepstream-app/source4_1080p_dec_infer-resnet_tracker_sgie_tiled_display_int8.txt 36 | ``` 37 | Running the example above will show four streams of the same video, with vehicles and pedestrians being detected. If the performance of sample apps such as this is lagging, try running *sudo /usr/bin/jetson_clocks* to set max performance. 38 | 39 | #### Making the redaction app 40 | 41 | Now we can make the redaction app. 42 | 43 | * Copy the contents of this folder to `/deepstream_sdk_v4.0_jetson/sources/apps/`. 44 | * Install the prerequisites. 45 | 46 | ```bash 47 | apt install libssl1.0.0 libgstreamer1.0-0 gstreamer1.0-tools gstreamer1.0-plugins-good gstreamer1.0-plugins-bad \ 48 | gstreamer1.0-plugins-ugly gstreamer1.0-libav libgstrtspserver-1.0-0 libjansson4=2.11-1 \ 49 | librdkafka1=0.11.3-1build1 50 | ``` 51 | 52 | * Run `make` 53 | * If `make` gives any error, check that 1) in the Makefile we are pointing to correct and valid paths, and 2) the appropriate packages have been installed, including libgstreamer1.0-dev, libgstreamer1.0, and libgstreamer-plugins-base1.0-dev. 54 | 55 | 56 | ### Using the redaction app 57 | 58 | ```bash 59 | ./deepstream-redaction-app -c configs/test_source*_.txt 60 | ``` 61 | 62 | However, before we can run our app with a config file, we'll want to modify a few parameters in the config files. We will introduce the parameters in the following section. 63 | 64 | 65 | ## DeepStream config files 66 | 67 | ### 1. 
Main pipeline config file 68 | 69 | Examples in our config/ folder include: test_source1_fp16, test_source4_fp16, test_source8_fp16, test_source1_int8, test_source4_int8, test_source8_int8. For official DeepStream examples, please see config files in folder `/deepstream_sdk_v4.0_jetson/samples/configs/deepstream-app/`. 70 | 71 | The parameter `enable` turns each of the pipeline elements on and off. 72 | 73 | #### source 74 | 75 | Change `[source*]` for the type of source you want to stream from. Type 1 is from a live camera connected to Jetson. Type 2 URI can be a local mp4 file or a rtsp source. Type 3 MultiURI pertains to the case where we want to have more than one source streaming. 76 | 77 | If we want to stream a local mp4 as the source, we can change the uri parameter in `[source*]` to point to the path of that mp4 file. For example, if the mp4 file is located in /home/this_user/videos/, we would want to write uri=file:///home/this_user/videos/your-file.mp4 78 | 79 | #### sink 80 | 81 | Change `[sink*]` for the type of output sink you want the stream to go to. 82 | 83 | #### multiple streams 84 | 85 | For multiple streams we need to modify and add `[source*]` as necessary. We will also need to change: the rows and columns number in `[tiled-display]`, the batch-size in `[streammux]` to equal the number of sources, and the batch-size in `[primary-gie]` as well. If we see a performance drop as the number of streams increases, one adjustment we can make to meet real-time criteria is to modify the interval parameter in `[primary-gie]`: 86 | if we set `interval = 1`, that means we're inferring every other frame instead of every single frame (when `interval = 0`). When we set interval > 0, we should turn on the `[tracker]` to track objects of interest. 87 | 88 | #### model engine file 89 | 90 | The model-engine-file parameter in [primary-gie] should point to the TRT model engine file to be deployed. 
Find the path to your engine.plan file that you generated on this Jetson device (this file was generated using the `./export` command in the retinanet repo's `cppapi` folder). We can copy the file over to the `odtk_models` folder for clarity. 91 | 92 | #### config file 93 | 94 | The config-file parameter in [primary-gie] should point to the model config file. 95 | 96 | #### loop 97 | 98 | If you are streaming mp4 files or rtsp sources of a finite time period and would like to run an infinite loop on the source, you could change the following "file-loop" parameter to 1 in the config file. 99 | ``` 100 | [tests] 101 | file-loop=0 102 | ``` 103 | #### osd 104 | 105 | In this app, our goal is to redact faces, therefore our on screen display (osd) doesn't need to display anything other than a box. If you want to display texts or more, please see example config files at `/deepstream_sdk_v4.0_jetson/samples/configs/deepstream-app/`. To get started, you'd want to modify the parameters in [osd] and [primary-gie]. 106 | 107 | ### 2. Model config file 108 | 109 | Examples in our config/ folder include: odtk_model_config_int8.txt and odtk_model_config_fp16.txt. 110 | 111 | The parameter network-mode in [property] should correspond to the precision of your TensorRT model engine file. 112 | 113 | We need to make sure that custom-lib-path in [property] is pointing to the correct file path for the custom library `libnvdsparsebbox_retinanet.so` we just built. 114 | 115 | The parameter threshold in [class-attrs-all] will decide the threshold above which detections are outputted. You can modify the value of threshold to have more detections or less. 116 | 117 | 118 | For the provided config file examples in folder configs/, we will want to modify at least the following to get the app running: 119 | 120 | 1. In odtk_model_config_*.txt, the custom-lib-path parameter in [property], which should point to the `libnvdsparsebbox_retinanet.so` file that we built. 121 | 122 | 2. 
In test_source*.txt, the model-engine-file parameter in [primary-gie], which should point to an engine plan file. 123 | 124 | 3. In test_source*.txt, the uri parameter in [source*], which points to the path to the mp4 file if we are inferring on an mp4 file. 125 | 126 | 127 | 128 | 129 | ### Possible runtime errors 130 | 131 | 132 | ``` 133 | pp:511:gst_nvinfer_logger: NvDsInferContext[UID 1]:generateTRTModel(): No model files specified 134 | ``` 135 | 136 | 137 | This is caused by not having a valid model engine file in the config file. There are some possibilities: it's either that we do not have the file with the specified name in the path, or, we have a model engine in this path, but the batch size that the model engine has is smaller than the number of sources/streams we are trying to process. For example, if we exported the onnx file to an engine.plan file using an executable made with batch size = 4 specified in `export.cpp`, then our engine cannot process more than 4 streams at a time. 138 | 139 | 140 | 141 | -------------------------------------------------------------------------------- /data_tools/coco_tools.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tools for manipulating coco annotations 3 | """ 4 | 5 | import os 6 | import json 7 | from PIL import Image 8 | import random 9 | import shutil 10 | 11 | def resize(img_folder, annotations, resize_factor, output_img_folder, output_annotations): 12 | """ 13 | Resize images to (original size * resize_factor) 14 | :param img_folder: Folder containing original images 15 | :param annotations: File containing COCO style annotations 16 | :param resize_factor: factor to increase each dim size by. 0.25 = shrink by 4 x 17 | :param output_img_folder: Folder that will contain the new images 18 | :param output_annotations: File that will contain the new annotations.
19 | :return: 20 | """ 21 | # Check all files and directories exist 22 | assert os.path.isdir(img_folder), "Directory %s does not exist" % img_folder 23 | assert os.path.isdir(output_img_folder), "Directory %s does not exist" % output_img_folder 24 | assert os.path.isdir(os.path.split(output_annotations)[0]), "Directory %s does not exist" % os.path.split(output_annotations)[0] 25 | assert os.path.isfile(annotations), "File %s does not exist" % annotations 26 | 27 | # Read in annotations 28 | print("Reading annotaitons from", annotations) 29 | with open(annotations) as f: 30 | anns = json.load(f) 31 | if not anns: 32 | raise IOError("The annotation file is empty.") 33 | 34 | new_images = [] 35 | old_images = anns['images'] 36 | # Work through each image in annotations, resizing height & width attributes and resizing and copying image. 37 | for img in old_images: 38 | old_filepath = os.path.join(img_folder, img['file_name']) 39 | new_filepath = os.path.join(output_img_folder, img['file_name']) 40 | new_w = int(resize_factor * img['width']) 41 | new_h = int(resize_factor * img['height']) 42 | img['width'] = new_w 43 | img['height'] = new_h 44 | new_images.append(img) 45 | 46 | # Now resize 47 | try: 48 | image = Image.open(old_filepath).convert("RGB") 49 | except FileNotFoundError: 50 | print("Image not found:", old_filepath) 51 | continue 52 | except OSError: 53 | print("Image damaged:", old_filepath) 54 | continue 55 | new_image = image.resize((new_w, new_h), Image.BILINEAR) 56 | new_image.save(new_filepath, quality=95) 57 | 58 | # print("DEBUG: Image size requested:", new_w, new_h) 59 | # print("DEBUG: new_image.size", new_image.size) 60 | # print("DEBUG: new_image location", new_filepath) 61 | # 62 | # raise NotImplementedError("TEST RESIZE)") 63 | 64 | anns['images'] = new_images 65 | 66 | # Work through annotations, resizing xmin, ymin, w, h. 
67 | old_anns = anns['annotations'] 68 | new_anns = [] 69 | for ann in old_anns: 70 | [xmin, ymin, w, h] = ann['bbox'] 71 | xmin = int(xmin * resize_factor) 72 | ymin = int(ymin * resize_factor) 73 | w = int(w * resize_factor) 74 | h = int(h * resize_factor) 75 | xmax = xmin + w 76 | ymax = ymin + h 77 | ann['bbox'] = [xmin, ymin, w, h] 78 | ann['area'] = w * h 79 | ann['seg'] = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin] 80 | new_anns.append(ann) 81 | anns['annotations'] = new_anns 82 | # Save out new annotations. 83 | 84 | print("All images resized and copied.") 85 | with open(output_annotations, 'w') as outfile: 86 | json.dump(anns, outfile) 87 | 88 | 89 | def split_dataset(input_annotations, frac_split_a, a_output_path, b_output_path): 90 | """ 91 | Split the dataset into two fractions, a and b. 92 | :param input_annotations: 93 | :param frac_split_a: 0.8 = 80% of data goes to a, 20% to b 94 | :param a_output_path: 95 | :param b_output_path: 96 | :return: 97 | """ 98 | # Check all files and directories exist 99 | assert os.path.isdir(os.path.split(a_output_path)[0]), "Directory %s does not exist" % os.path.split(a_output_path)[0] 100 | assert os.path.isdir(os.path.split(b_output_path)[0]), "Directory %s does not exist" % os.path.split(b_output_path)[0] 101 | assert os.path.isfile(input_annotations), "File %s does not exist" % input_annotations 102 | assert 0 < frac_split_a < 1, "frac_split must be between 0 and 1" 103 | 104 | # Read in annotations 105 | print("Reading annotaitons from", input_annotations) 106 | with open(input_annotations) as f: 107 | input_anns = json.load(f) 108 | if not input_anns: 109 | raise IOError("The annotation file is empty.") 110 | 111 | # Loop through images, assigning each to either 'a' or 'b'. 
112 | image_split_dict = {} 113 | for im in input_anns['images']: 114 | if random.random() < frac_split_a: 115 | image_split_dict[im['id']] = 'a' 116 | else: 117 | image_split_dict[im['id']] = 'b' 118 | 119 | # Now create two outputs and assign image and annotations to each. 120 | output_a = {'info': 121 | input_anns['info'] + ' Split ' + str(frac_split_a), 122 | 'licenses': input_anns['licenses'], 123 | 'images': [], 124 | 'annotations': [], 125 | 'categories': input_anns['categories']} # Prepare output 126 | output_b = {'info': 127 | input_anns['info'] + ' Split ' + str(1 - frac_split_a), 128 | 'licenses': input_anns['licenses'], 129 | 'images': [], 130 | 'annotations': [], 131 | 'categories': input_anns['categories']} # Prepare output 132 | 133 | output_a_raw_images = [] 134 | output_b_raw_images = [] 135 | 136 | for im in input_anns['images']: 137 | if image_split_dict[im['id']] == 'a': 138 | output_a_raw_images.append(im) 139 | elif image_split_dict[im['id']] == 'b': 140 | output_b_raw_images.append(im) 141 | else: 142 | raise SyntaxError('im not assigned to a nor b.', im) 143 | 144 | output_a_raw_anns = [] 145 | output_b_raw_anns = [] 146 | 147 | for ann in input_anns['annotations']: 148 | if image_split_dict[ann['image_id']] == 'a': 149 | output_a_raw_anns.append(ann) 150 | elif image_split_dict[ann['image_id']] == 'b': 151 | output_b_raw_anns.append(ann) 152 | else: 153 | raise SyntaxError('ann not assigned to a nor b.', ann) 154 | 155 | # Now renumber the annotations and images in each 156 | output_a_images = [] 157 | a_oldimgid2newimgid = {} # to use to convert annotations 158 | for indx, aa in enumerate(output_a_raw_images): 159 | a_oldimgid2newimgid[aa['id']] = indx 160 | aa['id'] = indx 161 | output_a_images.append(aa) 162 | output_b_images = [] 163 | b_oldimgid2newimgid = {} # to use to convert annotations 164 | for indx, bb in enumerate(output_b_raw_images): 165 | b_oldimgid2newimgid[bb['id']] = indx 166 | bb['id'] = indx 167 | 
output_b_images.append(bb) 168 | 169 | output_a_annotations = [] 170 | for indx, aa in enumerate(output_a_raw_anns): 171 | new_image_id = a_oldimgid2newimgid[aa['image_id']] 172 | aa['image_id'] = new_image_id 173 | aa['id'] = indx 174 | output_a_annotations.append(aa) 175 | 176 | output_b_annotations = [] 177 | for indx, bb in enumerate(output_b_raw_anns): 178 | new_image_id = b_oldimgid2newimgid[bb['image_id']] 179 | bb['image_id'] = new_image_id 180 | bb['id'] = indx 181 | output_b_annotations.append(bb) 182 | 183 | output_a['images'] = output_a_images 184 | output_a['annotations'] = output_a_annotations 185 | 186 | output_b['images'] = output_b_images 187 | output_b['annotations'] = output_b_annotations 188 | 189 | # Write some info 190 | print("Split A contains %i images and %i annotations." % (len(output_a['images']), len(output_a['annotations']))) 191 | print("Split B contains %i images and %i annotations." % (len(output_b['images']), len(output_b['annotations']))) 192 | 193 | # Write each out 194 | with open(a_output_path, 'w') as outfile: 195 | json.dump(output_a, outfile) 196 | with open(b_output_path, 'w') as outfile: 197 | json.dump(output_b, outfile) 198 | 199 | 200 | def copy_images(all_img_dir, new_img_dir, ann_file): 201 | """ 202 | Copy all images mentioned in ann_file from all_img_dir to new_img_dir 203 | """ 204 | assert os.path.isdir(all_img_dir), "Directory %s does not exist" % all_img_dir 205 | assert os.path.isdir(new_img_dir), "Directory %s does not exist" % new_img_dir 206 | assert os.path.isfile(ann_file), "File %s does not exist" % ann_file 207 | 208 | print("Reading annotaitons from", ann_file) 209 | with open(ann_file) as f: 210 | input_anns = json.load(f) 211 | if not input_anns: 212 | raise IOError("The annotation file is empty.") 213 | 214 | # Get list of images 215 | img_list = [im['file_name'] for im in input_anns['images']] 216 | 217 | # Work through list 218 | for im in img_list: 219 | old_path = os.path.join(all_img_dir, im) 220 
| new_path = os.path.join(new_img_dir, im) 221 | shutil.copy(old_path, new_path) 222 | 223 | def read_json(coco_annotation, verbose=False): 224 | if verbose: 225 | print("Reading annotaitons from", coco_annotation) 226 | with open(coco_annotation) as f: 227 | anns = json.load(f) 228 | if not anns: 229 | raise IOError("The annotation file is empty.") 230 | return anns 231 | 232 | def write_json(data, filepath): 233 | """Write JSON file""" 234 | dir_ = os.path.split(filepath)[0] 235 | assert os.path.isdir(dir_), "Directory %s does not exist" % dir_ 236 | 237 | with open(filepath, 'w') as outfile: 238 | json.dump(data, outfile) 239 | 240 | def get_filename2imgid(annfile, verbose=False): 241 | anns = read_json(annfile, verbose=verbose) 242 | filename2imgid = {} 243 | for img in anns['images']: 244 | filename2imgid[img['file_name']] = img['id'] 245 | return filename2imgid 246 | 247 | def get_imgid2anns(annfile, verbose=False): 248 | anns = read_json(annfile, verbose=verbose) 249 | imgid2anns = {} 250 | for ann in anns['annotations']: 251 | imgid = ann['image_id'] 252 | if imgid not in imgid2anns: 253 | imgid2anns[imgid] = [] 254 | imgid2anns[imgid].append(ann) 255 | return imgid2anns 256 | 257 | def get_imgid2img(annfile, verbose=False): 258 | anns = read_json(annfile, verbose=verbose) 259 | imgid2img = {} 260 | for img in anns['images']: 261 | imgid2img[img['id']] = img 262 | return imgid2img 263 | 264 | 265 | 266 | -------------------------------------------------------------------------------- /src/deepstream_app_config_parser.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. 
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 
21 | */ 22 | #include 23 | #include "deepstream_app.h" 24 | #include "deepstream_config_file_parser.h" 25 | 26 | #define CONFIG_GROUP_APP "application" 27 | #define CONFIG_GROUP_APP_ENABLE_PERF_MEASUREMENT "enable-perf-measurement" 28 | #define CONFIG_GROUP_APP_PERF_MEASUREMENT_INTERVAL "perf-measurement-interval-sec" 29 | #define CONFIG_GROUP_APP_GIE_OUTPUT_DIR "gie-kitti-output-dir" 30 | #define CONFIG_GROUP_APP_GIE_TRACK_OUTPUT_DIR "kitti-track-output-dir" 31 | 32 | #define CONFIG_GROUP_TESTS "tests" 33 | #define CONFIG_GROUP_TESTS_FILE_LOOP "file-loop" 34 | 35 | GST_DEBUG_CATEGORY_EXTERN (APP_CFG_PARSER_CAT); 36 | 37 | 38 | #define CHECK_ERROR(error) \ 39 | if (error) { \ 40 | GST_CAT_ERROR (APP_CFG_PARSER_CAT, "%s", error->message); \ 41 | goto done; \ 42 | } 43 | 44 | static gboolean 45 | parse_tests (NvDsConfig *config, GKeyFile *key_file) 46 | { 47 | gboolean ret = FALSE; 48 | gchar **keys = NULL; 49 | gchar **key = NULL; 50 | GError *error = NULL; 51 | 52 | keys = g_key_file_get_keys (key_file, CONFIG_GROUP_TESTS, NULL, &error); 53 | CHECK_ERROR (error); 54 | 55 | for (key = keys; *key; key++) { 56 | if (!g_strcmp0 (*key, CONFIG_GROUP_TESTS_FILE_LOOP)) { 57 | config->file_loop = 58 | g_key_file_get_integer (key_file, CONFIG_GROUP_TESTS, 59 | CONFIG_GROUP_TESTS_FILE_LOOP, &error); 60 | CHECK_ERROR (error); 61 | } else { 62 | NVGSTDS_WARN_MSG_V ("Unknown key '%s' for group [%s]", *key, 63 | CONFIG_GROUP_TESTS); 64 | } 65 | } 66 | 67 | ret = TRUE; 68 | done: 69 | if (error) { 70 | g_error_free (error); 71 | } 72 | if (keys) { 73 | g_strfreev (keys); 74 | } 75 | if (!ret) { 76 | NVGSTDS_ERR_MSG_V ("%s failed", __func__); 77 | } 78 | return ret; 79 | } 80 | 81 | 82 | static gboolean 83 | parse_app (NvDsConfig *config, GKeyFile *key_file, gchar *cfg_file_path) 84 | { 85 | gboolean ret = FALSE; 86 | gchar **keys = NULL; 87 | gchar **key = NULL; 88 | GError *error = NULL; 89 | 90 | keys = g_key_file_get_keys (key_file, CONFIG_GROUP_APP, NULL, &error); 91 | 
CHECK_ERROR (error); 92 | 93 | for (key = keys; *key; key++) { 94 | if (!g_strcmp0 (*key, CONFIG_GROUP_APP_ENABLE_PERF_MEASUREMENT)) { 95 | config->enable_perf_measurement = 96 | g_key_file_get_integer (key_file, CONFIG_GROUP_APP, 97 | CONFIG_GROUP_APP_ENABLE_PERF_MEASUREMENT, &error); 98 | CHECK_ERROR (error); 99 | } else if (!g_strcmp0 (*key, CONFIG_GROUP_APP_PERF_MEASUREMENT_INTERVAL)) { 100 | config->perf_measurement_interval_sec = 101 | g_key_file_get_integer (key_file, CONFIG_GROUP_APP, 102 | CONFIG_GROUP_APP_PERF_MEASUREMENT_INTERVAL, &error); 103 | CHECK_ERROR (error); 104 | } else if (!g_strcmp0 (*key, CONFIG_GROUP_APP_GIE_OUTPUT_DIR)) { 105 | config->bbox_dir_path = get_absolute_file_path (cfg_file_path, 106 | g_key_file_get_string (key_file, CONFIG_GROUP_APP, 107 | CONFIG_GROUP_APP_GIE_OUTPUT_DIR, &error)); 108 | CHECK_ERROR (error); 109 | } else if (!g_strcmp0 (*key, CONFIG_GROUP_APP_GIE_TRACK_OUTPUT_DIR)) { 110 | config->kitti_track_dir_path = get_absolute_file_path (cfg_file_path, 111 | g_key_file_get_string (key_file, CONFIG_GROUP_APP, 112 | CONFIG_GROUP_APP_GIE_TRACK_OUTPUT_DIR, &error)); 113 | CHECK_ERROR (error); 114 | } else { 115 | NVGSTDS_WARN_MSG_V ("Unknown key '%s' for group [%s]", *key, 116 | CONFIG_GROUP_APP); 117 | } 118 | } 119 | 120 | ret = TRUE; 121 | done: 122 | if (error) { 123 | g_error_free (error); 124 | } 125 | if (keys) { 126 | g_strfreev (keys); 127 | } 128 | if (!ret) { 129 | NVGSTDS_ERR_MSG_V ("%s failed", __func__); 130 | } 131 | return ret; 132 | } 133 | 134 | 135 | gboolean 136 | parse_config_file (NvDsConfig *config, gchar *cfg_file_path) 137 | { 138 | GKeyFile *cfg_file = g_key_file_new (); 139 | GError *error = NULL; 140 | gboolean ret = FALSE; 141 | gchar **groups = NULL; 142 | gchar **group; 143 | guint i, j; 144 | 145 | if (!APP_CFG_PARSER_CAT) { 146 | GST_DEBUG_CATEGORY_INIT (APP_CFG_PARSER_CAT, "NVDS_CFG_PARSER", 0, NULL); 147 | } 148 | 149 | if (!g_key_file_load_from_file (cfg_file, cfg_file_path, G_KEY_FILE_NONE, 
150 | &error)) { 151 | GST_CAT_ERROR (APP_CFG_PARSER_CAT, "Failed to load uri file: %s", 152 | error->message); 153 | goto done; 154 | } 155 | groups = g_key_file_get_groups (cfg_file, NULL); 156 | 157 | for (group = groups; *group; group++) { 158 | gboolean parse_err = FALSE; 159 | GST_CAT_DEBUG (APP_CFG_PARSER_CAT, "Parsing group: %s", *group); 160 | if (!g_strcmp0 (*group, CONFIG_GROUP_APP)) { 161 | parse_err = !parse_app (config, cfg_file, cfg_file_path); 162 | } 163 | 164 | if (!strncmp (*group, CONFIG_GROUP_SOURCE, sizeof (CONFIG_GROUP_SOURCE) - 1)) { 165 | if (config->num_source_sub_bins == MAX_SOURCE_BINS) { 166 | NVGSTDS_ERR_MSG_V ("App supports max %d sources", MAX_SOURCE_BINS); 167 | ret = FALSE; 168 | goto done; 169 | } 170 | parse_err = !parse_source (&config->multi_source_config[config->num_source_sub_bins], 171 | cfg_file, *group, cfg_file_path); 172 | if (config->multi_source_config[config->num_source_sub_bins].enable) { 173 | config->num_source_sub_bins++; 174 | } 175 | } 176 | 177 | if (!g_strcmp0 (*group, CONFIG_GROUP_STREAMMUX)) { 178 | parse_err = !parse_streammux (&config->streammux_config, cfg_file); 179 | } 180 | 181 | if (!g_strcmp0 (*group, CONFIG_GROUP_OSD)) { 182 | parse_err = !parse_osd (&config->osd_config, cfg_file); 183 | } 184 | 185 | if (!g_strcmp0 (*group, CONFIG_GROUP_PRIMARY_GIE)) { 186 | parse_err = 187 | !parse_gie (&config->primary_gie_config, cfg_file, 188 | CONFIG_GROUP_PRIMARY_GIE, cfg_file_path); 189 | } 190 | 191 | if (!g_strcmp0 (*group, CONFIG_GROUP_TRACKER)) { 192 | parse_err = !parse_tracker (&config->tracker_config, cfg_file, cfg_file_path); 193 | } 194 | 195 | if (!strncmp (*group, CONFIG_GROUP_SECONDARY_GIE, 196 | sizeof (CONFIG_GROUP_SECONDARY_GIE) - 1)) { 197 | if (config->num_secondary_gie_sub_bins == MAX_SECONDARY_GIE_BINS) { 198 | NVGSTDS_ERR_MSG_V ("App supports max %d secondary GIEs", MAX_SECONDARY_GIE_BINS); 199 | ret = FALSE; 200 | goto done; 201 | } 202 | parse_err = 203 | !parse_gie 
(&config->secondary_gie_sub_bin_config[config-> 204 | num_secondary_gie_sub_bins], 205 | cfg_file, *group, cfg_file_path); 206 | if (config->secondary_gie_sub_bin_config[config->num_secondary_gie_sub_bins].enable){ 207 | config->num_secondary_gie_sub_bins++; 208 | } 209 | } 210 | 211 | if (!strncmp (*group, CONFIG_GROUP_SINK, sizeof (CONFIG_GROUP_SINK) - 1)) { 212 | if (config->num_sink_sub_bins == MAX_SINK_BINS) { 213 | NVGSTDS_ERR_MSG_V ("App supports max %d sinks", MAX_SINK_BINS); 214 | ret = FALSE; 215 | goto done; 216 | } 217 | parse_err = 218 | !parse_sink (&config-> 219 | sink_bin_sub_bin_config[config->num_sink_sub_bins], cfg_file, *group); 220 | if (config-> 221 | sink_bin_sub_bin_config[config->num_sink_sub_bins].enable){ 222 | config->num_sink_sub_bins++; 223 | } 224 | } 225 | 226 | if (!g_strcmp0 (*group, CONFIG_GROUP_TILED_DISPLAY)) { 227 | parse_err = !parse_tiled_display (&config->tiled_display_config, cfg_file); 228 | } 229 | 230 | if (!g_strcmp0 (*group, CONFIG_GROUP_DSEXAMPLE)) { 231 | parse_err = !parse_dsexample (&config->dsexample_config, cfg_file); 232 | } 233 | 234 | if (!g_strcmp0 (*group, CONFIG_GROUP_TESTS)) { 235 | parse_err = !parse_tests (config, cfg_file); 236 | } 237 | 238 | if (parse_err) { 239 | GST_CAT_ERROR (APP_CFG_PARSER_CAT, "Failed to parse '%s' group", *group); 240 | goto done; 241 | } 242 | } 243 | 244 | for (i = 0; i < config->num_secondary_gie_sub_bins; i++) { 245 | if (config->secondary_gie_sub_bin_config[i].unique_id == 246 | config->primary_gie_config.unique_id) { 247 | NVGSTDS_ERR_MSG_V ("Non unique gie ids found"); 248 | ret = FALSE; 249 | goto done; 250 | } 251 | } 252 | 253 | for (i = 0; i < config->num_secondary_gie_sub_bins; i++) { 254 | for (j = i + 1; j < config->num_secondary_gie_sub_bins; j++) { 255 | if (config->secondary_gie_sub_bin_config[i].unique_id == 256 | config->secondary_gie_sub_bin_config[j].unique_id) { 257 | NVGSTDS_ERR_MSG_V ("Non unique gie id %d found", 258 | 
config->secondary_gie_sub_bin_config[i].unique_id); 259 | ret = FALSE; 260 | goto done; 261 | } 262 | } 263 | } 264 | 265 | for (i = 0; i < config->num_source_sub_bins; i++) { 266 | if (config->multi_source_config[i].type == NV_DS_SOURCE_URI_MULTIPLE) { 267 | if (config->multi_source_config[i].num_sources < 1) { 268 | config->multi_source_config[i].num_sources = 1; 269 | } 270 | for (j = 1; j < config->multi_source_config[i].num_sources; j++) { 271 | if (config->num_source_sub_bins == MAX_SOURCE_BINS) { 272 | NVGSTDS_ERR_MSG_V ("App supports max %d sources", MAX_SOURCE_BINS); 273 | ret = FALSE; 274 | goto done; 275 | } 276 | memcpy (&config->multi_source_config[config->num_source_sub_bins], 277 | &config->multi_source_config[i], 278 | sizeof (config->multi_source_config[i])); 279 | config->multi_source_config[config->num_source_sub_bins].type = NV_DS_SOURCE_URI; 280 | config->multi_source_config[config->num_source_sub_bins].uri = 281 | g_strdup_printf (config->multi_source_config[config->num_source_sub_bins].uri, j); 282 | config->num_source_sub_bins++; 283 | } 284 | config->multi_source_config[i].type = NV_DS_SOURCE_URI; 285 | config->multi_source_config[i].uri = 286 | g_strdup_printf (config->multi_source_config[i].uri, 0); 287 | } 288 | } 289 | ret = TRUE; 290 | 291 | done: 292 | if (cfg_file) { 293 | g_key_file_free (cfg_file); 294 | } 295 | 296 | if (groups) { 297 | g_strfreev (groups); 298 | } 299 | 300 | if (error) { 301 | g_error_free (error); 302 | } 303 | if (!ret) { 304 | NVGSTDS_ERR_MSG_V ("%s failed", __func__); 305 | } 306 | return ret; 307 | } 308 | -------------------------------------------------------------------------------- /open_images/open_image_to_json.py: -------------------------------------------------------------------------------- 1 | import os, csv, json, shutil 2 | from data_tools.coco_tools import read_json 3 | from PIL import Image 4 | 5 | 6 | def reduce_data(oidata, catmid2name, keep_classes=[]): 7 | """ 8 | Reduce the amount of 
data by only keeping images that are in the classes we want. 9 | :param oidata: oidata, as outputted by parse_open_images 10 | :param catmid2name: catid2name dict, as produced by read_catMIDtoname 11 | :param keep_classes: List of classes to be kept. 12 | :return: 13 | """ 14 | print(" Reducing the dataset. Initial dataset has length", len(oidata)) 15 | # First build a dictionary of imageID:[classnames] 16 | imageid2classmid = {} 17 | for dd in oidata: 18 | imageid = dd['ImageID'] 19 | if imageid not in imageid2classmid: 20 | imageid2classmid[imageid] = [dd['LabelName']] 21 | else: 22 | imageid2classmid[imageid].append(dd['LabelName']) 23 | 24 | # Work out which images we are including. 25 | imageid2include = {} # dict to store True if this imageid is included. 26 | 27 | for imgid, classmids in imageid2classmid.items(): 28 | imageid2include[imgid] = False # Assume we don't include this. 29 | for mid in classmids: 30 | this_name = catmid2name[mid] 31 | if this_name in keep_classes: 32 | imageid2include[imgid] = True 33 | 34 | # Now work through list, appending if ImageID has imageid2include[imageid] = True 35 | returned_data = [] 36 | for dd in oidata: 37 | imageid = dd['ImageID'] 38 | if imageid2include[imageid]: 39 | returned_data.append(dd) 40 | 41 | print(" Reducing the dataset. Final dataset has length", len(returned_data)) 42 | return returned_data 43 | 44 | def openimages2coco(oidata, catmid2name, img_dir, desc="", output_class_ids=None, 45 | max_size=None, min_ann_size=None, min_ratio=0.0, min_width_for_ratio=400): 46 | """ 47 | Converts open images annotations into COCO format 48 | :param raw: list of data items, as produced by parse_open_images 49 | :return: COCO style dict 50 | """ 51 | output = {'info': 52 | "Annotations produced from OpenImages. 
%s" % desc, 53 | 'licenses': [], 54 | 'images': [], 55 | 'annotations': [], 56 | 'categories': []} # Prepare output 57 | 58 | # Get categories in this dataset 59 | all_cats = [] 60 | for dd in oidata: 61 | if dd['LabelName'] not in all_cats: 62 | all_cats.append(dd['LabelName']) 63 | categories = [] 64 | for mid in all_cats: 65 | cat_name = catmid2name[mid] 66 | if cat_name in output_class_ids: 67 | categories.append({"id": output_class_ids[cat_name], "name": cat_name, "supercategory": 'object'}) 68 | output['categories'] = categories 69 | 70 | # Get images 71 | image_filename_to_id = {} # To store found images. 72 | current_img_index = 0 #To incrementally add image IDs. 73 | imgid2wh = {} # To store width and height 74 | intermediate_images = [] # To store as if output 75 | for dd in oidata: 76 | filename = dd['ImageID'] + '.jpg' 77 | if filename not in image_filename_to_id: 78 | img_entry = _oidata_entry_to_image_dict(filename, current_img_index, img_dir) 79 | image_filename_to_id[filename] = current_img_index 80 | imgid2wh[current_img_index] = (img_entry['width'], img_entry['height']) 81 | intermediate_images.append(img_entry) 82 | current_img_index += 1 83 | 84 | # Get annotations 85 | ann_id = 1 86 | imgid2_has_new_ann = {} # Use this to make sure that our images have valid annotations 87 | new_anns_raw = [] # list of candidate annotations 88 | for dd in oidata: 89 | filename = dd['ImageID'] + '.jpg' 90 | imgid = image_filename_to_id[filename] 91 | cat_name = catmid2name[dd['LabelName']] 92 | if cat_name in output_class_ids: 93 | catid = output_class_ids[cat_name] 94 | w, h = imgid2wh[imgid] 95 | bbox, area, seg = _ann2bbox(dd, w, h) 96 | ann_entry = {'id': ann_id, 'image_id': imgid, 'category_id': catid, 97 | 'segmentation': seg, 98 | 'area': area, 99 | 'bbox': bbox, 100 | 'iscrowd': 0} 101 | # Check if we want to include this annotation 102 | include_this_annotation = True 103 | x, y, ann_w, ann_h = bbox 104 | if max_size: 105 | maxdim = max(w, h) 106 | 
ann_w = ann_w * (max_size / float(maxdim)) 107 | ann_h = ann_h * (max_size / float(maxdim)) 108 | if min_ann_size is not None: 109 | if ann_w < min_ann_size[0]: 110 | include_this_annotation = False 111 | if ann_h < min_ann_size[1]: 112 | include_this_annotation = False 113 | 114 | # Now check whether this annotation exceeds the ratio requriements, if any. 115 | if min_ratio > 0: 116 | try: 117 | ratio = float(w) / float(h) 118 | except ZeroDivisionError: 119 | include_this_annotation = False 120 | else: 121 | if ratio >= min_ratio and w >= min_width_for_ratio: 122 | include_this_annotation = False 123 | 124 | if include_this_annotation: 125 | new_anns_raw.append(ann_entry) 126 | imgid2_has_new_ann[imgid] = True 127 | ann_id += 1 128 | 129 | # Now we must review all of the images and only keep those where imgid2_has_new_ann[imgid] = True 130 | 131 | new_imgs_raw = [] 132 | for img in intermediate_images: 133 | if img['id'] in imgid2_has_new_ann: 134 | new_imgs_raw.append(img) 135 | 136 | # Now we assign new image_ids to the images, mapping old to new 137 | old_img2new_img = {} 138 | new_imgs = [] 139 | for indx, img in enumerate(new_imgs_raw): 140 | old_img2new_img[img['id']] = indx + 1 141 | img['id'] = indx + 1 142 | new_imgs.append(img) 143 | 144 | output['images'] = new_imgs 145 | 146 | # Now we assing new ann_ids to the annotations, also updating the image ID 147 | new_anns = [] 148 | for indx, ann in enumerate(new_anns_raw): 149 | ann['id'] = indx + 1 150 | ann['image_id'] = old_img2new_img[ann['image_id']] 151 | new_anns.append(ann) 152 | 153 | output['annotations'] = new_anns 154 | return output 155 | 156 | def read_catMIDtoname(csv_file): 157 | catmid2name = {} 158 | 159 | assert os.path.isfile(csv_file), "File %s does not exist." 
% csv_file 160 | 161 | rows_read = 0 162 | with open(csv_file) as csvfile: 163 | reader = csv.reader(csvfile) 164 | for row in reader: 165 | mid = row[0] 166 | name = row[1] 167 | catmid2name[mid] = name 168 | rows_read += 1 169 | print(" Read", rows_read, "rows from category csv", csv_file) 170 | return catmid2name 171 | 172 | def parse_open_images(annotation_csv): 173 | """ 174 | Parse open images and produce a list of annotations. 175 | :param annotation_csv: 176 | :return: 177 | """ 178 | annotations = [] 179 | 180 | assert os.path.isfile(annotation_csv), "File %s does not exist." % annotation_csv 181 | expected_header = ['ImageID', 'Source', 'LabelName', 'Confidence', 'XMin', 'XMax', 'YMin', 'YMax', 'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction', 'IsInside'] 182 | 183 | rows_read = 0 184 | with open(annotation_csv) as csvfile: 185 | reader = csv.reader(csvfile) 186 | header = next(reader) 187 | for ii, hh in enumerate(header): 188 | assert hh == expected_header[ii], "File header is not as expected." 189 | for row in reader: 190 | ann = parse_open_images_row(row, header) 191 | annotations.append(ann) 192 | rows_read += 1 193 | # if rows_read > 10: 194 | # print("DEBUG: Only reading 11 rows.") 195 | # break 196 | print(" Read", rows_read, "rows from annotation csv", annotation_csv) 197 | return annotations 198 | 199 | def parse_open_images_row(row, header): 200 | """Parse open images row, returning a dict 201 | Format of dict (str unless otherwise specified) 202 | ImageID: Image ID of the box. 203 | Source: Indicateds how the box was made. 204 | xclick are manually drawn boxes using the method presented in [1]. 205 | activemil are boxes produced using an enhanced version of the method [2]. These are human verified to be accurate at IoU>0.7. 206 | LabelName: MID of the object class 207 | Confidence: Always 1 (here True) 208 | XMin, XMax, YMin, YMax: coordinates of the box, in normalized image coordinates. 
(FLOAT) 209 | XMin is in [0,1], where 0 is the leftmost pixel, and 1 is the rightmost pixel in the image. 210 | Y coordinates go from the top pixel (0) to the bottom pixel (1). 211 | For each of them, value 1 indicates present, 0 not present, and -1 unknown. (INT) 212 | IsOccluded: Indicates that the object is occluded by another object in the image. 213 | IsTruncated: Indicates that the object extends beyond the boundary of the image. 214 | IsGroupOf: Indicates that the box spans a group of objects (e.g., a bed of flowers or a crowd of people). We asked annotators to use this tag for cases with more than 5 instances which are heavily occluding each other and are physically touching. 215 | IsDepiction: Indicates that the object is a depiction (e.g., a cartoon or drawing of the object, not a real physical instance). 216 | IsInside: Indicates a picture taken from the inside of the object (e.g., a car interior or inside of a building). 217 | 218 | """ 219 | ann = {} 220 | for ii, hh in enumerate(header): 221 | if hh in ['XMin', 'XMax', 'YMin', 'YMax']: 222 | ann[hh] = float(row[ii]) 223 | elif hh in ['Confidence', 'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction', 'IsInside']: 224 | ann[hh] = int(row[ii]) 225 | else: # str 226 | ann[hh] = row[ii] 227 | return ann 228 | 229 | def copy_images(json_file, original_image_dirs, new_image_dir): 230 | """Copy files from original_image_dirs to new_iamge_dirs""" 231 | if type(original_image_dirs) is not list: 232 | original_image_dirs = [original_image_dirs] 233 | 234 | # Open JSON file and get list of images 235 | annotations = read_json(json_file, verbose=False) 236 | image_filenames = [ann['file_name'] for ann in annotations['images']] 237 | 238 | for img in image_filenames: 239 | for img_d in original_image_dirs: 240 | orig = os.path.join(img_d, img) 241 | if not os.path.isfile(orig): 242 | continue 243 | new = os.path.join(new_image_dir, img) 244 | # Copy 245 | shutil.copy(orig, new) 246 | print("All %i images in %s 
copied to %s" % (len(image_filenames), json_file, new_image_dir)) 247 | 248 | 249 | def _oidata_entry_to_image_dict(filename, indx, img_dir): 250 | width, height = _get_img_width_height(filename, img_dir) 251 | return {'id': indx, 'width': width, 'height': height, 'file_name': filename, 252 | 'license': None, 'flickr_url': None, 'coco_url': None, 'date_captured': None} 253 | 254 | def _get_img_width_height(filename, img_dir): 255 | # Modified to deal with img_dir as a list. 256 | if not type(img_dir) == list: 257 | img_dir = [img_dir] 258 | for img_d in img_dir: 259 | filepath = os.path.join(img_d, filename) 260 | try: 261 | image = Image.open(filepath).convert("RGB") 262 | except FileNotFoundError: 263 | pass 264 | else: 265 | return image.size 266 | raise FileNotFoundError("Image %s not found in any of img_dir" % filename) 267 | 268 | def _ann2bbox(dd, img_width, img_height): 269 | xmin = dd['XMin'] * img_width 270 | xmax = dd['XMax'] * img_width 271 | ymin = dd['YMin'] * img_height 272 | ymax = dd['YMax'] * img_height 273 | seg = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin] 274 | w = xmax - xmin 275 | h = ymax - ymin 276 | bbox = [xmin, ymin, w, h] 277 | return bbox, w * h, seg 278 | 279 | -------------------------------------------------------------------------------- /src/deepstream_app_main.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. 
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 
21 | */ 22 | 23 | #include "deepstream_app.h" 24 | #include "deepstream_config_file_parser.h" 25 | #include "nvds_version.h" 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | 32 | #define MAX_INSTANCES 128 33 | #define APP_TITLE "DeepStream" 34 | 35 | #define DEFAULT_X_WINDOW_WIDTH 1920 36 | #define DEFAULT_X_WINDOW_HEIGHT 1080 37 | 38 | AppCtx *appCtx[MAX_INSTANCES]; 39 | static guint cintr = FALSE; 40 | static GMainLoop *main_loop = NULL; 41 | static gchar **cfg_files = NULL; 42 | static gchar **input_files = NULL; 43 | static gboolean print_version = FALSE; 44 | static gboolean show_bbox_text = FALSE; 45 | static gboolean print_dependencies_version = FALSE; 46 | static gboolean quit = FALSE; 47 | static gint return_value = 0; 48 | static guint num_instances; 49 | static guint num_input_files; 50 | static GMutex fps_lock; 51 | static gdouble fps[MAX_SOURCE_BINS]; 52 | static gdouble fps_avg[MAX_SOURCE_BINS]; 53 | static guint num_fps_inst = 0; 54 | 55 | static Display *display = NULL; 56 | static Window windows[MAX_INSTANCES] = { 0 }; 57 | 58 | static gint source_ids[MAX_INSTANCES]; 59 | 60 | static GThread *x_event_thread = NULL; 61 | static GMutex disp_lock; 62 | 63 | 64 | GST_DEBUG_CATEGORY (NVDS_APP); 65 | 66 | GOptionEntry entries[] = { 67 | {"version", 'v', 0, G_OPTION_ARG_NONE, &print_version, 68 | "Print DeepStreamSDK version", NULL} 69 | , 70 | {"tiledtext", 't', 0, G_OPTION_ARG_NONE, &show_bbox_text, 71 | "Display Bounding box labels in tiled mode", NULL} 72 | , 73 | {"version-all", 0, 0, G_OPTION_ARG_NONE, &print_dependencies_version, 74 | "Print DeepStreamSDK and dependencies version", NULL} 75 | , 76 | {"cfg-file", 'c', 0, G_OPTION_ARG_FILENAME_ARRAY, &cfg_files, 77 | "Set the config file", NULL} 78 | , 79 | {"input-file", 'i', 0, G_OPTION_ARG_FILENAME_ARRAY, &input_files, 80 | "Set the input file", NULL} 81 | , 82 | {NULL} 83 | , 84 | }; 85 | 86 | /** 87 | * Callback function to be called once all inferences (Primary + 
Secondary)
 * are done. This is opportunity to modify content of the metadata.
 * e.g. Here Person is being replaced with Man/Woman and corresponding counts
 * are being maintained. It should be modified according to network classes
 * or can be removed altogether if not required.
 */
static void
all_bbox_generated (AppCtx * appCtx, GstBuffer * buf,
    NvDsBatchMeta * batch_meta, guint index)
{
  /* NOTE(review): num_male / num_female are tallied but never read after
   * this function returns -- candidates for removal per the comment above. */
  guint num_male = 0;
  guint num_female = 0;
  /* Per-class detection counts for the primary GIE (classes 0..127). */
  guint num_objects[128];

  memset (num_objects, 0, sizeof (num_objects));

  /* Walk every frame in the batch, and every detected object per frame. */
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = l_frame->data;
    for (NvDsMetaList * l_obj = frame_meta->obj_meta_list; l_obj != NULL;
        l_obj = l_obj->next) {
      NvDsObjectMeta *obj = (NvDsObjectMeta *) l_obj->data;
      /* Only objects produced by the primary inference engine. */
      if (obj->unique_component_id ==
          (gint) appCtx->config.primary_gie_config.unique_id) {
        if (obj->class_id >= 0 && obj->class_id < 128) {
          num_objects[obj->class_id]++;
        }
        /* Rewrite "Person" display labels to "Man"/"Woman" based on the
         * gender string a secondary classifier appended to the text. */
        if (appCtx->person_class_id > -1
            && obj->class_id == appCtx->person_class_id) {
          if (strstr (obj->text_params.display_text, "Man")) {
            str_replace (obj->text_params.display_text, "Man", "");
            str_replace (obj->text_params.display_text, "Person", "Man");
            num_male++;
          } else if (strstr (obj->text_params.display_text, "Woman")) {
            str_replace (obj->text_params.display_text, "Woman", "");
            str_replace (obj->text_params.display_text, "Person", "Woman");
            num_female++;
          }
        }
      }
    }
  }
}

/**
 * Function to handle program interrupt signal.
 * It installs default handler after handling the interrupt.
 */
static void
_intr_handler (int signum)
{
  struct sigaction action;

  NVGSTDS_ERR_MSG_V ("User Interrupted.. \n");

  memset (&action, 0, sizeof (action));
  /* Restore the default handler so a second Ctrl-C force-kills the app. */
  action.sa_handler = SIG_DFL;

  sigaction (SIGINT, &action, NULL);

  /* Polled by check_for_interrupt() on the main loop. */
  cintr = TRUE;
}

/**
 * callback function to print the performance numbers of each stream.
 */
static void
perf_cb (gpointer context, NvDsAppPerfStruct * str)
{
  static guint header_print_cnt = 0;
  guint i;
  AppCtx *appCtx = (AppCtx *) context;
  /* A single pipeline may carry several sources; with multiple pipelines
   * each instance reports one FPS value at its own index. */
  guint numf = (num_instances == 1) ? str->num_instances : num_instances;

  g_mutex_lock (&fps_lock);
  if (num_instances > 1) {
    fps[appCtx->index] = str->fps[0];
    fps_avg[appCtx->index] = str->fps_avg[0];
  } else {
    for (i = 0; i < numf; i++) {
      fps[i] = str->fps[i];
      fps_avg[i] = str->fps_avg[i];
    }
  }

  /* Wait until every instance has reported before printing one row. */
  num_fps_inst++;
  if (num_fps_inst < num_instances) {
    g_mutex_unlock (&fps_lock);
    return;
  }

  num_fps_inst = 0;

  /* Re-print the column header every 20 rows. */
  if (header_print_cnt % 20 == 0) {
    g_print ("\n**PERF: ");
    for (i = 0; i < numf; i++) {
      g_print ("FPS %d (Avg)\t", i);
    }
    g_print ("\n");
    header_print_cnt = 0;
  }
  header_print_cnt++;
  g_print ("**PERF: ");
  for (i = 0; i < numf; i++) {
    g_print ("%.2f (%.2f)\t", fps[i], fps_avg[i]);
  }
  g_print ("\n");
  g_mutex_unlock (&fps_lock);
}

/**
 * Loop function to check the status of interrupts.
 * It comes out of loop if application got interrupted.
 */
static gboolean
check_for_interrupt (gpointer data)
{
  if (quit) {
    return FALSE;
  }

  if (cintr) {
    cintr = FALSE;

    quit = TRUE;
    g_main_loop_quit (main_loop);

    return FALSE;
  }
  return TRUE;
}

/*
 * Function to install custom handler for program interrupt signal.
 */
static void
_intr_setup (void)
{
  struct sigaction action;

  memset (&action, 0, sizeof (action));
  action.sa_handler = _intr_handler;

  sigaction (SIGINT, &action, NULL);
}

/* Non-blocking poll for pending keyboard input on stdin. */
static gboolean
kbhit (void)
{
  struct timeval tv;
  fd_set rdfs;

  /* Zero timeout: poll, never block. */
  tv.tv_sec = 0;
  tv.tv_usec = 0;

  FD_ZERO (&rdfs);
  FD_SET (STDIN_FILENO, &rdfs);

  select (STDIN_FILENO + 1, &rdfs, NULL, NULL, &tv);
  return FD_ISSET (STDIN_FILENO, &rdfs);
}

/*
 * Function to enable / disable the canonical mode of terminal.
 * In non canonical mode input is available immediately (without the user
 * having to type a line-delimiter character).
 */
static void
changemode (int dir)
{
  /* oldt is saved on enable (dir==1) and restored on any later disable. */
  static struct termios oldt, newt;

  if (dir == 1) {
    tcgetattr (STDIN_FILENO, &oldt);
    newt = oldt;
    newt.c_lflag &= ~(ICANON);
    tcsetattr (STDIN_FILENO, TCSANOW, &newt);
  } else
    tcsetattr (STDIN_FILENO, TCSANOW, &oldt);
}

/* Print the interactive keyboard commands available at runtime. */
static void
print_runtime_commands (void)
{
  g_print ("\nRuntime commands:\n"
      "\th: Print this help\n"
      "\tq: Quit\n\n" "\tp: Pause\n" "\tr: Resume\n\n");

  if (appCtx[0]->config.tiled_display_config.enable) {
    g_print
        ("NOTE: To expand a source in the 2D tiled display and view object details,"
        " left-click on the source.\n"
        " To go back to the tiled display, right-click anywhere on the window.\n\n");
  }
}

/* State for the interactive row/column source selection ('z' command). */
static guint rrow, rcol;
static gboolean rrowsel = FALSE, selecting = FALSE;

/**
 * Loop function to check keyboard inputs and status of each pipeline.
 */
static gboolean
event_thread_func (gpointer arg)
{
  guint i;
  gboolean ret = TRUE;

  // Check if all instances have quit
  for (i = 0; i < num_instances; i++) {
    if (!appCtx[i]->quit)
      break;
  }

  if (i == num_instances) {
    quit = TRUE;
    g_main_loop_quit (main_loop);
    return FALSE;  /* stop this timeout source */
  }
  // Check for keyboard input
  if (!kbhit ()) {
    //continue;
    return TRUE;   /* nothing typed; stay scheduled */
  }
  int c = fgetc (stdin);
  g_print ("\n");

  gint source_id;
  GstElement *tiler = appCtx[0]->pipeline.tiled_display_bin.tiler;
  g_object_get (G_OBJECT (tiler), "show-source", &source_id, NULL);

  /* Two-step tile selection: first digit chooses the row, second the column
   * (state kept in the rrow/rcol/rrowsel/selecting statics). */
  if (selecting) {
    if (rrowsel == FALSE) {
      if (c >= '0' && c <= '9') {
        rrow = c - '0';
        if (rrow < appCtx[0]->config.tiled_display_config.rows){
          g_print ("--selecting source row %d--\n", rrow);
          rrowsel = TRUE;
        }else{
          g_print ("--selected source row %d out of bound, reenter\n", rrow);
        }
      }
    } else {
      if (c >= '0' && c <= '9') {
        unsigned int tile_num_columns = appCtx[0]->config.tiled_display_config.columns;
        rcol = c - '0';
        if (rcol < tile_num_columns){
          selecting = FALSE;
          rrowsel = FALSE;
          /* Tiles are laid out row-major. */
          source_id = tile_num_columns * rrow + rcol;
          g_print ("--selecting source col %d sou=%d--\n", rcol, source_id);
          if (source_id >= (gint) appCtx[0]->config.num_source_sub_bins) {
            source_id = -1;  /* clicked/selected an empty tile */
          } else {
            source_ids[0] = source_id;
            appCtx[0]->show_bbox_text = TRUE;
            g_object_set (G_OBJECT (tiler), "show-source", source_id, NULL);
          }
        }else{
          g_print ("--selected source col %d out of bound, reenter\n", rcol);
        }
      }
    }
  }
  switch (c) {
    case 'h':
      print_runtime_commands ();
      break;
    case 'p':
      for (i = 0; i < num_instances; i++)
        pause_pipeline (appCtx[i]);
      break;
    case 'r':
      for (i = 0; i < num_instances; i++)
        resume_pipeline (appCtx[i]);
      break;
    case 'q':
      quit = TRUE;
      g_main_loop_quit (main_loop);
      ret = FALSE;
      break;
    case 'z':
      /* Toggle between "select a tile" mode and the full tiled view. */
      if (source_id == -1 && selecting == FALSE) {
        g_print ("--selecting source --\n");
        selecting = TRUE;
      } else {
        if (!show_bbox_text)
          appCtx[0]->show_bbox_text = FALSE;
        g_object_set (G_OBJECT (tiler), "show-source", -1, NULL);
        source_ids[0] = -1;
        selecting = FALSE;
        g_print ("--tiled mode --\n");
      }
      break;
    default:
      break;
  }
  return ret;
}

/* Map a click position (relative coords in [0,1]) to the tiled source index;
 * returns -1 for clicks on empty tiles. */
static int
get_source_id_from_coordinates (float x_rel, float y_rel)
{
  int tile_num_rows = appCtx[0]->config.tiled_display_config.rows;
  int tile_num_columns = appCtx[0]->config.tiled_display_config.columns;

  int source_id = (int) (x_rel * tile_num_columns);
  source_id += ((int) (y_rel * tile_num_rows)) * tile_num_columns;

  /* Don't allow clicks on empty tiles. */
  if (source_id >= (gint) appCtx[0]->config.num_source_sub_bins)
    source_id = -1;

  return source_id;
}

/**
 * Thread to monitor X window events.
 */
static gpointer
nvds_x_event_thread (gpointer data)
{
  /* disp_lock guards `display` and `windows[]`; it is released around the
   * sleep below so the main thread can tear the display down. */
  g_mutex_lock (&disp_lock);
  while (display) {
    XEvent e;
    guint index;
    while (XPending (display)) {
      XNextEvent (display, &e);
      switch (e.type) {
        case ButtonPress:
        {
          XWindowAttributes win_attr;
          XButtonEvent ev = e.xbutton;
          gint source_id;
          GstElement *tiler;

          XGetWindowAttributes (display, ev.window, &win_attr);

          /* Find which app instance owns the clicked window. */
          for (index = 0; index < MAX_INSTANCES; index++)
            if (ev.window == windows[index])
              break;

          tiler = appCtx[index]->pipeline.tiled_display_bin.tiler;
          g_object_get (G_OBJECT (tiler), "show-source", &source_id, NULL);

          /* Left click on the tiled view expands one source ... */
          if (ev.button == Button1 && source_id == -1) {
            source_id =
                get_source_id_from_coordinates (ev.x * 1.0 / win_attr.width,
                ev.y * 1.0 / win_attr.height);
            if (source_id > -1) {
              g_object_set (G_OBJECT (tiler), "show-source", source_id, NULL);
              source_ids[index] = source_id;
              appCtx[index]->show_bbox_text = TRUE;
            }
          } else if (ev.button == Button3) {
            /* ... right click returns to the tiled view. */
            g_object_set (G_OBJECT (tiler), "show-source", -1, NULL);
            source_ids[index] = -1;
            if (!show_bbox_text)
              appCtx[index]->show_bbox_text = FALSE;
          }
        }
          break;
        case KeyRelease:
        case KeyPress:
        {
          /* NOTE(review): these KeySym variables actually hold keycodes
           * returned by XKeysymToKeycode; the comparison below is
           * keycode-to-keycode, so it works, but the types are misleading. */
          KeySym p, r, q;
          guint i;
          p = XKeysymToKeycode (display, XK_P);
          r = XKeysymToKeycode (display, XK_R);
          q = XKeysymToKeycode (display, XK_Q);
          if (e.xkey.keycode == p) {
            for (i = 0; i < num_instances; i++)
              pause_pipeline (appCtx[i]);
            break;
          }
          if (e.xkey.keycode == r) {
            for (i = 0; i < num_instances; i++)
              resume_pipeline (appCtx[i]);
            break;
          }
          if (e.xkey.keycode == q) {
            quit = TRUE;
            g_main_loop_quit (main_loop);
          }
        }
          break;
        case ClientMessage:
        {
          Atom wm_delete;

          for (index = 0; index < MAX_INSTANCES; index++)
            if (e.xclient.window == windows[index])
              break;

          /* Window-manager close button -> quit the application. */
          wm_delete = XInternAtom (display, "WM_DELETE_WINDOW", 1);
          if (wm_delete != None && wm_delete == (Atom) e.xclient.data.l[0]) {
            quit = TRUE;
            g_main_loop_quit (main_loop);
          }
        }
          break;
      }
    }
    /* Yield the lock while idling at ~20 Hz. */
    g_mutex_unlock (&disp_lock);
    g_usleep (G_USEC_PER_SEC / 20);
    g_mutex_lock (&disp_lock);
  }
  g_mutex_unlock (&disp_lock);
  return NULL;
}

/**
 * callback function to add application specific metadata.
 * Here it demonstrates how to display the URI of source in addition to
 * the text generated after inference.
 */
static gboolean
overlay_graphics (AppCtx * appCtx, GstBuffer * buf,
    NvDsBatchMeta * batch_meta, guint index)
{
  /* Only draw the overlay when a single source is expanded. */
  if (source_ids[index] == -1)
    return TRUE;

  NvDsFrameLatencyInfo *latency_info = NULL;
  NvDsDisplayMeta *display_meta =
      nvds_acquire_display_meta_from_pool (batch_meta);

  /* Label 0: the URI of the expanded source. */
  display_meta->num_labels = 1;
  display_meta->text_params[0].display_text = g_strdup_printf ("Source: %s",
      appCtx->config.multi_source_config[source_ids[index]].uri);

  display_meta->text_params[0].y_offset = 20;
  display_meta->text_params[0].x_offset = 20;
  display_meta->text_params[0].font_params.font_color = (NvOSD_ColorParams) {
    0, 1, 0, 1};
  display_meta->text_params[0].font_params.font_size =
      appCtx->config.osd_config.text_size * 1.5;
  display_meta->text_params[0].font_params.font_name = "Serif";
  display_meta->text_params[0].set_bg_clr = 1;
  display_meta->text_params[0].text_bg_clr = (NvOSD_ColorParams) {
    0, 0, 0, 1.0};


  /* Label 1 (optional): measured frame latency. */
  if(nvds_enable_latency_measurement) {
    g_mutex_lock (&appCtx->latency_lock);
    latency_info = &appCtx->latency_info[index];
    display_meta->num_labels++;
display_meta->text_params[1].display_text = g_strdup_printf ("Latency: %lf", 534 | latency_info->latency); 535 | g_mutex_unlock (&appCtx->latency_lock); 536 | 537 | display_meta->text_params[1].y_offset = (display_meta->text_params[0].y_offset * 2 )+ 538 | display_meta->text_params[0].font_params.font_size; 539 | display_meta->text_params[1].x_offset = 20; 540 | display_meta->text_params[1].font_params.font_color = (NvOSD_ColorParams) { 541 | 0, 1, 0, 1}; 542 | display_meta->text_params[1].font_params.font_size = 543 | appCtx->config.osd_config.text_size * 1.5; 544 | display_meta->text_params[1].font_params.font_name = "Arial"; 545 | display_meta->text_params[1].set_bg_clr = 1; 546 | display_meta->text_params[1].text_bg_clr = (NvOSD_ColorParams) { 547 | 0, 0, 0, 1.0}; 548 | } 549 | 550 | nvds_add_display_meta_to_frame (nvds_get_nth_frame_meta (batch_meta-> 551 | frame_meta_list, 0), display_meta); 552 | return TRUE; 553 | } 554 | 555 | int 556 | main (int argc, char *argv[]) 557 | { 558 | GOptionContext *ctx = NULL; 559 | GOptionGroup *group = NULL; 560 | GError *error = NULL; 561 | guint i; 562 | 563 | ctx = g_option_context_new ("Nvidia DeepStream Demo"); 564 | group = g_option_group_new ("abc", NULL, NULL, NULL, NULL); 565 | g_option_group_add_entries (group, entries); 566 | 567 | g_option_context_set_main_group (ctx, group); 568 | g_option_context_add_group (ctx, gst_init_get_option_group ()); 569 | 570 | GST_DEBUG_CATEGORY_INIT (NVDS_APP, "NVDS_APP", 0, NULL); 571 | 572 | if (!g_option_context_parse (ctx, &argc, &argv, &error)) { 573 | NVGSTDS_ERR_MSG_V ("%s", error->message); 574 | return -1; 575 | } 576 | 577 | if (print_version) { 578 | g_print ("deepstream-app version %d.%d\n", 579 | NVDS_APP_VERSION_MAJOR, NVDS_APP_VERSION_MINOR); 580 | nvds_version_print (); 581 | return 0; 582 | } 583 | 584 | if (print_dependencies_version) { 585 | g_print ("deepstream-app version %d.%d\n", 586 | NVDS_APP_VERSION_MAJOR, NVDS_APP_VERSION_MINOR); 587 | nvds_version_print 
(); 588 | nvds_dependencies_version_print (); 589 | return 0; 590 | } 591 | 592 | if (cfg_files) { 593 | num_instances = g_strv_length (cfg_files); 594 | } 595 | if (input_files) { 596 | num_input_files = g_strv_length (input_files); 597 | } 598 | 599 | memset (source_ids, -1, sizeof (source_ids)); 600 | 601 | if (!cfg_files || num_instances == 0) { 602 | NVGSTDS_ERR_MSG_V ("Specify config file with -c option"); 603 | return_value = -1; 604 | goto done; 605 | } 606 | 607 | for (i = 0; i < num_instances; i++) { 608 | appCtx[i] = g_malloc0 (sizeof (AppCtx)); 609 | appCtx[i]->person_class_id = -1; 610 | appCtx[i]->car_class_id = -1; 611 | appCtx[i]->index = i; 612 | if (show_bbox_text) { 613 | appCtx[i]->show_bbox_text = TRUE; 614 | } 615 | 616 | if (input_files && input_files[i]) { 617 | appCtx[i]->config.multi_source_config[0].uri = 618 | g_strdup_printf ("file://%s", input_files[i]); 619 | g_free (input_files[i]); 620 | } 621 | 622 | if (!parse_config_file (&appCtx[i]->config, cfg_files[i])) { 623 | NVGSTDS_ERR_MSG_V ("Failed to parse config file '%s'", cfg_files[i]); 624 | appCtx[i]->return_value = -1; 625 | goto done; 626 | } 627 | } 628 | 629 | for (i = 0; i < num_instances; i++) { 630 | if (!create_pipeline (appCtx[i], NULL, 631 | all_bbox_generated, perf_cb, overlay_graphics)) { 632 | NVGSTDS_ERR_MSG_V ("Failed to create pipeline"); 633 | return_value = -1; 634 | goto done; 635 | } 636 | } 637 | 638 | main_loop = g_main_loop_new (NULL, FALSE); 639 | 640 | _intr_setup (); 641 | g_timeout_add (400, check_for_interrupt, NULL); 642 | 643 | 644 | g_mutex_init (&disp_lock); 645 | display = XOpenDisplay (NULL); 646 | for (i = 0; i < num_instances; i++) { 647 | guint j; 648 | 649 | if (gst_element_set_state (appCtx[i]->pipeline.pipeline, 650 | GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) { 651 | NVGSTDS_ERR_MSG_V ("Failed to set pipeline to PAUSED"); 652 | return_value = -1; 653 | goto done; 654 | } 655 | 656 | if (!appCtx[i]->config.tiled_display_config.enable) 657 
| continue; 658 | 659 | for (j = 0; j < appCtx[i]->config.num_sink_sub_bins; j++) { 660 | XTextProperty xproperty; 661 | gchar *title; 662 | guint width, height; 663 | 664 | if (!GST_IS_VIDEO_OVERLAY (appCtx[i]->pipeline.instance_bins[0]. 665 | sink_bin.sub_bins[j].sink)) { 666 | continue; 667 | } 668 | 669 | if (!display) { 670 | NVGSTDS_ERR_MSG_V ("Could not open X Display"); 671 | return_value = -1; 672 | goto done; 673 | } 674 | 675 | if (appCtx[i]->config.sink_bin_sub_bin_config[j].render_config.width) 676 | width = 677 | appCtx[i]->config.sink_bin_sub_bin_config[j].render_config.width; 678 | else 679 | width = appCtx[i]->config.tiled_display_config.width; 680 | 681 | if (appCtx[i]->config.sink_bin_sub_bin_config[j].render_config.height) 682 | height = 683 | appCtx[i]->config.sink_bin_sub_bin_config[j].render_config.height; 684 | else 685 | height = appCtx[i]->config.tiled_display_config.height; 686 | 687 | width = (width) ? width : DEFAULT_X_WINDOW_WIDTH; 688 | height = (height) ? 
height : DEFAULT_X_WINDOW_HEIGHT; 689 | 690 | windows[i] = 691 | XCreateSimpleWindow (display, RootWindow (display, 692 | DefaultScreen (display)), 0, 0, width, height, 2, 0x00000000, 693 | 0x00000000); 694 | 695 | if (num_instances > 1) 696 | title = g_strdup_printf (title, APP_TITLE "-%d", i); 697 | else 698 | title = g_strdup (APP_TITLE); 699 | if (XStringListToTextProperty ((char **) &title, 1, &xproperty) != 0) { 700 | XSetWMName (display, windows[i], &xproperty); 701 | XFree (xproperty.value); 702 | } 703 | 704 | XSetWindowAttributes attr = { 0 }; 705 | if ((appCtx[i]->config.tiled_display_config.enable && 706 | appCtx[i]->config.tiled_display_config.rows * 707 | appCtx[i]->config.tiled_display_config.columns == 1) || 708 | (appCtx[i]->config.tiled_display_config.enable == 0 && 709 | appCtx[i]->config.num_source_sub_bins == 1)) { 710 | attr.event_mask = KeyPress; 711 | } else { 712 | attr.event_mask = ButtonPress | KeyRelease; 713 | } 714 | XChangeWindowAttributes (display, windows[i], CWEventMask, &attr); 715 | 716 | Atom wmDeleteMessage = XInternAtom (display, "WM_DELETE_WINDOW", False); 717 | if (wmDeleteMessage != None) { 718 | XSetWMProtocols (display, windows[i], &wmDeleteMessage, 1); 719 | } 720 | XMapRaised (display, windows[i]); 721 | XSync (display, 1); //discard the events for now 722 | gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (appCtx 723 | [i]->pipeline.instance_bins[0].sink_bin.sub_bins[j].sink), 724 | (gulong) windows[i]); 725 | gst_video_overlay_expose (GST_VIDEO_OVERLAY (appCtx[i]-> 726 | pipeline.instance_bins[0].sink_bin.sub_bins[j].sink)); 727 | if (!x_event_thread) 728 | x_event_thread = g_thread_new ("nvds-window-event-thread", 729 | nvds_x_event_thread, NULL); 730 | } 731 | } 732 | 733 | /* Dont try to set playing state if error is observed */ 734 | if (return_value != -1) { 735 | for (i = 0; i < num_instances; i++) { 736 | if (gst_element_set_state (appCtx[i]->pipeline.pipeline, 737 | GST_STATE_PLAYING) == 
GST_STATE_CHANGE_FAILURE) { 738 | 739 | g_print ("\ncan't set pipeline to playing state.\n"); 740 | return_value = -1; 741 | goto done; 742 | } 743 | } 744 | } 745 | 746 | print_runtime_commands (); 747 | 748 | changemode (1); 749 | 750 | g_timeout_add (40, event_thread_func, NULL); 751 | g_main_loop_run (main_loop); 752 | 753 | changemode (0); 754 | 755 | done: 756 | 757 | g_print ("Quitting\n"); 758 | for (i = 0; i < num_instances; i++) { 759 | if (appCtx[i]->return_value == -1) 760 | return_value = -1; 761 | destroy_pipeline (appCtx[i]); 762 | 763 | g_mutex_lock (&disp_lock); 764 | if (windows[i]) 765 | XDestroyWindow (display, windows[i]); 766 | windows[i] = 0; 767 | g_mutex_unlock (&disp_lock); 768 | 769 | g_free (appCtx[i]); 770 | } 771 | 772 | g_mutex_lock (&disp_lock); 773 | if (display) 774 | XCloseDisplay (display); 775 | display = NULL; 776 | g_mutex_unlock (&disp_lock); 777 | g_mutex_clear (&disp_lock); 778 | 779 | if (main_loop) { 780 | g_main_loop_unref (main_loop); 781 | } 782 | 783 | if (ctx) { 784 | g_option_context_free (ctx); 785 | } 786 | 787 | if (return_value == 0) { 788 | g_print ("App run successful\n"); 789 | } else { 790 | g_print ("App run failed\n"); 791 | } 792 | 793 | gst_deinit (); 794 | 795 | return return_value; 796 | } 797 | -------------------------------------------------------------------------------- /src/deepstream_redaction_app.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. 
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/* NOTE(review): the targets of the four system #includes below were lost in
 * extraction (the <...> header names were stripped) -- restore from the
 * upstream source before building. */
#include 
#include 
#include 
#include 

#include "deepstream_app.h"

#define MAX_DISPLAY_LEN 64
static guint batch_num = 0;

GST_DEBUG_CATEGORY_EXTERN (NVDS_APP);

GQuark _dsmeta_quark;

#define CEIL(a,b) ((a + b - 1) / b)
/* How often to re-check a disconnected RTSP source, in milliseconds. */
#define SOURCE_RESET_INTERVAL_IN_MS 60000

/**
 * Function called at regular interval when one of NV_DS_SOURCE_RTSP type
 * source in the pipeline is down / disconnected. This function tries to
 * reconnect the source by resetting that source pipeline.
 */
static gboolean
watch_source_status (gpointer data)
{
  NvDsSrcBin *src_bin = (NvDsSrcBin *) data;

  g_print ("watch_source_status %s\n", GST_ELEMENT_NAME(src_bin));
  if (src_bin && src_bin->reconfiguring) {
    // source is still not up, reconfigure it again.
    g_timeout_add (20, reset_source_pipeline, src_bin);
    return TRUE;   /* keep this watchdog scheduled */
  } else {
    // source is reconfigured, remove call back.
    return FALSE;
  }
}

/**
 * callback function to receive messages from components
 * in the pipeline.
 */
static gboolean
bus_callback (GstBus * bus, GstMessage * message, gpointer data)
{
  AppCtx *appCtx = (AppCtx *) data;
  GST_CAT_DEBUG (NVDS_APP,
      "Received message on bus: source %s, msg_type %s",
      GST_MESSAGE_SRC_NAME (message), GST_MESSAGE_TYPE_NAME (message));
  switch (GST_MESSAGE_TYPE (message)) {
    case GST_MESSAGE_INFO:{
      GError *error = NULL;
      gchar *debuginfo = NULL;
      gst_message_parse_info (message, &error, &debuginfo);
      g_printerr ("INFO from %s: %s\n",
          GST_OBJECT_NAME (message->src), error->message);
      if (debuginfo) {
        g_printerr ("Debug info: %s\n", debuginfo);
      }
      g_error_free (error);
      g_free (debuginfo);
      break;
    }
    case GST_MESSAGE_WARNING:{
      GError *error = NULL;
      gchar *debuginfo = NULL;
      gst_message_parse_warning (message, &error, &debuginfo);
      g_printerr ("WARNING from %s: %s\n",
          GST_OBJECT_NAME (message->src), error->message);
      if (debuginfo) {
        g_printerr ("Debug info: %s\n", debuginfo);
      }
      g_error_free (error);
      g_free (debuginfo);
      break;
    }
    case GST_MESSAGE_ERROR:{
      GError *error = NULL;
      gchar *debuginfo = NULL;
      guint i = 0;
      gst_message_parse_error (message, &error, &debuginfo);
      g_printerr ("ERROR from %s: %s\n",
          GST_OBJECT_NAME (message->src), error->message);
      if (debuginfo) {
        g_printerr ("Debug info: %s\n", debuginfo);
      }

      /* Determine whether the error originated from one of our sources. */
      NvDsSrcParentBin *bin = &appCtx->pipeline.multi_src_bin;
      for (i = 0; i < bin->num_bins; i++) {
        if (bin->sub_bins[i].src_elem == (GstElement *) GST_MESSAGE_SRC (message))
          break;
      }

      if ((i != bin->num_bins) &&
          (appCtx->config.multi_source_config[0].type == NV_DS_SOURCE_RTSP)) {
        // Error from one of RTSP source.
        NvDsSrcBin *subBin = &bin->sub_bins[i];

        if (!subBin->reconfiguring ||
            g_strrstr(debuginfo, "500 (Internal Server Error)")) {
          if (!subBin->reconfiguring) {
            // Check status of stream at regular interval.
            g_timeout_add (SOURCE_RESET_INTERVAL_IN_MS,
                watch_source_status, subBin);
          }
          // Reconfigure the stream.
          subBin->reconfiguring = TRUE;
          g_timeout_add (20, reset_source_pipeline, subBin);
        }
        g_error_free (error);
        g_free (debuginfo);
        /* RTSP errors are recoverable: swallow them and keep running. */
        return TRUE;
      }

      /* Any other error is fatal for this instance. */
      g_error_free (error);
      g_free (debuginfo);
      appCtx->return_value = -1;
      appCtx->quit = TRUE;
      break;
    }
    case GST_MESSAGE_STATE_CHANGED:{
      GstState oldstate, newstate;
      gst_message_parse_state_changed (message, &oldstate, &newstate, NULL);
      /* Only react to state changes of the top-level pipeline. */
      if (GST_ELEMENT (GST_MESSAGE_SRC (message)) == appCtx->pipeline.pipeline) {
        switch (newstate) {
          case GST_STATE_PLAYING:
            NVGSTDS_INFO_MSG_V ("Pipeline running\n");
            GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (appCtx->
                    pipeline.pipeline), GST_DEBUG_GRAPH_SHOW_ALL,
                "ds-app-playing");
            break;
          case GST_STATE_PAUSED:
            if (oldstate == GST_STATE_PLAYING) {
              NVGSTDS_INFO_MSG_V ("Pipeline paused\n");
            }
            break;
          case GST_STATE_READY:
            GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (appCtx->pipeline.
                    pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "ds-app-ready");
            if (oldstate == GST_STATE_NULL) {
              NVGSTDS_INFO_MSG_V ("Pipeline ready\n");
            } else {
              NVGSTDS_INFO_MSG_V ("Pipeline stopped\n");
            }
            break;
          case GST_STATE_NULL:
            /* Wake any thread waiting on app_cond for the NULL transition. */
            g_mutex_lock (&appCtx->app_lock);
            g_cond_broadcast (&appCtx->app_cond);
            g_mutex_unlock (&appCtx->app_lock);
            break;
          default:
            break;
        }
      }
      break;
    }
    case GST_MESSAGE_EOS:{
      /*
       * In normal scenario, this would use g_main_loop_quit() to exit the
       * loop and release the resources. Since this application might be
       * running multiple pipelines through configuration files, it should wait
       * till all pipelines are done.
       */
      NVGSTDS_INFO_MSG_V ("Received EOS. Exiting ...\n");
      appCtx->quit = TRUE;
      return FALSE;   /* remove this bus watch */
      break;
    }
    default:
      break;
  }
  return TRUE;
}

/* Synchronous bus handler: detects ASYNC_DONE from a reconfigured RTSP
 * sub-bin and schedules it back to PLAYING. */
static GstBusSyncReply
bus_sync_handler (GstBus * bus, GstMessage * msg, gpointer data)
{
  AppCtx *appCtx = (AppCtx *) data;

  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_ELEMENT:
      if (GST_MESSAGE_SRC (msg) == GST_OBJECT (appCtx->pipeline.multi_src_bin.bin)) {
        const GstStructure *structure;
        structure = gst_message_get_structure (msg);

        /* multi_src_bin forwards its children's messages wrapped in a
         * "GstBinForwarded" structure. */
        if (gst_structure_has_name (structure, "GstBinForwarded")) {
          GstMessage *child_msg;

          if (gst_structure_has_field (structure, "message")) {
            const GValue *val = gst_structure_get_value (structure, "message");
            if (G_VALUE_TYPE (val) == GST_TYPE_MESSAGE) {
              child_msg = (GstMessage *) g_value_get_boxed (val);
              if (GST_MESSAGE_TYPE(child_msg) == GST_MESSAGE_ASYNC_DONE) {
                guint i = 0;
                NvDsSrcParentBin *bin = &appCtx->pipeline.multi_src_bin;
                GST_DEBUG ("num bins: %d, message src: %s\n", bin->num_bins,
                    GST_MESSAGE_SRC_NAME(child_msg));
                /* Find which sub-bin finished its async state change. */
                for (i = 0; i < bin->num_bins; i++) {
                  if (bin->sub_bins[i].bin == (GstElement *) GST_MESSAGE_SRC (child_msg))
                    break;
                }

                if (i != bin->num_bins) {
                  NvDsSrcBin *subBin = &bin->sub_bins[i];
                  if (subBin->reconfiguring &&
                      appCtx->config.multi_source_config[0].type == NV_DS_SOURCE_RTSP)
                    g_timeout_add (20, set_source_to_playing, subBin);
                }
              }
            }
          }
        }
      }
      return GST_BUS_PASS;

    default:
      return GST_BUS_PASS;
  }
}

/**
 * Function to dump bounding box data in kitti format. For this to work,
 * property "gie-kitti-output-dir" must be set in configuration file.
 * Data of different sources and frames is dumped in separate file.
 */
static void
write_kitti_output (AppCtx * appCtx, NvDsBatchMeta * batch_meta)
{
  gchar bbox_file[1024] = { 0 };
  FILE *bbox_params_dump_file = NULL;

  if (!appCtx->config.bbox_dir_path)
    return;

  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = l_frame->data;
    guint stream_id = frame_meta->pad_index;
    /* One file per (app instance, stream, frame number). */
    g_snprintf (bbox_file, sizeof (bbox_file) - 1,
        "%s/%02u_%03u_%06lu.txt", appCtx->config.bbox_dir_path,
        appCtx->index, stream_id, (gulong) frame_meta->frame_num);
    bbox_params_dump_file = fopen (bbox_file, "w");
    if (!bbox_params_dump_file)
      continue;

    for (NvDsMetaList * l_obj = frame_meta->obj_meta_list; l_obj != NULL;
        l_obj = l_obj->next) {
      NvDsObjectMeta *obj = (NvDsObjectMeta *) l_obj->data;
      int left = obj->rect_params.left;
      int top = obj->rect_params.top;
      int right = left + obj->rect_params.width;
      int bottom = top + obj->rect_params.height;
      /* KITTI: label, truncation, occlusion, alpha, bbox, then 3D fields
       * (unused here, written as zeros). */
      fprintf (bbox_params_dump_file,
          "%s 0.0 0 0.0 %d.00 %d.00 %d.00 %d.00 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n",
          obj->obj_label, left, top, right, bottom);
    }
    fclose (bbox_params_dump_file);
  }
}

/**
 * Function to dump bounding box data in kitti format with tracking ID added.
 * For this to work, property "kitti-track-output-dir" must be set in configuration file.
 * Data of different sources and frames is dumped in separate file.
 */
static void
write_kitti_track_output (AppCtx * appCtx, NvDsBatchMeta * batch_meta)
{
  gchar bbox_file[1024] = { 0 };
  FILE *bbox_params_dump_file = NULL;

  if (!appCtx->config.kitti_track_dir_path)
    return;

  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = l_frame->data;
    guint stream_id = frame_meta->pad_index;
    g_snprintf (bbox_file, sizeof (bbox_file) - 1,
        "%s/%02u_%03u_%06lu.txt", appCtx->config.kitti_track_dir_path,
        appCtx->index, stream_id, (gulong) frame_meta->frame_num);
    bbox_params_dump_file = fopen (bbox_file, "w");
    if (!bbox_params_dump_file)
      continue;

    for (NvDsMetaList * l_obj = frame_meta->obj_meta_list; l_obj != NULL;
        l_obj = l_obj->next) {
      NvDsObjectMeta *obj = (NvDsObjectMeta *) l_obj->data;
      int left = obj->rect_params.left;
      int top = obj->rect_params.top;
      int right = left + obj->rect_params.width;
      int bottom = top + obj->rect_params.height;
      guint64 id = obj->object_id;
      /* Same layout as write_kitti_output, with the tracker ID inserted
       * after the label. */
      fprintf (bbox_params_dump_file,
          "%s %lu 0.0 0 0.0 %d.00 %d.00 %d.00 %d.00 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n",
          obj->obj_label, id, left, top, right, bottom);
    }
    fclose (bbox_params_dump_file);
  }
}

/* Comparator ordering classifier metas by unique_component_id.
 * NOTE(review): truncated here -- the remainder of this function lies
 * beyond the end of this chunk. */
static gint
component_id_compare_func (gconstpointer a, gconstpointer b)
{
  NvDsClassifierMeta *cmetaa = (NvDsClassifierMeta *) a;
  NvDsClassifierMeta *cmetab = (NvDsClassifierMeta *) b;

  if (cmetaa->unique_component_id <
cmetab->unique_component_id) 331 | return -1; 332 | if (cmetaa->unique_component_id > cmetab->unique_component_id) 333 | return 1; 334 | return 0; 335 | } 336 | 337 | /** 338 | * Function to process the attached metadata. This is just for demonstration 339 | * and can be removed if not required. 340 | * Here it demonstrates to use bounding boxes of different color and size for 341 | * different type / class of objects. 342 | * It also demonstrates how to join the different labels(PGIE + SGIEs) 343 | * of an object to form a single string. 344 | */ 345 | static void 346 | process_meta (AppCtx * appCtx, NvDsBatchMeta * batch_meta) 347 | { 348 | // For single source always display text either with demuxer or with tiler 349 | if (!appCtx->config.tiled_display_config.enable || 350 | appCtx->config.num_source_sub_bins == 1) { 351 | appCtx->show_bbox_text = 0; // redaction modified 352 | } 353 | 354 | for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL; 355 | l_frame = l_frame->next) { 356 | NvDsFrameMeta *frame_meta = l_frame->data; 357 | for (NvDsMetaList * l_obj = frame_meta->obj_meta_list; l_obj != NULL; 358 | l_obj = l_obj->next) { 359 | NvDsObjectMeta *obj = (NvDsObjectMeta *) l_obj->data; 360 | gint class_index = obj->class_id; 361 | NvDsGieConfig *gie_config = NULL; 362 | gchar *str_ins_pos = NULL; 363 | 364 | if (obj->unique_component_id == 365 | (gint) appCtx->config.primary_gie_config.unique_id) { 366 | gie_config = &appCtx->config.primary_gie_config; 367 | } else { 368 | for (gint i = 0; i < (gint) appCtx->config.num_secondary_gie_sub_bins; 369 | i++) { 370 | gie_config = &appCtx->config.secondary_gie_sub_bin_config[i]; 371 | if (obj->unique_component_id == (gint) gie_config->unique_id) { 372 | break; 373 | } 374 | gie_config = NULL; 375 | } 376 | } 377 | g_free (obj->text_params.display_text); 378 | obj->text_params.display_text = NULL; 379 | 380 | // redaction modified 381 | 382 | /* 383 | if (gie_config != NULL) { 384 | if 
(g_hash_table_contains (gie_config->bbox_border_color_table, 385 | class_index + (gchar *) NULL)) { 386 | obj->rect_params.border_color = 387 | *((NvOSD_ColorParams *) 388 | g_hash_table_lookup (gie_config->bbox_border_color_table, 389 | class_index + (gchar *) NULL)); 390 | } else { 391 | obj->rect_params.border_color = gie_config->bbox_border_color; 392 | } 393 | obj->rect_params.border_width = appCtx->config.osd_config.border_width; 394 | 395 | if (g_hash_table_contains (gie_config->bbox_bg_color_table, 396 | class_index + (gchar *) NULL)) { 397 | obj->rect_params.has_bg_color = 1; 398 | obj->rect_params.bg_color = 399 | *((NvOSD_ColorParams *) 400 | g_hash_table_lookup (gie_config->bbox_bg_color_table, 401 | class_index + (gchar *) NULL)); 402 | } else { 403 | obj->rect_params.has_bg_color = 0; 404 | } 405 | } 406 | */ 407 | NvOSD_RectParams * rect_params = &(obj->rect_params); 408 | /* Draw black patch to cover faces (class_id = 0) */ 409 | if (obj->class_id == 0) { 410 | rect_params->border_width = 0; 411 | rect_params->has_bg_color = 1; 412 | rect_params->bg_color.red = 0.0; 413 | rect_params->bg_color.green = 0.0; 414 | rect_params->bg_color.blue = 0.0; 415 | rect_params->bg_color.alpha = 1.0; 416 | } 417 | if (!appCtx->show_bbox_text) 418 | continue; 419 | 420 | obj->text_params.x_offset = obj->rect_params.left; 421 | obj->text_params.y_offset = obj->rect_params.top - 30; 422 | obj->text_params.font_params.font_color = 423 | appCtx->config.osd_config.text_color; 424 | obj->text_params.font_params.font_size = 425 | appCtx->config.osd_config.text_size; 426 | obj->text_params.font_params.font_name = appCtx->config.osd_config.font; 427 | if (appCtx->config.osd_config.text_has_bg) { 428 | obj->text_params.set_bg_clr = 1; 429 | obj->text_params.text_bg_clr = appCtx->config.osd_config.text_bg_color; 430 | } 431 | 432 | obj->text_params.display_text = g_malloc (128); 433 | obj->text_params.display_text[0] = '\0'; 434 | str_ins_pos = obj->text_params.display_text; 
435 | 436 | if (obj->obj_label[0] != '\0') 437 | sprintf (str_ins_pos, "%s", obj->obj_label); 438 | str_ins_pos += strlen (str_ins_pos); 439 | 440 | if (obj->object_id != UNTRACKED_OBJECT_ID) { 441 | sprintf (str_ins_pos, " %lu", obj->object_id); 442 | str_ins_pos += strlen (str_ins_pos); 443 | } 444 | 445 | obj->classifier_meta_list = 446 | g_list_sort (obj->classifier_meta_list, component_id_compare_func); 447 | for (NvDsMetaList * l_class = obj->classifier_meta_list; l_class != NULL; 448 | l_class = l_class->next) { 449 | NvDsClassifierMeta *cmeta = (NvDsClassifierMeta *) l_class->data; 450 | for (NvDsMetaList * l_label = cmeta->label_info_list; l_label != NULL; 451 | l_label = l_label->next) { 452 | NvDsLabelInfo *label = (NvDsLabelInfo *) l_label->data; 453 | if (label->pResult_label) { 454 | sprintf (str_ins_pos, " %s", label->pResult_label); 455 | } else if (label->result_label[0] != '\0') { 456 | sprintf (str_ins_pos, " %s", label->result_label); 457 | } 458 | str_ins_pos += strlen (str_ins_pos); 459 | } 460 | 461 | } 462 | } 463 | } 464 | } 465 | 466 | /** 467 | * Function which processes the inferred buffer and its metadata. 468 | * It also gives opportunity to attach application specific 469 | * metadata (e.g. clock, analytics output etc.). 470 | */ 471 | static void 472 | process_buffer (GstBuffer * buf, AppCtx * appCtx, guint index) 473 | { 474 | NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf); 475 | if (!batch_meta) { 476 | NVGSTDS_WARN_MSG_V ("Batch meta not found for buffer %p", buf); 477 | return; 478 | } 479 | process_meta (appCtx, batch_meta); 480 | //NvDsInstanceData *data = &appCtx->instance_data[index]; 481 | //guint i; 482 | 483 | // data->frame_num++; 484 | 485 | /* Opportunity to modify the processed metadata or do analytics based on 486 | * type of object e.g. maintaining count of particular type of car. 
487 | */ 488 | if (appCtx->all_bbox_generated_cb) { 489 | appCtx->all_bbox_generated_cb (appCtx, buf, batch_meta, index); 490 | } 491 | //data->bbox_list_size = 0; 492 | 493 | /* 494 | * callback to attach application specific additional metadata. 495 | */ 496 | if (appCtx->overlay_graphics_cb) { 497 | appCtx->overlay_graphics_cb (appCtx, buf, batch_meta, index); 498 | } 499 | } 500 | 501 | /** 502 | * Buffer probe function to get the results of primary infer. 503 | * Here it demonstrates the use by dumping bounding box coordinates in 504 | * kitti format. 505 | */ 506 | static GstPadProbeReturn 507 | gie_primary_processing_done_buf_prob (GstPad * pad, GstPadProbeInfo * info, 508 | gpointer u_data) 509 | { 510 | GstBuffer *buf = (GstBuffer *) info->data; 511 | AppCtx *appCtx = (AppCtx *) u_data; 512 | NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf); 513 | if (!batch_meta) { 514 | NVGSTDS_WARN_MSG_V ("Batch meta not found for buffer %p", buf); 515 | return GST_PAD_PROBE_OK; 516 | } 517 | 518 | write_kitti_output (appCtx, batch_meta); 519 | 520 | return GST_PAD_PROBE_OK; 521 | } 522 | 523 | /** 524 | * Probe function to get results after all inferences(Primary + Secondary) 525 | * are done. This will be just before OSD or sink (in case OSD is disabled). 526 | */ 527 | static GstPadProbeReturn 528 | gie_processing_done_buf_prob (GstPad * pad, GstPadProbeInfo * info, 529 | gpointer u_data) 530 | { 531 | GstBuffer *buf = (GstBuffer *) info->data; 532 | NvDsInstanceBin *bin = (NvDsInstanceBin *) u_data; 533 | guint index = bin->index; 534 | AppCtx *appCtx = bin->appCtx; 535 | 536 | if (gst_buffer_is_writable (buf)) 537 | process_buffer (buf, appCtx, index); 538 | return GST_PAD_PROBE_OK; 539 | } 540 | 541 | /** 542 | * Buffer probe function after tracker. 
 */
static GstPadProbeReturn
tracking_done_buf_prob (GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
{
  NvDsInstanceBin *bin = (NvDsInstanceBin *) u_data;
  guint index = bin->index;
  AppCtx *appCtx = bin->appCtx;
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
  if (!batch_meta) {
    NVGSTDS_WARN_MSG_V ("Batch meta not found for buffer %p", buf);
    return GST_PAD_PROBE_OK;
  }

  /*
   * Output KITTI labels with tracking ID if configured to do so.
   */
  write_kitti_track_output(appCtx, batch_meta);

  if (appCtx->primary_bbox_generated_cb)
    appCtx->primary_bbox_generated_cb (appCtx, buf, batch_meta, index);
  return GST_PAD_PROBE_OK;
}

/**
 * Buffer probe that prints per-source frame latency for every batch when
 * DeepStream latency measurement is enabled (nvds_enable_latency_measurement).
 * NOTE(review): relies on a file-scope `batch_num` counter defined elsewhere
 * in this file.
 */
static GstPadProbeReturn
latency_measurement_buf_prob(GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
{
  AppCtx *appCtx = (AppCtx *) u_data;
  guint i = 0, num_sources_in_batch = 0;
  if(nvds_enable_latency_measurement)
  {
    GstBuffer *buf = (GstBuffer *) info->data;
    NvDsFrameLatencyInfo *latency_info = NULL;
    /* latency_info array is shared; guard against concurrent probes. */
    g_mutex_lock (&appCtx->latency_lock);
    latency_info = appCtx->latency_info;
    g_print("\n************BATCH-NUM = %d**************\n",batch_num);
    num_sources_in_batch = nvds_measure_buffer_latency(buf, latency_info);

    for(i = 0; i < num_sources_in_batch; i++)
    {
      g_print("Source id = %d Frame_num = %d Frame latency = %lf (ms) \n",
          latency_info[i].source_id,
          latency_info[i].frame_num,
          latency_info[i].latency);
    }
    g_mutex_unlock (&appCtx->latency_lock);
    batch_num++;
  }

  return GST_PAD_PROBE_OK;
}

/**
 * Function to add components to pipeline which are dependent on number
 * of streams. These components work on single buffer. If tiling is being
 * used then single instance will be created otherwise < N > such instances
 * will be created for < N > streams
 */
static gboolean
create_processing_instance (AppCtx * appCtx, guint index)
{
  gboolean ret = FALSE;
  NvDsConfig *config = &appCtx->config;
  NvDsInstanceBin *instance_bin = &appCtx->pipeline.instance_bins[index];
  GstElement *last_elem;
  gchar elem_name[32];

  instance_bin->index = index;
  instance_bin->appCtx = appCtx;

  g_snprintf (elem_name, 32, "processing_bin_%d", index);
  instance_bin->bin = gst_bin_new (elem_name);

  if (!create_sink_bin (config->num_sink_sub_bins,
          config->sink_bin_sub_bin_config, &instance_bin->sink_bin, index)) {
    goto done;
  }

  gst_bin_add (GST_BIN (instance_bin->bin), instance_bin->sink_bin.bin);
  last_elem = instance_bin->sink_bin.bin;

  /* Optional OSD stage is linked upstream of the sink. */
  if (config->osd_config.enable) {
    if (!create_osd_bin (&config->osd_config, &instance_bin->osd_bin)) {
      goto done;
    }

    gst_bin_add (GST_BIN (instance_bin->bin), instance_bin->osd_bin.bin);

    NVGSTDS_LINK_ELEMENT (instance_bin->osd_bin.bin, last_elem);

    last_elem = instance_bin->osd_bin.bin;
  }

  NVGSTDS_BIN_ADD_GHOST_PAD (instance_bin->bin, last_elem, "sink");
  /* Attach the "all inference done" probe on the OSD input when OSD exists,
   * otherwise directly on the sink bin input. */
  if (config->osd_config.enable) {
    NVGSTDS_ELEM_ADD_PROBE (instance_bin->all_bbox_buffer_probe_id,
        instance_bin->osd_bin.nvosd, "sink",
        gie_processing_done_buf_prob, GST_PAD_PROBE_TYPE_BUFFER, instance_bin);
  } else {
    NVGSTDS_ELEM_ADD_PROBE (instance_bin->all_bbox_buffer_probe_id,
        instance_bin->sink_bin.bin, "sink",
        gie_processing_done_buf_prob, GST_PAD_PROBE_TYPE_BUFFER, instance_bin);
  }

  ret = TRUE;
done:
  if (!ret) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ret;
}

/**
 * Function to create common elements(Primary infer, tracker, secondary infer)
 * of the pipeline. These components operate on muxed data from all the
 * streams. So they are independent of number of streams in the pipeline.
 */
static gboolean
create_common_elements (NvDsConfig * config, NvDsPipeline * pipeline,
    GstElement ** sink_elem, GstElement ** src_elem,
    bbox_generated_callback primary_bbox_generated_cb)
{
  gboolean ret = FALSE;
  /* Bins are created downstream-first; *sink_elem ends up as the upstream-most
   * element and *src_elem as the downstream-most. */
  *sink_elem = *src_elem = NULL;
  if (config->primary_gie_config.enable) {
    if (config->num_secondary_gie_sub_bins > 0) {
      if (!create_secondary_gie_bin (config->num_secondary_gie_sub_bins,
              config->primary_gie_config.unique_id,
              config->secondary_gie_sub_bin_config,
              &pipeline->common_elements.secondary_gie_bin)) {
        goto done;
      }
      gst_bin_add (GST_BIN (pipeline->pipeline),
          pipeline->common_elements.secondary_gie_bin.bin);
      if (!*src_elem) {
        *src_elem = pipeline->common_elements.secondary_gie_bin.bin;
      }
      if (*sink_elem) {
        NVGSTDS_LINK_ELEMENT (pipeline->common_elements.secondary_gie_bin.bin,
            *sink_elem);
      }
      *sink_elem = pipeline->common_elements.secondary_gie_bin.bin;
    }
  }

  if (config->tracker_config.enable) {
    if (!create_tracking_bin (&config->tracker_config,
            &pipeline->common_elements.tracker_bin)) {
      g_print ("creating tracker bin failed\n");
      goto done;
    }
    gst_bin_add (GST_BIN (pipeline->pipeline),
        pipeline->common_elements.tracker_bin.bin);
    if (!*src_elem) {
      *src_elem = pipeline->common_elements.tracker_bin.bin;
    }
    if (*sink_elem) {
      NVGSTDS_LINK_ELEMENT (pipeline->common_elements.tracker_bin.bin,
          *sink_elem);
    }
    *sink_elem = pipeline->common_elements.tracker_bin.bin;
  }

  if (config->primary_gie_config.enable) {
    if (!create_primary_gie_bin (&config->primary_gie_config,
            &pipeline->common_elements.primary_gie_bin)) {
      goto done;
    }
    gst_bin_add (GST_BIN (pipeline->pipeline),
        pipeline->common_elements.primary_gie_bin.bin);
    if (*sink_elem) {
      NVGSTDS_LINK_ELEMENT (pipeline->common_elements.primary_gie_bin.bin,
          *sink_elem);
    }
    *sink_elem = pipeline->common_elements.primary_gie_bin.bin;
    if (!*src_elem) {
      *src_elem = pipeline->common_elements.primary_gie_bin.bin;
    }
    NVGSTDS_ELEM_ADD_PROBE (pipeline->common_elements.
        primary_bbox_buffer_probe_id,
        pipeline->common_elements.primary_gie_bin.bin, "src",
        gie_primary_processing_done_buf_prob, GST_PAD_PROBE_TYPE_BUFFER,
        pipeline->common_elements.appCtx);
  }

  /* NOTE(review): primary_bbox_buffer_probe_id is written again here,
   * overwriting the id stored by the probe attached just above — only the
   * later probe id remains available for later removal. Confirm intended. */
  if (config->primary_gie_config.enable) {
    if (config->tracker_config.enable) {
      NVGSTDS_ELEM_ADD_PROBE (pipeline->common_elements.
          primary_bbox_buffer_probe_id,
          pipeline->common_elements.tracker_bin.bin, "src",
          tracking_done_buf_prob, GST_PAD_PROBE_TYPE_BUFFER,
          &pipeline->common_elements);
    } else {
      NVGSTDS_ELEM_ADD_PROBE (pipeline->common_elements.
          primary_bbox_buffer_probe_id,
          pipeline->common_elements.primary_gie_bin.bin, "src",
          tracking_done_buf_prob, GST_PAD_PROBE_TYPE_BUFFER,
          &pipeline->common_elements);
    }
  }
  ret = TRUE;
done:
  return ret;
}

/**
 * Main function to create the pipeline.
750 | */ 751 | gboolean 752 | create_pipeline (AppCtx * appCtx, 753 | bbox_generated_callback primary_bbox_generated_cb, 754 | bbox_generated_callback all_bbox_generated_cb, perf_callback perf_cb, 755 | overlay_graphics_callback overlay_graphics_cb) 756 | { 757 | gboolean ret = FALSE; 758 | NvDsPipeline *pipeline = &appCtx->pipeline; 759 | NvDsConfig *config = &appCtx->config; 760 | GstBus *bus; 761 | GstElement *last_elem; 762 | GstElement *tmp_elem1; 763 | GstElement *tmp_elem2; 764 | guint i; 765 | GstPad *fps_pad; 766 | gulong latency_probe_id; 767 | 768 | _dsmeta_quark = g_quark_from_static_string (NVDS_META_STRING); 769 | 770 | appCtx->all_bbox_generated_cb = all_bbox_generated_cb; 771 | appCtx->primary_bbox_generated_cb = primary_bbox_generated_cb; 772 | appCtx->overlay_graphics_cb = overlay_graphics_cb; 773 | 774 | if (config->osd_config.num_out_buffers < 8) { 775 | config->osd_config.num_out_buffers = 8; 776 | } 777 | 778 | pipeline->pipeline = gst_pipeline_new ("pipeline"); 779 | if (!pipeline->pipeline) { 780 | NVGSTDS_ERR_MSG_V ("Failed to create pipeline"); 781 | goto done; 782 | } 783 | 784 | bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline->pipeline)); 785 | pipeline->bus_id = gst_bus_add_watch (bus, bus_callback, appCtx); 786 | gst_bus_set_sync_handler (bus, bus_sync_handler, appCtx, NULL); 787 | gst_object_unref (bus); 788 | 789 | if (config->file_loop) { 790 | /* Let each source bin know it needs to loop. */ 791 | guint i; 792 | for (i = 0; i < config->num_source_sub_bins; i++) 793 | config->multi_source_config[i].loop = TRUE; 794 | } 795 | 796 | for (guint i = 0; i < config->num_sink_sub_bins; i++) { 797 | NvDsSinkSubBinConfig *sink_config = &config->sink_bin_sub_bin_config[i]; 798 | switch (sink_config->type) { 799 | case NV_DS_SINK_FAKE: 800 | case NV_DS_SINK_RENDER_EGL: 801 | case NV_DS_SINK_RENDER_OVERLAY: 802 | /* Set the "qos" property of sink, if not explicitly specified in the 803 | config. 
*/ 804 | if (!sink_config->render_config.qos_value_specified) { 805 | /* QoS events should be generated by sink always in case of live sources 806 | or with synchronous playback for non-live sources. */ 807 | if (config->streammux_config.live_source || sink_config->render_config.sync) { 808 | sink_config->render_config.qos = TRUE; 809 | } else { 810 | sink_config->render_config.qos = FALSE; 811 | } 812 | } 813 | default: 814 | break; 815 | } 816 | } 817 | 818 | /* 819 | * Add muxer and < N > source components to the pipeline based 820 | * on the settings in configuration file. 821 | */ 822 | if (!create_multi_source_bin (config->num_source_sub_bins, 823 | config->multi_source_config, &pipeline->multi_src_bin)) 824 | goto done; 825 | gst_bin_add (GST_BIN (pipeline->pipeline), pipeline->multi_src_bin.bin); 826 | 827 | 828 | if (config->streammux_config.is_parsed) 829 | set_streammux_properties (&config->streammux_config, 830 | pipeline->multi_src_bin.streammux); 831 | 832 | if(appCtx->latency_info == NULL) 833 | { 834 | appCtx->latency_info = (NvDsFrameLatencyInfo *) 835 | calloc(1, config->streammux_config.batch_size * 836 | sizeof(NvDsFrameLatencyInfo)); 837 | } 838 | 839 | if (config->tiled_display_config.enable) { 840 | 841 | /* Tiler will generate a single composited buffer for all sources. So need 842 | * to create only one processing instance. */ 843 | if (!create_processing_instance (appCtx, 0)) { 844 | goto done; 845 | } 846 | // create and add tiling component to pipeline. 
847 | if (config->tiled_display_config.columns * 848 | config->tiled_display_config.rows < config->num_source_sub_bins) { 849 | if (config->tiled_display_config.columns == 0) { 850 | config->tiled_display_config.columns = 851 | (guint) (sqrt (config->num_source_sub_bins) + 0.5); 852 | } 853 | config->tiled_display_config.rows = 854 | (guint) ceil (1.0 * config->num_source_sub_bins / 855 | config->tiled_display_config.columns); 856 | NVGSTDS_WARN_MSG_V 857 | ("Num of Tiles less than number of sources, readjusting to " 858 | "%u rows, %u columns", config->tiled_display_config.rows, 859 | config->tiled_display_config.columns); 860 | } 861 | 862 | gst_bin_add (GST_BIN (pipeline->pipeline), pipeline->instance_bins[0].bin); 863 | last_elem = pipeline->instance_bins[0].bin; 864 | 865 | if (!create_tiled_display_bin (&config->tiled_display_config, 866 | &pipeline->tiled_display_bin)) { 867 | goto done; 868 | } 869 | gst_bin_add (GST_BIN (pipeline->pipeline), pipeline->tiled_display_bin.bin); 870 | NVGSTDS_LINK_ELEMENT (pipeline->tiled_display_bin.bin, last_elem); 871 | last_elem = pipeline->tiled_display_bin.bin; 872 | } else { 873 | 874 | /* 875 | * Create demuxer only if tiled display is disabled. 876 | */ 877 | pipeline->demuxer = 878 | gst_element_factory_make (NVDS_ELEM_STREAM_DEMUX, "demuxer"); 879 | if (!pipeline->demuxer) { 880 | NVGSTDS_ERR_MSG_V ("Failed to create element 'demuxer'"); 881 | goto done; 882 | } 883 | gst_bin_add (GST_BIN (pipeline->pipeline), pipeline->demuxer); 884 | 885 | for (i = 0; i < config->num_source_sub_bins; i++) { 886 | gchar pad_name[16]; 887 | gboolean create_instance = FALSE; 888 | GstPad *demux_src_pad; 889 | guint j; 890 | 891 | /* Check if any sink has been configured to render/encode output for 892 | * source index `i`. The processing instance for that source will be 893 | * created only if atleast one sink has been configured as such. 
894 | */ 895 | for (j = 0; j < config->num_sink_sub_bins; j++) { 896 | if (config->sink_bin_sub_bin_config[j].enable && 897 | config->sink_bin_sub_bin_config[j].source_id == i) { 898 | create_instance = TRUE; 899 | break; 900 | } 901 | } 902 | 903 | if (!create_instance) 904 | continue; 905 | 906 | if (!create_processing_instance (appCtx, i)) { 907 | goto done; 908 | } 909 | gst_bin_add (GST_BIN (pipeline->pipeline), 910 | pipeline->instance_bins[i].bin); 911 | 912 | g_snprintf (pad_name, 16, "src_%02d", i); 913 | demux_src_pad = gst_element_get_request_pad (pipeline->demuxer, pad_name); 914 | NVGSTDS_LINK_ELEMENT_FULL (pipeline->demuxer, pad_name, 915 | pipeline->instance_bins[i].bin, "sink"); 916 | gst_object_unref (demux_src_pad); 917 | } 918 | 919 | last_elem = pipeline->demuxer; 920 | } 921 | fps_pad = gst_element_get_static_pad (last_elem, "sink"); 922 | 923 | pipeline->common_elements.appCtx = appCtx; 924 | // Decide where in the pipeline the element should be added and add only if 925 | // enabled 926 | if (config->dsexample_config.enable) { 927 | // Create dsexample element bin and set properties 928 | if (!create_dsexample_bin (&config->dsexample_config, 929 | &pipeline->dsexample_bin)) { 930 | goto done; 931 | } 932 | // Add dsexample bin to instance bin 933 | gst_bin_add (GST_BIN (pipeline->pipeline), pipeline->dsexample_bin.bin); 934 | 935 | // Link this bin to the last element in the bin 936 | NVGSTDS_LINK_ELEMENT (pipeline->dsexample_bin.bin, last_elem); 937 | 938 | // Set this bin as the last element 939 | last_elem = pipeline->dsexample_bin.bin; 940 | } 941 | // create and add common components to pipeline. 
942 | if (!create_common_elements (config, pipeline, &tmp_elem1, &tmp_elem2, 943 | primary_bbox_generated_cb)) { 944 | goto done; 945 | } 946 | 947 | if (tmp_elem2) { 948 | NVGSTDS_LINK_ELEMENT (tmp_elem2, last_elem); 949 | last_elem = tmp_elem1; 950 | } 951 | 952 | NVGSTDS_LINK_ELEMENT (pipeline->multi_src_bin.bin, last_elem); 953 | 954 | // enable performance measurement and add call back function to receive 955 | // performance data. 956 | if (config->enable_perf_measurement) { 957 | appCtx->perf_struct.context = appCtx; 958 | enable_perf_measurement (&appCtx->perf_struct, fps_pad, 959 | pipeline->multi_src_bin.num_bins, 960 | config->perf_measurement_interval_sec, perf_cb); 961 | } 962 | //gst_object_unref (fps_pad); 963 | 964 | NVGSTDS_ELEM_ADD_PROBE (latency_probe_id, 965 | pipeline->instance_bins->sink_bin.sub_bins[0].sink, "sink", 966 | latency_measurement_buf_prob, GST_PAD_PROBE_TYPE_BUFFER, 967 | appCtx); 968 | latency_probe_id = latency_probe_id; 969 | 970 | GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (appCtx->pipeline.pipeline), 971 | GST_DEBUG_GRAPH_SHOW_ALL, "ds-app-null"); 972 | 973 | g_mutex_init (&appCtx->app_lock); 974 | g_cond_init (&appCtx->app_cond); 975 | g_mutex_init (&appCtx->latency_lock); 976 | 977 | ret = TRUE; 978 | done: 979 | if (!ret) { 980 | NVGSTDS_ERR_MSG_V ("%s failed", __func__); 981 | } 982 | return ret; 983 | } 984 | 985 | /** 986 | * Function to destroy pipeline and release the resources, probes etc. 
987 | */ 988 | void 989 | destroy_pipeline (AppCtx * appCtx) 990 | { 991 | gint64 end_time; 992 | NvDsConfig *config = &appCtx->config; 993 | guint i; 994 | GstBus *bus = NULL; 995 | 996 | end_time = g_get_monotonic_time () + G_TIME_SPAN_SECOND; 997 | 998 | if (!appCtx) 999 | return; 1000 | 1001 | if (appCtx->pipeline.demuxer) { 1002 | gst_pad_send_event (gst_element_get_static_pad (appCtx->pipeline.demuxer, 1003 | "sink"), gst_event_new_eos ()); 1004 | } else if (appCtx->pipeline.instance_bins[0].sink_bin.bin) { 1005 | gst_pad_send_event (gst_element_get_static_pad (appCtx-> 1006 | pipeline.instance_bins[0].sink_bin.bin, "sink"), 1007 | gst_event_new_eos ()); 1008 | } 1009 | 1010 | g_usleep (100000); 1011 | 1012 | g_mutex_lock (&appCtx->app_lock); 1013 | if (appCtx->pipeline.pipeline) { 1014 | bus = gst_pipeline_get_bus (GST_PIPELINE (appCtx->pipeline.pipeline)); 1015 | 1016 | while (TRUE) { 1017 | GstMessage *message = gst_bus_pop (bus); 1018 | if (message == NULL) 1019 | break; 1020 | else if (GST_MESSAGE_TYPE (message) == GST_MESSAGE_ERROR) 1021 | bus_callback (bus, message, appCtx); 1022 | else 1023 | gst_message_unref (message); 1024 | } 1025 | gst_element_set_state (appCtx->pipeline.pipeline, GST_STATE_NULL); 1026 | } 1027 | g_cond_wait_until (&appCtx->app_cond, &appCtx->app_lock, end_time); 1028 | g_mutex_unlock (&appCtx->app_lock); 1029 | 1030 | for (i = 0; i < appCtx->config.num_source_sub_bins; i++) { 1031 | NvDsInstanceBin *bin = &appCtx->pipeline.instance_bins[i]; 1032 | if (config->osd_config.enable) { 1033 | NVGSTDS_ELEM_REMOVE_PROBE (bin->all_bbox_buffer_probe_id, 1034 | bin->osd_bin.nvosd, "sink"); 1035 | } else { 1036 | NVGSTDS_ELEM_REMOVE_PROBE (bin->all_bbox_buffer_probe_id, 1037 | bin->sink_bin.bin, "sink"); 1038 | } 1039 | 1040 | if (config->primary_gie_config.enable) { 1041 | NVGSTDS_ELEM_REMOVE_PROBE (bin->primary_bbox_buffer_probe_id, 1042 | bin->primary_gie_bin.bin, "src"); 1043 | } 1044 | 1045 | } 1046 | if(appCtx->latency_info == NULL) 
1047 | { 1048 | free(appCtx->latency_info); 1049 | appCtx->latency_info = NULL; 1050 | } 1051 | 1052 | g_mutex_clear(&appCtx->latency_lock); 1053 | 1054 | if (appCtx->pipeline.pipeline) { 1055 | bus = gst_pipeline_get_bus (GST_PIPELINE (appCtx->pipeline.pipeline)); 1056 | gst_bus_remove_watch (bus); 1057 | gst_object_unref (bus); 1058 | gst_object_unref (appCtx->pipeline.pipeline); 1059 | } 1060 | } 1061 | 1062 | gboolean 1063 | pause_pipeline (AppCtx * appCtx) 1064 | { 1065 | GstState cur; 1066 | GstState pending; 1067 | GstStateChangeReturn ret; 1068 | GstClockTime timeout = 5 * GST_SECOND / 1000; 1069 | 1070 | ret = 1071 | gst_element_get_state (appCtx->pipeline.pipeline, &cur, &pending, 1072 | timeout); 1073 | 1074 | if (ret == GST_STATE_CHANGE_ASYNC) { 1075 | return FALSE; 1076 | } 1077 | 1078 | if (cur == GST_STATE_PAUSED) { 1079 | return TRUE; 1080 | } else if (cur == GST_STATE_PLAYING) { 1081 | gst_element_set_state (appCtx->pipeline.pipeline, GST_STATE_PAUSED); 1082 | gst_element_get_state (appCtx->pipeline.pipeline, &cur, &pending, 1083 | GST_CLOCK_TIME_NONE); 1084 | pause_perf_measurement (&appCtx->perf_struct); 1085 | return TRUE; 1086 | } else { 1087 | return FALSE; 1088 | } 1089 | } 1090 | 1091 | gboolean 1092 | resume_pipeline (AppCtx * appCtx) 1093 | { 1094 | GstState cur; 1095 | GstState pending; 1096 | GstStateChangeReturn ret; 1097 | GstClockTime timeout = 5 * GST_SECOND / 1000; 1098 | 1099 | ret = 1100 | gst_element_get_state (appCtx->pipeline.pipeline, &cur, &pending, 1101 | timeout); 1102 | 1103 | if (ret == GST_STATE_CHANGE_ASYNC) { 1104 | return FALSE; 1105 | } 1106 | 1107 | if (cur == GST_STATE_PLAYING) { 1108 | return TRUE; 1109 | } else if (cur == GST_STATE_PAUSED) { 1110 | gst_element_set_state (appCtx->pipeline.pipeline, GST_STATE_PLAYING); 1111 | gst_element_get_state (appCtx->pipeline.pipeline, &cur, &pending, 1112 | GST_CLOCK_TIME_NONE); 1113 | resume_perf_measurement (&appCtx->perf_struct); 1114 | return TRUE; 1115 | } else { 1116 | 
return FALSE; 1117 | } 1118 | } 1119 | --------------------------------------------------------------------------------