├── CDLA-Sharing-v1.0.pdf
├── LICENSE.md
├── Makefile
├── README
├── README.md
├── deepstream_segmentation_app.c
├── dstest_segmentation_config_industrial.txt
├── dstest_segmentation_config_semantic.txt
├── gst-pipeline.png
├── input
└── 0599.jpg
├── masks
└── out_rgba_0599.jpg
├── performance-jetson-dgpu.png
├── segmentation-result.png
├── tlt-converter.png
├── unet-retrain.png
└── usr_input.txt
/CDLA-Sharing-v1.0.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NVIDIA-AI-IOT/deepstream-segmentation-analytics/d1ef8c36b47287576e6d713f1220bbf7d43d4562/CDLA-Sharing-v1.0.pdf
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) [year] [fullname]
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
3 | #
4 | # Permission is hereby granted, free of charge, to any person obtaining a
5 | # copy of this software and associated documentation files (the "Software"),
6 | # to deal in the Software without restriction, including without limitation
7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | # and/or sell copies of the Software, and to permit persons to whom the
9 | # Software is furnished to do so, subject to the following conditions:
10 | #
11 | # The above copyright notice and this permission notice shall be included in
12 | # all copies or substantial portions of the Software.
13 | #
14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | # DEALINGS IN THE SOFTWARE.
21 | ################################################################################
22 |
23 | APP:= deepstream-segmentation-analytics
24 |
# Compiler used by the %.o rule below. NOTE(review): the final link rule uses
# make's built-in $(CXX) rather than this CC variable -- both resolve to g++
# here, but the inconsistency is worth confirming.
25 | CC:=g++
26 |
# Architecture prefix reported by gcc (e.g. "aarch64" on Jetson).
27 | TARGET_DEVICE = $(shell gcc -dumpmachine | cut -f1 -d -)
28 |
29 | NVDS_VERSION:=5.0
30 |
31 | LIB_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib/
32 | APP_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/bin/
33 |
# Jetson/Tegra-specific compile flag (guards the mjpeg decoder property in the app).
34 | ifeq ($(TARGET_DEVICE),aarch64)
35 | CFLAGS:= -DPLATFORM_TEGRA
36 | endif
37 |
38 | SRCS:= $(wildcard *.c)
39 |
40 | INCS= $(wildcard *.h)
41 |
42 | PKGS:= gstreamer-1.0 gstreamer-video-1.0 x11 json-glib-1.0
43 |
44 | OBJS:= $(SRCS:.c=.o)
45 |
46 | CFLAGS+= -I../../apps-common/includes -I./includes -I../../../includes -I../deepstream-app/ -DDS_VERSION_MINOR=0 -DDS_VERSION_MAJOR=5
# NOTE(review): INC_DIR is never defined in this Makefile -- presumably supplied
# by the environment; confirm, otherwise this adds a bare "-I" to CFLAGS.
47 | CFLAGS+= -I$(INC_DIR)
48 |
49 |
50 | LIBS+= -L$(LIB_INSTALL_DIR) -lnvdsgst_meta -lnvds_meta -lnvdsgst_helper -lnvdsgst_smartrecord -lnvds_utils -lnvds_msgbroker -lm \
51 | -lgstrtspserver-1.0 -ldl -Wl,-rpath,$(LIB_INSTALL_DIR)
52 |
53 | CFLAGS+= `pkg-config --cflags $(PKGS)`
54 |
55 | LIBS+= `pkg-config --libs $(PKGS)`
56 |
57 | all: $(APP)
58 |
59 |
60 | %.o: %.c $(INCS) Makefile
61 | $(CC) -c -o $@ $(CFLAGS) $<
62 |
63 | $(APP): $(OBJS) Makefile
64 | $(CXX) -o $(APP) $(OBJS) $(LIBS)
65 |
66 | install: $(APP)
67 | cp -rv $(APP) $(APP_INSTALL_DIR)
68 |
69 | clean:
70 | rm -rf $(OBJS) $(APP)
71 |
72 |
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
1 | *****************************************************************************
2 | * Copyright (c) 2019-2020 NVIDIA Corporation. All rights reserved.
3 | *
4 | * NVIDIA Corporation and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA Corporation is strictly prohibited.
9 | *****************************************************************************
10 |
11 | Prerequisites:
12 |
13 | Please follow instructions in the apps/sample_apps/deepstream-app/README on how
14 | to install the prerequisites for the DeepStream SDK, the DeepStream SDK itself and the
15 | apps.
16 |
17 | You must have the following development packages installed
18 | GStreamer-1.0
19 | GStreamer-1.0 Base Plugins
20 | GStreamer-1.0 gstrtspserver
21 | X11 client-side library
22 |
23 | To install these packages, execute the following command:
24 | sudo apt-get install libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
25 | libgstrtspserver-1.0-dev libx11-dev
26 |
27 | Pipeline:
28 | H264/JPEG-->decoder-->tee -->| -- (batch size) ------>|-->streammux--> nvinfer--> nvsegvisual ----> nvosd --> |---> encode --->filesink
29 |
30 | Compilation Steps:
31 | $ cd apps/deepstream-segmentation-test/
32 | $ make
33 |
34 | the usr_input.txt
35 | =================
36 | this is the user input file that defines the input image batch size, the output display height and width, and the stream directories. Please follow the exact format of the example below: no missing fields or extra spaces, and no variable name changes (for example, stream1 cannot be renamed to images1). If the user does not change a value, the setting below is used as the default. The program runs forever unless the user shuts it off.
37 |
38 | example
39 | ===========
40 | batch_size=8
41 | width=1280
42 | height=720
43 | stream1=/opt/nvidia/deepstream/deepstream-5.0/sources/apps/sample_apps/deepstream-segmentation-analytics/images1
44 | stream2=/opt/nvidia/deepstream/deepstream-5.0/sources/apps/sample_apps/deepstream-segmentation-analytics/images2
45 | pro_per_sec=40
46 | no_streams=3
47 | production=1
48 |
49 | user defined parameters
50 | =======================
51 |
52 | batch_size : how many total images to load each time for a stream directory
53 | width : output image width
54 | height : output image height
55 | stream1 : path to image1 directory
56 | stream2 : path to image2 directory
57 | pro_per_sec : how many seconds to wait before a new segmentation run
58 | no_streams : number of stream directories
59 | production : 1 for real production env. 0 for Nvidia helm-chart env.
60 |
61 |
62 | Example command to run:
63 | =======================
64 |
65 | For binary segmentation:
66 |
67 | $ ./deepstream-segmentation-analytics -c dstest_segmentation_config_industrial.txt -i usr_input.txt
68 |
69 | For multi-class semantic segmentation:
70 |
71 | $ ./deepstream-segmentation-analytics -c dstest_segmentation_config_semantic.txt -i usr_input.txt
72 |
73 |
74 | Other Note
75 | ==========
76 | for the Helm-Chart env., a segmentation run generates out.jpg as the mask ground truth
77 | for the production env., the masks directory will hold all of the mask ground truth pictures and the input directory will save the input images in case they are needed for the retrain purpose
78 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------
2 | # This sample application is no longer maintained
3 | # ------------------------------------------------------
4 |
5 | *****************************************************************************
6 | * Copyright (c) 2019-2021 NVIDIA Corporation. All rights reserved.
7 | *
8 | * NVIDIA Corporation and its licensors retain all intellectual property
9 | * and proprietary rights in and to this software, related documentation
10 | * and any modifications thereto. Any use, reproduction, disclosure or
11 | * distribution of this software and related documentation without an express
12 | * license agreement from NVIDIA Corporation is strictly prohibited.
13 | *
14 | *****************************************************************************
15 | # Introduction
16 |
17 | This deepstream-segmentation-analytics application uses the Nvidia DeepStream-5.1 SDK
18 | to generate the segmentation ground truth JPEG output to display the industrial component defect.
19 | It simulates a real industrial production line environment. The app runs 24 hours a day until it is shut off.
20 | This version of the apps can also be run under the Nvidia internal helm-chart env. after setting the production=0 in the user defined input file.
21 |
22 |
23 |
24 | # Prerequisites:
25 |
26 | Please follow instructions in the apps/sample_apps/deepstream-app/README on how
27 | to install the prerequisites for the DeepStream SDK, the DeepStream SDK itself and the
28 | apps.
29 |
30 |
31 | One must have the following development packages installed
32 | * GStreamer-1.0
33 | * GStreamer-1.0 Base Plugins
34 | * GStreamer-1.0 gstrtspserver
35 | * X11 client-side library
36 | * DeepStream-5.1 SDK : https://docs.nvidia.com/metropolis/deepstream/dev-guide/index.html
37 |
38 |
39 |
40 | # DeepStream Pipeline
41 | * DeepStream SDK is based on the GStreamer framework. GStreamer is a pipeline based multimedia framework that links together a wide variety of media processing systems to complete workflows. Following is the pipleline for this segmentation application. It supports for both binary and multi class model for the segmentation.
42 | 
43 |
44 |
45 |
46 | # This DeepStream Segmentation Apps Overview
47 | * The usr_input.txt gathers the user input information as in the following example:
48 |
49 | * batch_size - how many images will need going through the segmentation process for a stream directory
50 |
51 | * width - the output jpg file width (usually same as the input image width)
52 |
53 | * height -the output jpg file height (usually same as the input image height)
54 |
55 | * stream0 - /path/for/the/images0/dir. stream1 through streamN follow the same fashion.
56 |
57 | * pro_per_sec - repeat the segmentation run after how many seconds. N/A to helm-chart env. case
58 |
59 | * no_streams - how many stream dirs are in the env.
60 |
61 | * production - 1 for real production env. case. 0 for the Nvidia internal helm-chart env.
62 |
63 | * User needs to download the dataset: Class7 from the DAGM 2007 [1] and put the images into the image directory
64 |
65 | * Each time of apps run, it will go through all the stream directory, i.e, stream0, stream1, streamN to perform a batch size image segmentation
66 |
67 | * To perform a batch size image access for stream0, stream1, streamN; if the image dir. is empty, it will not do anything
68 |
69 | * After an image is read from a stream dir., then it will be deleted in that dir.
70 |
71 | * The output jpg file will be saved in the masks directory with the unique name while the input file will be saved in input directory
72 |
73 | * The saved output and input files can be used for the re-training purpose to improve the segmentation accuracy
74 |
75 | * If production=0 (for helm-chart env.), then the input images will not be deleted while no files will be saved in the input and mask dir. Also the out.jpg file as the segmentation ground truth file will be save in the directory in case for view.
76 |
77 |
78 |
79 |
80 |
81 | # Nvidia Transfer Learning Toolkit 3.0 (Training / Evaluation / Export / Converter)
82 |
83 |
84 | * Please read the Nvidia TLT-3.0 document : https://developer.nvidia.com/tlt-get-started
85 |
86 | * Please follow https://docs.nvidia.com/metropolis/TLT/tlt-user-guide to download TLT Jupyter Notebook and TLT converter
87 |
88 | 
89 |
90 |
91 |
92 |
93 | # Nvidia Transfer Learning Toolkit 3.0 User Guide on the UNET Used for the Segmentation
94 |
95 | * https://docs.nvidia.com/metropolis/TLT/tlt-user-guide/text/semantic_segmentation/unet.html#training-the-model
96 |
97 |
98 |
99 |
100 |
101 | * Use the Jupyter Notebook for the UNET training based on the DAGM-2007 Dataset on Class7
102 |
103 | * Use the TLT to generate the .etlt and .enginer file for the DeepStream application deployment
104 |
105 | * For the DAGM-2007 Class7 dataset[1], it misses the mask file as training label for each good image (without defect)
106 |
107 | * One needs to create a black grayscale image as a mask file for the good images without defect in order to use TLT for re-training
108 |
109 | * dummy_image.py can be used to create the above mentioned mask file
110 |
111 |
112 |
113 |
114 |
115 |
116 | # Deploying the Apps to DeepStream-5.1 Using Transfer Learning Toolkit-3.0
117 |
118 | * Use the .etlt or .engine file after TLT train, export, and coverter
119 |
120 | * Use the Jetson version of the tlt converter to generate the .engine file used in the Jetson devices
121 |
122 | Generate .engine file as an example: ./tlt-converter -k $key -e trt.fp16.tlt.unet.engine -t fp16 -p input_1 1,1x3x320x320, 4x3x320x320,16x3x320x320 model_unet.etlt
123 | here: $key is the key when do the tlt train and 320x320 is the input/training image size as example
124 |
125 | * Define the .etlt or .engine file path in the config file for dGPU and Jetson for the DS-5.1 application
126 |
127 | * example: model-engine-file = ../../models/unet/trt.fp16.tlt.unet.engine in dstest_segmentation_config_industrial.txt
128 |
129 |
130 |
131 |
132 | # How to Compile the Application Package
133 | * git clone this application into /opt/nvidia/deepstream/deepstream-5.1/sources/apps/sample_apps
134 |
135 | * $ cd deepstream-segmentation-analytics
136 |
137 | * $ make
138 |
139 |
140 |
141 | # How to Run this DeepStream Segmentation Application
142 |
143 | * make the models dir. in the deepstream-segmentation-analytics and copy the trt.fp16.tlt.unet.engine (as example) into models dir.
144 |
145 | * $ ./deepstream-segmentation-analytics -c dstest_segmentation_config_industrial.txt -i usr_input.txt -for binary segmentation
146 |
147 | * $ ./deepstream-segmentation-analytics -c dstest_segmentation_config_semantic.txt -i usr_input.txt -for multi class
148 |
149 | * The program run will generate the output jpg as the masked ground truth after the segmentation which is saved in the masks directory.
150 |
151 | 
152 |
153 |
154 |
155 |
156 | # The performance using different GPU devices
157 |
158 | 
159 |
160 |
161 |
162 |
163 | # References
164 |
165 | * [1] All the images are from the DAGM 2007 competition dataset: https://www.kaggle.com/mhskjelvareid/dagm-2007-competition-dataset-optical-inspection
166 |
167 | * [2] DAGM-2007 License information reference file: CDLA-Sharing-v1.0.pdf
168 |
169 | * [3] Nvidia DeepStream Referenced Unet Models: https://github.com/qubvel/segmentation_models
170 |
171 | * [4] The example Jupyter Notebook program for Unet training process
172 | https://github.com/qubvel/segmentation_models/blob/master/examples/binary%20segmentation%20(camvid).ipynb
173 |
174 |
--------------------------------------------------------------------------------
/deepstream_segmentation_app.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #include
24 | #include
25 | #include
26 | #include
27 | #include
28 | #include
29 | #include
30 | #include
31 | #include
32 | #include
33 | #include
34 | #include
35 | #include
36 | #include
37 | using namespace std;
38 |
39 | #include "gstnvdsmeta.h"
40 | #include "gst-nvmessage.h"
41 |
42 | /* The muxer output resolution must be set if the input streams will be of
43 | * different resolution. The muxer will scale all the input frames to this
44 | * resolution. */
45 |
46 | static gint MUXER_OUTPUT_WIDTH;
47 | static gint MUXER_OUTPUT_HEIGHT;
48 |
49 | /* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
50 | * based on the fastest source's framerate. */
51 | //when loading more pictures, MUXER_BATCH_TIMEOUT_USEC needs to be increased to
52 | //avoid empty display holes in the .jpg file
53 | //If many image files are used per run, set the following to -1. This makes the muxer wait for all sources to be up; but if one of the images is offline, it hangs the pipeline
54 |
55 | #define MUXER_BATCH_TIMEOUT_USEC -1
56 |
57 | static gint TILED_OUTPUT_WIDTH;
58 | static gint TILED_OUTPUT_HEIGHT;
59 | //#define TILED_OUTPUT_WIDTH 1024
60 | //#define TILED_OUTPUT_HEIGHT 1024
61 |
62 | #define NVINFER_PLUGIN "nvinfer"
63 | #define NVINFERSERVER_PLUGIN "nvinferserver"
64 |
65 |
66 | static int production = 0;
67 | static int pro_per_sec;
68 | static int no_streams;
69 | static int stream_index = 0;
70 | static guint num_sources = 1;
71 | static gint MAX_NUM_FILE = 8;
72 |
73 | static gint frame_number;
74 | static struct timeval g_start;
75 | static struct timeval g_end;
76 | static struct timeval current_time;
77 | static float g_accumulated_time_macro = 0;
78 | static gint pic_no = 0;
79 |
80 | static void profile_start() {  /* record the profiling start timestamp in the g_start global */
81 | gettimeofday(&g_start, 0);
82 | }
83 |
84 | static void profile_end() {  /* record the profiling end timestamp in the g_end global */
85 | gettimeofday(&g_end, 0);
86 | }
87 |
88 | static float profile_delta() {  /* return the accumulated g_start..g_end time, in whole seconds */
89 |
90 | int delta;  /* NOTE(review): int truncates to whole seconds even though the function returns float -- presumably intended for the "del % pro_per_sec" test in main; confirm */
91 | g_accumulated_time_macro += 1000000 * (g_end.tv_sec - g_start.tv_sec)
92 | + g_end.tv_usec - g_start.tv_usec;
93 | delta = g_accumulated_time_macro/1000000;  /* accumulated microseconds -> seconds */
94 | //std::cout << "The Delta time = " << delta << std::endl;
95 | return delta;
96 | }
97 |
98 |
99 | static void profile_result() {  /* print the average frame rate over the accumulated profiling time */
100 |
101 | frame_number = MAX_NUM_FILE;  /* one processed frame per input file in the batch */
102 |
103 | g_accumulated_time_macro += 1000000 * (g_end.tv_sec - g_start.tv_sec)
104 | + g_end.tv_usec - g_start.tv_usec;
105 | float fps = (float)((frame_number) / (float)(g_accumulated_time_macro/1000000));  /* NOTE(review): profile_delta()/cpu_profile() also add into g_accumulated_time_macro -- verify the same interval is not counted twice */
106 | std::cout << "The average frame rate is " << fps<< ", frame num " << frame_number << ", time accumulated " << g_accumulated_time_macro/1000000 << std::endl;
107 |
108 | }
109 |
110 |
111 | static void cpu_profile() {  /* print the total accumulated wall-clock time for the processed frames */
112 |
113 | frame_number = MAX_NUM_FILE;  /* one processed frame per input file */
114 |
115 | g_accumulated_time_macro += 1000000 * (g_end.tv_sec - g_start.tv_sec)
116 | + g_end.tv_usec - g_start.tv_usec;
117 | std::cout << "For frame = " << frame_number << ", CPU time accumulated " << g_accumulated_time_macro/1000000 << std::endl;
118 |
119 | }
120 |
121 |
122 | /* Buffer probe installed on the segmentation (nvinfer) element's src pad:
123 | * walks the per-frame DeepStream metadata attached to each buffer. */
124 | static GstPadProbeReturn
125 | tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
126 | gpointer u_data)
127 | {
128 | GstBuffer *buf = (GstBuffer *) info->data;
129 | NvDsMetaList * l_frame = NULL;
130 | NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);  /* NOTE(review): not NULL-checked -- assumes batch meta is always attached upstream; confirm */
131 |
132 | for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
133 | l_frame = l_frame->next) {
134 | // TODO: per-frame segmentation metadata could be inspected here; currently a no-op
135 | }
136 | return GST_PAD_PROBE_OK;  /* pass the buffer downstream unmodified */
137 | }
138 |
139 | static gboolean
140 | bus_call (GstBus * bus, GstMessage * msg, gpointer data)
141 | {  /* GLib bus watch: handles pipeline EOS, warnings, errors and DeepStream per-stream EOS */
142 | GMainLoop *loop = (GMainLoop *) data;
143 | switch (GST_MESSAGE_TYPE (msg)) {
144 | case GST_MESSAGE_EOS:  /* whole pipeline reached end of stream */
145 | g_print ("End of stream\n\n\n");
146 | // Add the delay to show the result
147 | usleep(1000000);
148 | g_main_loop_quit (loop);
149 | break;
150 | case GST_MESSAGE_WARNING:
151 | {
152 | gchar *debug;
153 | GError *error;
154 | gst_message_parse_warning (msg, &error, &debug);
155 | g_printerr ("WARNING from element %s: %s\n",
156 | GST_OBJECT_NAME (msg->src), error->message);
157 | g_free (debug);
158 | g_printerr ("Warning: %s\n", error->message);
159 | g_error_free (error);
160 | break;
161 | }
162 | case GST_MESSAGE_ERROR:  /* fatal: report and stop the main loop */
163 | {
164 | gchar *debug;
165 | GError *error;
166 | gst_message_parse_error (msg, &error, &debug);
167 | g_printerr ("ERROR from element %s: %s\n",
168 | GST_OBJECT_NAME (msg->src), error->message);
169 | if (debug)
170 | g_printerr ("Error details: %s\n", debug);
171 | g_free (debug);
172 | g_error_free (error);
173 | g_main_loop_quit (loop);
174 | break;
175 | }
176 | case GST_MESSAGE_ELEMENT:
177 | {
178 | if (gst_nvmessage_is_stream_eos (msg)) {  /* DeepStream custom message: one input source finished */
179 | guint str_id;
180 | if (gst_nvmessage_parse_stream_eos (msg, &str_id)) {
181 | pic_no++;  /* count sources that have reached EOS */
182 | g_print ("Got EOS from stream %d\n", str_id);
183 | if(pic_no == MAX_NUM_FILE){  /* all sources done: stop timing and report FPS */
184 | //get the profiling data
185 | profile_end();
186 | profile_result();
187 | }
188 | }
189 | }
190 | break;
191 | }
192 | default:
193 | break;
194 | }
195 | return TRUE;  /* keep this bus watch installed */
196 | }
197 |
198 | static GstElement *
199 | create_source_bin (guint index, gchar * uri)
200 | {  /* build a filesrc -> jpegparse -> nvv4l2decoder bin exposing one "src" ghost pad; returns NULL on failure */
201 | GstElement *bin = NULL;
202 | gchar bin_name[16] = { };
203 |
204 | g_snprintf (bin_name, 15, "source-bin-%02d", index);
205 | /* Create a source GstBin to abstract this bin's content from the rest of the
206 | * pipeline */
207 | bin = gst_bin_new (bin_name);
208 |
209 | GstElement *source, *jpegparser, *decoder;
210 |
211 | source = gst_element_factory_make ("filesrc", "source");
212 |
213 | jpegparser = gst_element_factory_make ("jpegparse", "jpeg-parser");
214 |
215 | decoder = gst_element_factory_make ("nvv4l2decoder", "nvv4l2-decoder");
216 |
217 | if (!source || !jpegparser || !decoder)
218 | {
219 | g_printerr ("One element could not be created. Exiting.\n");
220 | return NULL;
221 | }
222 | g_object_set (G_OBJECT (source), "location", uri, NULL);
223 | const char *dot = strrchr(uri, '.');  /* NOTE(review): NULL when uri has no '.', making dot+1 below undefined -- confirm callers always pass a file name with an extension */
224 | if ((!strcmp (dot+1, "mjpeg")) || (!strcmp (dot+1, "mjpg")))
225 | {
226 | #ifdef PLATFORM_TEGRA
227 | g_object_set (G_OBJECT (decoder), "mjpeg", 1, NULL);
228 | #endif
229 | }
230 |
231 | gst_bin_add_many (GST_BIN (bin), source, jpegparser, decoder, NULL);
232 |
233 | gst_element_link_many (source, jpegparser, decoder, NULL);  /* NOTE(review): link result is not checked */
234 |
235 | /* We need to create a ghost pad for the source bin which will act as a proxy
236 | * for the video decoder src pad. The ghost pad will not have a target right
237 | * now. Once the decode bin creates the video decoder and generates the
238 | * cb_newpad callback, we will set the ghost pad target to the video decoder
239 | * src pad. */
240 | if (!gst_element_add_pad (bin, gst_ghost_pad_new_no_target ("src",
241 | GST_PAD_SRC))) {
242 | g_printerr ("Failed to add ghost pad in source bin\n");
243 | return NULL;
244 | }
245 |
246 | GstPad *srcpad = gst_element_get_static_pad (decoder, "src");
247 | if (!srcpad) {
248 | g_printerr ("Failed to get src pad of source bin. Exiting.\n");
249 | return NULL;
250 | }
251 | GstPad *bin_ghost_pad = gst_element_get_static_pad (bin, "src");
252 | if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
253 | srcpad)) {
254 | g_printerr ("Failed to link decoder src pad to source bin ghost pad\n");
255 | }
256 |
257 | return bin;
258 | }
259 |
260 | static void printUsage(const char* cmd) {  /* print command-line usage help to stderr; fixes "tatch"/"baitch" typos and the -i description (main's getopt reads -i as the usr_input file, not a width) */
261 | g_printerr ("\tUsage: %s -c dstest_segmentation_config_industrial.txt -i usr_input.txt -b batch-size -h img_out_height -w img_out_width -d image_dir\n", cmd);
262 | g_printerr ("-h: \n\timage output height \n");
263 | g_printerr ("-i: \n\tusr input file, e.g. usr_input.txt \n");
264 | g_printerr ("-c: \n\tseg config file, e.g. dstest_segmentation_config_industrial.txt \n");
265 | g_printerr ("-b: \n\tbatch size, this will override the value of \"batch-size\" in config file \n");
266 | g_printerr ("-d: \n\tThe image directory \n");
267 | }
268 |
269 |
270 | int
271 | main (int argc, char *argv[])
272 | {
273 |
274 | //define the GstElement pointer
275 | GMainLoop *loop = NULL;
276 | GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *seg = NULL,
277 | *nvsegvisual = NULL, *tiler = NULL, *nvvidconv = NULL,
278 | *parser = NULL, *parser1 = NULL, *source = NULL, *enc = NULL,
279 | *nvvidconv1 = NULL, *decoder = NULL, *tee = NULL, *nvdsosd = NULL;
280 |
281 |
282 | #ifdef PLATFORM_TEGRA
283 | GstElement *transform = NULL;
284 | #endif
285 | GstBus *bus = NULL;
286 | guint bus_watch_id;
287 | GstPad *seg_src_pad = NULL;
288 | guint i;
289 | guint tiler_rows, tiler_columns;
290 | guint pgie_batch_size;
291 | std::string seg_config;
292 | std::string usr_config;
293 | guint c;
294 | const char* optStr = "b:c:d:h:i:";
295 | std::string input_file;
296 | std::string image_dir;
297 | std::string in_file;
298 | std::string file_out;
299 | std::string out_file[2000];
300 |
301 | GList *files = NULL;
302 | gboolean is_nvinfer_server = FALSE;
303 | gchar infer_config_file[200];
304 | gchar usr_config_file[200];
305 | guint batchSize = 1;
306 | guint height, width;
307 |
308 | struct dirent *pDirent;
309 | DIR *pDir;
310 | char pic_file[100];
311 | char buff[200];
312 | char argv2[2000][2000];
313 | char file[2000][2000];
314 | char streams[200][200];
315 | int idx, idx2, r;
316 | int num_pic=0;
317 | int w_loop = 0;
318 |
319 | string images;
320 | string line;
321 | string result;
322 | string delimiter = "=";
323 | string delimiter2 = " ";
324 | size_t pos = 0;
325 | string token;
326 | string cmd;
327 | int del;
328 |
329 | profile_start();
330 | printf("Get CPU profile_start()\n");
331 |
332 | //process the command line argument
333 | while ((c = getopt(argc, argv, optStr)) != -1) {
334 | switch (c) {
335 | case 'c':
336 | seg_config.assign(optarg);
337 | strcpy(infer_config_file, seg_config.c_str());
338 | break;
339 | case 'i':
340 | usr_config.assign(optarg);
341 | strcpy(usr_config_file, usr_config.c_str());
342 | break;
343 | default:
344 | printUsage(argv[0]);
345 | return -1;
346 | }
347 | }
348 |
349 | /* Check input arguments */
350 | if (argc < 5) {
351 | printUsage(argv[0]);
352 | return -1;
353 | }
354 |
355 | //process the usr_input.txt as the input for the helm chart
356 | ifstream myfile ("usr_input.txt");
357 | if (myfile.is_open())
358 | {
359 | while ( getline (myfile,line) )
360 | {
361 | printf("Get the line: %s\n", line.c_str());
362 | while ((pos = line.find(delimiter)) != std::string::npos) {
363 | token = line.substr(0, pos);
364 | //std::cout << token << std::endl;
365 | line.erase(0, pos + delimiter.length());
366 | if((token.compare("batch_size")) == 0){
367 | token = line.substr(0, pos);
368 | //std::cout << token << std::endl;
369 | batchSize = stoi(token);
370 | }else if((token.compare("width")) == 0){
371 | token = line.substr(0, pos);
372 | //std::cout << token << std::endl;
373 | width = stoi(token);
374 | }else if((token.compare("height")) == 0){
375 | token = line.substr(0, pos);
376 | //std::cout << token << std::endl;
377 | height = stoi(token);
378 | }else if((token.compare("pro_per_sec")) == 0){
379 | token = line.substr(0, pos);
380 | //std::cout << token << std::endl;
381 | pro_per_sec = stoi(token);
382 | }else if((token.compare("no_streams")) == 0){
383 | token = line.substr(0, pos);
384 | //std::cout << token << std::endl;
385 | no_streams = stoi(token);
386 | }else if((token.compare("production")) == 0){
387 | token = line.substr(0, pos);
388 | //std::cout << token << std::endl;
389 | production = stoi(token);
390 | }else{
391 | result = token.substr (0,6);
392 | if(result.compare("stream") == 0){
393 | std::cout << token << std::endl;
394 | strcpy(streams[stream_index++], line.c_str());
395 | }
396 | }
397 | }
398 | }
399 | }else{
400 | cout << "Unable to open file";
401 | return 0;
402 | }
403 | myfile.close();
404 |
405 | printf("batchSize = %d, width = %d, height = %d\n", batchSize, width, height);
406 | printf("no_streams = %d, pro_per_sec = %d\n", no_streams, pro_per_sec);
407 | printf("production = %d\n", production);
408 |
409 | MAX_NUM_FILE = num_sources;
410 | MUXER_OUTPUT_WIDTH = width;
411 | MUXER_OUTPUT_HEIGHT = height;
412 | TILED_OUTPUT_WIDTH = width;
413 | TILED_OUTPUT_HEIGHT = height;
414 |
415 | printf("Get the batchSize = %d\n", batchSize);
416 | printf("Get the num_sources = %d\n", num_sources);
417 | printf("Get the infer_config_file = %s\n", infer_config_file);
418 | printf("Get the MUXER_OUTPUT_WIDTH = %d\n", MUXER_OUTPUT_WIDTH);
419 | printf("Get the MUXER_OUTPUT_HEIGHT = %d\n", MUXER_OUTPUT_HEIGHT);
420 |
421 |
422 | //loop forever until shut off
423 | while(1){
424 |
425 | //to get time delta
426 | profile_end();
427 | del = profile_delta();
428 |
429 | //start segmentation every pro_er_sec from usr_input.txt
430 | if(((del % pro_per_sec) == 0) && (del != 0)){
431 |
432 | for(int k=0; kd_name,".") != 0) &&
446 | (strcmp(pDirent->d_name,"..") != 0)){
447 | idx = num_pic;
448 | strcpy(pic_file, image_dir.c_str());
449 | strcat(pic_file, "/");
450 | strcat(pic_file, pDirent->d_name);
451 | strcpy(argv2[idx], pic_file);
452 | out_file[idx] = pDirent->d_name;
453 | //printf("Got the pic file: %s with idx=%d\n", argv2[idx], idx);
454 | num_pic++;
455 | }
456 | }
457 | closedir (pDir);
458 |
459 | if(!num_pic){ //after 0 increament
460 | printf ("There is NO images in the directory! \n");
461 | //return 1;
462 | }
463 |
464 | if(num_pic < batchSize){
465 | batchSize = num_pic; //in case of num_pic is less
466 | }
467 |
468 |
469 | //process the Gst pipeline
470 | for (int j=0; j nvinfer -> nvsegvidsual -> nvtiler -> filesink */
653 | if (!gst_element_link_many (streammux, seg, nvsegvisual, tiler,
654 | nvvidconv, nvdsosd, nvvidconv1, enc, parser1, sink, NULL))
655 | {
656 | g_printerr ("Elements could not be linked. Exiting.\n");
657 | return -1;
658 | }
659 |
660 | #else
661 |
662 | gst_bin_add_many (GST_BIN (pipeline), seg, nvsegvisual, tiler,
663 | nvvidconv, nvdsosd, nvvidconv1, enc, parser1, sink, NULL);
664 |
665 | /* Link the elements together
666 | * nvstreammux -> nvinfer -> nvsegvisual -> nvtiler -> video-renderer */
667 |
668 | if (!gst_element_link_many (streammux, seg, nvsegvisual, tiler, nvvidconv, nvdsosd, nvvidconv1, enc, parser1, sink, NULL)) {
669 | g_printerr ("Elements could not be linked. Exiting.\n");
670 | return -1;
671 | }
672 |
673 | #endif
674 |
675 | /* Lets add probe to get informed of the meta data generated, we add probe to
676 | * the src pad of the nvseg element, since by that time, the buffer would have
677 | * had got all the segmentation metadata. */
678 | seg_src_pad = gst_element_get_static_pad (seg, "src");
679 | if (!seg_src_pad)
680 | g_print ("Unable to get src pad\n");
681 | else
682 | gst_pad_add_probe (seg_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
683 | tiler_src_pad_buffer_probe, NULL, NULL);
684 | gst_object_unref (seg_src_pad);
685 |
686 | /* Set the pipeline to "playing" state */
687 | g_print ("\nNow playing:");
688 | g_print (" %s,", (char *)g_list_nth_data(files, j));
689 | g_print ("\n");
690 | gst_element_set_state (pipeline, GST_STATE_PLAYING);
691 |
692 | /* Wait till pipeline encounters an error or EOS */
693 | g_print ("Running...\n");
694 |
695 | //start the main loop and perform the profile check
696 | profile_start();
697 | g_main_loop_run (loop);
698 |
699 | /* Out of the main loop, clean up nicely */
700 | g_print ("Returned, stopping playback\n");
701 | gst_element_set_state (pipeline, GST_STATE_NULL);
702 | g_print ("Deleting pifile_outpeline\n");
703 | gst_object_unref (GST_OBJECT (pipeline));
704 | g_source_remove (bus_watch_id);
705 | g_main_loop_unref (loop);
706 |
707 | //save the output ground truth file masks dir.
708 | if(production){
709 | cmd = "mv " + file_out + " mask";
710 | system(cmd.c_str());
711 | printf("\nMove the file: %s into the mask directory\n", file_out.c_str());
712 |
713 | //remove the input file used
714 | cmd = "rm -f " + in_file;
715 | printf("Delete the file: %s\n\n", in_file.c_str());
716 | system(cmd.c_str());
717 | }
718 |
719 | }//end of for (int j=0; j