├── .gitignore
├── CMakeLists.txt
├── LICENSE
├── README
├── README.md
├── build.sh
├── cmake
│   └── FindGStreamer.cmake
├── config
│   ├── facedetect
│   │   ├── aiinference.json
│   │   ├── drawresult.json
│   │   └── preprocess.json
│   ├── refinedet
│   │   ├── aiinference.json
│   │   ├── drawresult.json
│   │   └── preprocess.json
│   └── ssd
│       ├── aiinference.json
│       ├── drawresult.json
│       ├── label.json
│       └── preprocess.json
├── notebook
│   ├── LICENSE
│   ├── images
│   │   └── xilinx_logo.png
│   └── smartcam.ipynb
├── script
│   ├── 01.mipi-rtsp.sh
│   ├── 02.mipi-dp.sh
│   ├── 03.file-file.sh
│   ├── 04.file-ssd-dp.sh
│   ├── rtsp_server.sh
│   └── smartcam-install.py
└── src
    ├── ivas_airender.cpp
    ├── ivas_airender.hpp
    ├── ivas_xpp_pipeline.c
    ├── kernel_boundingbox.json
    ├── kernel_resize_bgr.json
    └── main.cpp
/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 | *.swp
3 | *.swo
4 | *~
5 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright 2019 Xilinx Inc.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | #
16 |
cmake_minimum_required(VERSION 3.5)

project(smartcam)

# Cross-compile installs are staged into the Yocto/OE SDK target sysroot.
# OECORE_TARGET_SYSROOT is exported by the SDK's environment-setup script
# (sourced by build.sh), so it is defined whenever this project is built
# the documented way.
set(CMAKE_INSTALL_PREFIX $ENV{OECORE_TARGET_SYSROOT})

# Make cmake/FindGStreamer.cmake discoverable.
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake")
find_package(GStreamer REQUIRED)
find_package(OpenCV REQUIRED)

# All artifacts land under <prefix>/opt/xilinx (note: deliberately a
# relative path so CPack can re-root it at "/").
set(INSTALL_PATH "opt/xilinx")

# C++ language level, instead of hand-written "-std=c++14" in CMAKE_CXX_FLAGS.
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Shared C++ compile/link settings as an INTERFACE target, replacing the old
# global CMAKE_CXX_FLAGS string mangling (which also wrongly pushed the
# linker-only flags -rdynamic and -Wl,--no-undefined into compile flags).
# Only the C++ targets consume this; the C plugin (ivas_xpp) never saw
# CMAKE_CXX_FLAGS and keeps its default flags.
add_library(smartcam_cxx_settings INTERFACE)
target_compile_options(smartcam_cxx_settings INTERFACE
    -Wno-narrowing
    -fpermissive
    -pthread)
# Leading-dash items in link libraries are forwarded verbatim to the linker
# driver (target_link_options would need CMake >= 3.13).
target_link_libraries(smartcam_cxx_settings INTERFACE
    -rdynamic
    "-Wl,--no-undefined"
    -pthread)

# IVAS preprocessing plugin (C).
add_library(ivas_xpp SHARED src/ivas_xpp_pipeline.c)
target_include_directories(ivas_xpp PRIVATE ${GSTREAMER_INCLUDE_DIRS})
target_link_libraries(ivas_xpp PRIVATE
    jansson ivasutil gstivasinfermeta-1.0)
install(TARGETS ivas_xpp DESTINATION ${INSTALL_PATH}/lib)

# IVAS AI-result rendering plugin (C++, draws bounding boxes with OpenCV).
add_library(ivas_airender SHARED src/ivas_airender.cpp)
target_include_directories(ivas_airender PRIVATE ${GSTREAMER_INCLUDE_DIRS})
target_link_libraries(ivas_airender PRIVATE
    smartcam_cxx_settings
    jansson ivasutil gstivasinfermeta-1.0
    gstreamer-1.0 gstbase-1.0 glib-2.0
    opencv_core opencv_video opencv_videoio opencv_imgproc)
install(TARGETS ivas_airender DESTINATION ${INSTALL_PATH}/lib)

# Main application (glib-2.0/gobject-2.0 were listed twice before; once is enough).
add_executable(${CMAKE_PROJECT_NAME} src/main.cpp)
target_include_directories(${CMAKE_PROJECT_NAME} PRIVATE ${GSTREAMER_INCLUDE_DIRS})
target_link_libraries(${CMAKE_PROJECT_NAME} PRIVATE
    smartcam_cxx_settings
    gstapp-1.0 gstreamer-1.0 gstbase-1.0 gobject-2.0 glib-2.0
    gstvideo-1.0 gstallocators-1.0 gstrtsp-1.0 gstrtspserver-1.0)
install(TARGETS ${CMAKE_PROJECT_NAME} DESTINATION ${INSTALL_PATH}/bin)

# Helper scripts installed next to the binary.
install(PROGRAMS
    script/01.mipi-rtsp.sh
    script/02.mipi-dp.sh
    script/03.file-file.sh
    script/04.file-ssd-dp.sh
    script/smartcam-install.py
    DESTINATION ${INSTALL_PATH}/bin)

# README is renamed on install to avoid clashing with other apps' READMEs.
install(FILES
    README
    DESTINATION ${INSTALL_PATH}/
    RENAME README_SMARTCAM)

# Per-AI-task IVAS configuration (preprocess/aiinference/drawresult json).
install(DIRECTORY
    config/facedetect
    config/refinedet
    config/ssd
    DESTINATION ${INSTALL_PATH}/share/ivas/${CMAKE_PROJECT_NAME}/)

# SSD label map goes alongside the Vitis-AI model it belongs to.
install(FILES
    config/ssd/label.json
    DESTINATION ${INSTALL_PATH}/share/vitis_ai_library/models/kv260-${CMAKE_PROJECT_NAME}/ssd_adas_pruned_0_95/)

install(DIRECTORY
    notebook/
    DESTINATION ${INSTALL_PATH}/share/notebooks/${CMAKE_PROJECT_NAME}/)

# --- RPM packaging ----------------------------------------------------------
set(VERSION "1.0.1")
set(CPACK_PACKAGE_VERSION ${VERSION})
set(CPACK_GENERATOR "RPM")
set(CPACK_PACKAGE_NAME "${CMAKE_PROJECT_NAME}")
set(CPACK_PACKAGE_RELEASE 1)
set(CPACK_PACKAGE_CONTACT "Yuxiz@xilinx.com")
set(CPACK_PACKAGE_VENDOR "xilinx")
set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "arm64")
set(CPACK_RPM_PACKAGE_ARCHITECTURE ${CMAKE_SYSTEM_PROCESSOR})
# Re-root the staged "opt/xilinx" tree at "/" inside the package.
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}.${CMAKE_SYSTEM_PROCESSOR}")
# Suppress build-id symlink generation, which breaks multi-app installs.
set(CPACK_RPM_SPEC_MORE_DEFINE "%define _build_id_links none")
include(CPack)
97 |
98 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2021 Xilinx Inc.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
1 | /******************************************************************************
2 | * Copyright (C) 2010 - 2021 Xilinx, Inc. All rights reserved.
3 | * SPDX-License-Identifier: Apache-2.0
4 | ******************************************************************************/
5 |
6 | 1. Development Guide:
7 |
8 | If you want to cross compile the source in Linux PC machine, follow these steps, otherwise skip this section.
9 |
10 | 1) Refer to the `K260 SOM Starter Kit Tutorial` to build the cross-compilation SDK, and install it to the path you choose or default. Suppose it's SDKPATH.
11 |
12 | 2) Run "./build.sh ${SDKPATH}" in the source code folder of current application, to build the application.
13 |
14 | 3) The build process in 1.2) will produce a rpm package smartcam-1.0.1-1.aarch64.rpm under build/, upload to the board, and run `rpm -ivh --force ./smartcam-1.0.1-1.aarch64.rpm` to update install.
15 |
16 | 2. Setting up the Board
17 |
18 | 1) Get the SD Card Image from (http://xilinx.com/) and follow the instructions in UG1089 to burn the SD card. And install the SD card to J11.
19 |
20 | 2) Hardware Setup:
21 |
22 | * Monitor:
23 |
24 | Before booting, connect a 1080P/4K monitor to the board via either DP or HDMI port.
25 |
26 | 4K monitor is preferred to demonstrate at the maximum supported resolution.
27 |
28 | * IAS sensor:
29 |
30 | Before power on, install an AR1335 sensor module in J7.
31 |
32 | * UART/JTAG interface:
33 |
34 | For interacting and seeing boot-time information, connect a USB debugger to the J4.
35 |
36 | * You may also use a USB webcam as an input device.
37 |
38 | The webcam is optional video input device supported in the application.
39 |
40 | Recommended webcam is the [Logitech BRIO](https://www.logitech.com/en-in/products/webcams/brio-4k-hdr-webcam.960-001105.html).
41 |
42 | * Network connection:
43 |
44 | Connect the Ethernet cable to your local network with DHCP enabled to install packages and run Jupyter Notebooks
45 |
46 | * Audio Pmod setup as RTSP audio input:
47 |
48 | Audio Pmod is optional audio input and output device.
49 |
50 | In smartcam application, only RTSP mode uses the audio input function to capture audio. Audio is then sent together with the images as RTSP stream and can be received at the client side.
51 |
52 | To set it up, first install the Pmod to J2, then connect a microphone or any other sound input device to the line input port(https://store.digilentinc.com/pmod-i2s2-stereo-audio-input-and-output/). A headphone with a microphone will not work - device needs to be a dedicated input.
53 |
54 | Smartcam application does not yet support speakers.
55 |
56 | 3) Software Preparation:
57 |
58 | You will use a PC having network access to the board as the RTSP client machine.
59 |
60 | Make sure that the PC and the KV260 Vision AI Starter Kit are on the same subnet mask.
61 |
62 | On the client machine, to receive and play the RTSP stream, we recommend to install FFplay which is part of FFmpeg package.
63 |
64 | For Linux, you can install FFmpeg with the package manager of your distribution.
65 |
66 | For Windows, you can find install instructions on https://ffmpeg.org/download.html
67 |
68 | Other than FFplay, VLC can also be used to play RTSP stream, but we find sometimes it doesn't work on some client machine, while the FFplay works well.
69 |
70 | 4) Power on the board, login with username `petalinux`, and you need to setup the password for the first time bootup.
71 |
72 | 5) Get the latest application package.
73 |
74 | 1. Get the list of available packages in the feed.
75 |
76 | `sudo xmutil getpkgs`
77 |
78 | 2. Install the package with dnf install:
79 |
80 | `sudo dnf install packagegroup-kv260-smartcam.noarch`
81 |
82 | Note: For setups without access to the internet, it is possible to download and use the packages locally. Please refer to the [Install from a local package feed](local_package_feed.md) for instructions.
83 |
84 | 6) Dynamically load the application package.
85 |
86 | The firmware consist of bitstream, device tree overlay (dtbo) and xclbin file. The firmware is loaded dynamically on user request once Linux is fully booted. The xmutil utility can be used for that purpose.
87 |
88 | 1. Show the list and status of available acceleration platforms and AI Applications:
89 |
90 | `sudo xmutil listapps`
91 |
92 | 2. Switch to a different platform for different AI Application:
93 |
94 | * When xmutil listapps shows that there's no active accelerator, just activate the one you want to use.
95 |
96 | `sudo xmutil loadapp kv260-smartcam`
97 |
98 | * When there's already an accelerator being activated, unload it first, then switch to the one you want.
99 |
100 | `sudo xmutil unloadapp `
101 |
102 | `sudo xmutil loadapp kv260-smartcam`
103 |
104 | 7) Getting demo video files suitable for the application:
105 |
106 | To be able to demonstrate the function of the application in case you have no MIPI or USB camera at hand, we also support a file video source.
107 |
108 | You can download video files from the following links, which is of MP4 format.
109 |
110 | * Facedet / RefineDet AI Task:
111 | * https://pixabay.com/videos/alley-people-walk-street-ukraine-39837/
112 | * ADAS SSD AI Task:
113 | * https://pixabay.com/videos/freeway-traffic-cars-rainy-truck-8358/
114 |
115 | Then you need to transcode it to H264 file which is one supported input format.
116 |
117 | > ffmpeg -i input-video.mp4 -c:v libx264 -pix_fmt nv12 -r 30 output.nv12.h264
118 |
119 | Finally, please upload or copy these transcoded H264 files to the board (by using scp, ftp, or copy onto SD card and finding them in /media/sd-mmcblk0p1/), place it to somewhere under /home/petalinux, which is the home directory of the user you login as.
120 |
121 | 3. Run the Application
122 |
123 | There are two ways to interact with the application.
124 |
125 | ## Jupyter notebook.
126 |
127 | Use a web-browser (e.g. Chrome, Firefox) to interact with the platform.
128 |
129 | The Jupyter notebook URL can be found with the command:
130 |
131 | > sudo jupyter notebook list
132 |
133 | Output example:
134 |
135 | > Currently running servers:
136 | > `http://ip:port/?token=xxxxxxxxxxxxxxxxxx` :: /opt/xilinx/share/notebooks
137 |
138 | ## Command line
139 | These allow the user to define different video input and output device targets using the "smartcam" application. These are to be executed using the UART/debug interface.
140 |
141 | **Notice** The application need to be ran with ***sudo*** .
142 |
143 | ### Example scripts
144 |
145 | Example scripts and options definitions are provided below.
146 |
147 | Refer to [File Structure] to find the files' location.
148 |
149 | * MIPI RTSP server:
150 |
151 | 1. Invoking `sudo 01.mipi-rtsp.sh` will start rtsp server for mipi captured images.
152 |
153 | 2. Script accepts ${width} ${height} as the 1st and 2nd parameter, the default is 1920 x 1080.
154 |
155 | 3. Running the script will give message in the form:
156 |
157 | > stream ready at:
158 | >
159 | > rtsp://boardip:port/test
160 |
161 | Run `ffplay rtsp://boardip:port/test` on the client PC to receive the rtsp stream.
162 |
163 | 4. Checking:
164 |
165 | You should be able to see the images the camera is capturing on the ffplay window, and when there's face captured by camera, there should be blue box drawn around the face, and the box should follow the movement of the face.
166 |
167 | * MIPI DP display:
168 |
169 | 1. Make sure the monitor is connected as described in "Setting up the Board".
170 |
171 | 2. Invoking `sudo 02.mipi-dp.sh` will play the captured video with detection results on monitor.
172 |
173 | 3. Script accepts ${width} ${height} as the 1st and 2nd parameter, the default is 1920 x 1080.
174 |
175 | 4. Checking:
176 |
177 | You should be able to see the images the camera is capturing on the monitor connected to the board, and when there's face captured by the camera, there should be blue box drawn around the face, and the box should follow the movement of the face.
178 |
179 | * File to File
180 |
181 | 1. Invoking `sudo 03.file-file.sh`
182 |
183 | Take the first argument passed to this script as the path to the video file (you can use the demo video for face detection, or similar videos), perform face detection and generate video with detection bbox, save as `./out.h264`
184 |
185 | 2. Checking:
186 |
187 | Play the input video file and generated video file "./out.h264" with any media player you prefer, e.g. VLC, FFPlay. You should be able to see in the output video file, there are blue boxes around the faces of people, and the boxes should follow the movement of the faces, while there're no such boxes with the input video file.
188 |
189 | * File to DP
190 |
191 | 1. Invoking `sudo 04.file-ssd-dp.sh`
192 |
193 | Take the first argument passed to this script as the path to the video file (you can use the demo video for ADAS SSD, or similar videos), perform vehicles detection and generate video with detection bbox, and display onto monitor
194 |
195 | 2. Checking:
196 |
197 | You should be able to see a video of highway driving, with the detection of vehicles in a bounding box.
198 |
199 | ### Additional configuration options for smartcam invocation:
200 |
201 | The example scripts and Jupyter notebook work as examples to show the capability of the smartcam for specific configurations. More combinations could be made based on the options provided by smartcam. User can get detailed application options as following by invoking `smartcam --help`.
202 |
203 | #### Usage:
204 |
205 | ```
206 | smartcam [OPTION?] - Application for face detection on SOM board of Xilinx.
207 |
208 | Help Options:
209 |
210 | -h, --help Show help options
211 | --help-all Show all help options
212 | --help-gst Show GStreamer Options
213 |
214 | Application Options:
215 |
216 | -m, --mipi= use MIPI camera as input source, auto detect, fail if no mipi available.
217 | -u, --usb=media_ID usb camera video device id, e.g. 2 for /dev/video2
218 | -f, --file=file path location of h26x file as input
219 | -i, --infile-type=h264 input file type: [h264 | h265]
220 | -W, --width=1920 resolution w of the input
221 | -H, --height=1080 resolution h of the input
222 | -r, --framerate=30 framerate of the input
223 | -t, --target=dp [dp|rtsp|file]
224 | -o, --outmedia-type=h264 output file type: [h264 | h265]
225 | -p, --port=554 Port to listen on (default: 554)
226 | -a, --aitask select AI task to be run: [facedetect|ssd|refinedet]
227 | -n, --nodet no AI inference
228 | -A, --audio RTSP with I2S audio input
229 | -R, --report report fps
230 | -s, --screenfps display fps on screen; notice that this will cause performance degradation.
231 | --ROI-off turn off ROI (Region-of-Interest)
232 | --control-rate=low-latency Encoder parameter control-rate
233 | --target-bitrate=3000 Encoder parameter target-bitrate
234 | --gop-length=60 Encoder parameter gop-length
235 | --profile Encoder parameter profile
236 | --level Encoder parameter level
237 | --tier Encoder parameter tier
238 | --encodeEnhancedParam String for fully customizing the encoder in the form "param1=val1, param2=val2,...", where paramn is the name of the encoder parameter
239 | ```
240 |
241 |
242 | #### Examples of supported combinations sorted by input are outlined below.
243 | If using the command line to invoke the smartcam, stop the process via CTRL-C prior to starting the next instance.
244 |
245 | * MIPI Input (IAS sensor input):
246 |
247 | * output: RTSP
248 | sudo smartcam --mipi -W 1920 -H 1080 --target rtsp
249 |
250 | * output: RTSP with audio
251 | sudo smartcam --mipi -W 1920 -H 1080 --target rtsp --audio
252 |
253 | * output: DP
254 | sudo smartcam --mipi -W 1920 -H 1080 --target dp
255 |
256 | * output: file
257 | sudo smartcam --mipi -W 1920 -H 1080 --target file
258 |
259 | * input file (file on file system):
260 |
261 | **Note** You must update the command to the specific file desired as the input source.
262 |
263 | * output: RTSP
264 | sudo smartcam --file ./test.h264 -i h264 -W 1920 -H 1080 -r 30 --target rtsp
265 |
266 | * output: DP
267 | sudo smartcam --file ./test.h264 -i h264 -W 1920 -H 1080 -r 30 --target dp
268 |
269 | * output: file
270 | sudo smartcam --file ./test.h264 -i h264 -W 1920 -H 1080 -r 30 --target file
271 |
272 | * input USB (USB webcam):
273 |
274 | **Note** You must ensure the width/height/framerate defined are supported by your USB camera.
275 |
276 | * output: RTSP
277 | sudo smartcam --usb 1 -W 1920 -H 1080 -r 30 --target rtsp
278 |
279 | * output: DP
280 | sudo smartcam --usb 1 -W 1920 -H 1080 -r 30 --target dp
281 |
282 | * output: file
283 | sudo smartcam --usb 1 -W 1920 -H 1080 -r 30 --target file
284 |
285 | # Files structure of the application
286 |
287 | * The application is installed as:
288 |
289 | * Binary File Directory: /opt/xilinx/bin
290 |
291 | | filename | description |
292 | |----------|-------------|
293 | |smartcam | main app |
294 |
295 | * Script File Directory: /opt/xilinx/bin/
296 |
297 | | filename | description |
298 | |------------------|-------------|
299 | |`01.mipi-rtsp.sh` | call smartcam to run face detection and send out rtsp stream.|
300 | |`02.mipi-dp.sh`   | call smartcam to run face detection and display on DP display.|
301 | |`03.file-file.sh` | call smartcam to run face detection on input h264/5 file and generate output h264/5 with detection boxes.|
302 |
303 | * Configuration File Directory: /opt/xilinx/share/ivas/smartcam/${AITASK}
304 |
305 | AITASK = "facedetect" | "refinedet" | "ssd"
306 |
307 | | filename | description |
308 | |-----------------|-------------|
309 | |preprocess.json | Config of preprocess for AI inference|
310 | |aiinference.json | Config of AI inference (facedetect\|refinedet\|ssd) |
311 | |drawresult.json | Config of boundbox drawing |
312 |
313 | * Jupyter notebook file: => /opt/xilinx/share/notebooks/smartcam
314 |
315 | | filename | description |
316 | |-----------------|-------------|
317 | |smartcam.ipynb | Jupyter notebook file for MIPI --> DP/RTSP demo.|
318 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Development Guide:
3 |
4 | If you want to cross compile the source in Linux PC machine, follow these steps, otherwise skip this section.
5 |
6 | 1. Refer to the `K260 SOM Starter Kit Tutorial` to build the cross-compilation SDK, and install it to the path you choose or default. Suppose it's SDKPATH.
7 |
8 | 2. Run "./build.sh ${SDKPATH}" in the source code folder of current application, to build the application.
9 |
10 | 3. The build process in [2](#build-app). will produce a rpm package smartcam-1.0.1-1.aarch64.rpm under build/, upload to the board, and run `rpm -ivh --force ./smartcam-1.0.1-1.aarch64.rpm` to update install.
11 |
12 | # Setting up the Board
13 |
14 | 1. Get the SD Card Image from [Boot Image Site](http://xilinx.com/) and follow the instructions in UG1089 to burn the SD card. And install the SD card to J11.
15 |
16 | 2. Hardware Setup:
17 |
18 | * Monitor:
19 |
20 | Before booting, connect a 1080P/4K monitor to the board via either DP or HDMI port.
21 |
22 | 4K monitor is preferred to demonstrate at the maximum supported resolution.
23 |
24 | * IAS sensor:
25 |
26 | Before power on, install an AR1335 sensor module in J7.
27 |
28 | * UART/JTAG interface:
29 |
30 | For interacting and seeing boot-time information, connect a USB debugger to the J4.
31 |
32 | * You may also use a USB webcam as an input device.
33 |
34 | The webcam is optional video input device supported in the application.
35 |
36 | Recommended webcam is the [Logitech BRIO](https://www.logitech.com/en-in/products/webcams/brio-4k-hdr-webcam.960-001105.html).
37 |
38 | * Network connection:
39 |
40 | Connect the Ethernet cable to your local network with DHCP enabled to install packages and run Jupyter Notebooks
41 |
42 | * Audio Pmod setup as RTSP audio input:
43 |
44 | Audio Pmod is optional audio input and output device.
45 |
46 | In smartcam application only RTSP mode [uses the audio input function](#rtsp-audio) to capture audio. Audio is then sent together with the images as RTSP stream and can be received at the client side.
47 |
48 | To set it up, first install the Pmod to J2, then connect a microphone or any other sound input device to the [line input port](https://store.digilentinc.com/pmod-i2s2-stereo-audio-input-and-output/). A headphone with a microphone will not work - device needs to be a dedicated input.
49 |
50 | Smartcam application does not yet support speakers.
51 |
52 | 3. Software Preparation:
53 |
54 | You will use a PC having network access to the board as the RTSP client machine.
55 |
56 | Make sure that the PC and the KV260 Vision AI Starter Kit are on the same subnet mask.
57 |
58 | On the client machine, to receive and play the RTSP stream, we recommend to install FFplay which is part of FFmpeg package.
59 |
60 | For Linux, you can install FFmpeg with the package manager of your distribution.
61 |
62 | For Windows, you can find install instructions on https://ffmpeg.org/download.html
63 |
64 | Other than FFplay, VLC can also be used to play RTSP stream, but we find sometimes it doesn't work on some client machine, while the FFplay works well.
65 |
66 | 4. Power on the board, login with username `petalinux`, and you need to setup the password for the first time bootup.
67 |
68 | 5. Get the latest application package.
69 |
70 | 1. Get the list of available packages in the feed.
71 |
72 | `sudo xmutil getpkgs`
73 |
74 | 2. Install the package with dnf install:
75 |
76 | `sudo dnf install packagegroup-kv260-smartcam.noarch`
77 |
78 | Note: For setups without access to the internet, it is possible to download and use the packages locally. Please refer to the `K260 SOM Starter Kit Tutorial` for instructions.
79 |
80 | 6. Dynamically load the application package.
81 |
82 | The firmware consist of bitstream, device tree overlay (dtbo) and xclbin file. The firmware is loaded dynamically on user request once Linux is fully booted. The xmutil utility can be used for that purpose.
83 |
84 | 1. Show the list and status of available acceleration platforms and AI Applications:
85 |
86 | `sudo xmutil listapps`
87 |
88 | 2. Switch to a different platform for different AI Application:
89 |
90 | * When xmutil listapps shows that there's no active accelerator, just activate the one you want to use.
91 |
92 | `sudo xmutil loadapp kv260-smartcam`
93 |
94 | * When there's already an accelerator being activated, unload it first, then switch to the one you want.
95 |
96 | `sudo xmutil unloadapp `
97 |
98 | `sudo xmutil loadapp kv260-smartcam`
99 |
100 | 7. Getting demo video files suitable for the application:
101 |
102 | To be able to demonstrate the function of the application in case you have no MIPI or USB camera at hand, we also support a file video source.
103 |
104 | You can download video files from the following links, which is of MP4 format.
105 |
106 | * Facedet / RefineDet AI Task:
107 | * https://pixabay.com/videos/alley-people-walk-street-ukraine-39837/
108 | * ADAS SSD AI Task:
109 | * https://pixabay.com/videos/freeway-traffic-cars-rainy-truck-8358/
110 |
111 | Then you need to transcode it to H264 file which is one supported input format.
112 |
113 | > ffmpeg -i input-video.mp4 -c:v libx264 -pix_fmt nv12 -r 30 output.nv12.h264
114 |
115 | Finally, please upload or copy these transcoded H264 files to the board (by using scp, ftp, or copy onto SD card and finding them in /media/sd-mmcblk0p1/), place it to somewhere under /home/petalinux, which is the home directory of the user you login as.
116 |
117 | # Run the Application
118 |
119 | There are two ways to interact with the application.
120 |
121 | ## Jupyter notebook.
122 |
123 | Use a web-browser (e.g. Chrome, Firefox) to interact with the platform.
124 |
125 | The Jupyter notebook URL can be found with the command:
126 |
127 | > sudo jupyter notebook list
128 |
129 | Output example:
130 |
131 | > Currently running servers:
132 | >
133 | > `http://ip:port/?token=xxxxxxxxxxxxxxxxxx` :: /opt/xilinx/share/notebooks
134 |
135 | ## Command line
136 | These allow the user to define different video input and output device targets using the "smartcam" application. These are to be executed using the UART/debug interface.
137 |
138 | **Notice** The application need to be ran with ***sudo*** .
139 |
140 | ### Example scripts
141 |
142 | Example scripts and options definitions are provided below.
143 |
144 | Refer to [File Structure](#script-loc) to find the files' location.
145 |
146 |
147 | Click here to view the example script usage
148 | * MIPI RTSP server:
149 |
150 | 1. Invoking `"sudo 01.mipi-rtsp.sh"` will start rtsp server for mipi captured images.
151 |
152 | 2. Script accepts ${width} ${height} as the 1st and 2nd parameter, the default is 1920 x 1080.
153 |
154 | 3. Running the script will give message in the form:
155 |
156 | > stream ready at:
157 | >
158 | > rtsp://boardip:port/test
159 |
160 | Run "ffplay rtsp://boardip:port/test" on the client PC to receive the rtsp stream.
161 |
162 | 4. Checking:
163 |
164 | You should be able to see the images the camera is capturing on the ffplay window, and when there's face captured by camera, there should be blue box drawn around the face, and the box should follow the movement of the face.
165 |
166 | * MIPI DP display:
167 |
168 | 1. Make sure the monitor is connected as [here](#Setting-up-the-Board).
169 |
170 | 2. Invoking `"sudo 02.mipi-dp.sh"` will play the captured video with detection results on monitor.
171 |
172 | 3. Script accepts ${width} ${height} as the 1st and 2nd parameter, the default is 1920 x 1080.
173 |
174 | 4. Checking:
175 |
176 | You should be able to see the images the camera is capturing on the monitor connected to the board, and when there's face captured by the camera, there should be blue box drawn around the face, and the box should follow the movement of the face.
177 |
178 | * File to File
179 |
180 | 1. Invoking `"sudo 03.file-file.sh"`
181 |
182 | Take the first argument passed to this script as the path to the video file (you can use the demo video for face detection, or similar videos), perform face detection and generate video with detection bbox, save as `./out.h264`
183 |
184 | 2. Checking:
185 |
186 | Play the input video file and generated video file "./out.h264" with any media player you prefer, e.g. VLC, FFPlay. You should be able to see in the output video file, there are blue boxes around the faces of people, and the boxes should follow the movement of the faces, while there're no such boxes with the input video file.
187 |
188 | * File to DP
189 |
190 | 1. Invoking `"sudo 04.file-ssd-dp.sh"`
191 |
192 | Take the first argument passed to this script as the path to the video file (you can use the demo video for ADAS SSD, or similar videos), perform vehicles detection and generate video with detection bbox, and display onto monitor
193 |
194 | 2. Checking:
195 |
196 | You should be able to see a video of highway driving, with the detection of vehicles in a bounding box.
197 |
198 |
199 | ### Additional configuration options for smartcam invocation:
200 |
201 | The example scripts and Jupyter notebook work as examples to show the capability of the smartcam for specific configurations. More combinations could be made based on the options provided by smartcam. User can get detailed application options as following by invoking `smartcam --help`.
202 |
203 | #### Usage:
204 |
205 | ```
206 | smartcam [OPTION?] - Application for face detection on SOM board of Xilinx.
207 |
208 | Help Options:
209 |
210 | -h, --help Show help options
211 |
212 | --help-all Show all help options
213 |
214 | --help-gst Show GStreamer Options
215 |
216 |
217 | Application Options:
218 |
219 | -m, --mipi= use MIPI camera as input source, auto detect, fail if no mipi available.
220 |
221 | -u, --usb=media_ID usb camera video device id, e.g. 2 for /dev/video2
222 |
223 | -f, --file=file path location of h26x file as input
224 |
225 | -i, --infile-type=h264 input file type: [h264 | h265]
226 |
227 | -W, --width=1920 resolution w of the input
228 |
229 | -H, --height=1080 resolution h of the input
230 |
231 | -r, --framerate=30 framerate of the input
232 |
233 | -t, --target=dp [dp|rtsp|file]
234 |
235 | -o, --outmedia-type=h264 output file type: [h264 | h265]
236 |
237 | -p, --port=554 Port to listen on (default: 554)
238 |
239 | -a, --aitask select AI task to be run: [facedetect|ssd|refinedet]
240 |
241 | -n, --nodet no AI inference
242 |
243 | -A, --audio RTSP with I2S audio input
244 |
245 | -R, --report report fps
246 |
247 | -s, --screenfps display fps on screen; notice that this will cause performance degradation.
248 |
249 | --ROI-off turn off ROI (Region-of-Interest)
250 | ```
251 |
252 |
253 | #### Examples of supported combinations sorted by input are outlined below.
254 | If using the command line to invoke the smartcam, stop the process via CTRL-C prior to starting the next instance.
255 |
256 | * MIPI Input (IAS sensor input):
257 |
258 | * output: RTSP
259 |
260 | `sudo smartcam --mipi -W 1920 -H 1080 --target rtsp`
261 |
262 | * output: RTSP with audio
263 |
264 | `sudo smartcam --mipi -W 1920 -H 1080 --target rtsp --audio`
265 |
266 | * output: DP
267 |
268 | `sudo smartcam --mipi -W 1920 -H 1080 --target dp`
269 |
270 | * output: file
271 |
272 | `sudo smartcam --mipi -W 1920 -H 1080 --target file `
273 |
274 | * input file (file on file system):
275 |
276 | **Note** You must update the command to the specific file desired as the input source.
277 |
278 | * output: RTSP
279 |
280 | `sudo smartcam --file ./test.h264 -i h264 -W 1920 -H 1080 -r 30 --target rtsp `
281 |
282 | * output: DP
283 |
284 | `sudo smartcam --file ./test.h264 -i h264 -W 1920 -H 1080 -r 30 --target dp`
285 |
286 | * output: file
287 |
288 | `sudo smartcam --file ./test.h264 -i h264 -W 1920 -H 1080 -r 30 --target file`
289 |
290 | * input USB (USB webcam):
291 |
292 | **Note** You must ensure the width/height/framerate defined are supported by your USB camera.
293 |
294 | * output: RTSP
295 |
296 | `sudo smartcam --usb 1 -W 1920 -H 1080 -r 30 --target rtsp`
297 |
298 | * output: DP
299 |
300 | `sudo smartcam --usb 1 -W 1920 -H 1080 -r 30 --target dp`
301 |
302 | * output: file
303 |
304 | `sudo smartcam --usb 1 -W 1920 -H 1080 -r 30 --target file`
305 |
306 | # Files structure of the application
307 |
308 | * The application is installed as:
309 |
310 | * Binary File Directory: /opt/xilinx/bin
311 |
312 | | filename | description |
313 | |----------|-------------|
314 | |smartcam| main app|
315 |
316 | * Script File Directory: /opt/xilinx/bin/
317 |
318 | | filename | description |
319 | |------------------|-------------|
320 | |`01.mipi-rtsp.sh` | call smartcam to run face detection and send out an RTSP stream.|
321 | |`02.mipi-dp.sh`   | call smartcam to run face detection and display on the DP display.|
322 | |`03.file-file.sh` | call smartcam to run face detection on an input h264/5 file and generate an output h264/5 file with detection boxes.|
323 |
324 | * Configuration File Directory: /opt/xilinx/share/ivas/smartcam/${AITASK}
325 |
326 | AITASK = "facedetect" | "refinedet" | "ssd"
327 |
328 | | filename | description |
329 | |----------|-------------|
330 | |preprocess.json| Config of preprocess for AI inference|
331 | |aiinference.json| Config of AI inference (facedetect\|refinedet\|ssd) |
332 | |drawresult.json| Config of bounding box drawing |
333 |
334 | * Jupyter notebook file: => /opt/xilinx/share/notebooks/smartcam
335 |
336 | | filename | description |
337 | |----------|-------------|
338 | |smartcam.ipynb | Jupyter notebook file for MIPI --> DP/RTSP demo.|
339 |
340 |
Copyright © 2021 Xilinx
341 |
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # Cross-compile smartcam using a PetaLinux/Yocto SDK.
3 | #
4 | # Usage: ./build.sh <sdk-install-dir> [Release|Debug]   (build type defaults to Release)
5 | #
6 | # Note: bash is required because the SDK environment-setup script and the
7 | # `source` builtin below are not POSIX sh.
8 |
9 | set -e
10 |
11 | sdkdir=${1:?"usage: $0 <sdk-install-dir> [Release|Debug]"}
12 | conf=${2:-Release}
13 |
14 | # Clear any host library path so the SDK toolchain is used exclusively.
15 | unset LD_LIBRARY_PATH;
16 | source ${sdkdir}/environment-setup-*;
17 | mkdir -p build
18 | cd build
19 | cmake -DCMAKE_BUILD_TYPE=${conf} -DCMAKE_TOOLCHAIN_FILE=${sdkdir}/sysroots/x86_64-petalinux-linux/usr/share/cmake/OEToolchainConfig.cmake ../ && make -j && make package
20 | cd ..
21 |
--------------------------------------------------------------------------------
/cmake/FindGStreamer.cmake:
--------------------------------------------------------------------------------
1 | # - Try to find GStreamer and its plugins
2 | # Once done, this will define
3 | #
4 | #  GSTREAMER_FOUND - system has GStreamer
5 | #  GSTREAMER_INCLUDE_DIRS - the GStreamer include directories
6 | #  GSTREAMER_LIBRARIES - link these to use GStreamer
7 | #
8 | # Additionally, gstreamer-base is always looked for and required, and
9 | # the following related variables are defined:
10 | #
11 | #  GSTREAMER_BASE_INCLUDE_DIRS - gstreamer-base's include directory
12 | #  GSTREAMER_BASE_LIBRARIES - link to these to use gstreamer-base
13 | #
14 | # Optionally, the COMPONENTS keyword can be passed to find_package()
15 | # and GStreamer plugins can be looked for.  Currently, the following
16 | # plugins can be searched, and they define the following variables if
17 | # found:
18 | #
19 | #  gstreamer-app:        GSTREAMER_APP_INCLUDE_DIRS and GSTREAMER_APP_LIBRARIES
20 | #  gstreamer-audio:      GSTREAMER_AUDIO_INCLUDE_DIRS and GSTREAMER_AUDIO_LIBRARIES
21 | #  gstreamer-fft:        GSTREAMER_FFT_INCLUDE_DIRS and GSTREAMER_FFT_LIBRARIES
22 | #  gstreamer-pbutils:    GSTREAMER_PBUTILS_INCLUDE_DIRS and GSTREAMER_PBUTILS_LIBRARIES
23 | #  gstreamer-video:      GSTREAMER_VIDEO_INCLUDE_DIRS and GSTREAMER_VIDEO_LIBRARIES
24 | #
25 | # Copyright (C) 2012 Raphael Kubo da Costa
26 | #
27 | # Redistribution and use in source and binary forms, with or without
28 | # modification, are permitted provided that the following conditions
29 | # are met:
30 | # 1.  Redistributions of source code must retain the above copyright
31 | #     notice, this list of conditions and the following disclaimer.
32 | # 2.  Redistributions in binary form must reproduce the above copyright
33 | #     notice, this list of conditions and the following disclaimer in the
34 | #     documentation and/or other materials provided with the distribution.
35 | #
36 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND ITS CONTRIBUTORS ``AS
37 | # IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
38 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
39 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ITS
40 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
41 | # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
42 | # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
43 | # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
44 | # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
45 | # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
46 | # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 |
48 | find_package(PkgConfig)
49 |
50 | # Helper macro to find a GStreamer plugin (or GStreamer itself)
51 | # _component_prefix is prepended to the _INCLUDE_DIRS and _LIBRARIES variables (eg. "GSTREAMER_AUDIO")
52 | # _pkgconfig_name is the component's pkg-config name (eg. "gstreamer-1.0", or "gstreamer-video-1.0").
53 | # _header is the component's header, relative to the gstreamer-1.0 directory (eg. "gst/gst.h").
54 | # _library is the component's library name (eg. "gstreamer-1.0" or "gstvideo-1.0")
55 | #
56 | # NOTE: _header and _library are accepted for interface compatibility with
57 | # other FindGStreamer variants but are not used here; pkg-config alone
58 | # populates <prefix>_INCLUDE_DIRS and <prefix>_LIBRARIES.
59 | macro(FIND_GSTREAMER_COMPONENT _component_prefix _pkgconfig_name _header _library)
60 |   pkg_check_modules(${_component_prefix} QUIET ${_pkgconfig_name})
61 | endmacro()
62 |
63 | # ------------------------
64 | # 1. Find GStreamer itself
65 | # ------------------------
66 |
67 | # 1.1. Find headers and libraries
68 | FIND_GSTREAMER_COMPONENT(GSTREAMER gstreamer-1.0 gst/gst.h gstreamer-1.0)
69 | FIND_GSTREAMER_COMPONENT(GSTREAMER_BASE gstreamer-base-1.0 gst/gst.h gstbase-1.0)
70 |
71 | # 1.2. Check GStreamer version
72 | #
73 | # GSTREAMER_INCLUDE_DIRS is a ;-list and may contain more than one directory
74 | # (e.g. the glib include dirs pulled in by pkg-config), so probe every entry
75 | # for gst/gstversion.h instead of treating the list as a single path.
76 | if (GSTREAMER_INCLUDE_DIRS)
77 |     foreach (_gst_include_dir ${GSTREAMER_INCLUDE_DIRS})
78 |         if (EXISTS "${_gst_include_dir}/gst/gstversion.h")
79 |             file(READ "${_gst_include_dir}/gst/gstversion.h" GSTREAMER_VERSION_CONTENTS)
80 |
81 |             # gstversion.h defines e.g. "#define GST_VERSION_MAJOR (1)".
82 |             string(REGEX MATCH "#define +GST_VERSION_MAJOR +\\(([0-9]+)\\)" _dummy "${GSTREAMER_VERSION_CONTENTS}")
83 |             set(GSTREAMER_VERSION_MAJOR "${CMAKE_MATCH_1}")
84 |
85 |             string(REGEX MATCH "#define +GST_VERSION_MINOR +\\(([0-9]+)\\)" _dummy "${GSTREAMER_VERSION_CONTENTS}")
86 |             set(GSTREAMER_VERSION_MINOR "${CMAKE_MATCH_1}")
87 |
88 |             string(REGEX MATCH "#define +GST_VERSION_MICRO +\\(([0-9]+)\\)" _dummy "${GSTREAMER_VERSION_CONTENTS}")
89 |             set(GSTREAMER_VERSION_MICRO "${CMAKE_MATCH_1}")
90 |
91 |             set(GSTREAMER_VERSION "${GSTREAMER_VERSION_MAJOR}.${GSTREAMER_VERSION_MINOR}.${GSTREAMER_VERSION_MICRO}")
92 |             break()
93 |         endif ()
94 |     endforeach ()
95 |     unset(_gst_include_dir)
96 | endif ()
97 |
98 | # An empty GStreamer_FIND_VERSION compares as not-greater, so this is a no-op
99 | # when find_package() was called without a version requirement.
100 | if ("${GStreamer_FIND_VERSION}" VERSION_GREATER "${GSTREAMER_VERSION}")
101 |     message(FATAL_ERROR "Required version (" ${GStreamer_FIND_VERSION} ") is higher than found version (" ${GSTREAMER_VERSION} ")")
102 | endif()
103 |
104 | # -------------------------
105 | # 2. Find GStreamer plugins
106 | # -------------------------
107 |
108 | FIND_GSTREAMER_COMPONENT(GSTREAMER_APP gstreamer-app-1.0 gst/app/gstappsink.h gstapp-1.0)
109 | FIND_GSTREAMER_COMPONENT(GSTREAMER_AUDIO gstreamer-audio-1.0 gst/audio/audio.h gstaudio-1.0)
110 | FIND_GSTREAMER_COMPONENT(GSTREAMER_FFT gstreamer-fft-1.0 gst/fft/gstfft.h gstfft-1.0)
111 | FIND_GSTREAMER_COMPONENT(GSTREAMER_PBUTILS gstreamer-pbutils-1.0 gst/pbutils/pbutils.h gstpbutils-1.0)
112 | FIND_GSTREAMER_COMPONENT(GSTREAMER_VIDEO gstreamer-video-1.0 gst/video/video.h gstvideo-1.0)
113 |
114 | # ------------------------------------------------
115 | # 3. Process the COMPONENTS passed to FIND_PACKAGE
116 | # ------------------------------------------------
117 | set(_GSTREAMER_REQUIRED_VARS GSTREAMER_INCLUDE_DIRS GSTREAMER_LIBRARIES GSTREAMER_VERSION GSTREAMER_BASE_INCLUDE_DIRS GSTREAMER_BASE_LIBRARIES)
118 |
119 | foreach (_component ${GStreamer_FIND_COMPONENTS})
120 |     set(_gst_component "GSTREAMER_${_component}")
121 |     string(TOUPPER ${_gst_component} _UPPER_NAME)
122 |
123 |     list(APPEND _GSTREAMER_REQUIRED_VARS ${_UPPER_NAME}_INCLUDE_DIRS ${_UPPER_NAME}_LIBRARIES)
124 | endforeach ()
125 |
126 | include(FindPackageHandleStandardArgs)
127 | FIND_PACKAGE_HANDLE_STANDARD_ARGS(GStreamer REQUIRED_VARS ${_GSTREAMER_REQUIRED_VARS}
128 |                                             VERSION_VAR   GSTREAMER_VERSION)
129 |
130 | mark_as_advanced(
131 |     GSTREAMER_APP_INCLUDE_DIRS
132 |     GSTREAMER_APP_LIBRARIES
133 |     GSTREAMER_AUDIO_INCLUDE_DIRS
134 |     GSTREAMER_AUDIO_LIBRARIES
135 |     GSTREAMER_BASE_INCLUDE_DIRS
136 |     GSTREAMER_BASE_LIBRARIES
137 |     GSTREAMER_FFT_INCLUDE_DIRS
138 |     GSTREAMER_FFT_LIBRARIES
139 |     GSTREAMER_INCLUDE_DIRS
140 |     GSTREAMER_LIBRARIES
141 |     GSTREAMER_PBUTILS_INCLUDE_DIRS
142 |     GSTREAMER_PBUTILS_LIBRARIES
143 |     GSTREAMER_VIDEO_INCLUDE_DIRS
144 |     GSTREAMER_VIDEO_LIBRARIES
145 | )
--------------------------------------------------------------------------------
/config/facedetect/aiinference.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/lib/firmware/xilinx/kv260-smartcam/kv260-smartcam.xclbin",
3 | "ivas-library-repo": "/usr/lib/",
4 | "element-mode":"inplace",
5 | "kernels" :[
6 | {
7 | "library-name":"libivas_xdpuinfer.so",
8 | "config": {
9 | "model-name" : "densebox_640_360",
10 | "model-class" : "FACEDETECT",
11 | "model-path" : "/opt/xilinx/share/vitis_ai_library/models/kv260-smartcam",
12 | "run_time_model" : false,
13 | "need_preprocess" : false,
14 | "performance_test" : false,
15 | "debug_level" : 0
16 | }
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/config/facedetect/drawresult.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/usr/lib/dpu.xclbin",
3 | "ivas-library-repo": "/opt/xilinx/lib",
4 | "element-mode":"inplace",
5 | "kernels" :[
6 | {
7 | "library-name":"libivas_airender.so",
8 | "config": {
9 | "fps_interval" : 10,
10 | "font_size" : 2,
11 | "font" : 3,
12 | "thickness" : 2,
13 | "debug_level" : 0,
14 | "label_color" : { "blue" : 0, "green" : 0, "red" : 255 },
15 | "label_filter" : [ "class", "probability" ],
16 | "classes" : [
17 | ]
18 | }
19 | }
20 | ]
21 | }
22 |
--------------------------------------------------------------------------------
/config/facedetect/preprocess.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/lib/firmware/xilinx/kv260-smartcam/kv260-smartcam.xclbin",
3 | "ivas-library-repo": "/opt/xilinx/lib",
4 | "kernels": [
5 | {
6 | "kernel-name": "pp_pipeline_accel:pp_pipeline_accel_1",
7 | "library-name": "libivas_xpp.so",
8 | "config": {
9 | "debug_level" : 1,
10 | "mean_r": 128,
11 | "mean_g": 128,
12 | "mean_b": 128,
13 | "scale_r": 1,
14 | "scale_g": 1,
15 | "scale_b": 1
16 | }
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/config/refinedet/aiinference.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/lib/firmware/xilinx/kv260-smartcam/kv260-smartcam.xclbin",
3 | "ivas-library-repo": "/usr/lib/",
4 | "element-mode":"inplace",
5 | "kernels" :[
6 | {
7 | "library-name":"libivas_xdpuinfer.so",
8 | "config": {
9 | "model-name" : "refinedet_pruned_0_96",
10 | "model-class" : "REFINEDET",
11 | "model-path" : "/opt/xilinx/share/vitis_ai_library/models/kv260-smartcam",
12 | "run_time_model" : false,
13 | "need_preprocess" : false,
14 | "performance_test" : false,
15 | "debug_level" : 0
16 | }
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/config/refinedet/drawresult.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/usr/lib/dpu.xclbin",
3 | "ivas-library-repo": "/opt/xilinx/lib",
4 | "element-mode":"inplace",
5 | "kernels" :[
6 | {
7 | "library-name":"libivas_airender.so",
8 | "config": {
9 | "fps_interval" : 10,
10 | "font_size" : 2,
11 | "font" : 3,
12 | "thickness" : 2,
13 | "debug_level" : 0,
14 | "label_color" : { "blue" : 0, "green" : 0, "red" : 255 },
15 | "label_filter" : [ "class", "probability" ],
16 | "classes" : [
17 | ]
18 | }
19 | }
20 | ]
21 | }
22 |
--------------------------------------------------------------------------------
/config/refinedet/preprocess.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/lib/firmware/xilinx/kv260-smartcam/kv260-smartcam.xclbin",
3 | "ivas-library-repo": "/opt/xilinx/lib",
4 | "kernels": [
5 | {
6 | "kernel-name": "pp_pipeline_accel:pp_pipeline_accel_1",
7 | "library-name": "libivas_xpp.so",
8 | "config": {
9 | "debug_level" : 1,
10 | "mean_r": 123,
11 | "mean_g": 117,
12 | "mean_b": 104,
13 | "scale_r": 1,
14 | "scale_g": 1,
15 | "scale_b": 1
16 | }
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/config/ssd/aiinference.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/lib/firmware/xilinx/kv260-smartcam/kv260-smartcam.xclbin",
3 | "ivas-library-repo": "/usr/lib/",
4 | "element-mode":"inplace",
5 | "kernels" :[
6 | {
7 | "library-name":"libivas_xdpuinfer.so",
8 | "config": {
9 | "model-name" : "ssd_adas_pruned_0_95",
10 | "model-class" : "SSD",
11 | "model-path" : "/opt/xilinx/share/vitis_ai_library/models/kv260-smartcam",
12 | "run_time_model" : false,
13 | "need_preprocess" : false,
14 | "performance_test" : false,
15 | "debug_level" : 0
16 | }
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/config/ssd/drawresult.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/usr/lib/dpu.xclbin",
3 | "ivas-library-repo": "/opt/xilinx/lib",
4 | "element-mode":"inplace",
5 | "kernels" :[
6 | {
7 | "library-name":"libivas_airender.so",
8 | "config": {
9 | "fps_interval" : 10,
10 | "font_size" : 2,
11 | "font" : 3,
12 | "thickness" : 2,
13 | "debug_level" : 0,
14 | "label_color" : { "blue" : 0, "green" : 0, "red" : 255 },
15 | "label_filter" : [ "class", "probability" ],
16 | "classes" : [
17 | {
18 | "name" : "car",
19 | "blue" : 255,
20 | "green" : 0,
21 | "red" : 0
22 | },
23 | {
24 | "name" : "person",
25 | "blue" : 0,
26 | "green" : 255,
27 | "red" : 0
28 | },
29 | {
30 | "name" : "bicycle",
31 | "blue" : 0,
32 | "green" : 0,
33 | "red" : 255
34 | }]
35 | }
36 | }
37 | ]
38 | }
39 |
--------------------------------------------------------------------------------
/config/ssd/label.json:
--------------------------------------------------------------------------------
1 | {
2 | "model-name":"ssd_adas_pruned_0_95",
3 | "num-labels": 4,
4 | "labels": [
5 | {
6 | "label": 0,
7 | "name": "background",
8 | "display_name": "background"
9 | },
10 | {
11 | "label": 1,
12 | "name": "car",
13 | "display_name": "car"
14 | },
15 | {
16 | "label": 2,
17 | "name": "bicycle",
18 | "display_name": "bicycle"
19 | },
20 | {
21 | "label": 3,
22 | "name": "person",
23 | "display_name": "person"
24 | }
25 | ]
26 | }
27 |
--------------------------------------------------------------------------------
/config/ssd/preprocess.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/lib/firmware/xilinx/kv260-smartcam/kv260-smartcam.xclbin",
3 | "ivas-library-repo": "/opt/xilinx/lib",
4 | "kernels": [
5 | {
6 | "kernel-name": "pp_pipeline_accel:pp_pipeline_accel_1",
7 | "library-name": "libivas_xpp.so",
8 | "config": {
9 | "debug_level" : 1,
10 | "mean_r": 123,
11 | "mean_g": 117,
12 | "mean_b": 104,
13 | "scale_r": 1,
14 | "scale_g": 1,
15 | "scale_b": 1
16 | }
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/notebook/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2021 Xilinx Inc.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/notebook/images/xilinx_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xilinx/smartcam/f76f59951f36bc89cb35f081accdd5c380a4ca9b/notebook/images/xilinx_logo.png
--------------------------------------------------------------------------------
/notebook/smartcam.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | ""
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "# 1. Introduction:\n",
15 | "\n",
16 |     "This notebook demonstrates how to capture images from the MIPI devices, then perform face detection or refinedet or SSD inference on them with the DPU, and send the video frames with bounding boxes of detected results either to DP to display or stream it out as an RTSP server.\n",
17 | "\n",
18 | "The application is based on the VVAS (Vitis Video Analytics SDK) framework, also utilizing the open source GStreamer plugins.\n",
19 | "\n",
20 |     "Vitis Video Analytics SDK (VVAS) is developed by Xilinx to provide many useful GStreamer plugins as the middleware between the application and underlying FPGA accelerators, including the DPU AI inference engine, and other PL accelerators such as the one for AI input preprocessing.\n",
21 | "\n",
22 | "Please refer to the [Kria™ KV260 Vision AI Starter Kit Applications GitHub Pages](https://xilinx.github.io/kria-apps-docs/index.html) for detailed HW/SW architecture and [Vitis Video Analytics SDK GitHub Pages](https://xilinx.github.io/VVAS/#) for the VVAS related info."
23 | ]
24 | },
25 | {
26 | "cell_type": "markdown",
27 | "metadata": {},
28 | "source": [
29 | "# 2. Some User Options:"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "metadata": {},
35 | "source": [
36 | "* Option to choose the AI model to run"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "aitask=\"facedetect\" # \"refinedet\" "
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "metadata": {},
51 | "source": [
52 | "* Option to set input type"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": null,
58 | "metadata": {},
59 | "outputs": [],
60 | "source": [
61 | "source = \"mipi\" # choose either 'mipi' or 'usb'"
62 | ]
63 | },
64 | {
65 | "cell_type": "markdown",
66 | "metadata": {},
67 | "source": [
68 | "* Option to choose DP / RTSP output"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": null,
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "DP_output=True # True to choose DP output, False to choose RTSP output"
78 | ]
79 | },
80 | {
81 | "cell_type": "markdown",
82 | "metadata": {},
83 | "source": [
84 | "# 3. Imports and Initialization"
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "metadata": {},
90 | "source": [
91 |     "## Prepare Data to Visualize the Pipeline"
92 | ]
93 | },
94 | {
95 | "cell_type": "markdown",
96 | "metadata": {},
97 | "source": [
98 | "* Create a directory for saving the pipeline graph as dot file. Set the GStreamer debug dot directory environment variable to point to that directory"
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "metadata": {},
105 | "outputs": [],
106 | "source": [
107 | "nb=\"smartcam\"\n",
108 | "dotdir = \"/tmp/gst-dot/\" + nb + \"/\"\n",
109 | "!mkdir -p $dotdir\n",
110 | "%env GST_DEBUG_DUMP_DOT_DIR = $dotdir"
111 | ]
112 | },
113 | {
114 | "cell_type": "markdown",
115 | "metadata": {},
116 | "source": [
117 | "## Import all python modules required for this notebook."
118 | ]
119 | },
120 | {
121 | "cell_type": "markdown",
122 | "metadata": {},
123 | "source": [
124 | "* Import system, util modules"
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": null,
130 | "metadata": {},
131 | "outputs": [],
132 | "source": [
133 | "import sys\n",
134 | "import glob\n",
135 | "import subprocess\n",
136 | "import re\n",
137 | "import os"
138 | ]
139 | },
140 | {
141 | "cell_type": "markdown",
142 | "metadata": {},
143 | "source": [
144 | "* Add some util path"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": null,
150 | "metadata": {},
151 | "outputs": [],
152 | "source": [
153 | "pathv=\"{}:/usr/sbin:/sbin\".format(os.getenv(\"PATH\"))\n",
154 | "%env PATH = $pathv"
155 | ]
156 | },
157 | {
158 | "cell_type": "markdown",
159 | "metadata": {},
160 | "source": [
161 | "* GStreamer related library import."
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": null,
167 | "metadata": {},
168 | "outputs": [],
169 | "source": [
170 | "import gi\n",
171 | "gi.require_version('Gst', '1.0')\n",
172 | "gi.require_version(\"GstApp\", \"1.0\")\n",
173 | "gi.require_version('GstVideo', '1.0')\n",
174 | "gi.require_version('GstRtspServer', '1.0')\n",
175 | "gi.require_version('GIRepository', '2.0')\n",
176 | "from gi.repository import GObject, GLib, Gst, GstVideo, GstRtspServer, GLib, GIRepository"
177 | ]
178 | },
179 | {
180 | "cell_type": "markdown",
181 | "metadata": {},
182 | "source": [
183 | "* Initialize."
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": null,
189 | "metadata": {},
190 | "outputs": [],
191 | "source": [
192 | "Gst.init(None)\n",
193 | "#Gst.debug_set_threshold_from_string('*:1', True)"
194 | ]
195 | },
196 | {
197 | "cell_type": "markdown",
198 | "metadata": {},
199 | "source": [
200 | "# 4. Construct the String Representation of GStreamer Pipeline"
201 | ]
202 | },
203 | {
204 | "cell_type": "markdown",
205 | "metadata": {},
206 | "source": [
207 | "## Function ***get_media_by_device*** \n",
208 | "\n",
209 | "This function returns the matching media node for a given video capture source.\n",
210 | "\n",
211 | "The following sources are supported in this notebook:\n",
212 | "\n",
213 | "* usb : requires USB webcam supporting 1080p output, we recommend the [Logitech BRIO](https://www.logitech.com/en-in/products/webcams/brio-4k-hdr-webcam.960-001105.html).\n",
214 | "* mipi : platform1 only"
215 | ]
216 | },
217 | {
218 | "cell_type": "code",
219 | "execution_count": null,
220 | "metadata": {},
221 | "outputs": [],
222 | "source": [
223 | "def get_media_dev_by_name(src):\n",
224 | " sources = {\n",
225 | " \"usb\" : 'uvcvideo',\n",
226 | " 'mipi' : 'vcap_csi',\n",
227 | " }\n",
228 | " devices = glob.glob('/dev/media*')\n",
229 | " for dev in devices:\n",
230 | " proc = subprocess.run(['media-ctl', '-d', dev, '-p'], capture_output=True, encoding='utf8')\n",
231 | " for line in proc.stdout.splitlines():\n",
232 | " if sources[src] in line:\n",
233 | " return dev"
234 | ]
235 | },
236 | {
237 | "cell_type": "code",
238 | "execution_count": null,
239 | "metadata": {},
240 | "outputs": [],
241 | "source": [
242 | "def get_video_dev_of_mediadev(src):\n",
243 | " proc = subprocess.Popen(['media-ctl', '-d', src, '-p'], stdout=subprocess.PIPE)\n",
244 | " output = subprocess.check_output(('awk', '/^driver\\s*uvcvideo/ {u=1} /device node name *\\/dev\\/video/ {x=$4;f=1;next} u&&f&&/pad0: Sink/ {print x; x=\"\"} f {f=0}'), stdin=proc.stdout).decode('utf8').splitlines()\n",
245 | " if len(output) > 1:\n",
246 | " return output[0]\n",
247 | " "
248 | ]
249 | },
250 | {
251 | "cell_type": "markdown",
252 | "metadata": {},
253 | "source": [
254 | "## Get the mediasrc\n",
255 | "\n",
256 | "* Get the mediasrc index by calling get_media_dev_by_name()"
257 | ]
258 | },
259 | {
260 | "cell_type": "code",
261 | "execution_count": null,
262 | "metadata": {},
263 | "outputs": [],
264 | "source": [
265 | "media_device = get_media_dev_by_name(source) \n",
266 | "if media_device is None:\n",
267 | " raise Exception('Unable to find video source ' + source + '. Make sure the device is plugged in, powered, and the correct platform is used.')"
268 | ]
269 | },
270 | {
271 | "cell_type": "markdown",
272 | "metadata": {},
273 | "source": [
274 | "* mediasrcbin is the Xilinx developed plugin for media devices."
275 | ]
276 | },
277 | {
278 | "cell_type": "code",
279 | "execution_count": null,
280 | "metadata": {},
281 | "outputs": [],
282 | "source": [
283 | "if source == \"mipi\":\n",
284 | " src = \"mediasrcbin media-device=\" + media_device\n",
285 | " if DP_output:\n",
286 | " src += \" v4l2src0::io-mode=dmabuf v4l2src0::stride-align=256 \"\n",
287 | "elif source == \"usb\":\n",
288 | " usbmedia=media_device\n",
289 | " usbvideo=get_video_dev_of_mediadev(usbmedia)\n",
290 | " src = \"v4l2src name=videosrc device={usbvideo} io-mode=mmap stride-align=256 \".format(usbvideo=usbvideo)"
291 | ]
292 | },
293 | {
294 | "cell_type": "markdown",
295 | "metadata": {},
296 | "source": [
297 | "## Construct the real pipeline string."
298 | ]
299 | },
300 | {
301 | "cell_type": "markdown",
302 | "metadata": {},
303 | "source": [
304 | "### configuration directory for IVAS plugin"
305 | ]
306 | },
307 | {
308 | "cell_type": "code",
309 | "execution_count": null,
310 | "metadata": {},
311 | "outputs": [],
312 | "source": [
313 | "confdir=\"/opt/xilinx/share/ivas/smartcam/\"+aitask"
314 | ]
315 | },
316 | {
317 | "cell_type": "markdown",
318 | "metadata": {},
319 | "source": [
320 | "### Set the caps.\n",
321 | "User can change the resolution and framerate here.\n",
322 | "\n",
323 |     "If the videosrc cannot support the NV12 format, adjust the pipeline to fit with the following elements."
324 | ]
325 | },
326 | {
327 | "cell_type": "code",
328 | "execution_count": null,
329 | "metadata": {},
330 | "outputs": [],
331 | "source": [
332 | "if source==\"mipi\":\n",
333 | " pip=src + ' ! video/x-raw, width=1920, height=1080, format=NV12, framerate=30/1 '\n",
334 | "elif source==\"usb\":\n",
335 | " pip=src + ' ! video/x-raw, width=1920, height=1080 ! videoconvert ! video/x-raw, format=NV12 '"
336 | ]
337 | },
338 | {
339 | "cell_type": "markdown",
340 | "metadata": {},
341 | "source": [
342 | "### Add one branch to perform AI inference.\n",
343 | "\n",
344 | "* ivas_xmultisrc kconfig=\"{confdir}/preprocess.json\"\n",
345 | "\n",
346 | " This is for an element to do colorspace conversion from NV12 to BGR, scale to the size needed by DPU, and also perform the quantization as DPU model needed.\n",
347 | " \n",
348 |     "  In the current project there is a dedicated PL ***accelerator pp_pipeline_accel:pp_pipeline_accel_1@0xa0020000*** which will do this work with greater performance than the software version.\n",
349 | "\n",
350 | " Detailed configuration please see the json file."
351 | ]
352 | },
353 | {
354 | "cell_type": "code",
355 | "execution_count": null,
356 | "metadata": {},
357 | "outputs": [],
358 | "source": [
359 | "pip += ' ! tee name=t ! queue \\\n",
360 | "! ivas_xmultisrc kconfig=\"{confdir}/preprocess.json\" '.format(confdir=confdir)"
361 | ]
362 | },
363 | {
364 | "cell_type": "markdown",
365 | "metadata": {},
366 | "source": [
367 | "* AI inference\n",
368 | "\n",
369 | " With the buffer being preprocess, AI inference plugin is linked to perform the AI tasks.\n",
370 | " \n",
371 | " The DPU AI inference hardware engine and Vitis AI library is behind the scenes."
372 | ]
373 | },
374 | {
375 | "cell_type": "code",
376 | "execution_count": null,
377 | "metadata": {},
378 | "outputs": [],
379 | "source": [
380 | "pip += ' ! queue ! ivas_xfilter kernels-config=\"{confdir}/aiinference.json\" '.format(confdir=confdir)"
381 | ]
382 | },
383 | {
384 | "cell_type": "markdown",
385 | "metadata": {},
386 | "source": [
387 | "* AI inference meta pass down\n",
388 | "\n",
389 | " AI inference results is passed to sink_master pad of ***ivas_xmetaaffixer***, which is an IVAS plugin which can scale the meta info, such as bbox info, based on the size ratio of the buffers of sink_slave to sink_master.\n",
390 | " \n",
391 | " For detailed usage please refer to IVAS docs."
392 | ]
393 | },
394 | {
395 | "cell_type": "code",
396 | "execution_count": null,
397 | "metadata": {},
398 | "outputs": [],
399 | "source": [
400 | "pip += ' ! ima.sink_master ivas_xmetaaffixer name=ima ima.src_master ! fakesink '"
401 | ]
402 | },
403 | {
404 | "cell_type": "markdown",
405 | "metadata": {},
406 | "source": [
407 | "### Another branch to accept the inference meta data, and drawing boundingbox."
408 | ]
409 | },
410 | {
411 | "cell_type": "markdown",
412 | "metadata": {},
413 | "source": [
414 | "* Accept and scale the original AI inference meta info\n",
415 | "\n",
416 | " As the previous step, the meta info is pass down to here, the original buffer from **t.** is linked to the sink_slave_0, and get the scaled meta at the corresponding src_slave_0."
417 | ]
418 | },
419 | {
420 | "cell_type": "code",
421 | "execution_count": null,
422 | "metadata": {},
423 | "outputs": [],
424 | "source": [
425 | "pip += ' t. ! queue max-size-buffers=1 leaky=2 ! ima.sink_slave_0 ima.src_slave_0 '"
426 | ]
427 | },
428 | {
429 | "cell_type": "markdown",
430 | "metadata": {},
431 | "source": [
432 | "* Draw bbox on the buffer"
433 | ]
434 | },
435 | {
436 | "cell_type": "code",
437 | "execution_count": null,
438 | "metadata": {},
439 | "outputs": [],
440 | "source": [
441 | "pip += ' ! queue ! ivas_xfilter kernels-config=\"{confdir}/drawresult.json\" '.format(confdir=confdir)"
442 | ]
443 | },
444 | {
445 | "cell_type": "markdown",
446 | "metadata": {},
447 | "source": [
448 | "# 5. Two types of Outputs."
449 | ]
450 | },
451 | {
452 | "cell_type": "markdown",
453 | "metadata": {},
454 | "source": [
455 | "## DP output"
456 | ]
457 | },
458 | {
459 | "cell_type": "code",
460 | "execution_count": null,
461 | "metadata": {},
462 | "outputs": [],
463 | "source": [
464 | "if DP_output:\n",
465 | " pip += ' ! queue ! kmssink driver-name=xlnx plane-id=39 sync=false fullscreen-overlay=true '\n",
466 | " pipe = Gst.parse_launch(pip)\n",
467 | " pipe.set_state(Gst.State.PLAYING)"
468 | ]
469 | },
470 | {
471 | "cell_type": "markdown",
472 | "metadata": {},
473 | "source": [
474 | "### View the GStreamer Pipeline Graph"
475 | ]
476 | },
477 | {
478 | "cell_type": "markdown",
479 | "metadata": {},
480 | "source": [
481 | "* Generate pipeline dot file."
482 | ]
483 | },
484 | {
485 | "cell_type": "code",
486 | "execution_count": null,
487 | "metadata": {},
488 | "outputs": [],
489 | "source": [
490 | "if DP_output:\n",
491 | " Gst.debug_bin_to_dot_file(pipe, Gst.DebugGraphDetails.ALL, nb)\n"
492 | ]
493 | },
494 | {
495 | "cell_type": "markdown",
496 | "metadata": {},
497 | "source": [
498 | "* Convert the dot file to png and display the pipeline graph.\n",
499 | "\n",
500 |     "  The image will be displayed below the following code cell.\n",
501 | "\n",
502 | " **Note**: This step may take a few seconds."
503 | ]
504 | },
505 | {
506 | "cell_type": "code",
507 | "execution_count": null,
508 | "metadata": {},
509 | "outputs": [],
510 | "source": [
511 | "import pydot\n",
512 | "from IPython.display import Image, display, clear_output\n",
513 | "if DP_output:\n",
514 | " dotfile = dotdir + \"/\" + nb + \".dot\"\n",
515 | " print(\"Converting dot to graph...\")\n",
516 | " graph = pydot.graph_from_dot_file(dotfile, 'utf-8')\n",
517 | " display(Image(graph[0].create(None,'png', 'utf-8')))\n",
518 | " print(\"Pipeline graph is shown, double click it to zoom in and out.\") "
519 | ]
520 | },
521 | {
522 | "cell_type": "markdown",
523 | "metadata": {},
524 | "source": [
525 | "* Mainloop to be interruptible by clicking the stop square button on the Jupyter toolbar.\n",
526 | "\n",
527 | " **Notice:** For DP output case, stopping the process with the square button can only work until the previous step finishes to show the pipeline dot graph."
528 | ]
529 | },
530 | {
531 | "cell_type": "code",
532 | "execution_count": null,
533 | "metadata": {},
534 | "outputs": [],
535 | "source": [
536 | "if DP_output:\n",
537 | " loop = GLib.MainLoop()\n",
538 | " try:\n",
539 | " loop.run()\n",
540 | " except:\n",
541 | " sys.stdout.write(\"Interrupt caught\\n\")\n",
542 | " Gst.debug_bin_to_dot_file(pipe, Gst.DebugGraphDetails.ALL, nb)\n",
543 | " pipe.set_state(Gst.State.NULL)\n",
544 | " loop.quit()\n",
545 | " pass"
546 | ]
547 | },
548 | {
549 | "cell_type": "markdown",
550 | "metadata": {},
551 | "source": [
552 | "## RTSP output"
553 | ]
554 | },
555 | {
556 | "cell_type": "markdown",
557 | "metadata": {},
558 | "source": [
559 | "* Start an RTSP server, which will consume the GStreamer pipeline constructed in the next section."
560 | ]
561 | },
562 | {
563 | "cell_type": "code",
564 | "execution_count": null,
565 | "metadata": {},
566 | "outputs": [],
567 | "source": [
568 | "if not DP_output:\n",
569 | " mainloop = GLib.MainLoop()\n",
570 | " server = GstRtspServer.RTSPServer.new()\n",
571 | " server.props.service = \"5000\"\n",
572 | " mounts = server.get_mount_points()\n",
573 | " serverid=server.attach(None)\n",
574 | " factory = GstRtspServer.RTSPMediaFactory()"
575 | ]
576 | },
577 | {
578 | "cell_type": "markdown",
579 | "metadata": {},
580 | "source": [
581 | "### Then pass the frame with bbox to do VCU encoding with bbox info as encoding ROI."
582 | ]
583 | },
584 | {
585 | "cell_type": "markdown",
586 | "metadata": {},
587 | "source": [
588 | "* ROI info for VCU encoding generation\n"
589 | ]
590 | },
591 | {
592 | "cell_type": "code",
593 | "execution_count": null,
594 | "metadata": {},
595 | "outputs": [],
596 | "source": [
597 | "if not DP_output:\n",
598 | " pip += ' ! queue ! ivas_xroigen roi-type=1 roi-qp-delta=-10 roi-max-num=10 '"
599 | ]
600 | },
601 | {
602 | "cell_type": "markdown",
603 | "metadata": {},
604 | "source": [
605 | "* VCU encoding"
606 | ]
607 | },
608 | {
609 | "cell_type": "code",
610 | "execution_count": null,
611 | "metadata": {},
612 | "outputs": [],
613 | "source": [
614 | "if not DP_output:\n",
615 | " pip += '! queue ! omxh264enc qp-mode=1 num-slices=8 gop-length=60 \\\n",
616 | " periodicity-idr=270 control-rate=low-latency \\\n",
617 | " gop-mode=low-delay-p gdr-mode=horizontal cpb-size=200 \\\n",
618 | " initial-delay=100 filler-data=false min-qp=15 \\\n",
619 | " max-qp=40 b-frames=0 low-bandwidth=false target-bitrate=3000 \\\n",
620 | "! video/x-h264, alignment=au '"
621 | ]
622 | },
623 | {
624 | "cell_type": "markdown",
625 | "metadata": {},
626 | "source": [
627 | "* RTP payloading"
628 | ]
629 | },
630 | {
631 | "cell_type": "code",
632 | "execution_count": null,
633 | "metadata": {},
634 | "outputs": [],
635 | "source": [
636 | "if not DP_output:\n",
637 | " pip += '! queue ! rtph264pay name=pay0 pt=96'"
638 | ]
639 | },
640 | {
641 | "cell_type": "markdown",
642 | "metadata": {},
643 | "source": [
644 | "* Start the RTSP Server With the Pipeline String"
645 | ]
646 | },
647 | {
648 | "cell_type": "code",
649 | "execution_count": null,
650 | "metadata": {},
651 | "outputs": [],
652 | "source": [
653 | "if not DP_output:\n",
654 | " factory.set_launch('( ' + pip + ' )')\n",
655 | " factory.set_shared(True)\n",
656 | " mounts.add_factory(\"/test\", factory)\n",
657 | "\n",
658 | " out=subprocess.check_output(\"ifconfig | grep inet\", shell=True)\n",
659 | " for line in out.decode(\"ascii\").splitlines():\n",
660 | " m = re.search('inet *(.*?) ', line)\n",
661 | " if m:\n",
662 | " found = m.group(1)\n",
663 | " if found != \"127.0.0.1\":\n",
664 | " break\n",
665 | " uri=\"rtsp://{}:{}/test\".format(\"127.0.0.1\" if (found==\"\") else found, server.props.service)\n",
666 | " print (\"Video is now streaming from {src} source. \\n\\\n",
667 | " Run the command \\\"ffplay {uri}\\\" in another PC which have network access to the SoM board to view the video.\\n\".format(src=source, uri=uri))\n",
668 | " try:\n",
669 | " mainloop.run()\n",
670 | " except:\n",
671 | " sys.stdout.write(\"Interrupt caught.\\n\")\n",
672 | " GLib.Source.remove(serverid)\n",
673 | " mainloop.quit()\n",
674 | " pass\n"
675 | ]
676 | },
677 | {
678 | "cell_type": "markdown",
679 | "metadata": {},
680 | "source": [
681 | "# 6. Summary\n",
682 | "The Jupyter application shows how to:\n",
683 | "\n",
684 | "1. Create a GStreamer pipeline which utilizes the VVAS framework to call the Vitis AI Library to do face detection inference on the incoming frames, and draw bounding boxes of the detected results.\n",
685 | "2. Use the GStreamer RTSPServer module to setup an RTSP server.\n",
686 | "3. User can try to customize the source to video file or USB camera."
687 | ]
688 | },
689 | {
690 | "cell_type": "markdown",
691 | "metadata": {},
692 | "source": [
693 | "Copyright© 2021 Xilinx"
694 | ]
695 | }
696 | ],
697 | "metadata": {
698 | "kernelspec": {
699 | "display_name": "Python 3",
700 | "language": "python",
701 | "name": "python3"
702 | },
703 | "language_info": {
704 | "codemirror_mode": {
705 | "name": "ipython",
706 | "version": 3
707 | },
708 | "file_extension": ".py",
709 | "mimetype": "text/x-python",
710 | "name": "python",
711 | "nbconvert_exporter": "python",
712 | "pygments_lexer": "ipython3",
713 | "version": "3.7.6"
714 | }
715 | },
716 | "nbformat": 4,
717 | "nbformat_minor": 4
718 | }
719 |
--------------------------------------------------------------------------------
/script/01.mipi-rtsp.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright 2021 Xilinx Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # Capture video from the MIPI sensor, run AI inference, and stream the
19 | # annotated result over RTSP. Usage: 01.mipi-rtsp.sh [width] [height]
20 | w=${1:-"1920"} h=${2:-"1080"}
21 | # Directory containing this script (works for relative invocations too).
22 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
23 |
24 | ${DIR}/smartcam --mipi -t rtsp --width "${w}" --height "${h}"
25 |
--------------------------------------------------------------------------------
/script/02.mipi-dp.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright 2021 Xilinx Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # Capture video from the MIPI sensor, run AI inference, and show the
19 | # annotated result on the DisplayPort monitor. Usage: 02.mipi-dp.sh [width] [height]
20 | w=${1:-"1920"} h=${2:-"1080"}
21 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
22 |
23 | ${DIR}/smartcam --mipi --target dp --width "${w}" --height "${h}"
24 |
--------------------------------------------------------------------------------
/script/03.file-file.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright 2021 Xilinx Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | # Run AI inference on an H.264 input file and write the annotated output
18 | # to a file. Usage: 03.file-file.sh <video-file> [width] [height]
19 | if [ $# -lt 1 ]; then
20 |     echo "Please give a path to the video file"
21 |     exit 1
22 | fi
23 | file=${1}
24 | w=${2:-"1920"} h=${3:-"1080"}
25 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
26 |
27 | ${DIR}/smartcam --file "${file}" --infile-type h264 --target file --width "${w}" --height "${h}"
28 |
--------------------------------------------------------------------------------
/script/04.file-ssd-dp.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright 2021 Xilinx Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # Run SSD-model AI inference on an H.264 input file and show the annotated
19 | # result on the DisplayPort monitor. Usage: 04.file-ssd-dp.sh <video-file> [width] [height]
20 | if [ $# -lt 1 ]; then
21 |     echo "Please give a path to the video file"
22 |     exit 1
23 | fi
24 | file=${1}
25 | w=${2:-"1920"} h=${3:-"1080"}
26 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
27 |
28 | ${DIR}/smartcam --file "${file}" --target dp --width "${w}" --height "${h}" -r 30 --aitask ssd
29 |
--------------------------------------------------------------------------------
/script/rtsp_server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright 2021 Xilinx Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | # Start smartcam in the background, streaming a bundled demo movie over RTSP on port 5000.
18 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
19 | ${DIR}/smartcam -f /usr/share/somapp/movies/AA2/AA2-park.nv12.30fps.1080p.h264 -W 1920 -H 1080 -r 30 -t rtsp -p 5000 -n &
20 |
--------------------------------------------------------------------------------
/script/smartcam-install.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 | #
3 | # Copyright 2021 Xilinx Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | import argparse
19 | import pathlib
20 | import os
21 | import sys
22 | import shutil
23 |
24 | def main(appname):
25 | note_dir="/opt/xilinx/share/notebooks/"+appname
26 | parser = argparse.ArgumentParser(prog=appname+'-install',
27 | formatter_class=argparse.RawDescriptionHelpFormatter,
28 | description='Script to copy {} Jupyter notebook to user directory'.format(appname))
29 | parser.add_argument('-d', '--dir', type=pathlib.Path, help='Install the Jupyter notebook to the specified directory.', default=os.path.join("/home/petalinux/notebooks", "./{}".format(appname)) )
30 | parser.add_argument('-f', '--force', action='store_true', help='Force to install the Jupyter notebook even if the destination directory exists.')
31 | args = parser.parse_args()
32 | destdir = os.path.abspath(args.dir)
33 |
34 | if os.path.exists(destdir):
35 | if os.path.isfile(destdir):
36 | sys.exit("Error: Destination directory {} is an existing file.".format(destdir))
37 | else:
38 | if not args.force:
39 | sys.exit("Error: Destination directory {} already exists, please use another dirname or use '--force' option.".format(destdir))
40 | else:
41 | print("Info: Destination directory "+destdir+" already exists, force removing it.")
42 | shutil.rmtree(destdir)
43 |
44 | ignore = shutil.ignore_patterns('.ipynb_checkpoints','*.pyc','*~')
45 | srcdir = os.path.abspath(note_dir)
46 |
47 | if os.path.isdir(srcdir):
48 | try:
49 | shutil.copytree(srcdir, destdir, ignore=ignore, symlinks=True)
50 | print("Info: Notebook files under {} are copied to {}.".format(srcdir, destdir))
51 | except BaseException as error:
52 | sys.exit("Error: An exception occurred: {}".format(error))
53 | else:
54 | sys.exit("Error: Predefined system notebook directory %s doesn't exist." % srcdir)
55 |
56 | if __name__ == "__main__":
57 | main("smartcam")
58 |
--------------------------------------------------------------------------------
/src/ivas_airender.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2021 Xilinx Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | #include <ivas/ivas_kernel.h> /* NOTE(review): the '<...>' header names were stripped from this dump (parsed as markup); reconstructed from usage below — verify against the upstream source */
18 | #include <gst/ivas/gstinferencemeta.h>
19 | #include <jansson.h>
20 | #include <chrono>
21 | #include <sstream>
22 | #include <math.h>
23 | #include <stdio.h>
24 | #include <stdlib.h>
25 | #include <string.h>
26 | #include <opencv2/opencv.hpp>
27 |
28 | #include "ivas_airender.hpp"
29 |
30 | int log_level = LOG_LEVEL_WARNING; /* global log verbosity; may be overridden by "debug_level" in the kernel config JSON */
31 |
32 | using namespace cv;
33 | using namespace std;
34 |
35 | #define MAX_CLASS_LEN 1024
36 | #define MAX_LABEL_LEN 1024
37 | #define MAX_ALLOWED_CLASS 20
38 | #define MAX_ALLOWED_LABELS 20
39 |
40 | struct color /* one draw color; fields are stored in blue, green, red order */
41 | {
42 | unsigned int blue;
43 | unsigned int green;
44 | unsigned int red;
45 | };
46 |
47 | struct ivass_xclassification /* one allowed class from the config: its name and draw color */
48 | {
49 | color class_color;
50 | char class_name[MAX_CLASS_LEN];
51 | };
52 |
53 | struct overlayframe_info /* per-frame working state: cv::Mat views wrapped over the input buffer planes */
54 | {
55 | IVASFrame *inframe;
56 | Mat image;
57 | Mat I420image;
58 | Mat NV12image;
59 | Mat lumaImg;
60 | Mat chromaImg;
61 | int y_offset; /* vertical label offset used for classification-only results */
62 | };
63 |
64 |
65 | using Clock = std::chrono::steady_clock;
66 |
67 | struct ivas_xoverlaypriv /* kernel-private config and state, filled from JSON in xlnx_kernel_init */
68 | {
69 | float font_size;
70 | unsigned int font;
71 | int line_thickness;
72 | int y_offset;
73 | color label_color;
74 | char label_filter[MAX_ALLOWED_LABELS][MAX_LABEL_LEN]; /* label parts to render, e.g. "class", "probability" */
75 | unsigned char label_filter_cnt;
76 | unsigned short classes_count; /* 0 means "draw every class" */
77 | ivass_xclassification class_list[MAX_ALLOWED_CLASS];
78 | struct overlayframe_info frameinfo;
79 | int drawfps; /* nonzero when SMARTCAM_SCREENFPS env var is set: draw FPS on frame */
80 | int fps_interv; /* recompute FPS every this many frames */
81 | double fps;
82 | int framecount;
83 | Clock::time_point startClk; /* time of first frame, basis of the FPS computation */
84 | };
85 |
86 |
87 |
88 | /* Return the index of cls_name in kpriv->class_list, or -1 when cls_name is NULL or not listed (i.e. it is filtered out). */
89 | int
90 | ivas_classification_is_allowed (char *cls_name, ivas_xoverlaypriv * kpriv)
91 | {
92 | unsigned int idx;
93 |
94 | if (cls_name == NULL)
95 | return -1;
96 |
97 | for (idx = 0;
98 | idx < sizeof (kpriv->class_list) / sizeof (kpriv->class_list[0]); idx++) { /* scans every slot; unused slots are zeroed by the calloc in init */
99 | if (!strcmp (cls_name, kpriv->class_list[idx].class_name)) {
100 | return idx;
101 | }
102 | }
103 | return -1;
104 | }
105 |
106 | /* Convert the given RGB color to its NV12 equivalents: a Y (luma) byte and a packed 16-bit UV (chroma) value, via a 2x2 BGR->I420 cvtColor round trip. */
107 | void
108 | convert_rgb_to_yuv_clrs (color clr, unsigned char *y, unsigned short *uv)
109 | {
110 | Mat YUVmat;
111 | Mat BGRmat (2, 2, CV_8UC3, Scalar (clr.red, clr.green, clr.blue));
112 | cvtColor (BGRmat, YUVmat, cv::COLOR_BGR2YUV_I420);
113 | *y = YUVmat.at < uchar > (0, 0);
114 | *uv = YUVmat.at < uchar > (2, 0) << 8 | YUVmat.at < uchar > (2, 1); /* pack the U and V bytes into one 16-bit value */
115 | return;
116 | }
117 |
118 | /* Compose the display label from the configured "label_filter" parts ("class" and/or "probability"); returns false when the classification carries no label text. */
119 | bool
120 | get_label_text (GstInferenceClassification * c, ivas_xoverlaypriv * kpriv,
121 | char *label_string)
122 | {
123 | unsigned char idx = 0, buffIdx = 0;
124 | if (!c->class_label || !strlen ((char *) c->class_label))
125 | return false;
126 |
127 | for (idx = 0; idx < kpriv->label_filter_cnt; idx++) {
128 | if (!strcmp (kpriv->label_filter[idx], "class")) {
129 | sprintf (label_string + buffIdx, "%s", (char *) c->class_label);
130 | buffIdx += strlen (label_string); /* total length so far — label_string is the start of the buffer */
131 | } else if (!strcmp (kpriv->label_filter[idx], "probability")) {
132 | sprintf (label_string + buffIdx, " : %.2f ", c->class_prob);
133 | buffIdx += strlen (label_string);
134 | }
135 | }
136 | return true;
137 | }
138 |
139 | static gboolean
140 | overlay_node_foreach (GNode * node, gpointer kpriv_ptr) /* GNode visitor: draws a bounding box and label for each classification of one prediction; always returns FALSE so traversal continues */
141 | {
142 | ivas_xoverlaypriv *kpriv = (ivas_xoverlaypriv *) kpriv_ptr;
143 | struct overlayframe_info *frameinfo = &(kpriv->frameinfo);
144 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "enter");
145 |
146 | GList *classes;
147 | GstInferenceClassification *classification;
148 | GstInferencePrediction *prediction = (GstInferencePrediction *) node->data;
149 |
150 | /* For each child, iterate through the different associated classes */
151 | for (classes = prediction->classifications;
152 | classes; classes = g_list_next (classes)) {
153 | classification = (GstInferenceClassification *) classes->data;
154 |
155 | int idx = ivas_classification_is_allowed ((char *)
156 | classification->class_label, kpriv);
157 | if (kpriv->classes_count && idx == -1) /* class filter configured and this class is not in it */
158 | continue;
159 |
160 | color clr;
161 | if (kpriv->classes_count) {
162 | clr = {
163 | kpriv->class_list[idx].class_color.blue,
164 | kpriv->class_list[idx].class_color.green,
165 | kpriv->class_list[idx].class_color.red};
166 | } else {
167 | /* If there are no classes specified, we will go with default blue */
168 | clr = {
169 | 255, 0, 0};
170 | }
171 |
172 | char label_string[MAX_LABEL_LEN];
173 | bool label_present;
174 | Size textsize;
175 | label_present = get_label_text (classification, kpriv, label_string);
176 |
177 | if (label_present) {
178 | int baseline;
179 | textsize = getTextSize (label_string, kpriv->font,
180 | kpriv->font_size, 1, &baseline);
181 | /* Get y offset to use in case of classification model (no bbox) */
182 | if ((prediction->bbox.height < 1) && (prediction->bbox.width < 1)) {
183 | if (kpriv->y_offset) {
184 | frameinfo->y_offset = kpriv->y_offset;
185 | } else {
186 | frameinfo->y_offset = (frameinfo->inframe->props.height * 0.10); /* default: 10% of frame height */
187 | }
188 | }
189 | }
190 |
191 | LOG_MESSAGE (LOG_LEVEL_INFO,
192 | "RESULT: (prediction node %ld) %s(%d) %d %d %d %d (%f)",
193 | prediction->prediction_id,
194 | label_present ? classification->class_label : NULL, /* NOTE(review): %s with NULL relies on a printf extension — confirm LOG_MESSAGE tolerates it */
195 | classification->class_id, prediction->bbox.x, prediction->bbox.y,
196 | prediction->bbox.width + prediction->bbox.x,
197 | prediction->bbox.height + prediction->bbox.y,
198 | classification->class_prob);
199 |
200 | /* Check whether the frame is NV12 or BGR and act accordingly */
201 | if (frameinfo->inframe->props.fmt == IVAS_VFMT_Y_UV8_420) {
202 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "Drawing rectangle for NV12 image");
203 | unsigned char yScalar;
204 | unsigned short uvScalar;
205 | convert_rgb_to_yuv_clrs (clr, &yScalar, &uvScalar);
206 | /* Draw rectangle on the Y and UV planes; coordinates rounded down to even values */
207 | int new_xmin = floor (prediction->bbox.x / 2) * 2;
208 | int new_ymin = floor (prediction->bbox.y / 2) * 2;
209 | int new_xmax =
210 | floor ((prediction->bbox.width + prediction->bbox.x) / 2) * 2;
211 | int new_ymax =
212 | floor ((prediction->bbox.height + prediction->bbox.y) / 2) * 2;
213 | Size test_rect (new_xmax - new_xmin, new_ymax - new_ymin); /* NOTE(review): unused */
214 |
215 | if (!(!prediction->bbox.x && !prediction->bbox.y)) { /* skip boxes anchored at the origin */
216 | rectangle (frameinfo->lumaImg, Point (new_xmin,
217 | new_ymin), Point (new_xmax,
218 | new_ymax), Scalar (yScalar), kpriv->line_thickness, 1, 0);
219 | rectangle (frameinfo->chromaImg, Point (new_xmin / 2,
220 | new_ymin / 2), Point (new_xmax / 2,
221 | new_ymax / 2), Scalar (uvScalar), kpriv->line_thickness, 1, 0);
222 | }
223 |
224 | if (label_present) {
225 | /* Draw filled rectangle for labelling, both on y and uv plane */
226 | rectangle (frameinfo->lumaImg, Rect (Point (new_xmin,
227 | new_ymin - textsize.height), textsize),
228 | Scalar (yScalar), FILLED, 1, 0);
229 | textsize.height /= 2; /* chroma plane is half resolution */
230 | textsize.width /= 2;
231 | rectangle (frameinfo->chromaImg, Rect (Point (new_xmin / 2,
232 | new_ymin / 2 - textsize.height), textsize),
233 | Scalar (uvScalar), FILLED, 1, 0);
234 |
235 | /* Draw label text on the filled rectangle */
236 | convert_rgb_to_yuv_clrs (kpriv->label_color, &yScalar, &uvScalar);
237 | putText (frameinfo->lumaImg, label_string, cv::Point (new_xmin,
238 | new_ymin + frameinfo->y_offset), kpriv->font, kpriv->font_size,
239 | Scalar (yScalar), 1, 1);
240 | putText (frameinfo->chromaImg, label_string, cv::Point (new_xmin / 2,
241 | new_ymin / 2 + frameinfo->y_offset / 2), kpriv->font,
242 | kpriv->font_size / 2, Scalar (uvScalar), 1, 1);
243 | }
244 | } else if (frameinfo->inframe->props.fmt == IVAS_VFMT_BGR8) {
245 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "Drawing rectangle for BGR image");
246 |
247 | if (!(!prediction->bbox.x && !prediction->bbox.y)) {
248 | /* Draw rectangle over the detected object */
249 | rectangle (frameinfo->image, Point (prediction->bbox.x,
250 | prediction->bbox.y),
251 | Point (prediction->bbox.width + prediction->bbox.x,
252 | prediction->bbox.height + prediction->bbox.y), Scalar (clr.blue,
253 | clr.green, clr.red), kpriv->line_thickness, 1, 0);
254 | }
255 |
256 | if (label_present) {
257 | /* Draw filled rectangle for label */
258 | rectangle (frameinfo->image, Rect (Point (prediction->bbox.x,
259 | prediction->bbox.y - textsize.height), textsize),
260 | Scalar (clr.blue, clr.green, clr.red), FILLED, 1, 0);
261 |
262 | /* Draw label text on the filled rectangle */
263 | putText (frameinfo->image, label_string,
264 | cv::Point (prediction->bbox.x,
265 | prediction->bbox.y + frameinfo->y_offset), kpriv->font,
266 | kpriv->font_size, Scalar (kpriv->label_color.blue,
267 | kpriv->label_color.green, kpriv->label_color.red), 1, 1);
268 | }
269 | }
270 | }
271 |
272 |
273 |
274 | return FALSE;
275 | }
276 |
277 | static void
278 | fps_overlay(gpointer kpriv_ptr) /* Overlay the measured framerate on the frame; no-op unless SMARTCAM_SCREENFPS was set at init */
279 | {
280 | ivas_xoverlaypriv *kpriv = (ivas_xoverlaypriv *) kpriv_ptr;
281 | if (!kpriv->drawfps)
282 | {
283 | return ;
284 | }
285 | struct overlayframe_info *frameinfo = &(kpriv->frameinfo);
286 |
287 | if (kpriv->framecount == 0)
288 | {
289 | kpriv->startClk = Clock::now(); /* first frame: start the measurement clock */
290 | }
291 | else
292 | {
293 | if (kpriv->framecount%kpriv->fps_interv == 0) /* refresh the FPS value every fps_interv frames */
294 | {
295 |
296 | Clock::time_point nowClk = Clock::now();
297 | int duration = (std::chrono::duration_cast<std::chrono::milliseconds>(nowClk - kpriv->startClk)).count(); /* ms since first frame; template argument restored (stripped in the dump) */
298 | kpriv->fps = duration > 0 ? (kpriv->framecount * 1e3 / duration) : kpriv->fps; /* guard against divide-by-zero on sub-millisecond intervals */
299 | }
300 |
301 | color clr = {255, 0, 0}; /* blue — struct color is ordered blue, green, red */
302 | int new_xmin = 50;
303 | int new_ymin = 50;
304 |
305 | std::ostringstream oss;
306 | oss << "Framerate:" << kpriv->fps << " FPS";
307 |
308 |
309 | if (frameinfo->inframe->props.fmt == IVAS_VFMT_Y_UV8_420) {
310 | unsigned char yScalar;
311 | unsigned short uvScalar;
312 | convert_rgb_to_yuv_clrs (clr, &yScalar, &uvScalar);
313 | {
314 | /* Draw the FPS text in the configured label color on both NV12 planes */
315 | convert_rgb_to_yuv_clrs (kpriv->label_color, &yScalar, &uvScalar);
316 | putText (frameinfo->lumaImg, oss.str(), cv::Point (new_xmin,
317 | new_ymin), kpriv->font, kpriv->font_size,
318 | Scalar (yScalar), 1, 1);
319 | putText (frameinfo->chromaImg, oss.str(), cv::Point (new_xmin / 2,
320 | new_ymin / 2), kpriv->font,
321 | kpriv->font_size / 2, Scalar (uvScalar), 1, 1);
322 | }
323 | } else if (frameinfo->inframe->props.fmt == IVAS_VFMT_BGR8) {
324 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "Drawing rectangle for BGR image");
325 | {
326 | /* Draw the FPS text directly on the BGR image */
327 | putText (frameinfo->image, oss.str(),
328 | cv::Point (new_xmin, new_ymin), kpriv->font,
329 | kpriv->font_size, Scalar (clr.blue,
330 | clr.green, clr.red), 1, 1);
331 | }
332 | }
333 | }
334 | kpriv->framecount++;
335 |
336 | return ;
337 | }
338 |
339 | extern "C"
340 | {
341 | int32_t xlnx_kernel_init (IVASKernel * handle) /* Parse the kernel config JSON into a freshly allocated ivas_xoverlaypriv; 0 on success, -1 on malformed config */
342 | {
343 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "enter");
344 |
345 | ivas_xoverlaypriv *kpriv =
346 | (ivas_xoverlaypriv *) calloc (1, sizeof (ivas_xoverlaypriv));
347 | if (!kpriv) return -1; /* allocation failure was previously unchecked */
348 | json_t *jconfig = handle->kernel_config;
349 | json_t *val, *karray = NULL, *classes = NULL;
350 |
351 | /* Initialize config params with default values */
352 | log_level = LOG_LEVEL_WARNING;
353 | kpriv->font_size = 0.5;
354 | kpriv->font = 0;
355 | kpriv->line_thickness = 1;
356 | kpriv->y_offset = 0;
357 | kpriv->label_color = {0, 0, 0};
358 | strcpy(kpriv->label_filter[0], "class");
359 | strcpy(kpriv->label_filter[1], "probability");
360 | kpriv->label_filter_cnt = 2;
361 | kpriv->classes_count = 0;
362 | kpriv->framecount = 0;
363 |
364 | char* env = getenv("SMARTCAM_SCREENFPS"); /* opt-in on-screen FPS overlay */
365 | if (env)
366 | {
367 | kpriv->drawfps = 1;
368 | }
369 | else
370 | {
371 | kpriv->drawfps = 0;
372 | }
373 |
374 | val = json_object_get (jconfig, "fps_interval");
375 | if (!val || !json_is_integer (val))
376 | kpriv->fps_interv = 1;
377 | else
378 | kpriv->fps_interv = json_integer_value (val);
379 |
380 | val = json_object_get (jconfig, "debug_level");
381 | if (!val || !json_is_integer (val))
382 | log_level = LOG_LEVEL_WARNING;
383 | else
384 | log_level = json_integer_value (val);
385 |
386 | val = json_object_get (jconfig, "font_size");
387 | if (!val || !json_is_number (val)) /* accept real numbers — the default itself is fractional (0.5) */
388 | kpriv->font_size = 0.5;
389 | else
390 | kpriv->font_size = json_number_value (val);
391 |
392 | val = json_object_get (jconfig, "font");
393 | if (!val || !json_is_integer (val))
394 | kpriv->font = 0;
395 | else
396 | kpriv->font = json_integer_value (val);
397 |
398 | val = json_object_get (jconfig, "thickness");
399 | if (!val || !json_is_integer (val))
400 | kpriv->line_thickness = 1;
401 | else
402 | kpriv->line_thickness = json_integer_value (val);
403 |
404 | val = json_object_get (jconfig, "y_offset");
405 | if (!val || !json_is_integer (val))
406 | kpriv->y_offset = 0;
407 | else
408 | kpriv->y_offset = json_integer_value (val);
409 |
410 | /* get label color array */
411 | karray = json_object_get (jconfig, "label_color");
412 | if (!karray)
413 | {
414 | LOG_MESSAGE (LOG_LEVEL_ERROR, "failed to find label_color");
415 | free (kpriv); return -1; /* was leaked on this and all error paths below */
416 | } else
417 | {
418 | kpriv->label_color.blue =
419 | json_integer_value (json_object_get (karray, "blue"));
420 | kpriv->label_color.green =
421 | json_integer_value (json_object_get (karray, "green"));
422 | kpriv->label_color.red =
423 | json_integer_value (json_object_get (karray, "red"));
424 | }
425 |
426 | karray = json_object_get (jconfig, "label_filter");
427 |
428 | if (!json_is_array (karray)) {
429 | LOG_MESSAGE (LOG_LEVEL_ERROR, "label_filter not found in the config\n");
430 | free (kpriv); return -1;
431 | }
432 | kpriv->label_filter_cnt = 0;
433 | for (unsigned int index = 0; index < json_array_size (karray) && index < MAX_ALLOWED_LABELS; index++) { /* clamp to the fixed array size */
434 | strncpy (kpriv->label_filter[index],
435 | json_string_value (json_array_get (karray, index)), MAX_LABEL_LEN - 1); /* bounded copy (was strcpy) */
436 | kpriv->label_filter_cnt++;
437 | }
438 |
439 | /* get classes array */
440 | karray = json_object_get (jconfig, "classes");
441 | if (!karray) {
442 | LOG_MESSAGE (LOG_LEVEL_ERROR, "failed to find key labels");
443 | free (kpriv); return -1;
444 | }
445 |
446 | if (!json_is_array (karray)) {
447 | LOG_MESSAGE (LOG_LEVEL_ERROR, "labels key is not of array type");
448 | free (kpriv); return -1;
449 | }
450 | kpriv->classes_count = json_array_size (karray) > MAX_ALLOWED_CLASS ? MAX_ALLOWED_CLASS : json_array_size (karray); /* clamp to class_list capacity */
451 | for (unsigned int index = 0; index < kpriv->classes_count; index++) {
452 | classes = json_array_get (karray, index);
453 | if (!classes) {
454 | LOG_MESSAGE (LOG_LEVEL_ERROR, "failed to get class object");
455 | free (kpriv); return -1;
456 | }
457 |
458 | val = json_object_get (classes, "name");
459 | if (!json_is_string (val)) {
460 | LOG_MESSAGE (LOG_LEVEL_ERROR, "name is not found for array %d", index);
461 | free (kpriv); return -1;
462 | } else {
463 | strncpy (kpriv->class_list[index].class_name,
464 | (char *) json_string_value (val), MAX_CLASS_LEN - 1);
465 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "name %s",
466 | kpriv->class_list[index].class_name);
467 | }
468 |
469 | val = json_object_get (classes, "green");
470 | if (!val || !json_is_integer (val))
471 | kpriv->class_list[index].class_color.green = 0;
472 | else
473 | kpriv->class_list[index].class_color.green = json_integer_value (val);
474 |
475 | val = json_object_get (classes, "blue");
476 | if (!val || !json_is_integer (val))
477 | kpriv->class_list[index].class_color.blue = 0;
478 | else
479 | kpriv->class_list[index].class_color.blue = json_integer_value (val);
480 |
481 | val = json_object_get (classes, "red");
482 | if (!val || !json_is_integer (val))
483 | kpriv->class_list[index].class_color.red = 0;
484 | else
485 | kpriv->class_list[index].class_color.red = json_integer_value (val);
486 | }
487 |
488 | handle->kernel_priv = (void *) kpriv;
489 | return 0;
490 | }
491 |
492 | uint32_t xlnx_kernel_deinit (IVASKernel * handle) /* Release the private state allocated in xlnx_kernel_init */
493 | {
494 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "enter");
495 | ivas_xoverlaypriv *kpriv = (ivas_xoverlaypriv *) handle->kernel_priv;
496 |
497 | if (kpriv)
498 | free (kpriv);
499 | handle->kernel_priv = NULL; /* avoid leaving a dangling pointer on the handle */
500 | return 0;
501 | }
502 |
503 |
504 | uint32_t xlnx_kernel_start (IVASKernel * handle, int start, /* Per-frame entry point: wraps the input planes in cv::Mat views, then draws all inference results (and optionally FPS) in place */
505 | IVASFrame * input[MAX_NUM_OBJECT], IVASFrame * output[MAX_NUM_OBJECT]) /* 'start' and 'output' are unused here: this kernel draws in place on input[0] */
506 | {
507 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "enter");
508 | GstInferenceMeta *infer_meta = NULL;
509 | char *pstr; /* printable prediction tree, debug only */
510 |
511 | ivas_xoverlaypriv *kpriv = (ivas_xoverlaypriv *) handle->kernel_priv;
512 | struct overlayframe_info *frameinfo = &(kpriv->frameinfo);
513 | frameinfo->y_offset = 0; /* reset per-frame label offset */
514 | frameinfo->inframe = input[0];
515 | char *indata = (char *) frameinfo->inframe->vaddr[0];
516 | char *lumaBuf = (char *) frameinfo->inframe->vaddr[0]; /* NV12 plane 0: Y */
517 | char *chromaBuf = (char *) frameinfo->inframe->vaddr[1]; /* NV12 plane 1: interleaved UV */
518 | infer_meta = ((GstInferenceMeta *) gst_buffer_get_meta ((GstBuffer *)
519 | frameinfo->inframe->app_priv, gst_inference_meta_api_get_type ()));
520 | if (infer_meta == NULL) {
521 | LOG_MESSAGE (LOG_LEVEL_ERROR,
522 | "ivas meta data is not available for postdpu");
523 | return false; /* NOTE(review): false == 0, indistinguishable from success to the caller — confirm intended */
524 | } else {
525 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "ivas_mata ptr %p", infer_meta);
526 | }
527 |
528 | if (frameinfo->inframe->props.fmt == IVAS_VFMT_Y_UV8_420) {
529 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "Input frame is in NV12 format\n");
530 | frameinfo->lumaImg.create (input[0]->props.height, input[0]->props.stride,
531 | CV_8UC1);
532 | frameinfo->lumaImg.data = (unsigned char *) lumaBuf; /* Mat view over the buffer — no copy */
533 | frameinfo->chromaImg.create (input[0]->props.height / 2,
534 | input[0]->props.stride / 2, CV_16UC1); /* one 16-bit element per U+V pair */
535 | frameinfo->chromaImg.data = (unsigned char *) chromaBuf;
536 | } else if (frameinfo->inframe->props.fmt == IVAS_VFMT_BGR8) {
537 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "Input frame is in BGR format\n");
538 | frameinfo->image.create (input[0]->props.height,
539 | input[0]->props.stride / 3, CV_8UC3);
540 | frameinfo->image.data = (unsigned char *) indata;
541 | } else {
542 | LOG_MESSAGE (LOG_LEVEL_WARNING, "Unsupported color format\n");
543 | return 0;
544 | }
545 |
546 |
547 | /* Print the entire prediction tree */
548 | pstr = gst_inference_prediction_to_string (infer_meta->prediction);
549 | LOG_MESSAGE (LOG_LEVEL_DEBUG, "Prediction tree: \n%s", pstr);
550 | free (pstr);
551 |
552 | g_node_traverse (infer_meta->prediction->predictions, G_PRE_ORDER,
553 | G_TRAVERSE_ALL, -1, overlay_node_foreach, kpriv); /* draw every prediction in the tree */
554 |
555 | fps_overlay(kpriv);
556 | return 0;
557 | }
558 |
559 |
/* Post-run hook: the overlay work completes synchronously inside
 * xlnx_kernel_start, so there is nothing to wait for here. */
int32_t xlnx_kernel_done (IVASKernel * handle)
{
  LOG_MESSAGE (LOG_LEVEL_DEBUG, "enter");
  return 0;
}
565 | }
566 |
--------------------------------------------------------------------------------
/src/ivas_airender.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2021 Xilinx Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | #ifndef __IVAS_AIRENDER_H__
18 | #define __IVAS_AIRENDER_H__
19 |
/* Log verbosity levels used by LOG_MESSAGE; lower value = higher severity.
 * A message is printed when its level is <= the in-scope `log_level`. */
enum
{
  LOG_LEVEL_ERROR,    /* unrecoverable problems */
  LOG_LEVEL_WARNING,  /* unexpected but recoverable conditions */
  LOG_LEVEL_INFO,     /* general progress information */
  LOG_LEVEL_DEBUG     /* verbose per-frame tracing */
};
27 |
/* Basename of the current translation unit's path, for compact log lines. */
#define __FILENAME__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)

/* Print "[file func:line] LEVEL: message" when `level` is at or below the
 * `log_level` variable in scope at the expansion site.
 * NOTE(review): requires the including file to define `log_level` — confirm.
 *
 * Fixes vs. the previous version:
 *  - the macro expanded to `{ do{...}while(0); }`, i.e. a compound statement
 *    with an embedded semicolon, which breaks `if (x) LOG_MESSAGE(...); else`.
 *    It is now a plain do/while(0) so the caller's semicolon completes it.
 *  - `str` had no default and was read uninitialized for an out-of-range
 *    level; it now defaults to "UNKNOWN". */
#define LOG_MESSAGE(level, ...) \
  do {\
    const char *str = "UNKNOWN"; \
    if (level == LOG_LEVEL_ERROR)\
      str = "ERROR";\
    else if (level == LOG_LEVEL_WARNING)\
      str = "WARNING";\
    else if (level == LOG_LEVEL_INFO)\
      str = "INFO";\
    else if (level == LOG_LEVEL_DEBUG)\
      str = "DEBUG";\
    if (level <= log_level) {\
      printf("[%s %s:%d] %s: ",__FILENAME__, __func__, __LINE__, str);\
      printf(__VA_ARGS__);\
      printf("\n");\
    }\
  } while (0)
47 |
48 | #endif /* __IVAS_AIRENDER_H__ */
49 |
--------------------------------------------------------------------------------
/src/ivas_xpp_pipeline.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2021 Xilinx, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | #include
18 | #include
19 | #include
20 |
/* Private state for the resize/normalize pre-processing kernel.  The six
 * per-channel values are parsed from the JSON config in xlnx_kernel_init
 * and mirrored into the `params` device buffer consumed by the IP. */
typedef struct _kern_priv
{
    float mean_r;       /* per-channel mean, subtracted by the IP */
    float mean_g;
    float mean_b;
    float scale_r;      /* per-channel scale, applied by the IP */
    float scale_g;
    float scale_b;
    IVASFrame *params;  /* device buffer holding the six floats above */
} ResizeKernelPriv;
31 |
32 | int32_t
33 | xlnx_kernel_start(IVASKernel *handle, int start, IVASFrame *input[MAX_NUM_OBJECT], IVASFrame *output[MAX_NUM_OBJECT]);
34 | int32_t xlnx_kernel_done(IVASKernel *handle);
35 | int32_t xlnx_kernel_init(IVASKernel *handle);
36 | uint32_t xlnx_kernel_deinit(IVASKernel *handle);
37 |
38 | uint32_t xlnx_kernel_deinit(IVASKernel *handle)
39 | {
40 | ResizeKernelPriv *kernel_priv;
41 | kernel_priv = (ResizeKernelPriv *)handle->kernel_priv;
42 | ivas_free_buffer (handle, kernel_priv->params);
43 | free(kernel_priv);
44 | return 0;
45 | }
46 |
47 | int32_t xlnx_kernel_init(IVASKernel *handle)
48 | {
49 | json_t *jconfig = handle->kernel_config;
50 | json_t *val; /* kernel config from app */
51 | ResizeKernelPriv *kernel_priv;
52 | float *pPtr;
53 |
54 | kernel_priv = (ResizeKernelPriv *)calloc(1, sizeof(ResizeKernelPriv));
55 | if (!kernel_priv) {
56 | printf("Error: Unable to allocate resize kernel memory\n");
57 | }
58 |
59 | /* parse config */
60 | val = json_object_get(jconfig, "mean_r");
61 | if (!val || !json_is_number(val))
62 | kernel_priv->mean_r = 0;
63 | else {
64 | kernel_priv->mean_r = json_number_value(val);
65 | }
66 | printf("Resize: mean_r=%f\n", kernel_priv->mean_r);
67 |
68 | val = json_object_get(jconfig, "mean_g");
69 | if (!val || !json_is_number(val))
70 | kernel_priv->mean_g = 0;
71 | else {
72 | kernel_priv->mean_g = json_number_value(val);
73 | }
74 | printf("Resize: mean_g=%f\n", kernel_priv->mean_g);
75 |
76 | val = json_object_get(jconfig, "mean_b");
77 | if (!val || !json_is_number(val))
78 | kernel_priv->mean_b = 0;
79 | else {
80 | kernel_priv->mean_b = json_number_value(val);
81 | }
82 | printf("Resize: mean_b=%f\n", kernel_priv->mean_b);
83 |
84 | /* parse config */
85 | val = json_object_get(jconfig, "scale_r");
86 | if (!val || !json_is_number(val))
87 | kernel_priv->scale_r = 1;
88 | else
89 | kernel_priv->scale_r = json_number_value(val);
90 | printf("Resize: scale_r=%f\n", kernel_priv->scale_r);
91 |
92 | val = json_object_get(jconfig, "scale_g");
93 | if (!val || !json_is_number(val))
94 | kernel_priv->scale_g = 1;
95 | else
96 | kernel_priv->scale_g = json_number_value(val);
97 | printf("Resize: scale_g=%f\n", kernel_priv->scale_g);
98 |
99 | val = json_object_get(jconfig, "scale_b");
100 | if (!val || !json_is_number(val))
101 | kernel_priv->scale_b = 1;
102 | else
103 | kernel_priv->scale_b = json_number_value(val);
104 | printf("Resize: scale_b=%f\n", kernel_priv->scale_b);
105 |
106 | kernel_priv->params = ivas_alloc_buffer (handle, 6*(sizeof(float)), IVAS_INTERNAL_MEMORY, NULL);
107 | pPtr = kernel_priv->params->vaddr[0];
108 | pPtr[0] = (float)kernel_priv->mean_r;
109 | pPtr[1] = (float)kernel_priv->mean_g;
110 | pPtr[2] = (float)kernel_priv->mean_b;
111 | pPtr[3] = (float)kernel_priv->scale_r;
112 | pPtr[4] = (float)kernel_priv->scale_g;
113 | pPtr[5] = (float)kernel_priv->scale_b;
114 |
115 | handle->kernel_priv = (void *)kernel_priv;
116 |
117 | return 0;
118 | }
119 |
/* Program the pre-processing IP's AXI-lite registers for one frame and
 * kick the run.  The offsets match the pp_pipeline_accel kernel's register
 * map; the start word is written last. Completion is polled in
 * xlnx_kernel_done(). Returns 0 always. */
int32_t xlnx_kernel_start(IVASKernel *handle, int start, IVASFrame *input[MAX_NUM_OBJECT], IVASFrame *output[MAX_NUM_OBJECT])
{
    ResizeKernelPriv *kernel_priv;
    kernel_priv = (ResizeKernelPriv *)handle->kernel_priv;

    ivas_register_write(handle, &(input[0]->props.width), sizeof(uint32_t), 0x40); /* In width */
    ivas_register_write(handle, &(input[0]->props.height), sizeof(uint32_t), 0x48); /* In height */
    ivas_register_write(handle, &(input[0]->props.stride), sizeof(uint32_t), 0x50); /* In stride */

    ivas_register_write(handle, &(output[0]->props.width), sizeof(uint32_t), 0x58); /* Out width */
    ivas_register_write(handle, &(output[0]->props.height), sizeof(uint32_t), 0x60); /* Out height */
    /* NOTE(review): the out-stride register is written with props.width, not
     * props.stride — presumably the output buffer is packed; confirm the IP's
     * expectation before "fixing" this. */
    ivas_register_write(handle, &(output[0]->props.width), sizeof(uint32_t), 0x68); /* Out stride */

    ivas_register_write(handle, &(input[0]->paddr[0]), sizeof(uint64_t), 0x10); /* Y Input */
    ivas_register_write(handle, &(input[0]->paddr[1]), sizeof(uint64_t), 0x1C); /* UV Input */
    ivas_register_write(handle, &(output[0]->paddr[0]), sizeof(uint64_t), 0x28); /* Output */
    ivas_register_write(handle, &(kernel_priv->params->paddr[0]), sizeof(uint64_t), 0x34); /* Params */

    ivas_register_write(handle, &start, sizeof(uint32_t), 0x0); /* start */
    return 0;
}
141 |
142 | int32_t xlnx_kernel_done(IVASKernel *handle)
143 | {
144 | uint32_t val = 0, count = 0;
145 | do {
146 | ivas_register_read(handle, &val, sizeof(uint32_t), 0x0); /* start */
147 | count++;
148 | if (count > 1000000) {
149 | printf("ERROR: kernel done wait TIME OUT !!\n");
150 | return 0;
151 | }
152 | } while (!(0x4 & val));
153 | return 1;
154 | }
155 |
--------------------------------------------------------------------------------
/src/kernel_boundingbox.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/usr/lib/dpu.xclbin",
3 | "ivas-library-repo": "/usr/local/lib/ivas",
4 | "element-mode":"inplace",
5 | "kernels" :[
6 | {
7 | "library-name":"libivas_xboundingbox.so",
8 | "config": {
9 | "font_size" : 0.5,
10 | "font" : 3,
11 | "thickness" : 2,
12 | "debug_level" : 4,
13 | "label_color" : { "blue" : 0, "green" : 0, "red" : 0 },
14 | "y_offset" : 0,
15 | "label_filter" : [ "class", "probability" ],
16 | "classes" : [
17 | {
18 | "name" : "car",
19 | "blue" : 255,
20 | "green" : 0,
21 | "red" : 0
22 | },
23 | {
24 | "name" : "person",
25 | "blue" : 0,
26 | "green" : 255,
27 | "red" : 0
28 | },
29 | {
30 | "name" : "bus",
31 | "blue" : 0,
32 | "green" : 0,
33 | "red" : 255
34 | },
35 | {
36 | "name" : "bicycle",
37 | "blue" : 0,
38 | "green" : 0,
39 | "red" : 255
40 | }
41 | ]
42 | }
43 | }
44 | ]
45 | }
46 |
--------------------------------------------------------------------------------
/src/kernel_resize_bgr.json:
--------------------------------------------------------------------------------
1 | {
2 | "xclbin-location":"/media/sd-mmcblk1p1/dpu.xclbin",
3 | "ivas-library-repo": "./",
4 | "kernels" :[
5 | {
6 | "kernel-name":"pp_pipeline_accel:pp_pipeline_accel_1",
7 | "library-name":"libivas_xresize_bgr.so",
8 | "config": {
9 | "mean_value": 128,
10 | "scale_value": 1
11 | }
12 | }
13 | ]
14 | }
15 |
--------------------------------------------------------------------------------
/src/main.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2021 Xilinx Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | #include
17 | #include
18 | #include
19 | #include
20 | #include
21 | #include
22 | #include
23 | #include
24 | #include
25 | #include
26 | #include
27 | #include
28 |
#define DEFAULT_RTSP_PORT "554"


/* RTSP listening port; overridden with -p. */
static char *port = (char *) DEFAULT_RTSP_PORT;
/* Hint appended to most firmware-related error messages. */
static char *msgFirmware = (char *)"Please make sure that the HW accelerator firmware is loaded via xmutil loadapp kv260-smartcam.\n";

static gchar* filename = NULL;               /* -f: input h26x file path */
static gchar* infileType = (gchar*)"h264";   /* -i: input codec */
static gchar* outMediaType = (gchar*)"h264"; /* -o: output codec */
static gchar* target = (gchar*)"dp";         /* -t: dp | rtsp | file */
static gchar* aitask = (gchar*)"facedetect"; /* -a: AI model config dir */

/* VCU encoder tuning (used by the rtsp/file targets). */
static gchar* controlRate = (gchar*)"low-latency";
static gchar* targetBitrate = (gchar*)"3000";
static gchar* gopLength = (gchar*)"60";

static gchar* profile = NULL;
static gchar* level = NULL;
static gchar* tier = NULL;

/* Raw "param1=val1, param2=val2" string forwarded verbatim to the encoder. */
static gchar* encodeEnhancedParam = NULL;

static gint fr = 30;                  /* input framerate */
static gboolean mipi = FALSE;         /* -m: use MIPI camera */
static std::string mipidev("");       /* resolved /dev/mediaN for MIPI */
static gint usb = -2;                 /* -u: media id; -2 means "not given" */
static std::string usbvideo("");      /* resolved /dev/videoX for USB */
static gint w = 1920;
static gint h = 1080;
static gboolean nodet = FALSE;        /* skip AI inference */
static gboolean audio = FALSE;        /* mux I2S audio into the RTSP stream */
static gboolean reportFps = FALSE;    /* insert a perf element */
static gboolean screenfps = FALSE;    /* draw fps on the output frames */
static gboolean roiOff = FALSE;       /* disable ROI-driven QP adjustment */
/* Command-line option table consumed by g_option_context_parse(). */
static GOptionEntry entries[] =
{
  { "mipi", 'm', 0, G_OPTION_ARG_NONE, &mipi, "use MIPI camera as input source, auto detect, fail if no mipi connected", ""},
  { "usb", 'u', 0, G_OPTION_ARG_INT, &usb, "usb camera media device id, e.g. 0 for /dev/media0", "media ID"},
  { "file", 'f', 0, G_OPTION_ARG_FILENAME, &filename, "location of h26x file as input", "file path"},
  { "infile-type", 'i', 0, G_OPTION_ARG_STRING, &infileType, "input file type: [h264 | h265]", "h264"},
  { "width", 'W', 0, G_OPTION_ARG_INT, &w, "resolution w of the input", "1920"},
  { "height", 'H', 0, G_OPTION_ARG_INT, &h, "resolution h of the input", "1080"},
  { "framerate", 'r', 0, G_OPTION_ARG_INT, &fr, "framerate of the input", "30"},

  { "target", 't', 0, G_OPTION_ARG_STRING, &target, "[dp|rtsp|file]", "dp"},
  { "outmedia-type", 'o', 0, G_OPTION_ARG_STRING, &outMediaType, "output file type: [h264 | h265]", "h264"},
  { "port", 'p', 0, G_OPTION_ARG_STRING, &port,
      "Port to listen on (default: " DEFAULT_RTSP_PORT ")", DEFAULT_RTSP_PORT},

  { "aitask", 'a', 0, G_OPTION_ARG_STRING, &aitask, "select AI task to be run: [facedetect|ssd|refinedet]" },
  { "nodet", 'n', 0, G_OPTION_ARG_NONE, &nodet, "no AI inference", NULL },
  { "audio", 'A', 0, G_OPTION_ARG_NONE, &audio, "RTSP with I2S audio", NULL },
  { "report", 'R', 0, G_OPTION_ARG_NONE, &reportFps, "report fps", NULL },
  { "screenfps", 's', 0, G_OPTION_ARG_NONE, &screenfps, "display fps on screen, notice this will cause performance degradation", NULL },
  { "ROI-off", 0, 0, G_OPTION_ARG_NONE, &roiOff, "turn off ROI", NULL },

  { "control-rate", 0, 0, G_OPTION_ARG_STRING, &controlRate, "Encoder parameter control-rate", "low-latency" },
  { "target-bitrate", 0, 0, G_OPTION_ARG_STRING, &targetBitrate, "Encoder parameter target-bitrate", targetBitrate},
  { "gop-length", 0, 0, G_OPTION_ARG_STRING, &gopLength, "Encoder parameter gop-length", "60"},

  { "profile", 0, 0, G_OPTION_ARG_STRING, &profile, "Encoder parameter profile.", NULL },
  { "level", 0, 0, G_OPTION_ARG_STRING, &level, "Encoder parameter level", NULL },
  { "tier", 0, 0, G_OPTION_ARG_STRING, &tier, "Encoder parameter tier", NULL },

  { "encodeEnhancedParam", 0, 0, G_OPTION_ARG_STRING, &encodeEnhancedParam, "String for fully customizing the encoder in the form \"param1=val1, param2=val2,...\", where paramn is the name of the encoder parameter", NULL },

  { NULL }
};
97 |
/* GStreamer bus watch: logs INFO and ERROR messages, quits the main loop
 * on end-of-stream.  `data` is the GMainLoop passed to gst_bus_add_watch.
 * Returning TRUE keeps the watch installed. */
static gboolean
my_bus_callback (GstBus * bus, GstMessage * message, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (message)) {
    case GST_MESSAGE_INFO:{
      GError *err;
      gchar *debug;
      gst_message_parse_info (message, &err, &debug);
      g_print ("Info: %s\n", debug);
      g_free(debug);
      g_error_free(err);
      break;
    }
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      GError *err;
      gchar *debug;
      gst_message_parse_error (message, &err, &debug);
      /* NOTE(review): prints only the debug string; err->message (the primary
       * error description) is freed without being shown — confirm intent.
       * Also, the loop is not quit on error. */
      g_printerr ("Error: %s\n", debug);
      g_free(debug);
      g_error_free(err);
      break;
    }
    default:
      /* unhandled message */
      break;
  }

  return TRUE;
}
132 |
133 |
134 |
135 |
136 |
/* Run a shell command and capture its entire stdout as a string.
 * (Restored the template arguments that were stripped from this dump:
 * std::array<char, 128> and std::unique_ptr<FILE, ...>.)
 *
 * cmd : shell command line, run via popen("r").
 * Returns the command's stdout, trailing newline included.
 * Throws std::runtime_error when the pipe cannot be opened. */
static std::string exec(const char* cmd) {
    std::array<char, 128> buffer;
    std::string result;
    /* unique_ptr with pclose as deleter guarantees the pipe is closed on
     * every path, including the throw below. */
    std::unique_ptr<FILE, decltype(&pclose)> pipe(popen(cmd, "r"), pclose);
    if (!pipe) {
        throw std::runtime_error("popen() failed!");
    }
    while (fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr) {
        result += buffer.data();
    }
    return result;
}
149 |
150 | static std::vector GetIp()
151 | {
152 | std::string s = exec("ifconfig | grep 'inet ' | sed 's/.*inet *\\([^ ]*\\).*/\\1/'");
153 |
154 | std::vector rarray;
155 | std::size_t pos;
156 | while ((pos = s.find("\n")) != std::string::npos) {
157 | std::string token = s.substr(0, pos);
158 | if (token != "127.0.0.1") {
159 | rarray.push_back(token);
160 | }
161 | s.erase(0, pos + std::string("\n").length());
162 | }
163 |
164 | return rarray;
165 | }
166 |
167 |
168 | static std::string FindMIPIDev()
169 | {
170 | glob_t globbuf;
171 |
172 | std::string dev("");
173 | glob("/dev/media*", 0, NULL, &globbuf);
174 | for (int i = 0; i < globbuf.gl_pathc; i++)
175 | {
176 | std::ostringstream cmd;
177 | cmd << "media-ctl -d " << globbuf.gl_pathv[i] << " -p | grep driver | grep xilinx-video | wc -l";
178 |
179 | std::string a = exec(cmd.str().c_str());
180 | a=a.substr(0, a.find("\n"));
181 | if ( a == std::string("1") )
182 | {
183 | dev = globbuf.gl_pathv[i];
184 | break;
185 | }
186 | }
187 | globfree(&globbuf);
188 | return dev;
189 | }
190 |
191 | static std::vector GetMonitorResolution(std::string& all)
192 | {
193 | all = exec("modetest -M xlnx -c| awk '/name refresh/ {f=1;next} /props:/{f=0;} f{print $2 \"@\" $3} '");
194 |
195 | std::string s = all;
196 | std::vector rarray;
197 | std::size_t pos;
198 | while ((pos = s.find("\n")) != std::string::npos) {
199 | std::string token = s.substr(0, pos);
200 | rarray.push_back(token);
201 | s.erase(0, pos + std::string("\n").length());
202 | }
203 |
204 | return rarray;
205 | }
206 |
/* Validate the MIPI camera source: auto-detect the media node, verify it is
 * accessible, and check that the requested resolution (globals w/h) is one
 * the sensor pipeline supports.  Sets the `mipidev` global.
 * Returns 0 on success, 1 on error (message already printed). */
static int CheckMIPISrc()
{
    mipidev = FindMIPIDev();
    if (mipidev == "")
    {
        g_printerr("ERROR: MIPI device is not ready.\n%s", msgFirmware);
        return 1;
    }
    if ( access( mipidev.c_str(), F_OK ) != 0 )
    {
        g_printerr("ERROR: Device %s is not ready.\n%s", mipidev.c_str(), msgFirmware);
        return 1;
    }
    /* Only 1080p and 4k are supported by the capture pipeline. */
    if( !(w == 1920 && h == 1080 ) && !(w == 3840 && h == 2160) )
    {
        g_printerr("ERROR: MIPI src resolution can only be:\n 1) 1920x1080@30\n 2) 3840x2160@30\n");
        return 1;
    }
    return 0;
}
227 |
228 | static std::vector GetUSBRes(std::string video, std::string& all)
229 | {
230 | std::ostringstream cmd;
231 | cmd << "v4l2-ctl --list-formats-ext -d " << video << " | awk '/\\s*\\[/ {f=1;} /MJPG/ {f=0;} /\\s*\\[/ {next} f && /Size/{print s; s=\"\"; print $3;} END{print s} f && /Interval:/{s=s $4 $5}' | awk 'NF' | sed 's/\\((\\|)(\\|)\\)/ /g' ";
232 | all = exec(cmd.str().c_str());
233 | std::string s = all;
234 | std::vector rarray;
235 | std::size_t pos;
236 | while ((pos = s.find("\n")) != std::string::npos) {
237 | std::string token = s.substr(0, pos);
238 | rarray.push_back(token);
239 | s.erase(0, pos + std::string("\n").length());
240 | }
241 |
242 | return rarray;
243 | }
244 |
245 |
246 | static std::string GetUSBVideoDevFromMedia(std::string media)
247 | {
248 | std::ostringstream cmd;
249 | cmd << "media-ctl -d " << media << " -p | awk '/^driver\\s*uvcvideo/ {u=1} /device node name *\\/dev\\/video/ {x=$4;f=1;next} u&&f&&/pad0: Sink/ {print x; x=\"\"} f {f=0} '";
250 |
251 | std::string s = exec(cmd.str().c_str());
252 |
253 | std::vector rarray;
254 | std::size_t pos;
255 | while ((pos = s.find("\n")) != std::string::npos) {
256 | std::string token = s.substr(0, pos);
257 | rarray.push_back(token);
258 | s.erase(0, pos + std::string("\n").length());
259 | }
260 | if (rarray.size() > 0)
261 | return rarray[0];
262 | else
263 | return "";
264 | }
265 |
/* Auto-detect a single USB (UVC) camera among /dev/media* nodes.
 * Returns its /dev/videoX node, or "" when none is found or when more than
 * one is found (in which case the candidate list is printed so the user can
 * pick one with -u). */
static std::string FindUSBDev()
{
  glob_t globbuf;

  std::string video("");
  std::string medialist("");
  int num = 0;
  glob("/dev/media*", 0, NULL, &globbuf);
  for (int i = 0; i < globbuf.gl_pathc; i++)
  {
    std::string tmp = GetUSBVideoDevFromMedia(globbuf.gl_pathv[i]);
    if (tmp != "")
    {
      video = tmp;
      medialist += "\n";
      medialist += globbuf.gl_pathv[i];
      num++;
    }
  }
  if (num > 1)
  {
    g_printerr("ERROR: More than 1 USB cam, please choose one: %s\n", medialist.c_str());
    video = "";
  }
  else if (num == 0)
  {
    g_printerr("ERROR: No USB camera found.\n");
  }
  else
  {
    g_print("INFO: 1 USB camera found: %s\n", medialist.c_str() );
  }
  return video;
}
300 |
301 | static int CheckUSBSrc()
302 | {
303 | if (usb < 0)
304 | {
305 | usbvideo = FindUSBDev();
306 | if (usbvideo == "")
307 | {
308 | return 1;
309 | }
310 | }
311 | else
312 | {
313 | std::ostringstream media;
314 | media << "/dev/media" << usb;
315 | if ( access( media.str().c_str(), F_OK ) != 0 )
316 | {
317 | g_printerr("ERROR: Device %s is not ready.\n", media.str().c_str());
318 | return 1;
319 | }
320 |
321 | usbvideo = GetUSBVideoDevFromMedia(media.str());
322 | if (usbvideo == "") {
323 | g_printerr("ERROR: Device %s is not USB cam.\n", media.str().c_str());
324 | return 1;
325 | }
326 | }
327 |
328 |
329 | std::string allres;
330 | std::vector resV = GetUSBRes(usbvideo, allres);
331 | std::ostringstream inputRes;
332 | inputRes << w << "x" << h;
333 | bool match = false;
334 | for (int i = 0; i < resV.size(); i+=2)
335 | {
336 | if ( resV[i] == inputRes.str() )
337 | {
338 | match = true;
339 | }
340 | }
341 | if (!match)
342 | {
343 | g_printerr ("Error: USB camera doesn't support resolution %s\nAll supported resolution:\n%s\n", inputRes.str().c_str(), allres.c_str());
344 | return 1;
345 | }
346 |
347 | return 0;
348 | }
349 |
350 | static int CheckCoexistSrc()
351 | {
352 | std::string given("");
353 | std::string msg("");
354 | bool checkMipi = false, checkUsb = false;
355 | if ( filename )
356 | {
357 | given = "File is given by -f, ";
358 | }
359 |
360 | if ( mipi )
361 | {
362 | if (given.size() > 0)
363 | {
364 | msg = "mipi src is ignored.";
365 | }
366 | else
367 | {
368 | given = "MIPI is given by -m, ";
369 | checkMipi = true;
370 | }
371 | }
372 |
373 | if ( usb > -2 )
374 | {
375 | if (given.size() > 0)
376 | {
377 | msg = "USB src is ignored.";
378 | }
379 | else
380 | {
381 | checkUsb = true;
382 | given = "MIPI is given by -m, ";
383 | }
384 | }
385 | if (msg.size() > 0)
386 | {
387 | g_print("WARNING: %s\n", msg.c_str());
388 | }
389 |
390 | if ( checkMipi )
391 | {
392 | return CheckMIPISrc();
393 | }
394 | if ( checkUsb )
395 | {
396 | return CheckUSBSrc();
397 | }
398 | return 0;
399 | }
400 |
401 | int
402 | main (int argc, char *argv[])
403 | {
404 | char* pathVar = std::getenv("PATH");
405 | std::string setPath = std::string("PATH=") + std::string(pathVar) + ":/usr/sbin:/sbin";
406 | putenv((char*)setPath.c_str());
407 |
408 | GMainLoop *loop;
409 | GstRTSPServer *server;
410 | GstRTSPMountPoints *mounts;
411 | GstRTSPMediaFactory *factory;
412 | GstRTSPSessionPool *session;
413 | GOptionContext *optctx;
414 | GError *error = NULL;
415 | guint busWatchId;
416 |
417 | session = gst_rtsp_session_pool_new();
418 | gst_rtsp_session_pool_set_max_sessions (session, 255);
419 |
420 |
421 | optctx = g_option_context_new ("- Application for facedetion detction on SoM board of Xilinx.");
422 | g_option_context_add_main_entries (optctx, entries, NULL);
423 | g_option_context_add_group (optctx, gst_init_get_option_group ());
424 | if (!g_option_context_parse (optctx, &argc, &argv, &error)) {
425 | g_printerr ("Error parsing options: %s\n", error->message);
426 | g_option_context_free (optctx);
427 | g_clear_error (&error);
428 | return -1;
429 | }
430 | g_option_context_free (optctx);
431 |
432 | if (getuid() != 0)
433 | {
434 | g_printerr ("Please run with sudo.\n");
435 | return 1;
436 | }
437 |
438 | if (!filename && !mipi && usb <= -2)
439 | {
440 | g_printerr ("Error: No input is given by -m / -u / -f .\n");
441 | return 1;
442 | }
443 |
444 | if (filename && access( filename, F_OK ) != 0 )
445 | {
446 | g_printerr ("Error: File specified by -f doesn't exist: %s .\n", filename);
447 | return 1;
448 | }
449 |
450 | if (!(filename && nodet && std::string(target) =="rtsp" && std::string(infileType) == std::string(outMediaType)) && access("/dev/allegroDecodeIP", F_OK) != 0)
451 | {
452 | g_printerr("ERROR: VCU decoder is not ready.\n%s", msgFirmware);
453 | return 1;
454 | }
455 |
456 | if ( CheckCoexistSrc() != 0 )
457 | {
458 | return 1;
459 | }
460 |
461 | if (std::string(target) == "dp")
462 | {
463 | if (access( "/dev/dri/by-path/platform-fd4a0000.display-card", F_OK ) != 0 )
464 | {
465 | g_printerr ("Error: zynqmp-display device is not ready.\n%s", filename, msgFirmware);
466 | return 1;
467 | }
468 |
469 | std::string allres;
470 | std::vector resV = GetMonitorResolution(allres);
471 | std::ostringstream inputRes;
472 | inputRes << w << "x" << h;
473 | bool match = false;
474 | for (const auto &res : resV)
475 | {
476 | std::size_t pos = res.find("@");
477 | std::string wh = res.substr(0, pos);
478 | if ( wh == inputRes.str() )
479 | {
480 | match = true;
481 | }
482 | }
483 | if (!match)
484 | {
485 | g_printerr ("Error: Monitor doesn't support resolution %s\nAll supported resolution:\n%s\n", inputRes.str().c_str(), allres.c_str());
486 | return 1;
487 | }
488 | }
489 | else if (std::string(target) == "rtsp")
490 | {
491 | if ( !(filename && nodet && std::string(infileType) == std::string(outMediaType)) && access( "/dev/allegroIP", F_OK ) != 0 )
492 | {
493 | g_printerr("ERROR: VCU encoder is not ready.\n");
494 | return 1;
495 | }
496 | }
497 |
498 |
499 | loop = g_main_loop_new (NULL, FALSE);
500 |
501 | std::string confdir("/opt/xilinx/share/ivas/smartcam/");
502 | confdir += (aitask);
503 | char pip[2500];
504 | pip[0] = '\0';
505 |
506 | char *perf = (char*)"";
507 | if (reportFps)
508 | {
509 | perf = (char*)"! perf ";
510 | }
511 | if (screenfps)
512 | {
513 | setenv("SMARTCAM_SCREENFPS", "1", 1);
514 | }
515 |
516 | if (std::string(target) == "rtsp")
517 | {
518 | sprintf(pip + strlen(pip), "( ");
519 | }
520 | {
521 | if (filename) {
522 | sprintf(pip + strlen(pip),
523 | "%s location=%s ! %sparse ! queue ! omx%sdec ! video/x-raw, width=%d, height=%d, format=NV12, framerate=%d/1 ",
524 | (std::string(target) == "file") ? "filesrc" : "multifilesrc",
525 | filename, infileType, infileType, w, h, fr);
526 | } else if (mipidev != "") {
527 | sprintf(pip + strlen(pip),
528 | "mediasrcbin name=videosrc media-device=%s %s ! video/x-raw, width=%d, height=%d, format=NV12, framerate=%d/1 ", mipidev.c_str(), (w==1920 && h==1080 && std::string(target) == "dp" ? " v4l2src0::io-mode=dmabuf v4l2src0::stride-align=256" : ""), w, h, fr);
529 | } else if (usbvideo != "") {
530 | sprintf(pip + strlen(pip),
531 | "v4l2src name=videosrc device=%s io-mode=mmap %s ! video/x-raw, width=%d, height=%d ! videoconvert \
532 | ! video/x-raw, format=NV12",
533 | usbvideo.c_str(), (w==1920 && h==1080 && std::string(target) == "dp" ? "stride-align=256" : ""), w, h );
534 | }
535 |
536 | if (!nodet) {
537 | sprintf(pip + strlen(pip), " ! tee name=t \
538 | ! queue ! ivas_xmultisrc kconfig=\"%s/preprocess.json\" \
539 | ! queue ! ivas_xfilter kernels-config=\"%s/aiinference.json\" \
540 | ! ima.sink_master \
541 | ivas_xmetaaffixer name=ima ima.src_master ! fakesink \
542 | t. \
543 | ! queue max-size-buffers=1 leaky=%d ! ima.sink_slave_0 ima.src_slave_0 ! queue ! ivas_xfilter kernels-config=\"%s/drawresult.json\" ",
544 | confdir.c_str(),
545 | confdir.c_str(),
546 | filename? 0 : 2, confdir.c_str());
547 | }
548 | }
549 |
550 | if (std::string(target) == "rtsp")
551 | {
552 | /* create a server instance */
553 | server = gst_rtsp_server_new ();
554 | g_object_set (server, "service", port, NULL);
555 | mounts = gst_rtsp_server_get_mount_points (server);
556 | factory = gst_rtsp_media_factory_new ();
557 |
558 |
559 | if (filename && std::string(infileType) == std::string(outMediaType) && nodet)
560 | {
561 | sprintf(pip, "( multifilesrc location=%s ! %sparse ",
562 | filename, infileType, outMediaType
563 | );
564 | }
565 | else
566 | {
567 | sprintf(pip + strlen(pip), " \
568 | %s \
569 | ! queue ! omx%senc \
570 | qp-mode=%s \
571 | control-rate=%s %s%s gop-length=%s \
572 | %s \
573 | ! video/x-%s, alignment=au\
574 | %s%s %s%s %s%s \
575 | ",
576 | roiOff ? "" : " ! queue ! ivas_xroigen roi-type=1 roi-qp-delta=-10 roi-max-num=10 ",
577 | outMediaType,
578 | roiOff ? "auto" : "1",
579 | controlRate, targetBitrate?"target-bitrate=":"", targetBitrate?targetBitrate:"", gopLength,
580 | encodeEnhancedParam ? encodeEnhancedParam : "gop-mode=low-delay-p gdr-mode=horizontal cpb-size=200 num-slices=8 periodicity-idr=270 \
581 | initial-delay=100 filler-data=false min-qp=15 max-qp=40 b-frames=0 low-bandwidth=false ",
582 | outMediaType,
583 | profile ? ", profile=\\(string\\)" : "", profile ? profile : "", level ? ", level=\\(string\\)":"", level ? level : "", tier?", tier=\\(string\\)" : "", tier ? tier: ""
584 | );
585 | }
586 |
587 | std::string audioId = "";
588 | if (audio)
589 | {
590 | audioId = exec("arecord -l|grep xlnx-i2s-snd-card | awk '{print $2}' | sed 's/://'");
591 | std::size_t pos = audioId.find("\n");
592 | if (pos != std::string::npos) {
593 | audioId = audioId.substr(0,pos);
594 | } else {
595 | audioId = "";
596 | }
597 | }
598 |
599 | if (audio && audioId != "")
600 | {
601 | sprintf(pip + strlen(pip), " \
602 | ! queue ! mux. \
603 | alsasrc device=hw:%s,1 ! queue ! audio/x-raw,format=S24_32LE,rate=48000,channnels=2 \
604 | ! audioconvert ! faac ! mux. \
605 | mpegtsmux name=mux \
606 | ! rtpmp2tpay name=pay0 pt=33 )", audioId.c_str()
607 | );
608 | }
609 | else
610 | {
611 | sprintf(pip + strlen(pip), " \
612 | ! queue %s ! rtp%spay name=pay0 pt=96 )",
613 | perf, outMediaType);
614 | }
615 |
616 | gst_rtsp_media_factory_set_launch (factory, pip);
617 | gst_rtsp_media_factory_set_shared (factory, TRUE);
618 | gst_rtsp_mount_points_add_factory (mounts, "/test", factory);
619 |
620 | g_object_unref (mounts);
621 |
622 | /* attach the server to the default maincontext */
623 | gst_rtsp_server_attach (server, NULL);
624 |
625 | /* start serving */
626 | std::vector ips = GetIp();
627 | std::ostringstream addr("");
628 | for (auto&ip : ips)
629 | {
630 | addr << "rtsp://" << ip << ":" << port << "/test\n";
631 | }
632 | g_print ("stream ready at:\n %s", addr.str().c_str());
633 | g_main_loop_run (loop);
634 | }
635 | else
636 | {
637 | if (std::string(target) == "file")
638 | {
639 | sprintf(pip + strlen(pip), "\
640 | %s \
641 | ! queue ! omx%senc \
642 | qp-mode=%s \
643 | control-rate=%s %s%s gop-length=%s \
644 | %s \
645 | ! video/x-%s, alignment=au\
646 | %s%s %s%s %s%s \
647 | %s \
648 | ! filesink location=./out.%s async=false",
649 | roiOff ? "" : " ! queue ! ivas_xroigen roi-type=1 roi-qp-delta=-10 roi-max-num=10 ",
650 | outMediaType,
651 | roiOff ? "auto" : "1",
652 | controlRate, targetBitrate?"target-bitrate=":"", targetBitrate?targetBitrate:"", gopLength,
653 | encodeEnhancedParam ? encodeEnhancedParam : "gop-mode=low-delay-p gdr-mode=horizontal cpb-size=200 num-slices=8 periodicity-idr=270 \
654 | initial-delay=100 filler-data=false min-qp=15 max-qp=40 b-frames=0 low-bandwidth=false ",
655 | outMediaType,
656 | profile ? ", profile=(string)" : "", profile ? profile : "", level ? ", level=(string)":"", level ? level : "", tier?"tier=(string)" : "", tier ? tier: "",
657 | perf,
658 | outMediaType);
659 | }
660 | else if (std::string(target) == "dp")
661 | {
662 | sprintf(pip + strlen(pip), "\
663 | ! queue %s ! kmssink driver-name=xlnx plane-id=39 sync=%s fullscreen-overlay=true", perf, filename? "true" : "false");
664 | }
665 |
666 | GstElement *pipeline = gst_parse_launch(pip, NULL);
667 | gst_element_set_state (pipeline, GST_STATE_PLAYING);
668 | /* Wait until error or EOS */
669 | GstBus *bus = gst_element_get_bus (pipeline);
670 | busWatchId = gst_bus_add_watch (bus, my_bus_callback, loop);
671 | g_main_loop_run (loop);
672 |
673 | g_print("Output file is out.%s, please play with your favorite media player, such as VLC, ffplay, etc. to see the video with %s AI results.\n",
674 | outMediaType, nodet ? "no" : aitask);
675 | gst_object_unref (bus);
676 | gst_element_set_state (pipeline, GST_STATE_NULL);
677 | gst_object_unref (pipeline);
678 | g_source_remove (busWatchId);
679 | g_main_loop_unref (loop);
680 | }
681 | return 0;
682 | }
683 |
--------------------------------------------------------------------------------