├── CLA.md ├── LICENSE.md ├── README.md ├── common ├── FPS.py ├── __init__.py ├── __pycache__ │ ├── FPS.cpython-36.pyc │ ├── __init__.cpython-36.pyc │ ├── bus_call.cpython-36.pyc │ └── is_aarch_64.cpython-36.pyc ├── bus_call.py ├── is_aarch_64.py └── utils.py ├── config_files ├── color_labels.txt ├── dstest1_pgie_config.txt ├── dstest2_pgie_config.txt ├── dstest2_sgie1_config.txt ├── dstest2_sgie2_config.txt ├── dstest2_sgie3_config.txt ├── dstest2_tracker_config.txt ├── dstest_imagedata_config.txt ├── libnvds_mot_klt.so ├── make_labels.txt ├── object_labels.txt ├── tracker_config.yml └── type_labels.txt ├── docker ├── README.md ├── docker_build.sh ├── docker_run.sh ├── dockerfile.ros.eloquent.deepstream └── packages │ └── ros_entrypoint.sh ├── images ├── .gitkeep ├── DS_publisher.PNG ├── blue_bmw_sedan.png ├── blue_sedan.png └── two_stream.png ├── multi_stream_pkg ├── multi_stream_pkg │ ├── __init__.py │ ├── multi_stream.py │ └── multi_stream_class.py ├── package.xml ├── resource │ └── multi_stream_pkg ├── setup.cfg ├── setup.py └── test │ ├── test_copyright.py │ ├── test_flake8.py │ └── test_pep257.py ├── single_stream_pkg ├── package.xml ├── resource │ └── single_stream_pkg ├── setup.cfg ├── setup.py ├── single_stream_pkg │ ├── __init__.py │ ├── single_stream.py │ └── single_stream_class.py └── test │ ├── test_copyright.py │ ├── test_flake8.py │ └── test_pep257.py └── subscriber_pkg ├── package.xml ├── resource └── subscriber_pkg ├── setup.cfg ├── setup.py ├── subscriber_pkg ├── __init__.py ├── sub_classification.py ├── sub_detection.py ├── sub_multi_classification.py └── sub_multi_detection.py └── test ├── test_copyright.py ├── test_flake8.py └── test_pep257.py /CLA.md: -------------------------------------------------------------------------------- 1 | ## Individual Contributor License Agreement (CLA) 2 | 3 | **Thank you for submitting your contributions to this project.** 4 | 5 | By signing this CLA, you agree that the following terms apply to all of your 
past, present and future contributions 6 | to the project. 7 | 8 | ### License. 9 | 10 | You hereby represent that all present, past and future contributions are governed by the 11 | [MIT License](https://opensource.org/licenses/MIT) 12 | copyright statement. 13 | 14 | This entails that to the extent possible under law, you transfer all copyright and related or neighboring rights 15 | of the code or documents you contribute to the project itself or its maintainers. 16 | Furthermore you also represent that you have the authority to perform the above waiver 17 | with respect to the entirety of you contributions. 18 | 19 | ### Moral Rights. 20 | 21 | To the fullest extent permitted under applicable law, you hereby waive, and agree not to 22 | assert, all of your “moral rights” in or relating to your contributions for the benefit of the project. 23 | 24 | ### Third Party Content. 25 | 26 | If your Contribution includes or is based on any source code, object code, bug fixes, configuration changes, tools, 27 | specifications, documentation, data, materials, feedback, information or other works of authorship that were not 28 | authored by you (“Third Party Content”) or if you are aware of any third party intellectual property or proprietary 29 | rights associated with your Contribution (“Third Party Rights”), 30 | then you agree to include with the submission of your Contribution full details respecting such Third Party 31 | Content and Third Party Rights, including, without limitation, identification of which aspects of your 32 | Contribution contain Third Party Content or are associated with Third Party Rights, the owner/author of the 33 | Third Party Content and Third Party Rights, where you obtained the Third Party Content, and any applicable 34 | third party license terms or restrictions respecting the Third Party Content and Third Party Rights. 
For greater 35 | certainty, the foregoing obligations respecting the identification of Third Party Content and Third Party Rights 36 | do not apply to any portion of a Project that is incorporated into your Contribution to that same Project. 37 | 38 | ### Representations. 39 | 40 | You represent that, other than the Third Party Content and Third Party Rights identified by 41 | you in accordance with this Agreement, you are the sole author of your Contributions and are legally entitled 42 | to grant the foregoing licenses and waivers in respect of your Contributions. If your Contributions were 43 | created in the course of your employment with your past or present employer(s), you represent that such 44 | employer(s) has authorized you to make your Contributions on behalf of such employer(s) or such employer 45 | (s) has waived all of their right, title or interest in or to your Contributions. 46 | 47 | ### Disclaimer. 48 | 49 | To the fullest extent permitted under applicable law, your Contributions are provided on an "as is" 50 | basis, without any warranties or conditions, express or implied, including, without limitation, any implied 51 | warranties or conditions of non-infringement, merchantability or fitness for a particular purpose. You are not 52 | required to provide support for your Contributions, except to the extent you desire to provide support. 53 | 54 | ### No Obligation. 55 | 56 | You acknowledge that the maintainers of this project are under no obligation to use or incorporate your contributions 57 | into the project. The decision to use or incorporate your contributions into the project will be made at the 58 | sole discretion of the maintainers or their authorized delegates. -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------ 2 | # This sample application is no longer maintained 3 | # ------------------------------------------------------ 4 | 5 | # DeepStream_ROS2 6 | 7 | ROS2 nodes for DeepStream applications. 8 | 9 | [NVIDIA Developer Blog](https://developer.nvidia.com/blog/implementing-robotics-applications-with-ros-2-and-ai-on-jetson-platform-2/) 10 | 11 | This work is based on sample applications from the [DeepStream Python Apps](https://github.com/NVIDIA-AI-IOT/deepstream_python_apps) project. The packages have been tested on NVIDIA Jetson AGX Xavier with Ubuntu 18.04, ROS Eloquent, DeepStream SDK 5.0 (or later) and TensorRT. 
The project accesses some files in the DeepStream 5.0 root location (/opt/nvidia/deepstream/deepstream/samples/). 12 | 13 | This project includes ROS2 publisher nodes which take a single/multiple video streams as input from webcam or from file: 14 | 15 | 1. **single_stream** node: This performs 2 inference tasks on a single video input: 16 | 17 | - **Object Detection:** Detects 4 classes of objects: Vehicle, Person, Road Sign, Two wheeler. 18 | - Output of this inference is published on topic '**infer_detection**'. 19 | 20 | - **Attribute Classification:** For objects of class 'Vehicle', 3 categories of atrributes are identified: color, make and type. 21 | - Output of this inference is published on topic '**infer_classification**'. 22 | 23 | 2. **multi_stream** node: This takes multiple video files as input, performs the same inference tasks and publishes to topics **multi_detection** and **multi_classification**. 24 | 25 | Sample ROS2 subscriber nodes have also been provided in **subscriber_pkg**, subscribing to the following topics: 26 | 27 | | Node | Topic | 28 | | ------ | ------ | 29 | | sub_detection | infer_detection | 30 | | sub_classification | infer_classification | 31 | | sub_multi_detection | multi_detection | 32 | | sub_multi_classification | multi_classification | 33 | 34 | ## Prerequisites 35 | 36 | Ubuntu 18.04 37 | 38 | Python 3.6 39 | 40 | [DeepStream SDK 5.0](https://developer.nvidia.com/deepstream-getting-started) or later 41 | 42 | NumPy 43 | 44 | OpenCV 45 | 46 | [vision_msgs](https://github.com/Kukanani/vision_msgs/tree/ros2) 47 | 48 | [Gst Python](https://gstreamer.freedesktop.org/modules/gst-python.html) v1.14.5 (should already be installed on Jetson) 49 | 50 | If missing, install using following commands: 51 | 52 | `sudo apt update` 53 | 54 | `sudo apt install python3-gi python3-dev python3-gst-1.0 -y` 55 | 56 | ## Running the ROS2 nodes 57 | 58 | 1. 
Clone this repo into the **src** folder inside your ROS2 workspace ([creating a ROS2 workspace](https://index.ros.org/doc/ros2/Tutorials/Workspace/Creating-A-Workspace/)) using the following command: 59 | 60 | `git clone https://github.com/NVIDIA-AI-IOT/ros2_deepstream.git` 61 | 62 | The directory structure should look like this: 63 | 64 | ```python 65 | . 66 | +- dev_ws 67 | +- src 68 | +- ros2_deepstream 69 | +- common 70 | +- config_files 71 | +- dstest1_pgie_config.txt (several other config files) 72 | +- single_stream_pkg 73 | +- multi_stream_pkg 74 | +- subscriber_pkg 75 | +- resource 76 | +- subscriber_pkg 77 | +- test 78 | +- package.xml 79 | +- setup.cfg 80 | +- setup.py 81 | ``` 82 | 83 | 2. To build the package, navigate back to your workspace and run the following: 84 | 85 | `colcon build` 86 | 87 | 3. Source your main ROS 2 installation: 88 | 89 | `source /opt/ros/eloquent/setup.bash` 90 | 91 | 4. Then, to source your workspace, run the following command from your workspace: 92 | 93 | `. install/setup.bash` 94 | 95 | 5. To run the **single_stream** publisher node, run the following command by specifying the **input_source**. This command will take some time to start and print log messages to the console. 96 | 97 | `ros2 run single_stream_pkg single_stream --ros-args -p input_source:="/dev/video0"` 98 | 99 | This project has been tested using a Logitech C270 usb webcam to capture camera stream as input. H.264/H.265 video streams can also be given as input as shown later in this repo. 100 | 101 | 6. To run the subscribers, open separate terminals, navigate to your ros workspace and repeat step 4 in each. 102 | 103 | **sub_detection** subscribes to output from detection inference. 104 | 105 | `ros2 run subscriber_pkg sub_detection` 106 | 107 | **sub_classification** subscribes to output from classification inference. 
108 | 109 | `ros2 run subscriber_pkg sub_classification` 110 | 111 | To understand the application workflow better: 112 | 113 | ![alt text](images/DS_publisher.PNG "publisher") 114 | 115 | The pipeline uses a GStreamer **tee** element to branch out and perform different tasks after taking video input. In this example, we perform only two tasks but more tasks can be added to the pipeline easily. 116 | 117 | An example output: 118 | 119 | ![alt text](images/blue_bmw_sedan.png "sample output") 120 | 121 | Message received by the node subscribing to topic **infer_detection**: 122 | 123 | ``` 124 | [vision_msgs.msg.Detection2D(header=std_msgs.msg.Header(stamp=builtin_interfaces.msg.Time(sec=0, nanosec=0), frame_id=''), results=[vision_msgs.msg.ObjectHypothesisWithPose(id='Car', score=0.4975374639034271, pose=geometry_msgs.msg.PoseWithCovariance(pose=geometry_msgs.msg.Pose(position=geometry_msgs.msg.Point(x=0.0, y=0.0, z=0.0), orientation=geometry_msgs.msg.Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)), covariance=array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 125 | 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 126 | 0., 0.])))], bbox=vision_msgs.msg.BoundingBox2D(center=geometry_msgs.msg.Pose2D(x=733.5, y=70.3125, theta=0.0), size_x=627.0, size_y=303.75), source_img=sensor_msgs.msg.Image(header=std_msgs.msg.Header(stamp=builtin_interfaces.msg.Time(sec=0, nanosec=0), frame_id=''), height=0, width=0, encoding='', is_bigendian=0, step=0, data=[]), is_tracking=False, tracking_id='')] 127 | ``` 128 | 129 | The **infer_detection** topic publishes messages in the `vision_msgs` Detection2DArray type. 
130 | 131 | Message received by the node subscribing to topic **infer_classification**: 132 | 133 | `[vision_msgs.msg.ObjectHypothesis(id='blue', score=0.9575958847999573), vision_msgs.msg.ObjectHypothesis(id='bmw', score=0.6080179214477539), vision_msgs.msg.ObjectHypothesis(id='sedan', score=0.8021238446235657)]` 134 | 135 | The **infer_classification** topic publishes messages in the `vision_msgs` Classification2D type. These messages contain information about the color, make and type of detected cars alongwith their confidence scores. 136 | 137 | ## Multi input publisher node 138 | 139 | For applications that take videos from multiple input sources, we have provided node **multi_stream**. This takes multiple H.264/H.265 video streams as input and performs inference (detection and classification). Output is published on topics **multi_detection** and **multi_classification** in Detection2DArray and Classification2D types respectively. 140 | 141 | Run the multi_stream publisher using the following command (check that workspace is sourced by following steps 3 and 4 above). This command will take some time to start and print log messages to the console. 142 | 143 | `ros2 run multi_stream_pkg multi_stream --ros-args -p input_sources:="['file://', 'file://']"` 144 | 145 | For instance, you can use some sample videos that come with the DeepStream installation: 146 | 147 | `ros2 run multi_stream_pkg multi_stream --ros-args -p input_sources:="['file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4', 'file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_qHD.mp4']"` 148 | 149 | The command above takes input from two sources. This can be modified to take input from one or more sources by specifying the input file names in the list **input_sources**. 150 | 151 | To run the sample subscribers, open separate terminals, navigate to your ros workspace and repeat step 4 above in each. 
152 | 153 | **sub_multi_detection** subscribes to topic multi_detection. 154 | 155 | `ros2 run subscriber_pkg sub_multi_detection` 156 | 157 | **sub_multi_classification** subscribes to topic multi_classification. 158 | 159 | `ros2 run subscriber_pkg sub_multi_classification` 160 | 161 | An example output: 162 | 163 | ![alt text](images/two_stream.png "two stream output") 164 | 165 | ## Performance 166 | ``` 167 | Fps of stream 1 is 36.6 168 | Fps of stream 0 is 36.6 169 | 170 | Fps of stream 1 is 40.4 171 | Fps of stream 0 is 40.0 172 | ``` 173 | 174 | FPS with one input video source for **multi_stream** node was observed to be between 30-40; with two input sources was observed to be between 20-30; and with three sources was observed to be between 20-25 (with JAX in **MODE 15W**). 175 | 176 | To see the rate at which data is being published on topic **multi_detection**, open a separate terminal and source it (Step 3 above). Make sure the publisher node is running in another terminal and run the following command: 177 | 178 | `ros2 topic hz multi_detection` 179 | 180 | Replace **multi_detection** with **multi_classification** to see the publishing rate on topic multi_classification. 181 | 182 | Sample average rate for multi_detection: 75.96 183 | 184 | Sample average rate for inference: 46.599-118.048 185 | 186 | ## Contact Us 187 | Please let us know if you run into any issues [here](https://github.com/NVIDIA-AI-IOT/ros2_deepstream/issues). 
188 | 189 | ## Related ROS 2 Projects 190 | - [ros2_torch_trt](https://github.com/NVIDIA-AI-IOT/ros2_torch_trt) : ROS 2 Real Time Classification and Detection 191 | - [ros2_jetson_stats](https://github.com/NVIDIA-AI-IOT/ros2_jetson_stats) : ROS 2 package for monitoring and controlling NVIDIA Jetson Platform resources 192 | - [ros2_trt_pose](https://github.com/NVIDIA-AI-IOT/ros2_trt_pose) : ROS 2 package for "trt_pose": real-time human pose estimation on NVIDIA Jetson Platform 193 | 194 | -------------------------------------------------------------------------------- /common/FPS.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
import time

# Module-level snapshot taken at import time.  Each GETFPS instance re-anchors
# its own start_time on the first frame it sees, so these globals only seed
# the initial values.
start_time = time.time()
frame_count = 0


class GETFPS:
    """Per-stream frames-per-second counter.

    Call :meth:`get_fps` once for every decoded frame.  Whenever more than
    five seconds have elapsed since the window started, the average FPS over
    that window is printed to stdout and the window is reset.
    """

    def __init__(self, stream_id):
        """Create a counter for the stream identified by *stream_id*."""
        global start_time
        self.start_time = start_time
        # True until the first frame arrives; used to anchor the window.
        self.is_first = True
        global frame_count
        self.frame_count = frame_count
        self.stream_id = stream_id

    def get_fps(self):
        """Register one frame; print the windowed FPS roughly every 5 s."""
        end_time = time.time()
        if self.is_first:
            # First frame: anchor the measurement window here instead of at
            # import time, otherwise pipeline start-up latency skews the rate.
            self.start_time = end_time
            self.is_first = False
        elapsed = end_time - self.start_time
        if elapsed > 5:
            print("**********************FPS*****************************************")
            # Fix: divide by the real elapsed time rather than a hard-coded
            # 5.0 — the callback that crosses the threshold may arrive well
            # after the 5-second mark, which made the old figure too high.
            print("Fps of stream", self.stream_id, "is ", float(self.frame_count) / elapsed)
            self.frame_count = 0
            self.start_time = end_time
        else:
            self.frame_count = self.frame_count + 1

    def print_data(self):
        """Dump the raw counter state (debug aid)."""
        print('frame_count=', self.frame_count)
        print('start_time=', self.start_time)
-------------------------------------------------------------------------------- /common/__pycache__/bus_call.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVIDIA-AI-IOT/ros2_deepstream/2aa6218416189312e7913365fd73921b7101041d/common/__pycache__/bus_call.cpython-36.pyc -------------------------------------------------------------------------------- /common/__pycache__/is_aarch_64.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVIDIA-AI-IOT/ros2_deepstream/2aa6218416189312e7913365fd73921b7101041d/common/__pycache__/is_aarch_64.cpython-36.pyc -------------------------------------------------------------------------------- /common/bus_call.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
import gi
import sys
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst


def bus_call(bus, message, loop):
    """GStreamer bus watch: report warnings/errors and stop *loop* on EOS or error.

    Returning True keeps the watch installed on the bus.
    """
    msg_type = message.type
    if msg_type == Gst.MessageType.ERROR:
        # Fatal: report and tear down the main loop.
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        loop.quit()
    elif msg_type == Gst.MessageType.WARNING:
        # Non-fatal: report and keep running.
        err, debug = message.parse_warning()
        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
    elif msg_type == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        loop.quit()
    return True
import platform
import sys


def is_aarch64():
    """Return True when the interpreter runs on a 64-bit ARM (aarch64) host."""
    return platform.machine() == 'aarch64'


# Make the DeepStream Python bindings importable from their install location.
sys.path.append('/opt/nvidia/deepstream/deepstream/lib')
import ctypes
import sys

# Make the DeepStream Python bindings importable from their install location.
sys.path.append('/opt/nvidia/deepstream/deepstream/lib')


def long_to_int(l):
    """Reinterpret the low 32 bits of *l* as a signed 32-bit integer.

    Masks the value down to 32 bits, then lets ctypes apply two's-complement
    wrap-around (e.g. 0xFFFFFFFF -> -1).
    """
    return ctypes.c_int(l & 0xFFFFFFFF).value
13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | # Following properties are mandatory when engine files are not specified: 24 | # int8-calib-file(Only in INT8) 25 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 26 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 27 | # ONNX: onnx-file 28 | # 29 | # Mandatory properties for detectors: 30 | # num-detected-classes 31 | # 32 | # Optional properties for detectors: 33 | # cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) 34 | # custom-lib-path, 35 | # parse-bbox-func-name 36 | # 37 | # Mandatory properties for classifiers: 38 | # classifier-threshold, is-classifier 39 | # 40 | # Optional properties for classifiers: 41 | # classifier-async-mode(Secondary mode only, Default=false) 42 | # 43 | # Optional properties in secondary mode: 44 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 45 | # input-object-min-width, input-object-min-height, input-object-max-width, 46 | # input-object-max-height 47 | # 48 | # Following properties are always recommended: 49 | # batch-size(Default=1) 50 | # 51 | # Other optional properties: 52 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 53 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 54 | # mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. 
primary), 55 | # custom-lib-path, network-mode(Default=0 i.e FP32) 56 | # 57 | # The values in the config file are overridden by values set through GObject 58 | # properties. 59 | 60 | [property] 61 | gpu-id=0 62 | net-scale-factor=0.0039215697906911373 63 | model-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.caffemodel 64 | proto-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.prototxt 65 | labelfile-path=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/labels.txt 66 | 67 | int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/cal_trt.bin 68 | 69 | force-implicit-batch-dim=1 70 | batch-size=1 71 | network-mode=1 72 | num-detected-classes=4 73 | interval=0 74 | gie-unique-id=1 75 | output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid 76 | ## 0=Group Rectangles, 1=DBSCAN, 2=NMS, 3 = None(No clustering) 77 | cluster-mode=1 78 | 79 | #scaling-filter=0 80 | #scaling-compute-hw=0 81 | 82 | [class-attrs-all] 83 | pre-cluster-threshold=0.2 84 | eps=0.2 85 | minBoxes=1 86 | group-threshold=1 87 | dbscan-min-score=0.95 88 | 89 | -------------------------------------------------------------------------------- /config_files/dstest2_pgie_config.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | # Following properties are mandatory when engine files are not specified: 24 | # int8-calib-file(Only in INT8) 25 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 26 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 27 | # ONNX: onnx-file 28 | # 29 | # Mandatory properties for detectors: 30 | # num-detected-classes 31 | # 32 | # Optional properties for detectors: 33 | # cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) 34 | # custom-lib-path, 35 | # parse-bbox-func-name 36 | # 37 | # Mandatory properties for classifiers: 38 | # classifier-threshold, is-classifier 39 | # 40 | # Optional properties for classifiers: 41 | # classifier-async-mode(Secondary mode only, Default=false) 42 | # 43 | # Optional properties in secondary mode: 44 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 45 | # input-object-min-width, input-object-min-height, input-object-max-width, 46 | # input-object-max-height 47 | # 48 | # Following properties are always recommended: 49 | # batch-size(Default=1) 50 | # 51 | # Other optional properties: 52 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 53 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 54 | # mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary), 55 | # custom-lib-path, network-mode(Default=0 i.e FP32) 56 | # 57 | # The values in the config file are overridden by values set through GObject 58 | # properties. 
59 | 60 | [property] 61 | gpu-id=0 62 | net-scale-factor=0.0039215697906911373 63 | model-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.caffemodel 64 | proto-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.prototxt 65 | labelfile-path=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/labels.txt 66 | int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/cal_trt.bin 67 | force-implicit-batch-dim=1 68 | batch-size=1 69 | network-mode=1 70 | process-mode=1 71 | model-color-format=0 72 | num-detected-classes=4 73 | interval=0 74 | gie-unique-id=1 75 | output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid 76 | ## 0=Group Rectangles, 1=DBSCAN, 2=NMS, 3 = None(No clustering) 77 | cluster-mode=1 78 | 79 | #scaling-filter=0 80 | #scaling-compute-hw=0 81 | 82 | [class-attrs-all] 83 | pre-cluster-threshold=0.2 84 | eps=0.2 85 | group-threshold=1 86 | minBoxes=1 87 | dbscan-min-score=0.95 88 | -------------------------------------------------------------------------------- /config_files/dstest2_sgie1_config.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 
13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | # Following properties are mandatory when engine files are not specified: 24 | # int8-calib-file(Only in INT8) 25 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 26 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 27 | # ONNX: onnx-file 28 | # 29 | # Mandatory properties for detectors: 30 | # num-detected-classes 31 | # 32 | # Optional properties for detectors: 33 | # cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) 34 | # custom-lib-path, 35 | # parse-bbox-func-name 36 | # 37 | # Mandatory properties for classifiers: 38 | # classifier-threshold, is-classifier 39 | # 40 | # Optional properties for classifiers: 41 | # classifier-async-mode(Secondary mode only, Default=false) 42 | # 43 | # Optional properties in secondary mode: 44 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 45 | # input-object-min-width, input-object-min-height, input-object-max-width, 46 | # input-object-max-height 47 | # 48 | # Following properties are always recommended: 49 | # batch-size(Default=1) 50 | # 51 | # Other optional properties: 52 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 53 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 54 | # mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. 
primary), 55 | # custom-lib-path, network-mode(Default=0 i.e FP32) 56 | # 57 | # The values in the config file are overridden by values set through GObject 58 | # properties. 59 | 60 | [property] 61 | gpu-id=0 62 | net-scale-factor=1 63 | model-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/resnet18.caffemodel 64 | proto-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/resnet18.prototxt 65 | mean-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/mean.ppm 66 | labelfile-path=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/labels.txt 67 | int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/cal_trt.bin 68 | force-implicit-batch-dim=1 69 | batch-size=16 70 | # 0=FP32 and 1=INT8 mode 71 | network-mode=1 72 | input-object-min-width=64 73 | input-object-min-height=64 74 | process-mode=2 75 | model-color-format=1 76 | gpu-id=0 77 | gie-unique-id=2 78 | operate-on-gie-id=1 79 | operate-on-class-ids=0 80 | is-classifier=1 81 | output-blob-names=predictions/Softmax 82 | classifier-async-mode=1 83 | classifier-threshold=0.51 84 | process-mode=2 85 | #scaling-filter=0 86 | #scaling-compute-hw=0 87 | -------------------------------------------------------------------------------- /config_files/dstest2_sgie2_config.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | # Following properties are mandatory when engine files are not specified: 24 | # int8-calib-file(Only in INT8) 25 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 26 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 27 | # ONNX: onnx-file 28 | # 29 | # Mandatory properties for detectors: 30 | # num-detected-classes 31 | # 32 | # Optional properties for detectors: 33 | # cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) 34 | # custom-lib-path, 35 | # parse-bbox-func-name 36 | # 37 | # Mandatory properties for classifiers: 38 | # classifier-threshold, is-classifier 39 | # 40 | # Optional properties for classifiers: 41 | # classifier-async-mode(Secondary mode only, Default=false) 42 | # 43 | # Optional properties in secondary mode: 44 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 45 | # input-object-min-width, input-object-min-height, input-object-max-width, 46 | # input-object-max-height 47 | # 48 | # Following properties are always recommended: 49 | # batch-size(Default=1) 50 | # 51 | # Other optional properties: 52 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 53 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 54 | # mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary), 55 | # custom-lib-path, network-mode(Default=0 i.e FP32) 56 | # 57 | # The values in the config file are overridden by values set through GObject 58 | # properties. 
59 | 60 | [property] 61 | gpu-id=0 62 | net-scale-factor=1 63 | model-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/resnet18.caffemodel 64 | proto-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/resnet18.prototxt 65 | mean-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/mean.ppm 66 | labelfile-path=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/labels.txt 67 | int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/cal_trt.bin 68 | force-implicit-batch-dim=1 69 | batch-size=16 70 | # 0=FP32 and 1=INT8 mode 71 | network-mode=1 72 | input-object-min-width=64 73 | input-object-min-height=64 74 | process-mode=2 75 | model-color-format=1 76 | gpu-id=0 77 | gie-unique-id=3 78 | operate-on-gie-id=1 79 | operate-on-class-ids=0 80 | is-classifier=1 81 | output-blob-names=predictions/Softmax 82 | classifier-async-mode=1 83 | classifier-threshold=0.51 84 | process-mode=2 85 | #scaling-filter=0 86 | #scaling-compute-hw=0 87 | -------------------------------------------------------------------------------- /config_files/dstest2_sgie3_config.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | # Following properties are mandatory when engine files are not specified: 24 | # int8-calib-file(Only in INT8) 25 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 26 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 27 | # ONNX: onnx-file 28 | # 29 | # Mandatory properties for detectors: 30 | # num-detected-classes 31 | # 32 | # Optional properties for detectors: 33 | # cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) 34 | # custom-lib-path, 35 | # parse-bbox-func-name 36 | # 37 | # Mandatory properties for classifiers: 38 | # classifier-threshold, is-classifier 39 | # 40 | # Optional properties for classifiers: 41 | # classifier-async-mode(Secondary mode only, Default=false) 42 | # 43 | # Optional properties in secondary mode: 44 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 45 | # input-object-min-width, input-object-min-height, input-object-max-width, 46 | # input-object-max-height 47 | # 48 | # Following properties are always recommended: 49 | # batch-size(Default=1) 50 | # 51 | # Other optional properties: 52 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 53 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 54 | # mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary), 55 | # custom-lib-path, network-mode(Default=0 i.e FP32) 56 | # 57 | # The values in the config file are overridden by values set through GObject 58 | # properties. 
59 | 60 | [property] 61 | gpu-id=0 62 | net-scale-factor=1 63 | model-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_VehicleTypes/resnet18.caffemodel 64 | proto-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_VehicleTypes/resnet18.prototxt 65 | mean-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_VehicleTypes/mean.ppm 66 | labelfile-path=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_VehicleTypes/labels.txt 67 | int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_VehicleTypes/cal_trt.bin 68 | force-implicit-batch-dim=1 69 | batch-size=16 70 | # 0=FP32 and 1=INT8 mode 71 | network-mode=1 72 | input-object-min-width=64 73 | input-object-min-height=64 74 | model-color-format=1 75 | process-mode=2 76 | gpu-id=0 77 | gie-unique-id=4 78 | operate-on-gie-id=1 79 | operate-on-class-ids=0 80 | is-classifier=1 81 | output-blob-names=predictions/Softmax 82 | classifier-async-mode=1 83 | classifier-threshold=0.51 84 | process-mode=2 85 | #scaling-filter=0 86 | #scaling-compute-hw=0 87 | -------------------------------------------------------------------------------- /config_files/dstest2_tracker_config.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | # Mandatory properties for the tracker: 24 | # tracker-width 25 | # tracker-height: needs to be multiple of 6 for NvDCF 26 | # gpu-id 27 | # ll-lib-file: path to low-level tracker lib 28 | # ll-config-file: required for NvDCF, optional for KLT and IOU 29 | # 30 | [tracker] 31 | tracker-width=640 32 | tracker-height=384 33 | gpu-id=0 34 | ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_mot_klt.so 35 | #ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so 36 | 37 | #ll-config-file=tracker_config.yml 38 | enable-batch-process=1 39 | -------------------------------------------------------------------------------- /config_files/dstest_imagedata_config.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | # Following properties are mandatory when engine files are not specified: 24 | # int8-calib-file(Only in INT8) 25 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 26 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 27 | # ONNX: onnx-file 28 | # 29 | # Mandatory properties for detectors: 30 | # num-detected-classes 31 | # 32 | # Optional properties for detectors: 33 | # cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) 34 | # custom-lib-path 35 | # parse-bbox-func-name 36 | # 37 | # Mandatory properties for classifiers: 38 | # classifier-threshold, is-classifier 39 | # 40 | # Optional properties for classifiers: 41 | # classifier-async-mode(Secondary mode only, Default=false) 42 | # 43 | # Optional properties in secondary mode: 44 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 45 | # input-object-min-width, input-object-min-height, input-object-max-width, 46 | # input-object-max-height 47 | # 48 | # Following properties are always recommended: 49 | # batch-size(Default=1) 50 | # 51 | # Other optional properties: 52 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 53 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 54 | # mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary), 55 | # custom-lib-path, network-mode(Default=0 i.e FP32) 56 | # 57 | # The values in the config file are overridden by values set through GObject 58 | # properties. 
59 | 60 | [property] 61 | gpu-id=0 62 | net-scale-factor=0.0039215697906911373 63 | model-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.caffemodel 64 | proto-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.prototxt 65 | labelfile-path=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/labels.txt 66 | int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/cal_trt.bin 67 | force-implicit-batch-dim=1 68 | batch-size=1 69 | process-mode=1 70 | model-color-format=0 71 | network-mode=1 72 | num-detected-classes=4 73 | interval=0 74 | gie-unique-id=1 75 | output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid 76 | ## 0=Group Rectangles, 1=DBSCAN, 2=NMS, 3 = None(No clustering) 77 | cluster-mode=1 78 | 79 | [class-attrs-all] 80 | threshold=0.2 81 | eps=0.7 82 | minBoxes=1 83 | 84 | #Use the config params below for dbscan clustering mode 85 | [class-attrs-all] 86 | detected-min-w=4 87 | detected-min-h=4 88 | minBoxes=3 89 | 90 | ## Per class configurations 91 | [class-attrs-0] 92 | pre-cluster-threshold=0.05 93 | eps=0.7 94 | dbscan-min-score=0.95 95 | 96 | [class-attrs-1] 97 | pre-cluster-threshold=0.05 98 | eps=0.7 99 | dbscan-min-score=0.5 100 | 101 | [class-attrs-2] 102 | pre-cluster-threshold=0.1 103 | eps=0.6 104 | dbscan-min-score=0.95 105 | 106 | [class-attrs-3] 107 | pre-cluster-threshold=0.05 108 | eps=0.7 109 | dbscan-min-score=0.5 110 | -------------------------------------------------------------------------------- /config_files/libnvds_mot_klt.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVIDIA-AI-IOT/ros2_deepstream/2aa6218416189312e7913365fd73921b7101041d/config_files/libnvds_mot_klt.so -------------------------------------------------------------------------------- /config_files/make_labels.txt: -------------------------------------------------------------------------------- 1 | 
acura;audi;bmw;chevrolet;chrysler;dodge;ford;gmc;honda;hyundai;infiniti;jeep;kia;lexus;mazda;mercedes;nissan;subaru;toyota;volkswagen 2 | -------------------------------------------------------------------------------- /config_files/object_labels.txt: -------------------------------------------------------------------------------- 1 | Car;Bicycle;Person;Roadsign 2 | -------------------------------------------------------------------------------- /config_files/tracker_config.yml: -------------------------------------------------------------------------------- 1 | %YAML:1.0 2 | 3 | NvDCF: 4 | useColorNames: 0 # use ColorNames feature 5 | useHog: 1 # use Histogram-of-Oriented-Gradient (HOG) feature 6 | useHighPrecisionFeature: 0 # Use high-precision in feature extraction. Default is [true] 7 | useUniqueID: 0 # Use 64-bit long Unique ID when assignining tracker ID. Default is [false] 8 | 9 | maxTargetsPerStream: 99 # Max number of targets to track per stream. Recommended to set >10. Note: this value should account for the targets being tracked in shadow mode as well. Max value depends on the GPU memory capacity 10 | 11 | filterLr: 0.075 #11 #11 #0.175 #0.11 # learning rate for DCF filter in exponential moving average. Valid Range: [0.0, 1.0] 12 | gaussianSigma: 0.75 #0.75 #0.75 #0.75 # Standard deviation for Gaussian for desired response when creating DCF filter 13 | 14 | minDetectorConfidence: 0.0 # If the confidence of a detector bbox is lower than this, then it won't be considered for tracking 15 | minTrackerConfidence: 0.7 # If the confidence of an object tracker is lower than this on the fly, then it will be tracked in shadow mode. Valid Range: [0.0, 1.0] 16 | minTargetBboxSize: 5 # If the width or height of the bbox size gets smaller than this threshold, the target will be terminated 17 | 18 | featureImgSizeLevel: 2 # Size of a feature image. Valid range: {1, 2, 3, 4, 5}, from the smallest to the largest 19 | SearchRegionPaddingScale: 1 # Search region size. 
Determines how large the search region should be scaled from the target bbox. Valid range: {1, 2, 3}, from the smallest to the largest 20 | 21 | maxShadowTrackingAge: 30 # Max length of shadow tracking (the shadow tracking age is incremented when (1) there's detector input yet no match or (2) tracker confidence is lower than minTrackerConfidence). Once reached, the tracker will be terminated. 22 | probationAge: 3 # Once the tracker age (incremented at every frame) reaches this, the tracker is considered to be valid 23 | earlyTerminationAge: 1 # Early termination age (in terms of shadow tracking age) during the probation period 24 | 25 | # thresholds for data association 26 | minMatchingScore4Overall: 0.0 27 | minMatchingScore4Motion: 0.5 28 | minMatchingScore4Iou: 0.1 29 | minMatchingScore4VisualSimilarity: 0.2 30 | minTrackingConfidenceDuringInactive: 0.9 31 | 32 | matchingScoreWeight4VisualSimilarity: 0.8 # Weight for the visual similarity (in terms of correlation response ratio) 33 | matchingScoreWeight4Motion: 0.0 # Weight for the Size-similarity score 34 | matchingScoreWeight4Iou: 0.1 # Weight for the IOU score 35 | matchingScoreWeight4Age: 0.1 # Weight for the tracker age 36 | 37 | minDetectorBboxVisibilityTobeTracked: 0.0 38 | minVisibiilty4Tracking: 0.0 39 | 40 | bboxPaddingScaleForAssociation: 0.0 # Padding scale for bboxes when computing IOU for data association 41 | visibilityRoiFactor: 0.00 # Define the ROI of image where tracking and detection is considered to be valid. 
If visibilityRoiFactor = 0.05, it would shrink the ROI by 5% from the image boundary 42 | 43 | trackExponentialSmoothingLr_loc: 0.5 # Learning rate for new location 44 | trackExponentialSmoothingLr_scale: 0.3 # Learning rate for new scale 45 | trackExponentialSmoothingLr_velocity: 0.05 # Learning rate for new velocity 46 | -------------------------------------------------------------------------------- /config_files/type_labels.txt: -------------------------------------------------------------------------------- 1 | coupe;largevehicle;sedan;suv;truck;van 2 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | # Run ROS2-DeepStream in Docker 2 | 3 | For more Jetson dockers, please look at [jetson-containers](https://github.com/dusty-nv/jetson-containers) github repository. 4 | 5 | ## Docker Default Runtime 6 | 7 | To enable access to the CUDA compiler (nvcc) during `docker build` operations, add `"default-runtime": "nvidia"` to your `/etc/docker/daemon.json` configuration file before attempting to build the containers: 8 | 9 | ``` json 10 | { 11 | "runtimes": { 12 | "nvidia": { 13 | "path": "nvidia-container-runtime", 14 | "runtimeArgs": [] 15 | } 16 | }, 17 | 18 | "default-runtime": "nvidia" 19 | } 20 | ``` 21 | 22 | You will then want to restart the Docker service or reboot your system before proceeding. 23 | 24 | ## Building the Containers 25 | 26 | Run the following commands to build the dockerfile: 27 | 28 | `cp /etc/apt/trusted.gpg.d/jetson-ota-public.asc .` 29 | 30 | ``` sh docker_build.sh ```
31 | Once you successfully build, you will have a ros2-eloquent container with all necessary packages required for this repository.
32 | 33 | 34 | ## Run Container 35 | 36 | ``` sh docker_run.sh ```
37 | This will start the Docker container. Clone this repository inside it using the following command, and follow the build and run instructions for the ros2 package from here.
38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /docker/docker_build.sh: -------------------------------------------------------------------------------- 1 | sudo docker build -t ros2_deepstream_base:jp44 -f dockerfile.ros.eloquent.deepstream . 2 | -------------------------------------------------------------------------------- /docker/docker_run.sh: -------------------------------------------------------------------------------- 1 | sudo xhost +si:localuser:root 2 | sudo docker run -it --rm --net=host --runtime nvidia -e DISPLAY=$DISPLAY --device="/dev/video0:/dev/video0" -v /tmp/.X11-unix/:/tmp/.X11-unix/ -v ${pwd}:/workdir ros2_deepstream_base:jp44 3 | -------------------------------------------------------------------------------- /docker/dockerfile.ros.eloquent.deepstream: -------------------------------------------------------------------------------- 1 | # 2 | # this dockerfile roughly follows the 'Install ROS2 Via Debian Packages' from: 3 | # https://index.ros.org/doc/ros2/Installation/Eloquent/Linux-Install-Debians/ 4 | # 5 | ARG BASE_IMAGE=nvcr.io/nvidia/deepstream-l4t:5.0.1-20.09-samples 6 | FROM ${BASE_IMAGE} 7 | 8 | ARG ROS_PKG=ros_base 9 | ENV ROS_DISTRO=eloquent 10 | ENV ROS_ROOT=/opt/ros/${ROS_DISTRO} 11 | 12 | ENV DEBIAN_FRONTEND=noninteractive 13 | 14 | WORKDIR /workspace 15 | 16 | # change the locale from POSIX to UTF-8 17 | RUN locale-gen en_US en_US.UTF-8 && update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 18 | ENV LANG=en_US.UTF-8 19 | 20 | # add the ROS deb repo to the apt sources list 21 | RUN apt-get update && \ 22 | apt-get install -y --no-install-recommends \ 23 | git \ 24 | cmake \ 25 | build-essential \ 26 | curl \ 27 | wget \ 28 | gnupg2 \ 29 | lsb-release \ 30 | && rm -rf /var/lib/apt/lists/* 31 | 32 | RUN wget --no-check-certificate https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc && apt-key add ros.asc 33 | RUN sh -c 'echo "deb [arch=$(dpkg --print-architecture)] 
http://packages.ros.org/ros2/ubuntu $(lsb_release -cs) main" > /etc/apt/sources.list.d/ros2-latest.list' 34 | 35 | # install ROS packages 36 | RUN apt-get update && \ 37 | apt-get install -y --no-install-recommends \ 38 | ros-eloquent-ros-base \ 39 | ros-eloquent-launch-xml \ 40 | ros-eloquent-launch-yaml \ 41 | ros-eloquent-vision-msgs \ 42 | ros-eloquent-image-tools \ 43 | libpython3-dev \ 44 | python3-colcon-common-extensions \ 45 | python3-rosdep \ 46 | && rm -rf /var/lib/apt/lists/* 47 | 48 | # init/update rosdep 49 | RUN apt-get update && \ 50 | cd ${ROS_ROOT} && \ 51 | rosdep init && \ 52 | rosdep update && \ 53 | rm -rf /var/lib/apt/lists/* 54 | 55 | # compile yaml-cpp-0.6, which some ROS packages may use (but is not in the 18.04 apt repo) 56 | RUN git clone --branch yaml-cpp-0.6.0 https://github.com/jbeder/yaml-cpp yaml-cpp-0.6 && \ 57 | cd yaml-cpp-0.6 && \ 58 | mkdir build && \ 59 | cd build && \ 60 | cmake -DBUILD_SHARED_LIBS=ON .. && \ 61 | make -j$(nproc) && \ 62 | cp libyaml-cpp.so.0.6.0 /usr/lib/aarch64-linux-gnu/ && \ 63 | ln -s /usr/lib/aarch64-linux-gnu/libyaml-cpp.so.0.6.0 /usr/lib/aarch64-linux-gnu/libyaml-cpp.so.0.6 64 | 65 | # setup entrypoint 66 | COPY ./packages/ros_entrypoint.sh /ros_entrypoint.sh 67 | RUN echo 'source ${ROS_ROOT}/setup.bash' >> /root/.bashrc 68 | RUN chmod +x /ros_entrypoint.sh 69 | ENTRYPOINT ["/ros_entrypoint.sh"] 70 | CMD ["bash"] 71 | WORKDIR / 72 | 73 | # 74 | # install OpenCV (with GStreamer support) 75 | # 76 | COPY jetson-ota-public.asc /etc/apt/trusted.gpg.d/jetson-ota-public.asc 77 | 78 | RUN echo "deb https://repo.download.nvidia.com/jetson/common r32.4 main" > /etc/apt/sources.list.d/nvidia-l4t-apt-source.list && \ 79 | apt-get update && \ 80 | apt-get install -y --no-install-recommends \ 81 | libopencv-python \ 82 | && rm /etc/apt/sources.list.d/nvidia-l4t-apt-source.list \ 83 | && rm -rf /var/lib/apt/lists/* 84 | 85 | # 86 | # PyCUDA 87 | # 88 | ENV PATH="/usr/local/cuda/bin:${PATH}" 89 | ENV 
LD_LIBRARY_PATH="/usr/local/cuda/lib64:${LD_LIBRARY_PATH}" 90 | RUN echo "$PATH" && echo "$LD_LIBRARY_PATH" 91 | 92 | RUN apt-get update && apt-get install -y python3 python3-pip 93 | RUN pip3 install pycuda --verbose 94 | 95 | # 96 | # Gst Python 97 | # 98 | RUN apt update && apt install python3-gi python3-dev python3-gst-1.0 -y 99 | -------------------------------------------------------------------------------- /docker/packages/ros_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # setup ros2 environment 5 | source "/opt/ros/$ROS_DISTRO/setup.bash" 6 | exec "$@" 7 | 8 | -------------------------------------------------------------------------------- /images/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVIDIA-AI-IOT/ros2_deepstream/2aa6218416189312e7913365fd73921b7101041d/images/.gitkeep -------------------------------------------------------------------------------- /images/DS_publisher.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVIDIA-AI-IOT/ros2_deepstream/2aa6218416189312e7913365fd73921b7101041d/images/DS_publisher.PNG -------------------------------------------------------------------------------- /images/blue_bmw_sedan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVIDIA-AI-IOT/ros2_deepstream/2aa6218416189312e7913365fd73921b7101041d/images/blue_bmw_sedan.png -------------------------------------------------------------------------------- /images/blue_sedan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVIDIA-AI-IOT/ros2_deepstream/2aa6218416189312e7913365fd73921b7101041d/images/blue_sedan.png -------------------------------------------------------------------------------- 
/images/two_stream.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVIDIA-AI-IOT/ros2_deepstream/2aa6218416189312e7913365fd73921b7101041d/images/two_stream.png -------------------------------------------------------------------------------- /multi_stream_pkg/multi_stream_pkg/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVIDIA-AI-IOT/ros2_deepstream/2aa6218416189312e7913365fd73921b7101041d/multi_stream_pkg/multi_stream_pkg/__init__.py -------------------------------------------------------------------------------- /multi_stream_pkg/multi_stream_pkg/multi_stream.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | import rclpy 24 | from multi_stream_pkg.multi_stream_class import InferencePublisher 25 | 26 | def main(args=None): 27 | rclpy.init(args=args) 28 | 29 | inference_publisher = InferencePublisher() 30 | 31 | inference_publisher.start_pipeline() 32 | 33 | # Destroy the node explicitly 34 | # (optional - otherwise it will be done automatically 35 | # when the garbage collector destroys the node object) 36 | inference_publisher.destroy_node() 37 | rclpy.shutdown() 38 | 39 | if __name__ == '__main__': 40 | main() 41 | 42 | -------------------------------------------------------------------------------- /multi_stream_pkg/multi_stream_pkg/multi_stream_class.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 
13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | # This node performs detection and classification inference on multiple input video files and publishes results to topics multi_detection and multi_classification respectively 24 | 25 | # Required ROS2 imports 26 | import rclpy 27 | from rclpy.node import Node 28 | from std_msgs.msg import String 29 | from vision_msgs.msg import Classification2D, ObjectHypothesis, ObjectHypothesisWithPose, BoundingBox2D, Detection2D, Detection2DArray 30 | 31 | import os 32 | import sys 33 | sys.path.append('/opt/nvidia/deepstream/deepstream/lib') 34 | import platform 35 | import configparser 36 | 37 | import gi 38 | gi.require_version('Gst', '1.0') 39 | from gi.repository import GObject, Gst 40 | 41 | import pyds 42 | 43 | sys.path.insert(0, './src/ros2_deepstream') 44 | from common.is_aarch_64 import is_aarch64 45 | from common.bus_call import bus_call 46 | from common.FPS import GETFPS 47 | 48 | from ctypes import * 49 | import time 50 | import math 51 | import numpy as np 52 | import cv2 53 | import os 54 | fps_streams = {} 55 | frame_count = {} 56 | saved_count = {} 57 | 58 | MAX_DISPLAY_LEN=64 59 | MUXER_OUTPUT_WIDTH=1920 60 | MUXER_OUTPUT_HEIGHT=1080 61 | MUXER_BATCH_TIMEOUT_USEC=4000000 62 | TILED_OUTPUT_WIDTH=1920 63 | TILED_OUTPUT_HEIGHT=1080 64 | GST_CAPS_FEATURES_NVMM="memory:NVMM" 65 | pgie_classes_str= ["Vehicle", "TwoWheeler", "Person","RoadSign"] 66 | 67 | 68 | 
PGIE_CLASS_ID_VEHICLE = 0 69 | PGIE_CLASS_ID_BICYCLE = 1 70 | PGIE_CLASS_ID_PERSON = 2 71 | PGIE_CLASS_ID_ROADSIGN = 3 72 | 73 | location = os.getcwd() + "/src/ros2_deepstream/config_files/" 74 | class_obj = (open(location+'object_labels.txt').readline().rstrip('\n')).split(';') 75 | 76 | class_color = (open(location+'color_labels.txt').readline().rstrip('\n')).split(';') 77 | 78 | class_make = (open(location+'make_labels.txt').readline().rstrip('\n')).split(';') 79 | 80 | class_type = (open(location+'type_labels.txt').readline().rstrip('\n')).split(';') 81 | 82 | 83 | class InferencePublisher(Node): 84 | # tiler_sink_pad_buffer_probe will extract metadata received on tiler src pad 85 | # and update params for drawing rectangle, object information etc. 86 | def tiler_sink_pad_buffer_probe(self,pad,info,u_data): 87 | frame_number=0 88 | num_rects=0 89 | gst_buffer = info.get_buffer() 90 | if not gst_buffer: 91 | print("Unable to get GstBuffer ") 92 | return 93 | 94 | # Retrieve batch metadata from the gst_buffer 95 | # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the 96 | # C address of gst_buffer as input, which is obtained with hash(gst_buffer) 97 | batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer)) 98 | 99 | l_frame = batch_meta.frame_meta_list 100 | while l_frame is not None: 101 | try: 102 | # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta 103 | # The casting is done by pyds.NvDsFrameMeta.cast() 104 | # The casting also keeps ownership of the underlying memory 105 | # in the C code, so the Python garbage collector will leave 106 | # it alone. 
107 | frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data) 108 | except StopIteration: 109 | break 110 | 111 | frame_number=frame_meta.frame_num 112 | l_obj=frame_meta.obj_meta_list 113 | num_rects = frame_meta.num_obj_meta 114 | is_first_obj = True 115 | save_image = False 116 | obj_counter = { 117 | PGIE_CLASS_ID_VEHICLE:0, 118 | PGIE_CLASS_ID_BICYCLE:0, 119 | PGIE_CLASS_ID_PERSON:0, 120 | PGIE_CLASS_ID_ROADSIGN:0 121 | } 122 | 123 | 124 | # Message for output of detection inference 125 | msg = Detection2DArray() 126 | while l_obj is not None: 127 | try: 128 | # Casting l_obj.data to pyds.NvDsObjectMeta 129 | obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data) 130 | l_classifier = obj_meta.classifier_meta_list 131 | # If object is a car (class ID 0), perform attribute classification 132 | if obj_meta.class_id == 0 and l_classifier is not None: 133 | # Creating and publishing message with output of classification inference 134 | msg2 = Classification2D() 135 | 136 | while l_classifier is not None: 137 | result = ObjectHypothesis() 138 | try: 139 | classifier_meta = pyds.glist_get_nvds_classifier_meta(l_classifier.data) 140 | except StopIteration: 141 | print('Could not parse MetaData: ') 142 | break 143 | 144 | classifier_id = classifier_meta.unique_component_id 145 | l_label = classifier_meta.label_info_list 146 | label_info = pyds.glist_get_nvds_label_info(l_label.data) 147 | classifier_class = label_info.result_class_id 148 | 149 | if classifier_id == 2: 150 | result.id = class_color[classifier_class] 151 | elif classifier_id == 3: 152 | result.id = class_make[classifier_class] 153 | else: 154 | result.id = class_type[classifier_class] 155 | 156 | result.score = label_info.result_prob 157 | msg2.results.append(result) 158 | l_classifier = l_classifier.next 159 | 160 | self.publisher_classification.publish(msg2) 161 | 162 | except StopIteration: 163 | break 164 | 165 | obj_counter[obj_meta.class_id] += 1 166 | 167 | # Creating message for output of detection inference 
168 | result = ObjectHypothesisWithPose() 169 | result.id = str(class_obj[obj_meta.class_id]) 170 | result.score = obj_meta.confidence 171 | 172 | left = obj_meta.rect_params.left 173 | top = obj_meta.rect_params.top 174 | width = obj_meta.rect_params.width 175 | height = obj_meta.rect_params.height 176 | bounding_box = BoundingBox2D() 177 | bounding_box.center.x = float(left + (width/2)) 178 | bounding_box.center.y = float(top - (height/2)) 179 | bounding_box.size_x = width 180 | bounding_box.size_y = height 181 | 182 | detection = Detection2D() 183 | detection.results.append(result) 184 | detection.bbox = bounding_box 185 | msg.detections.append(detection) 186 | 187 | 188 | # Periodically check for objects with borderline confidence value that may be false positive detections. 189 | # If such detections are found, annotate the frame with bboxes and confidence value. 190 | # Save the annotated frame to file. 191 | if((saved_count["stream_"+str(frame_meta.pad_index)]%30==0) and (obj_meta.confidence>0.3 and obj_meta.confidence<0.31)): 192 | if is_first_obj: 193 | is_first_obj = False 194 | # Getting Image data using nvbufsurface 195 | # the input should be address of buffer and batch_id 196 | n_frame=pyds.get_nvds_buf_surface(hash(gst_buffer),frame_meta.batch_id) 197 | #convert python array into numy array format. 
198 | frame_image=np.array(n_frame,copy=True,order='C') 199 | #covert the array into cv2 default color format 200 | frame_image=cv2.cvtColor(frame_image,cv2.COLOR_RGBA2BGRA) 201 | 202 | save_image = True 203 | frame_image=draw_bounding_boxes(frame_image,obj_meta,obj_meta.confidence) 204 | try: 205 | l_obj=l_obj.next 206 | except StopIteration: 207 | break 208 | 209 | 210 | # Get frame rate through this probe 211 | fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps() 212 | 213 | # Publishing message with output of detection inference 214 | self.publisher_detection.publish(msg) 215 | 216 | 217 | if save_image: 218 | cv2.imwrite(folder_name+"/stream_"+str(frame_meta.pad_index)+"/frame_"+str(frame_number)+".jpg",frame_image) 219 | saved_count["stream_"+str(frame_meta.pad_index)]+=1 220 | try: 221 | l_frame=l_frame.next 222 | except StopIteration: 223 | break 224 | 225 | return Gst.PadProbeReturn.OK 226 | 227 | def draw_bounding_boxes(image,obj_meta,confidence): 228 | confidence='{0:.2f}'.format(confidence) 229 | rect_params=obj_meta.rect_params 230 | top=int(rect_params.top) 231 | left=int(rect_params.left) 232 | width=int(rect_params.width) 233 | height=int(rect_params.height) 234 | obj_name=pgie_classes_str[obj_meta.class_id] 235 | image=cv2.rectangle(image,(left,top),(left+width,top+height),(0,0,255,0),2) 236 | # Note that on some systems cv2.putText erroneously draws horizontal lines across the image 237 | image=cv2.putText(image,obj_name+',C='+str(confidence),(left-10,top-10),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,255,0),2) 238 | return image 239 | 240 | def cb_newpad(self, decodebin, decoder_src_pad,data): 241 | print("In cb_newpad\n") 242 | caps=decoder_src_pad.get_current_caps() 243 | gststruct=caps.get_structure(0) 244 | gstname=gststruct.get_name() 245 | source_bin=data 246 | features=caps.get_features(0) 247 | 248 | # Need to check if the pad created by the decodebin is for video and not 249 | # audio. 
250 | if(gstname.find("video")!=-1): 251 | # Link the decodebin pad only if decodebin has picked nvidia 252 | # decoder plugin nvdec_*. We do this by checking if the pad caps contain 253 | # NVMM memory features. 254 | if features.contains("memory:NVMM"): 255 | # Get the source bin ghost pad 256 | bin_ghost_pad=source_bin.get_static_pad("src") 257 | if not bin_ghost_pad.set_target(decoder_src_pad): 258 | sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n") 259 | else: 260 | sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n") 261 | 262 | def decodebin_child_added(self,child_proxy,Object,name,user_data): 263 | print("Decodebin child added:", name, "\n") 264 | if(name.find("decodebin") != -1): 265 | Object.connect("child-added",decodebin_child_added,user_data) 266 | if(is_aarch64() and name.find("nvv4l2decoder") != -1): 267 | print("Seting bufapi_version\n") 268 | Object.set_property("bufapi-version",True) 269 | 270 | def create_source_bin(self,index,uri): 271 | print("Creating source bin") 272 | 273 | # Create a source GstBin to abstract this bin's content from the rest of the 274 | # pipeline 275 | bin_name="source-bin-%02d" %index 276 | print(bin_name) 277 | nbin=Gst.Bin.new(bin_name) 278 | if not nbin: 279 | sys.stderr.write(" Unable to create source bin \n") 280 | 281 | # Source element for reading from the uri. 282 | # We will use decodebin and let it figure out the container format of the 283 | # stream and the codec and plug the appropriate demux and decode plugins. 
284 | uri_decode_bin=Gst.ElementFactory.make("uridecodebin", "uri-decode-bin") 285 | if not uri_decode_bin: 286 | sys.stderr.write(" Unable to create uri decode bin \n") 287 | # We set the input uri to the source element 288 | uri_decode_bin.set_property("uri",uri) 289 | # Connect to the "pad-added" signal of the decodebin which generates a 290 | # callback once a new pad for raw data has beed created by the decodebin 291 | uri_decode_bin.connect("pad-added",self.cb_newpad,nbin) 292 | uri_decode_bin.connect("child-added",self.decodebin_child_added,nbin) 293 | 294 | # We need to create a ghost pad for the source bin which will act as a proxy 295 | # for the video decoder src pad. The ghost pad will not have a target right 296 | # now. Once the decode bin creates the video decoder and generates the 297 | # cb_newpad callback, we will set the ghost pad target to the video decoder 298 | # src pad. 299 | Gst.Bin.add(nbin,uri_decode_bin) 300 | bin_pad=nbin.add_pad(Gst.GhostPad.new_no_target("src",Gst.PadDirection.SRC)) 301 | if not bin_pad: 302 | sys.stderr.write(" Failed to add ghost pad in source bin \n") 303 | return None 304 | return nbin 305 | 306 | 307 | def __init__(self): 308 | super().__init__('inference_publisher') 309 | 310 | self.declare_parameter('input_sources') 311 | input_sources = self.get_parameter('input_sources').value 312 | number_sources = len(input_sources) 313 | 314 | for i in range(number_sources): 315 | fps_streams["stream{0}".format(i)]=GETFPS(i) 316 | 317 | 318 | self.publisher_detection = self.create_publisher(Detection2DArray, 'multi_detection', 10) 319 | 320 | self.publisher_classification = self.create_publisher(Classification2D, 'multi_classification', 10) 321 | 322 | # Standard GStreamer initialization 323 | GObject.threads_init() 324 | Gst.init(None) 325 | 326 | # Create gstreamer elements 327 | # Create Pipeline element that will form a connection of other elements 328 | print("Creating Pipeline \n ") 329 | self.pipeline = 
Gst.Pipeline() 330 | if not self.pipeline: 331 | sys.stderr.write(" Unable to create Pipeline \n") 332 | 333 | print("Creating streamux \n ") 334 | # Create nvstreammux instance to form batches from one or more sources. 335 | streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") 336 | if not streammux: 337 | sys.stderr.write(" Unable to create NvStreamMux \n") 338 | self.pipeline.add(streammux) 339 | 340 | 341 | for i in range(number_sources): 342 | frame_count["stream_"+str(i)]=0 343 | saved_count["stream_"+str(i)]=0 344 | print("Creating source_bin ",i," \n ") 345 | uri_name=input_sources[i] 346 | if uri_name.find("rtsp://") == 0 : 347 | is_live = True 348 | source_bin=self.create_source_bin(i, uri_name) 349 | if not source_bin: 350 | sys.stderr.write("Unable to create source bin \n") 351 | self.pipeline.add(source_bin) 352 | padname="sink_%u" %i 353 | sinkpad= streammux.get_request_pad(padname) 354 | if not sinkpad: 355 | sys.stderr.write("Unable to create sink pad bin \n") 356 | srcpad=source_bin.get_static_pad("src") 357 | if not srcpad: 358 | sys.stderr.write("Unable to create src pad bin \n") 359 | srcpad.link(sinkpad) 360 | 361 | 362 | # Use nvinfer to run inferencing on decoder's output, 363 | # behaviour of inferencing is set through config file 364 | pgie = Gst.ElementFactory.make("nvinfer", "primary-inference1") 365 | if not pgie: 366 | sys.stderr.write(" Unable to create pgie1 \n") 367 | 368 | tracker = Gst.ElementFactory.make("nvtracker", "tracker") 369 | if not tracker: 370 | sys.stderr.write(" Unable to create tracker \n") 371 | 372 | sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine") 373 | if not sgie1: 374 | sys.stderr.write(" Unable to make sgie1 \n") 375 | 376 | sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine") 377 | if not sgie1: 378 | sys.stderr.write(" Unable to make sgie2 \n") 379 | 380 | sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine") 381 | if not 
sgie3: 382 | sys.stderr.write(" Unable to make sgie3 \n") 383 | 384 | pgie2 = Gst.ElementFactory.make("nvinfer", "primary-inference2") 385 | if not pgie2: 386 | sys.stderr.write(" Unable to create pgie2 \n") 387 | 388 | nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1") 389 | if not nvvidconv1: 390 | sys.stderr.write(" Unable to create nvvidconv1 \n") 391 | 392 | print("Creating filter1 \n ") 393 | caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA") 394 | filter1 = Gst.ElementFactory.make("capsfilter", "filter1") 395 | if not filter1: 396 | sys.stderr.write(" Unable to get the caps filter1 \n") 397 | filter1.set_property("caps", caps1) 398 | 399 | print("Creating tiler1 \n ") 400 | tiler1=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler1") 401 | if not tiler1: 402 | sys.stderr.write(" Unable to create tiler1 \n") 403 | 404 | print("Creating nvvidconv_1 \n ") 405 | nvvidconv_1 = Gst.ElementFactory.make("nvvideoconvert", "convertor_1") 406 | if not nvvidconv_1: 407 | sys.stderr.write(" Unable to create nvvidconv_1 \n") 408 | 409 | nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2") 410 | if not nvvidconv2: 411 | sys.stderr.write(" Unable to create nvvidconv2 \n") 412 | 413 | caps2 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA") 414 | filter2 = Gst.ElementFactory.make("capsfilter", "filter2") 415 | if not filter2: 416 | sys.stderr.write(" Unable to get the caps filter2 \n") 417 | filter2.set_property("caps", caps2) 418 | 419 | print("Creating tiler2 \n ") 420 | tiler2=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler2") 421 | if not tiler2: 422 | sys.stderr.write(" Unable to create tiler2 \n") 423 | 424 | print("Creating nvvidconv_2 \n ") 425 | nvvidconv_2 = Gst.ElementFactory.make("nvvideoconvert", "convertor_2") 426 | if not nvvidconv_2: 427 | sys.stderr.write(" Unable to create nvvidconv_2 \n") 428 | 429 | 430 | 431 | # Create OSD to draw on the converted RGBA buffer 432 | nvosd1 = 
Gst.ElementFactory.make("nvdsosd", "onscreendisplay1") 433 | if not nvosd1: 434 | sys.stderr.write(" Unable to create nvosd1 \n") 435 | 436 | nvosd2 = Gst.ElementFactory.make("nvdsosd", "onscreendisplay2") 437 | if not nvosd2: 438 | sys.stderr.write(" Unable to create nvosd2 \n") 439 | 440 | # Finally render the osd output 441 | if is_aarch64(): 442 | transform1 = Gst.ElementFactory.make("nvegltransform", "nvegl-transform1") 443 | transform2 = Gst.ElementFactory.make("nvegltransform", "nvegl-transform2") 444 | 445 | print("Creating EGLSink \n") 446 | sink1 = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer1") 447 | if not sink1: 448 | sys.stderr.write(" Unable to create egl sink1 \n") 449 | 450 | sink2 = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer2") 451 | if not sink2: 452 | sys.stderr.write(" Unable to create egl sink2 \n") 453 | 454 | streammux.set_property('width', 1920) 455 | streammux.set_property('height', 1080) 456 | streammux.set_property('batch-size', number_sources) 457 | streammux.set_property('batched-push-timeout', 4000000) 458 | 459 | #Set properties of pgie and sgie 460 | location = os.getcwd() + "/src/ros2_deepstream/config_files/" 461 | pgie.set_property('config-file-path', location+"dstest2_pgie_config.txt") 462 | pgie_batch_size=pgie.get_property("batch-size") 463 | if(pgie_batch_size != number_sources): 464 | print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n") 465 | pgie.set_property("batch-size",number_sources) 466 | 467 | sgie1.set_property('config-file-path', location+"dstest2_sgie1_config.txt") 468 | sgie2.set_property('config-file-path', location+"dstest2_sgie2_config.txt") 469 | sgie3.set_property('config-file-path', location+"dstest2_sgie3_config.txt") 470 | pgie2.set_property('config-file-path', location+"dstest1_pgie_config.txt") 471 | sink1.set_property('sync', False) 472 | sink2.set_property('sync', False) 473 | 474 | 475 | 
tiler_rows=int(math.sqrt(number_sources)) 476 | tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows)) 477 | tiler1.set_property("rows",tiler_rows) 478 | tiler1.set_property("columns",tiler_columns) 479 | tiler1.set_property("width", TILED_OUTPUT_WIDTH) 480 | tiler1.set_property("height", TILED_OUTPUT_HEIGHT) 481 | 482 | tiler2.set_property("rows",tiler_rows) 483 | tiler2.set_property("columns",tiler_columns) 484 | tiler2.set_property("width", TILED_OUTPUT_WIDTH) 485 | tiler2.set_property("height", TILED_OUTPUT_HEIGHT) 486 | 487 | #Set properties of tracker 488 | config = configparser.ConfigParser() 489 | config.read(location+'dstest2_tracker_config.txt') 490 | config.sections() 491 | 492 | for key in config['tracker']: 493 | if key == 'tracker-width' : 494 | tracker_width = config.getint('tracker', key) 495 | tracker.set_property('tracker-width', tracker_width) 496 | if key == 'tracker-height' : 497 | tracker_height = config.getint('tracker', key) 498 | tracker.set_property('tracker-height', tracker_height) 499 | if key == 'gpu-id' : 500 | tracker_gpu_id = config.getint('tracker', key) 501 | tracker.set_property('gpu_id', tracker_gpu_id) 502 | if key == 'll-lib-file' : 503 | tracker_ll_lib_file = config.get('tracker', key) 504 | tracker.set_property('ll-lib-file', tracker_ll_lib_file) 505 | if key == 'll-config-file' : 506 | tracker_ll_config_file = config.get('tracker', key) 507 | tracker.set_property('ll-config-file', tracker_ll_config_file) 508 | if key == 'enable-batch-process' : 509 | tracker_enable_batch_process = config.getint('tracker', key) 510 | tracker.set_property('enable_batch_process', tracker_enable_batch_process) 511 | 512 | tee = Gst.ElementFactory.make('tee', 'tee') 513 | queue1 = Gst.ElementFactory.make('queue','infer1') 514 | queue2 = Gst.ElementFactory.make('queue','infer2') 515 | 516 | print("Adding elements to Pipeline \n") 517 | self.pipeline.add(pgie) 518 | self.pipeline.add(pgie2) 519 | self.pipeline.add(tracker) 520 | 
self.pipeline.add(sgie1) 521 | self.pipeline.add(sgie2) 522 | self.pipeline.add(sgie3) 523 | self.pipeline.add(nvvidconv1) 524 | self.pipeline.add(nvvidconv2) 525 | self.pipeline.add(nvosd1) 526 | self.pipeline.add(nvosd2) 527 | self.pipeline.add(sink1) 528 | self.pipeline.add(sink2) 529 | self.pipeline.add(tee) 530 | self.pipeline.add(queue1) 531 | self.pipeline.add(queue2) 532 | self.pipeline.add(tiler1) 533 | self.pipeline.add(tiler2) 534 | self.pipeline.add(filter1) 535 | self.pipeline.add(filter2) 536 | self.pipeline.add(nvvidconv_1) 537 | self.pipeline.add(nvvidconv_2) 538 | 539 | 540 | if is_aarch64(): 541 | self.pipeline.add(transform1) 542 | self.pipeline.add(transform2) 543 | 544 | # Link the elements together 545 | print("Linking elements in the Pipeline \n") 546 | streammux.link(tee) 547 | tee.link(queue1) 548 | tee.link(queue2) 549 | queue1.link(pgie) 550 | queue2.link(pgie2) 551 | pgie.link(tracker) 552 | tracker.link(sgie1) 553 | sgie1.link(sgie2) 554 | sgie2.link(sgie3) 555 | sgie3.link(nvvidconv1) 556 | nvvidconv1.link(filter1) 557 | filter1.link(tiler1) 558 | tiler1.link(nvvidconv_1) 559 | nvvidconv_1.link(nvosd1) 560 | 561 | pgie2.link(nvvidconv2) 562 | nvvidconv2.link(filter2) 563 | filter2.link(tiler2) 564 | tiler2.link(nvvidconv_2) 565 | nvvidconv_2.link(nvosd2) 566 | 567 | 568 | if is_aarch64(): 569 | nvosd1.link(transform1) 570 | transform1.link(sink1) 571 | nvosd2.link(transform2) 572 | transform2.link(sink2) 573 | else: 574 | nvosd1.link(sink1) 575 | nvosd2.link(sink2) 576 | 577 | 578 | # create and event loop and feed gstreamer bus mesages to it 579 | self.loop = GObject.MainLoop() 580 | bus = self.pipeline.get_bus() 581 | bus.add_signal_watch() 582 | bus.connect ("message", bus_call, self.loop) 583 | 584 | # Lets add probe to get informed of the meta data generated, we add probe to 585 | # the sink pad of the osd element, since by that time, the buffer would have 586 | # had got all the metadata. 
587 | 588 | tiler_sink_pad_1=tiler1.get_static_pad("sink") 589 | if not tiler_sink_pad_1: 590 | sys.stderr.write(" Unable to get src pad \n") 591 | else: 592 | tiler_sink_pad_1.add_probe(Gst.PadProbeType.BUFFER, self.tiler_sink_pad_buffer_probe, 0) 593 | 594 | tiler_sink_pad_2=tiler2.get_static_pad("sink") 595 | if not tiler_sink_pad_2: 596 | sys.stderr.write(" Unable to get src pad \n") 597 | else: 598 | tiler_sink_pad_2.add_probe(Gst.PadProbeType.BUFFER, self.tiler_sink_pad_buffer_probe, 0) 599 | 600 | 601 | def start_pipeline(self): 602 | print("Starting pipeline \n") 603 | # start play back and listen to events 604 | self.pipeline.set_state(Gst.State.PLAYING) 605 | try: 606 | self.loop.run() 607 | except: 608 | pass 609 | # cleanup 610 | self.pipeline.set_state(Gst.State.NULL) 611 | -------------------------------------------------------------------------------- /multi_stream_pkg/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | multi_stream_pkg 5 | 0.0.0 6 | TODO: Package description 7 | asawaree 8 | TODO: License declaration 9 | 10 | ament_python 11 | 12 | ament_copyright 13 | ament_flake8 14 | ament_pep257 15 | python3-pytest 16 | 17 | 18 | ament_python 19 | 20 | 21 | -------------------------------------------------------------------------------- /multi_stream_pkg/resource/multi_stream_pkg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVIDIA-AI-IOT/ros2_deepstream/2aa6218416189312e7913365fd73921b7101041d/multi_stream_pkg/resource/multi_stream_pkg -------------------------------------------------------------------------------- /multi_stream_pkg/setup.cfg: -------------------------------------------------------------------------------- 1 | [develop] 2 | script-dir=$base/lib/multi_stream_pkg 3 | [install] 4 | install-scripts=$base/lib/multi_stream_pkg 5 | 
-------------------------------------------------------------------------------- /multi_stream_pkg/setup.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | from setuptools import setup 24 | 25 | package_name = 'multi_stream_pkg' 26 | 27 | setup( 28 | name=package_name, 29 | version='0.0.0', 30 | packages=[package_name], 31 | data_files=[ 32 | ('share/ament_index/resource_index/packages', 33 | ['resource/' + package_name]), 34 | ('share/' + package_name, ['package.xml']), 35 | ], 36 | install_requires=['setuptools'], 37 | zip_safe=True, 38 | maintainer='asawaree', 39 | maintainer_email='asawareeb@nvidia.com', 40 | description='ROS2 publisher for multiple input streams', 41 | license='MIT License', 42 | tests_require=['pytest'], 43 | entry_points={ 44 | 'console_scripts': [ 45 | 'multi_stream = multi_stream_pkg.multi_stream:main' 46 | ], 47 | }, 48 | ) 49 | -------------------------------------------------------------------------------- /multi_stream_pkg/test/test_copyright.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Open Source Robotics Foundation, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
from ament_copyright.main import main
import pytest


@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
    """Run the ament copyright linter over the package sources.

    Fails if any source file is missing the expected copyright/license
    header.
    """
    rc = main(argv=['.', 'test'])
    assert rc == 0, 'Found errors'
from ament_pep257.main import main
import pytest


@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
    """Run the ament pep257 docstring-style linter over the package sources."""
    rc = main(argv=['.', 'test'])
    assert rc == 0, 'Found code style errors / warnings'
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################

"""ament_python setup script for the single_stream_pkg ROS2 package."""

from setuptools import setup

package_name = 'single_stream_pkg'

# Resources installed into the ament index and the package share directory.
data_files = [
    ('share/ament_index/resource_index/packages',
     ['resource/' + package_name]),
    ('share/' + package_name, ['package.xml']),
]

setup(
    name=package_name,
    version='0.0.0',
    packages=[package_name],
    data_files=data_files,
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='asawaree',
    maintainer_email='asawareeb@nvidia.com',
    description='DeepStream detection and classification publisher for single input stream',
    license='MIT License',
    tests_require=['pytest'],
    entry_points={
        'console_scripts': [
            # Invoked as: ros2 run single_stream_pkg single_stream
            'single_stream = single_stream_pkg.single_stream:main',
        ],
    },
)
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
################################################################################

"""Entry point for the single-stream DeepStream inference publisher node."""

import rclpy
from single_stream_pkg.single_stream_class import InferencePublisher


def main(args=None):
    """Initialize ROS2, run the DeepStream pipeline node, then shut down.

    Blocks inside ``start_pipeline()`` until the GStreamer main loop exits.
    """
    rclpy.init(args=args)

    node = InferencePublisher()
    node.start_pipeline()

    # Destroy the node explicitly; otherwise this would happen implicitly
    # when the garbage collector reclaims the node object.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################

# This node performs detection and classification inference on a single input
# stream and publishes results to topics infer_detection and
# infer_classification respectively.

# Required ROS2 imports
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
from vision_msgs.msg import Classification2D, ObjectHypothesis, ObjectHypothesisWithPose, BoundingBox2D, Detection2D, Detection2DArray

import os
import sys
sys.path.append('/opt/nvidia/deepstream/deepstream/lib')
import platform
import configparser

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst

import pyds

sys.path.insert(0, './src/ros2_deepstream')
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.FPS import GETFPS

# Class IDs emitted by the primary 4-class detector.
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3

# Directory holding the nvinfer/nvtracker configuration and label files.
# NOTE(review): assumes the node is launched from the workspace root so that
# ./src/ros2_deepstream resolves -- confirm against the launch instructions.
location = os.getcwd() + "/src/ros2_deepstream/config_files/"


def _read_labels(filename):
    """Return the ';'-separated labels on the first line of *filename*.

    Uses a context manager so the file handle is closed; the previous code
    left the handles open.
    """
    with open(location + filename) as label_file:
        return label_file.readline().rstrip('\n').split(';')


class_obj = _read_labels('object_labels.txt')    # primary detector class names
class_color = _read_labels('color_labels.txt')   # secondary classifier: car color
class_make = _read_labels('make_labels.txt')     # secondary classifier: car make
class_type = _read_labels('type_labels.txt')     # secondary classifier: car type


class InferencePublisher(Node):
    """ROS2 node running a DeepStream pipeline on a single V4L2 camera.

    The camera stream is tee'd into two branches:

    * detection branch: pgie -> tracker -> three secondary classifiers,
      results published on ``infer_detection`` (Detection2DArray) and, for
      vehicles, on ``infer_classification`` (Classification2D);
    * a second primary detector branch rendered to its own on-screen display.

    Both branches attach the same buffer probe to their OSD sink pad to
    extract and publish the inference metadata.
    """

    def osd_sink_pad_buffer_probe(self, pad, info, u_data):
        """Extract DeepStream metadata from each buffer and publish it.

        Walks the batch -> frame -> object -> classifier metadata lists,
        publishes detection/classification messages, and overlays a summary
        text line on the frame.

        Args:
            pad: the OSD sink pad the probe is attached to (unused).
            info: Gst.PadProbeInfo carrying the GstBuffer.
            u_data: opaque user data supplied at registration (unused).

        Returns:
            Gst.PadProbeReturn.OK so the buffer continues downstream.
        """
        frame_number = 0
        # Initializing object counter with 0.
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_ROADSIGN: 0,
        }

        num_rects = 0

        gst_buffer = info.get_buffer()
        if not gst_buffer:
            print("Unable to get GstBuffer ")
            # Fix: a pad probe callback must return a Gst.PadProbeReturn
            # value; the original returned None here.
            return Gst.PadProbeReturn.OK

        # Retrieve batch metadata from the gst_buffer.
        # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
        # C address of gst_buffer as input, which is obtained with hash(gst_buffer).
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
        l_frame = batch_meta.frame_meta_list
        while l_frame is not None:
            try:
                # l_frame.data needs a cast to pyds.NvDsFrameMeta; the cast
                # keeps ownership of the underlying memory in the C code, so
                # the Python garbage collector will leave it alone.
                frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            except StopIteration:
                break

            frame_number = frame_meta.frame_num
            num_rects = frame_meta.num_obj_meta
            l_obj = frame_meta.obj_meta_list

            # Message for output of detection inference.
            msg = Detection2DArray()
            while l_obj is not None:
                try:
                    # Casting l_obj.data to pyds.NvDsObjectMeta.
                    obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                    l_classifier = obj_meta.classifier_meta_list

                    # If object is a car (class ID 0), publish its attribute
                    # classifications (color/make/type).
                    if obj_meta.class_id == 0 and l_classifier is not None:
                        msg2 = Classification2D()

                        while l_classifier is not None:
                            result = ObjectHypothesis()
                            try:
                                classifier_meta = pyds.glist_get_nvds_classifier_meta(l_classifier.data)
                            except StopIteration:
                                print('Could not parse MetaData: ')
                                break

                            classifier_id = classifier_meta.unique_component_id
                            l_label = classifier_meta.label_info_list
                            label_info = pyds.glist_get_nvds_label_info(l_label.data)
                            classifier_class = label_info.result_class_id

                            # unique-id 2/3/4 are the color/make/type sgies
                            # (see dstest2_sgie*_config.txt).
                            if classifier_id == 2:
                                result.id = class_color[classifier_class]
                            elif classifier_id == 3:
                                result.id = class_make[classifier_class]
                            else:
                                result.id = class_type[classifier_class]

                            result.score = label_info.result_prob
                            msg2.results.append(result)
                            l_classifier = l_classifier.next

                        self.publisher_classification.publish(msg2)
                except StopIteration:
                    break

                obj_counter[obj_meta.class_id] += 1

                # Creating message for output of detection inference.
                result = ObjectHypothesisWithPose()
                result.id = str(class_obj[obj_meta.class_id])
                result.score = obj_meta.confidence

                left = obj_meta.rect_params.left
                top = obj_meta.rect_params.top
                width = obj_meta.rect_params.width
                height = obj_meta.rect_params.height
                bounding_box = BoundingBox2D()
                bounding_box.center.x = float(left + (width / 2))
                # Fix: in image coordinates y grows downward, so the box
                # center lies *below* the top edge; the original subtracted
                # height/2, placing the center above the box.
                bounding_box.center.y = float(top + (height / 2))
                bounding_box.size_x = width
                bounding_box.size_y = height

                detection = Detection2D()
                detection.results.append(result)
                detection.bbox = bounding_box
                msg.detections.append(detection)

                try:
                    l_obj = l_obj.next
                except StopIteration:
                    break

            # Publishing message with output of detection inference.
            self.publisher_detection.publish(msg)

            # Acquiring a display meta object. The memory ownership remains in
            # the C code so downstream plugins can still access it. Otherwise
            # the garbage collector would claim it when this probe exits.
            display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
            display_meta.num_labels = 1
            py_nvosd_text_params = display_meta.text_params[0]
            # Setting display text to be shown on screen.
            # The pyds module allocates a buffer for the string; reading the
            # display_text field returns the C address of the allocated
            # string (use pyds.get_string() to get the content).
            py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

            # Now set the offsets where the string should appear.
            py_nvosd_text_params.x_offset = 10
            py_nvosd_text_params.y_offset = 12

            # Font, font-color and font-size.
            py_nvosd_text_params.font_params.font_name = "Serif"
            py_nvosd_text_params.font_params.font_size = 10
            # set(red, green, blue, alpha); set to White.
            py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

            # Text background color.
            py_nvosd_text_params.set_bg_clr = 1
            # set(red, green, blue, alpha); set to Black.
            py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
            pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
            try:
                l_frame = l_frame.next
            except StopIteration:
                break

        return Gst.PadProbeReturn.OK

    def __init__(self):
        """Build and link the full GStreamer pipeline and the ROS2 publishers.

        Reads the ``input_source`` ROS parameter (a V4L2 device path such as
        /dev/video0) and wires: v4l2src -> capsfilter -> videoconvert ->
        nvvideoconvert -> capsfilter -> nvstreammux -> tee, with two branches
        (pgie+tracker+sgies and pgie2) each ending in an OSD + renderer.
        """
        super().__init__('inference_publisher')
        # Taking name of input source from user.
        self.declare_parameter('input_source')
        param_ip_src = self.get_parameter('input_source').value

        self.publisher_detection = self.create_publisher(Detection2DArray, 'infer_detection', 10)
        self.publisher_classification = self.create_publisher(Classification2D, 'infer_classification', 10)

        # Standard GStreamer initialization.
        # NOTE: GObject.threads_init() is a deprecated no-op on modern
        # PyGObject but kept for compatibility with older stacks.
        GObject.threads_init()
        Gst.init(None)

        # Create Pipeline element that will form a connection of other elements.
        print("Creating Pipeline \n ")
        self.pipeline = Gst.Pipeline()
        if not self.pipeline:
            sys.stderr.write(" Unable to create Pipeline \n")

        print("Creating Source \n ")
        source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
        if not source:
            sys.stderr.write(" Unable to create Source \n")

        caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
        if not caps_v4l2src:
            sys.stderr.write(" Unable to create v4l2src capsfilter \n")

        print("Creating Video Converter \n")

        # videoconvert to make sure a superset of raw formats are supported.
        vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
        if not vidconvsrc:
            sys.stderr.write(" Unable to create videoconvert \n")

        # nvvideoconvert to convert incoming raw buffers to NVMM Mem
        # (NvBufSurface API).
        nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
        if not nvvidconvsrc:
            sys.stderr.write(" Unable to create Nvvideoconvert \n")

        caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
        if not caps_vidconvsrc:
            sys.stderr.write(" Unable to create capsfilter \n")

        # Create nvstreammux instance to form batches from one or more sources.
        streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
        if not streammux:
            sys.stderr.write(" Unable to create NvStreamMux \n")

        # Use nvinfer to run inferencing on the decoder's output; behaviour
        # of inferencing is set through the config file.
        pgie = Gst.ElementFactory.make("nvinfer", "primary-inference1")
        if not pgie:
            sys.stderr.write(" Unable to create pgie1 \n")

        tracker = Gst.ElementFactory.make("nvtracker", "tracker")
        if not tracker:
            sys.stderr.write(" Unable to create tracker \n")

        sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
        if not sgie1:
            sys.stderr.write(" Unable to make sgie1 \n")

        sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
        # Fix: the original re-checked sgie1 here (copy-paste bug), so a
        # failed sgie2 creation went unreported.
        if not sgie2:
            sys.stderr.write(" Unable to make sgie2 \n")

        sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
        if not sgie3:
            sys.stderr.write(" Unable to make sgie3 \n")

        pgie2 = Gst.ElementFactory.make("nvinfer", "primary-inference2")
        if not pgie2:
            sys.stderr.write(" Unable to create pgie2 \n")

        nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
        if not nvvidconv1:
            sys.stderr.write(" Unable to create nvvidconv1 \n")

        nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2")
        if not nvvidconv2:
            sys.stderr.write(" Unable to create nvvidconv2 \n")

        # Create OSD to draw on the converted RGBA buffer.
        nvosd1 = Gst.ElementFactory.make("nvdsosd", "onscreendisplay1")
        if not nvosd1:
            sys.stderr.write(" Unable to create nvosd1 \n")

        nvosd2 = Gst.ElementFactory.make("nvdsosd", "onscreendisplay2")
        if not nvosd2:
            sys.stderr.write(" Unable to create nvosd2 \n")

        # Finally render the osd output; Jetson needs an egltransform in
        # front of the EGL sink.
        if is_aarch64():
            transform1 = Gst.ElementFactory.make("nvegltransform", "nvegl-transform1")
            transform2 = Gst.ElementFactory.make("nvegltransform", "nvegl-transform2")

        print("Creating EGLSink \n")
        sink1 = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer1")
        if not sink1:
            sys.stderr.write(" Unable to create egl sink1 \n")

        sink2 = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer2")
        if not sink2:
            sys.stderr.write(" Unable to create egl sink2 \n")

        source.set_property('device', param_ip_src)
        caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
        caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
        streammux.set_property('width', 1920)
        streammux.set_property('height', 1080)
        streammux.set_property('batch-size', 1)
        streammux.set_property('batched-push-timeout', 4000000)

        # Set properties of pgie and sgie.
        location = os.getcwd() + "/src/ros2_deepstream/config_files/"
        pgie.set_property('config-file-path', location + "dstest2_pgie_config.txt")
        sgie1.set_property('config-file-path', location + "dstest2_sgie1_config.txt")
        sgie2.set_property('config-file-path', location + "dstest2_sgie2_config.txt")
        sgie3.set_property('config-file-path', location + "dstest2_sgie3_config.txt")
        pgie2.set_property('config-file-path', location + "dstest1_pgie_config.txt")
        # Don't sync on the clock; render as fast as frames arrive.
        sink1.set_property('sync', False)
        sink2.set_property('sync', False)

        # Set properties of tracker from its config file.
        config = configparser.ConfigParser()
        config.read(location + 'dstest2_tracker_config.txt')
        config.sections()

        for key in config['tracker']:
            if key == 'tracker-width':
                tracker_width = config.getint('tracker', key)
                tracker.set_property('tracker-width', tracker_width)
            if key == 'tracker-height':
                tracker_height = config.getint('tracker', key)
                tracker.set_property('tracker-height', tracker_height)
            if key == 'gpu-id':
                tracker_gpu_id = config.getint('tracker', key)
                tracker.set_property('gpu_id', tracker_gpu_id)
            if key == 'll-lib-file':
                tracker_ll_lib_file = config.get('tracker', key)
                tracker.set_property('ll-lib-file', tracker_ll_lib_file)
            if key == 'll-config-file':
                tracker_ll_config_file = config.get('tracker', key)
                tracker.set_property('ll-config-file', tracker_ll_config_file)
            if key == 'enable-batch-process':
                tracker_enable_batch_process = config.getint('tracker', key)
                tracker.set_property('enable_batch_process', tracker_enable_batch_process)

        # tee fans the muxed stream out to the two inference branches.
        tee = Gst.ElementFactory.make('tee', 'tee')
        queue1 = Gst.ElementFactory.make('queue', 'infer1')
        queue2 = Gst.ElementFactory.make('queue', 'infer2')

        print("Adding elements to Pipeline \n")
        self.pipeline.add(source)
        self.pipeline.add(caps_v4l2src)
        self.pipeline.add(vidconvsrc)
        self.pipeline.add(nvvidconvsrc)
        self.pipeline.add(caps_vidconvsrc)
        self.pipeline.add(streammux)
        self.pipeline.add(pgie)
        self.pipeline.add(pgie2)
        self.pipeline.add(tracker)
        self.pipeline.add(sgie1)
        self.pipeline.add(sgie2)
        self.pipeline.add(sgie3)
        self.pipeline.add(nvvidconv1)
        self.pipeline.add(nvvidconv2)
        self.pipeline.add(nvosd1)
        self.pipeline.add(nvosd2)
        self.pipeline.add(sink1)
        self.pipeline.add(sink2)
        self.pipeline.add(tee)
        self.pipeline.add(queue1)
        self.pipeline.add(queue2)

        if is_aarch64():
            self.pipeline.add(transform1)
            self.pipeline.add(transform2)

        # Link the elements together.
        print("Linking elements in the Pipeline \n")
        source.link(caps_v4l2src)
        caps_v4l2src.link(vidconvsrc)
        vidconvsrc.link(nvvidconvsrc)
        nvvidconvsrc.link(caps_vidconvsrc)

        sinkpad = streammux.get_request_pad("sink_0")
        if not sinkpad:
            sys.stderr.write(" Unable to get the sink pad of streammux \n")

        srcpad = caps_vidconvsrc.get_static_pad("src")
        if not srcpad:
            sys.stderr.write(" Unable to get source pad of decoder \n")
        srcpad.link(sinkpad)
        streammux.link(tee)
        tee.link(queue1)
        tee.link(queue2)
        # Branch 1: detection + tracking + attribute classification.
        queue1.link(pgie)
        pgie.link(tracker)
        tracker.link(sgie1)
        sgie1.link(sgie2)
        sgie2.link(sgie3)
        sgie3.link(nvvidconv1)
        nvvidconv1.link(nvosd1)
        # Branch 2: second primary detector.
        queue2.link(pgie2)
        pgie2.link(nvvidconv2)
        nvvidconv2.link(nvosd2)

        if is_aarch64():
            nvosd1.link(transform1)
            transform1.link(sink1)
            nvosd2.link(transform2)
            transform2.link(sink2)
        else:
            nvosd1.link(sink1)
            nvosd2.link(sink2)

        # Create an event loop and feed gstreamer bus messages to it.
        self.loop = GObject.MainLoop()
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", bus_call, self.loop)

        # Add probes to the sink pads of the osd elements, since by that
        # time the buffer carries all the inference metadata.
        osdsinkpad1 = nvosd1.get_static_pad("sink")
        if not osdsinkpad1:
            sys.stderr.write(" Unable to get sink pad of nvosd \n")
        osdsinkpad1.add_probe(Gst.PadProbeType.BUFFER, self.osd_sink_pad_buffer_probe, 0)

        osdsinkpad2 = nvosd2.get_static_pad("sink")
        if not osdsinkpad2:
            sys.stderr.write(" Unable to get sink pad of nvosd \n")
        osdsinkpad2.add_probe(Gst.PadProbeType.BUFFER, self.osd_sink_pad_buffer_probe, 0)

    def start_pipeline(self):
        """Set the pipeline to PLAYING and block in the GLib main loop.

        Returns after the loop exits (EOS/error via bus_call, or Ctrl-C),
        after setting the pipeline back to NULL to release resources.
        """
        print("Starting pipeline \n")
        self.pipeline.set_state(Gst.State.PLAYING)
        try:
            self.loop.run()
        except BaseException:
            # Best-effort shutdown on KeyboardInterrupt or any loop error;
            # cleanup below still runs.
            pass
        # Cleanup.
        self.pipeline.set_state(Gst.State.NULL)
from ament_copyright.main import main
import pytest


@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
    """Run the ament copyright linter over the package sources.

    Fails if any source file is missing the expected copyright/license
    header.
    """
    rc = main(argv=['.', 'test'])
    assert rc == 0, 'Found errors'
from ament_pep257.main import main
import pytest


@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
    """Run the ament pep257 docstring-style linter over the package sources."""
    rc = main(argv=['.', 'test'])
    assert rc == 0, 'Found code style errors / warnings'
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################

"""ament_python setup script for the subscriber_pkg ROS2 package."""

from setuptools import setup

package_name = 'subscriber_pkg'

# Resources installed into the ament index and the package share directory.
data_files = [
    ('share/ament_index/resource_index/packages',
     ['resource/' + package_name]),
    ('share/' + package_name, ['package.xml']),
]

# One console script per sample subscriber node.
console_scripts = [
    'sub_detection = subscriber_pkg.sub_detection:main',
    'sub_classification = subscriber_pkg.sub_classification:main',
    'sub_multi_detection = subscriber_pkg.sub_multi_detection:main',
    'sub_multi_classification = subscriber_pkg.sub_multi_classification:main',
]

setup(
    name=package_name,
    version='0.0.0',
    packages=[package_name],
    data_files=data_files,
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='asawaree',
    maintainer_email='asawareeb@nvidia.com',
    description='ROS2 sample subscriber nodes',
    license='MIT License',
    tests_require=['pytest'],
    entry_points={'console_scripts': console_scripts},
)
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | import rclpy 24 | from rclpy.node import Node 25 | 26 | from std_msgs.msg import String 27 | from sensor_msgs.msg import Image 28 | from sensor_msgs.msg import CameraInfo 29 | from vision_msgs.msg import Classification2D 30 | 31 | import cv2 32 | import numpy as np 33 | 34 | class MinimalSubscriber(Node): 35 | def __init__(self): 36 | super().__init__('minimal_subscriber') 37 | self.subscription = self.create_subscription( 38 | Classification2D, 39 | 'infer_classification', 40 | self.listener_callback, 41 | 10) 42 | self.subscription # prevent unused variable warning 43 | 44 | def listener_callback(self, msg): 45 | print(msg.results) 46 | 47 | 48 | def main(args=None): 49 | rclpy.init(args=args) 50 | 51 | minimal_subscriber = MinimalSubscriber() 52 | 53 | rclpy.spin(minimal_subscriber) 54 | 55 | # Destroy the node explicitly 56 | # (optional - otherwise it will be done automatically 57 | # when the garbage collector destroys the node object) 58 | minimal_subscriber.destroy_node() 59 | rclpy.shutdown() 60 | 61 | 62 | if __name__ == '__main__': 63 | main() 64 | -------------------------------------------------------------------------------- /subscriber_pkg/subscriber_pkg/sub_detection.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | import rclpy 24 | from rclpy.node import Node 25 | 26 | from std_msgs.msg import String 27 | from sensor_msgs.msg import Image 28 | from sensor_msgs.msg import CameraInfo 29 | from vision_msgs.msg import Detection2DArray 30 | 31 | import cv2 32 | import numpy as np 33 | 34 | class MinimalSubscriber(Node): 35 | def __init__(self): 36 | super().__init__('minimal_subscriber') 37 | self.subscription = self.create_subscription( 38 | Detection2DArray, 39 | 'infer_detection', 40 | self.listener_callback, 41 | 10) 42 | self.subscription # prevent unused variable warning 43 | 44 | def listener_callback(self, msg): 45 | print(msg.detections) 46 | 47 | 48 | def main(args=None): 49 | rclpy.init(args=args) 50 | 51 | minimal_subscriber = MinimalSubscriber() 52 | 53 | rclpy.spin(minimal_subscriber) 54 | 55 | # Destroy the node explicitly 56 | # (optional - otherwise it will be done automatically 57 | # when the garbage collector destroys the node object) 58 | minimal_subscriber.destroy_node() 59 | rclpy.shutdown() 60 | 61 | 62 | if __name__ == '__main__': 63 | main() 64 | -------------------------------------------------------------------------------- /subscriber_pkg/subscriber_pkg/sub_multi_classification.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | import rclpy 24 | from rclpy.node import Node 25 | 26 | from std_msgs.msg import String 27 | from sensor_msgs.msg import Image 28 | from sensor_msgs.msg import CameraInfo 29 | from vision_msgs.msg import Classification2D 30 | 31 | import cv2 32 | import numpy as np 33 | 34 | class MinimalSubscriber(Node): 35 | def __init__(self): 36 | super().__init__('minimal_subscriber') 37 | self.subscription = self.create_subscription( 38 | Classification2D, 39 | 'multi_classification', 40 | self.listener_callback, 41 | 10) 42 | self.subscription # prevent unused variable warning 43 | 44 | def listener_callback(self, msg): 45 | print(msg.results) 46 | 47 | 48 | def main(args=None): 49 | rclpy.init(args=args) 50 | 51 | minimal_subscriber = MinimalSubscriber() 52 | 53 | rclpy.spin(minimal_subscriber) 54 | 55 | # Destroy the node explicitly 56 | # (optional - otherwise it will be done automatically 57 | # when the garbage collector destroys the node object) 58 | minimal_subscriber.destroy_node() 59 | rclpy.shutdown() 60 | 61 | 62 | if __name__ == '__main__': 63 | main() 64 | -------------------------------------------------------------------------------- /subscriber_pkg/subscriber_pkg/sub_multi_detection.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | import rclpy 24 | from rclpy.node import Node 25 | 26 | from std_msgs.msg import String 27 | from sensor_msgs.msg import Image 28 | from sensor_msgs.msg import CameraInfo 29 | from vision_msgs.msg import Detection2DArray 30 | 31 | import cv2 32 | import numpy as np 33 | 34 | class MinimalSubscriber(Node): 35 | def __init__(self): 36 | super().__init__('minimal_subscriber') 37 | self.subscription = self.create_subscription( 38 | Detection2DArray, 39 | 'multi_detection', 40 | self.listener_callback, 41 | 10) 42 | self.subscription # prevent unused variable warning 43 | 44 | def listener_callback(self, msg): 45 | print(msg.detections) 46 | 47 | 48 | def main(args=None): 49 | rclpy.init(args=args) 50 | 51 | minimal_subscriber = MinimalSubscriber() 52 | 53 | rclpy.spin(minimal_subscriber) 54 | 55 | # Destroy the node explicitly 56 | # (optional - otherwise it will be done automatically 57 | # when the garbage collector destroys the node object) 58 | minimal_subscriber.destroy_node() 59 | rclpy.shutdown() 60 | 61 | 62 | if __name__ == '__main__': 63 | main() 64 | -------------------------------------------------------------------------------- /subscriber_pkg/test/test_copyright.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Open Source Robotics Foundation, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from ament_copyright.main import main 16 | import pytest 17 | 18 | 19 | @pytest.mark.copyright 20 | @pytest.mark.linter 21 | def test_copyright(): 22 | rc = main(argv=['.', 'test']) 23 | assert rc == 0, 'Found errors' 24 | -------------------------------------------------------------------------------- /subscriber_pkg/test/test_flake8.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 Open Source Robotics Foundation, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from ament_flake8.main import main 16 | import pytest 17 | 18 | 19 | @pytest.mark.flake8 20 | @pytest.mark.linter 21 | def test_flake8(): 22 | rc = main(argv=[]) 23 | assert rc == 0, 'Found errors' 24 | -------------------------------------------------------------------------------- /subscriber_pkg/test/test_pep257.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Open Source Robotics Foundation, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from ament_pep257.main import main 16 | import pytest 17 | 18 | 19 | @pytest.mark.linter 20 | @pytest.mark.pep257 21 | def test_pep257(): 22 | rc = main(argv=['.', 'test']) 23 | assert rc == 0, 'Found code style errors / warnings' 24 | --------------------------------------------------------------------------------