├── .gitignore ├── README.md ├── common ├── FPS.py ├── bus_call.py ├── create_element_or_error.py ├── is_aarch_64.py ├── object_detection.py └── utils.py ├── display-egl-with-inferance.py ├── display-egl-with-tracker.py ├── display-screen-with-egl.py ├── display-screen-wth-inferance.py ├── display-screen.py ├── display-tracker.py ├── mkv-file-to-rtmp.py ├── mkv-file-to-screen.py ├── record-and-split.py ├── record-and-stream-rtmp.py ├── record-to-mkv.py ├── record-to-mp4.py ├── restream-to-rtmp.py ├── stream-rtsp-server-with-inferance.py ├── stream-rtsp.py ├── stream-to-rtmp-server.py ├── stream-to-rtmp-tracker.py ├── stream-to-rtmp-with-inferance.py ├── test-peoplenet.py └── webrtc.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.mp4 2 | *.mkv 3 | **/__pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | .env 7 | .DS_Store 8 | ben-example 9 | streamit-detect-model-class 10 | streamit-relay-handler-test -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deepstream + Gstreamer Examples 2 | 3 | **Author:**\ 4 | Frank Sepulveda\ 5 | frank@streamit.live 6 | 7 | This repository contains real using examples for Gstreamer + Nvidia Deepstream plugins. 
8 | 9 | - Display live video on screen using overlaysink 10 | - Display live video on screen using Nvidia EGL plugin 11 | - Display on screen and read inference config file with nvinfer plugin 12 | - Setup a RTSP Streaming Server 13 | - Setup a RTSP Streaming Server and read inference 14 | - Publish live video stream to RTMP Server 15 | - Publish live video stream to RTMP Server and read inference 16 | - Record mp4 video 17 | - Record mp4 video and split on chunks 18 | - Threading and Queues, Streaming and Recording at the same time plus read inference 19 | - Decode any HTTP Live Source to a Pipeline 20 | 21 | 22 | Reset Jetson Clocks 23 | ``` 24 | sudo nvpmodel -m 0 25 | sudo jetson_clocks 26 | ``` 27 | 28 | Clear Cache 29 | ``` 30 | rm ${HOME}/.cache/gstreamer-1.0/registry.aarch64.bin 31 | ``` -------------------------------------------------------------------------------- /common/FPS.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | import time 24 | start_time=time.time() 25 | frame_count=0 26 | 27 | class GETFPS: 28 | def __init__(self,stream_id): 29 | global start_time 30 | self.start_time=start_time 31 | self.is_first=True 32 | global frame_count 33 | self.frame_count=frame_count 34 | self.stream_id=stream_id 35 | def get_fps(self): 36 | end_time=time.time() 37 | if(self.is_first): 38 | self.start_time=end_time 39 | self.is_first=False 40 | if(end_time-self.start_time>5): 41 | print("**********************FPS*****************************************") 42 | print("Fps of stream",self.stream_id,"is ", float(self.frame_count)/5.0) 43 | self.frame_count=0 44 | self.start_time=end_time 45 | else: 46 | self.frame_count=self.frame_count+1 47 | def print_data(self): 48 | print('frame_count=',self.frame_count) 49 | print('start_time=',self.start_time) 50 | 51 | -------------------------------------------------------------------------------- /common/bus_call.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | import gi 24 | import sys 25 | gi.require_version('Gst', '1.0') 26 | from gi.repository import GObject, Gst 27 | def bus_call(bus, message, loop): 28 | t = message.type 29 | if t == Gst.MessageType.EOS: 30 | sys.stdout.write("End-of-stream\n") 31 | loop.quit() 32 | elif t==Gst.MessageType.WARNING: 33 | err, debug = message.parse_warning() 34 | sys.stderr.write("Warning: %s: %s\n" % (err, debug)) 35 | elif t == Gst.MessageType.ERROR: 36 | err, debug = message.parse_error() 37 | sys.stderr.write("Error: %s: %s\n" % (err, debug)) 38 | loop.quit() 39 | return True 40 | -------------------------------------------------------------------------------- /common/create_element_or_error.py: -------------------------------------------------------------------------------- 1 | import gi 2 | gi.require_version('Gst', '1.0') 3 | from gi.repository import GObject, Gst 4 | 5 | def create_element_or_error(elemntId, name): 6 | 7 | print("Creating Element: " + elemntId) 8 | 9 | element = Gst.ElementFactory.make(elemntId, name) 10 | 11 | if not element: 12 | print(" Unable to create " + elemntId) 13 | exit(0) 14 | 15 | return element -------------------------------------------------------------------------------- /common/is_aarch_64.py: -------------------------------------------------------------------------------- 1 | import platform 2 | import sys 3 | 4 | def is_aarch64(): 5 | return platform.uname()[4] == 'aarch64' 6 | 7 | sys.path.append('/opt/nvidia/deepstream/deepstream/lib') 8 | -------------------------------------------------------------------------------- /common/object_detection.py: -------------------------------------------------------------------------------- 1 | import gi 2 | gi.require_version('Gst', '1.0') 3 | from gi.repository import Gst 4 | import pyds 5 | 6 | PGIE_CLASS_ID_VEHICLE = 0 7 | PGIE_CLASS_ID_BICYCLE = 1 8 | PGIE_CLASS_ID_PERSON = 2 9 | PGIE_CLASS_ID_ROADSIGN = 
3 10 | 11 | def osd_sink_pad_buffer_probe(pad,info,u_data): 12 | frame_number=0 13 | #Intiallizing object counter with 0. 14 | obj_counter = { 15 | PGIE_CLASS_ID_VEHICLE:0, 16 | PGIE_CLASS_ID_PERSON:0, 17 | PGIE_CLASS_ID_BICYCLE:0, 18 | PGIE_CLASS_ID_ROADSIGN:0 19 | } 20 | num_rects=0 21 | 22 | gst_buffer = info.get_buffer() 23 | if not gst_buffer: 24 | print("Unable to get GstBuffer ") 25 | return 26 | 27 | 28 | batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer)) 29 | l_frame = batch_meta.frame_meta_list 30 | while l_frame is not None: 31 | try: 32 | frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data) 33 | except StopIteration: 34 | break 35 | 36 | frame_number=frame_meta.frame_num 37 | num_rects = frame_meta.num_obj_meta 38 | l_obj=frame_meta.obj_meta_list 39 | while l_obj is not None: 40 | try: 41 | obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data) 42 | except StopIteration: 43 | break 44 | obj_counter[obj_meta.class_id] += 1 45 | try: 46 | l_obj=l_obj.next 47 | except StopIteration: 48 | break 49 | 50 | display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta) 51 | display_meta.num_labels = 1 52 | py_nvosd_text_params = display_meta.text_params[0] 53 | py_nvosd_text_params.display_text = "Frames: {} | Objects: {} | Vehicles: {} | Persons: {}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON]) 54 | 55 | py_nvosd_text_params.x_offset = 10 56 | py_nvosd_text_params.y_offset = 12 57 | 58 | py_nvosd_text_params.font_params.font_name = "Serif" 59 | py_nvosd_text_params.font_params.font_size = 12 60 | py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0) 61 | 62 | py_nvosd_text_params.set_bg_clr = 1 63 | py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0) 64 | pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta) 65 | try: 66 | l_frame=l_frame.next 67 | except StopIteration: 68 | break 69 | 70 | return Gst.PadProbeReturn.OK 
-------------------------------------------------------------------------------- /common/utils.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append('/opt/nvidia/deepstream/deepstream/lib') 3 | 4 | def long_to_int(l): 5 | value = ctypes.c_int(l & 0xffffffff).value 6 | return value 7 | -------------------------------------------------------------------------------- /display-egl-with-inferance.py: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # Display the Image on the Screen using the EGL Sink of Nvidia 4 | # 5 | # 6 | import argparse 7 | import sys 8 | sys.path.append('../') 9 | 10 | import gi 11 | gi.require_version('Gst', '1.0') 12 | from gi.repository import GObject, Gst 13 | from common.is_aarch_64 import is_aarch64 14 | from common.bus_call import bus_call 15 | from common.create_element_or_error import create_element_or_error 16 | from common.object_detection import osd_sink_pad_buffer_probe 17 | import pyds 18 | 19 | def main(): 20 | 21 | # Standard GStreamer initialization 22 | GObject.threads_init() 23 | Gst.init(None) 24 | 25 | 26 | # Create Pipeline Element 27 | pipeline = Gst.Pipeline() 28 | if not pipeline: 29 | sys.stderr.write(" Unable to create Pipeline") 30 | return 31 | 32 | source = create_element_or_error("nvarguscamerasrc", "camera-source") 33 | streammux = create_element_or_error("nvstreammux", "Stream-muxer") 34 | pgie = create_element_or_error("nvinfer", "primary-inference") 35 | convertor = create_element_or_error("nvvideoconvert", "convertor-1") 36 | nvosd = create_element_or_error("nvdsosd", "onscreendisplay") 37 | convertor2 = create_element_or_error("nvvideoconvert", "converter-2") 38 | transform = create_element_or_error("nvegltransform", "nvegl-transform") 39 | sink = create_element_or_error("nveglglessink", "egl-overlay") 40 | 41 | # Set Element Properties 42 | source.set_property('sensor-id', 0) 43 | 
source.set_property('bufapi-version', True) 44 | 45 | streammux.set_property('live-source', 1) 46 | streammux.set_property('width', 1280) 47 | streammux.set_property('height', 720) 48 | streammux.set_property('num-surfaces-per-frame', 1) 49 | streammux.set_property('batch-size', 1) 50 | streammux.set_property('batched-push-timeout', 4000000) 51 | 52 | pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt") 53 | # convertor.set_property('flip-method', 2) 54 | 55 | 56 | # Add Elemements to Pipielin 57 | print("Adding elements to Pipeline") 58 | pipeline.add(source) 59 | pipeline.add(streammux) 60 | pipeline.add(pgie) 61 | pipeline.add(convertor) 62 | pipeline.add(nvosd) 63 | pipeline.add(convertor2) 64 | pipeline.add(transform) 65 | pipeline.add(sink) 66 | 67 | sinkpad = streammux.get_request_pad("sink_0") 68 | if not sinkpad: 69 | sys.stderr.write(" Unable to get the sink pad of streammux") 70 | 71 | # Link the elements together 72 | print("Linking elements in the Pipeline") 73 | source.link(streammux) 74 | streammux.link(pgie) 75 | pgie.link(convertor) 76 | convertor.link(nvosd) 77 | nvosd.link(convertor2) 78 | convertor2.link(transform) 79 | transform.link(sink) 80 | 81 | # Create an event loop and feed gstreamer bus mesages to it 82 | loop = GObject.MainLoop() 83 | bus = pipeline.get_bus() 84 | bus.add_signal_watch() 85 | bus.connect ("message", bus_call, loop) 86 | 87 | print('Create OSD Sink Pad') 88 | osdsinkpad = nvosd.get_static_pad("sink") 89 | if not osdsinkpad: 90 | sys.stderr.write(" Unable to get sink pad of nvosd") 91 | 92 | osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) 93 | 94 | # Start play back and listen to events 95 | print("Starting pipeline") 96 | pipeline.set_state(Gst.State.PLAYING) 97 | 98 | try: 99 | loop.run() 100 | except: 101 | pass 102 | 103 | 104 | # Cleanup 105 | pipeline.set_state(Gst.State.NULL) 106 | 107 | if __name__ == 
"__main__": 108 | sys.exit(main()) 109 | -------------------------------------------------------------------------------- /display-egl-with-tracker.py: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # Display the Image on the Screen using the EGL Sink of Nvidia 4 | # 5 | # 6 | import argparse 7 | import sys 8 | sys.path.append('../') 9 | 10 | import gi 11 | gi.require_version('Gst', '1.0') 12 | from gi.repository import GObject, Gst 13 | from common.is_aarch_64 import is_aarch64 14 | from common.bus_call import bus_call 15 | from common.create_element_or_error import create_element_or_error 16 | from common.object_detection import osd_sink_pad_buffer_probe 17 | import pyds 18 | 19 | detectedObjectsCount = [] 20 | 21 | def sink_pad_buffer_probe(pad,info,u_data): 22 | 23 | gst_buffer = info.get_buffer() 24 | 25 | if not gst_buffer: 26 | sys.stderr.write("Unable to get GstBuffer") 27 | 28 | batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer)) 29 | frame_list = batch_meta.frame_meta_list 30 | 31 | while frame_list is not None: 32 | try: 33 | frame_meta = pyds.NvDsFrameMeta.cast(frame_list.data) 34 | except StopIteration: 35 | break 36 | 37 | list_of_objects = frame_meta.obj_meta_list 38 | 39 | while list_of_objects is not None: 40 | 41 | try: 42 | object_meta = pyds.NvDsObjectMeta.cast(list_of_objects.data) 43 | # # https://docs.nvidia.com/metropolis/deepstream/5.0DP/python-api/NvDsMeta/NvDsObjectMeta.html 44 | if object_meta.object_id not in detectedObjectsCount: 45 | detectedObjectsCount.append(object_meta.object_id) 46 | print('Detected "' + object_meta.obj_label + '" with ID: ' + str(object_meta.object_id)) 47 | 48 | except StopIteration: 49 | break 50 | # obj_counter[object_meta.class_id] += 1 51 | try: 52 | list_of_objects = list_of_objects.next 53 | except StopIteration: 54 | break 55 | try: 56 | frame_list = frame_list.next 57 | except StopIteration: 58 | break 59 | 60 | return 
Gst.PadProbeReturn.OK 61 | 62 | def main(): 63 | print('Tracker Example') 64 | 65 | # Standard GStreamer initialization 66 | GObject.threads_init() 67 | Gst.init(None) 68 | 69 | 70 | # Create Pipeline Element 71 | pipeline = Gst.Pipeline() 72 | if not pipeline: 73 | sys.stderr.write(" Unable to create Pipeline") 74 | return 75 | 76 | source = create_element_or_error("nvarguscamerasrc", "camera-source") 77 | streammux = create_element_or_error("nvstreammux", "Stream-muxer") 78 | pgie = create_element_or_error("nvinfer", "primary-inference") 79 | tracker = create_element_or_error("nvtracker", "tracker") 80 | convertor = create_element_or_error("nvvideoconvert", "convertor-1") 81 | nvosd = create_element_or_error("nvdsosd", "onscreendisplay") 82 | convertor2 = create_element_or_error("nvvideoconvert", "converter-2") 83 | transform = create_element_or_error("nvegltransform", "nvegl-transform") 84 | sink = create_element_or_error("nveglglessink", "egl-overlay") 85 | 86 | # Set Element Properties 87 | source.set_property('sensor-id', 0) 88 | source.set_property('bufapi-version', True) 89 | 90 | streammux.set_property('live-source', 1) 91 | streammux.set_property('width', 1280) 92 | streammux.set_property('height', 720) 93 | streammux.set_property('num-surfaces-per-frame', 1) 94 | streammux.set_property('batch-size', 1) 95 | streammux.set_property('batched-push-timeout', 4000000) 96 | 97 | pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt") 98 | 99 | tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so') 100 | tracker.set_property('gpu-id', 0) 101 | tracker.set_property('enable-batch-process', 1) 102 | tracker.set_property('ll-config-file', '/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/tracker_config.yml') 103 | 104 | 105 | # Add Elemements to Pipielin 106 | print("Adding elements to Pipeline") 107 | pipeline.add(source) 108 
| pipeline.add(streammux) 109 | pipeline.add(pgie) 110 | pipeline.add(tracker) 111 | pipeline.add(convertor) 112 | pipeline.add(nvosd) 113 | pipeline.add(convertor2) 114 | pipeline.add(transform) 115 | pipeline.add(sink) 116 | 117 | sinkpad = streammux.get_request_pad("sink_0") 118 | if not sinkpad: 119 | sys.stderr.write(" Unable to get the sink pad of streammux") 120 | 121 | # Link the elements together 122 | print("Linking elements in the Pipeline") 123 | source.link(streammux) 124 | streammux.link(pgie) 125 | pgie.link(tracker) 126 | tracker.link(convertor) 127 | convertor.link(nvosd) 128 | nvosd.link(convertor2) 129 | convertor2.link(transform) 130 | transform.link(sink) 131 | 132 | # Create an event loop and feed gstreamer bus mesages to it 133 | loop = GObject.MainLoop() 134 | bus = pipeline.get_bus() 135 | bus.add_signal_watch() 136 | bus.connect ("message", bus_call, loop) 137 | 138 | print('Create OSD Sink Pad') 139 | osdsinkpad = nvosd.get_static_pad("sink") 140 | if not osdsinkpad: 141 | sys.stderr.write("Unable to get sink pad of nvosd") 142 | 143 | osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, sink_pad_buffer_probe, 0) 144 | 145 | # Start play back and listen to events 146 | print("Starting pipeline") 147 | pipeline.set_state(Gst.State.PLAYING) 148 | 149 | try: 150 | loop.run() 151 | except: 152 | pass 153 | 154 | # Cleanup 155 | pipeline.set_state(Gst.State.NULL) 156 | 157 | if __name__ == "__main__": 158 | sys.exit(main()) 159 | -------------------------------------------------------------------------------- /display-screen-with-egl.py: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # Display the Image on the Screen using the EGL Sink of Nvidia 4 | # 5 | # 6 | import argparse 7 | import sys 8 | sys.path.append('../') 9 | 10 | import gi 11 | gi.require_version('Gst', '1.0') 12 | from gi.repository import GObject, Gst 13 | from common.is_aarch_64 import is_aarch64 14 | from common.bus_call import 
bus_call 15 | from common.create_element_or_error import create_element_or_error 16 | 17 | def main(): 18 | 19 | # Standard GStreamer initialization 20 | GObject.threads_init() 21 | Gst.init(None) 22 | 23 | 24 | # Create Pipeline Element 25 | print("Creating Pipeline") 26 | pipeline = Gst.Pipeline() 27 | if not pipeline: 28 | sys.stderr.write(" Unable to create Pipeline") 29 | 30 | # Create Elements 31 | source = create_element_or_error("nvarguscamerasrc", "camera-source") 32 | convertor = create_element_or_error("nvvidconv", "converter-1") 33 | transform = create_element_or_error("nvegltransform", "nvegl-transform") 34 | sink = create_element_or_error("nveglglessink", "egl-overlay") 35 | 36 | # Set Element Properties 37 | source.set_property('sensor-id', 0) 38 | source.set_property('bufapi-version', True) 39 | 40 | # Add Elemements to Pipielin 41 | print("Adding elements to Pipeline") 42 | pipeline.add(source) 43 | pipeline.add(convertor) 44 | pipeline.add(sink) 45 | pipeline.add(transform) 46 | 47 | 48 | # Link the elements together: 49 | print("Linking elements in the Pipeline") 50 | source.link(convertor) 51 | convertor.link(transform) 52 | transform.link(sink) 53 | 54 | # Create an event loop and feed gstreamer bus mesages to it 55 | loop = GObject.MainLoop() 56 | bus = pipeline.get_bus() 57 | bus.add_signal_watch() 58 | bus.connect ("message", bus_call, loop) 59 | 60 | 61 | # Start play back and listen to events 62 | print("Starting pipeline") 63 | pipeline.set_state(Gst.State.PLAYING) 64 | 65 | try: 66 | loop.run() 67 | except: 68 | pass 69 | 70 | 71 | # Cleanup 72 | pipeline.set_state(Gst.State.NULL) 73 | 74 | if __name__ == "__main__": 75 | sys.exit(main()) 76 | -------------------------------------------------------------------------------- /display-screen-wth-inferance.py: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # Display the Image on the Screen with Object Detections 4 | # 5 | # 6 | import 
argparse 7 | import sys 8 | sys.path.append('../') 9 | 10 | import gi 11 | gi.require_version('Gst', '1.0') 12 | from gi.repository import GObject, Gst 13 | from common.is_aarch_64 import is_aarch64 14 | from common.bus_call import bus_call 15 | from common.object_detection import osd_sink_pad_buffer_probe 16 | from common.create_element_or_error import create_element_or_error 17 | 18 | def main(): 19 | 20 | # Standard GStreamer initialization 21 | GObject.threads_init() 22 | Gst.init(None) 23 | 24 | # Create Pipeline Element 25 | print("Creating Pipeline") 26 | pipeline = Gst.Pipeline() 27 | if not pipeline: 28 | sys.stderr.write(" Unable to create Pipeline") 29 | 30 | source = create_element_or_error("nvarguscamerasrc", "camera-source") 31 | streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") 32 | pgie = Gst.ElementFactory.make("nvinfer", "primary-inference") 33 | convertor = Gst.ElementFactory.make("nvvideoconvert", "convertor-1") 34 | nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay") 35 | convertor2 = Gst.ElementFactory.make("nvvidconv", "converter-2") 36 | transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform") 37 | sink = Gst.ElementFactory.make("nvoverlaysink", "egl-overlay") 38 | 39 | # Set Element Properties 40 | source.set_property('sensor-id', 0) 41 | source.set_property('bufapi-version', True) 42 | 43 | streammux.set_property('live-source', 1) 44 | streammux.set_property('width', 1280) 45 | streammux.set_property('height', 720) 46 | streammux.set_property('num-surfaces-per-frame', 1) 47 | streammux.set_property('batch-size', 1) 48 | streammux.set_property('batched-push-timeout', 4000000) 49 | 50 | pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt") 51 | pgie.set_property('batch-size', 1) 52 | pgie.set_property('unique-id', 1) 53 | 54 | 55 | # Add Elemements to Pipielin 56 | print("Adding elements to Pipeline") 57 | 
pipeline.add(source) 58 | pipeline.add(streammux) 59 | pipeline.add(pgie) 60 | pipeline.add(convertor) 61 | pipeline.add(nvosd) 62 | pipeline.add(convertor2) 63 | pipeline.add(sink) 64 | # if is_aarch64(): 65 | # pipeline.add(transform) 66 | 67 | sinkpad = streammux.get_request_pad("sink_0") 68 | if not sinkpad: 69 | sys.stderr.write(" Unable to get the sink pad of streammux") 70 | 71 | 72 | # Link the elements together: 73 | print("Linking elements in the Pipeline") 74 | source.link(streammux) 75 | streammux.link(pgie) 76 | pgie.link(convertor) 77 | convertor.link(nvosd) 78 | # nvosd.link(convertor2) 79 | # if is_aarch64(): 80 | # nvosd.link(transform) 81 | # transform.link(sink) 82 | # else: 83 | nvosd.link(sink) 84 | 85 | 86 | # Create an event loop and feed gstreamer bus mesages to it 87 | loop = GObject.MainLoop() 88 | bus = pipeline.get_bus() 89 | bus.add_signal_watch() 90 | bus.connect ("message", bus_call, loop) 91 | 92 | # Lets add probe to get informed of the meta data generated, we add probe to 93 | # the sink pad of the osd element, since by that time, the buffer would have 94 | # had got all the metadata. 
95 | print('Create OSD Sink Pad') 96 | osdsinkpad = nvosd.get_static_pad("sink") 97 | if not osdsinkpad: 98 | sys.stderr.write(" Unable to get sink pad of nvosd") 99 | 100 | osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) 101 | 102 | 103 | # Start play back and listen to events 104 | print("Starting pipeline") 105 | pipeline.set_state(Gst.State.PLAYING) 106 | 107 | try: 108 | loop.run() 109 | except: 110 | pass 111 | 112 | 113 | # Cleanup 114 | pipeline.set_state(Gst.State.NULL) 115 | 116 | if __name__ == "__main__": 117 | sys.exit(main()) 118 | -------------------------------------------------------------------------------- /display-screen.py: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # Display the Image on the Screen 4 | # 5 | # 6 | import argparse 7 | import sys 8 | sys.path.append('../') 9 | 10 | import gi 11 | gi.require_version('Gst', '1.0') 12 | from gi.repository import GObject, Gst 13 | from common.is_aarch_64 import is_aarch64 14 | from common.bus_call import bus_call 15 | from common.create_element_or_error import create_element_or_error 16 | 17 | def main(): 18 | 19 | # Standard GStreamer initialization 20 | Gst.debug_set_active(True) 21 | Gst.debug_set_default_threshold(4) 22 | GObject.threads_init() 23 | Gst.init(None) 24 | 25 | pipeline = Gst.Pipeline() 26 | 27 | source = create_element_or_error("nvarguscamerasrc", "camera-source") 28 | sink = create_element_or_error("nvoverlaysink", "overlay") 29 | 30 | source.set_property('sensor-id', 0) 31 | 32 | pipeline.add(source) 33 | pipeline.add(sink) 34 | 35 | source.link(sink) 36 | 37 | loop = GObject.MainLoop() 38 | bus = pipeline.get_bus() 39 | bus.add_signal_watch() 40 | bus.connect ("message", bus_call, loop) 41 | 42 | pipeline.set_state(Gst.State.PLAYING) 43 | 44 | try: 45 | loop.run() 46 | except: 47 | pass 48 | 49 | # Cleanup 50 | pipeline.set_state(Gst.State.NULL) 51 | 52 | if __name__ == "__main__": 53 | 
sys.exit(main()) 54 | -------------------------------------------------------------------------------- /display-tracker.py: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # Display the Image on the Screen using the EGL Sink of Nvidia 4 | # 5 | # 6 | import argparse 7 | import sys 8 | sys.path.append('../') 9 | 10 | import gi 11 | gi.require_version('Gst', '1.0') 12 | from gi.repository import GObject, Gst 13 | from common.is_aarch_64 import is_aarch64 14 | from common.bus_call import bus_call 15 | from common.create_element_or_error import create_element_or_error 16 | from common.object_detection import osd_sink_pad_buffer_probe 17 | 18 | def main(): 19 | 20 | # Standard GStreamer initialization 21 | GObject.threads_init() 22 | Gst.init(None) 23 | 24 | # Create Pipeline Element 25 | pipeline = Gst.Pipeline() 26 | if not pipeline: 27 | sys.stderr.write(" Unable to create Pipeline") 28 | return 29 | 30 | # source = create_element_or_error("nvarguscamerasrc", "camera-source") 31 | # src_caps = create_element_or_error("capsfilter", "source-caps-definition") 32 | # src_caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, framerate=30/1, format=(string)NV12")) 33 | 34 | streammux = create_element_or_error("nvstreammux", "Stream-muxer") 35 | pgie = Gst.ElementFactory.make("nvinfer", "primary-inference") 36 | tracker = create_element_or_error("nvtracker", "tracker") 37 | convertor = Gst.ElementFactory.make("nvvideoconvert", "convertor-1") 38 | nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay") 39 | convertor2 = Gst.ElementFactory.make("nvvidconv", "converter-2") 40 | transform = create_element_or_error("nvegltransform", "nvegl-transform") 41 | sink = create_element_or_error("nveglglessink", "egl-overlay") 42 | 43 | # Set Element Properties 44 | source.set_property('sensor-id', 0) 45 | source.set_property('bufapi-version', True) 46 | 47 | 
streammux.set_property('live-source', 1) 48 | streammux.set_property('width', 1280) 49 | streammux.set_property('height', 720) 50 | streammux.set_property('num-surfaces-per-frame', 1) 51 | streammux.set_property('batch-size', 1) 52 | streammux.set_property('batched-push-timeout', 4000000) 53 | 54 | pgie.set_property('config-file-path', "./nv-inferance-config-files/config_infer_primary_trafficcamnet.txt") 55 | 56 | #Set properties of tracker 57 | tracker.set_property('tracker-width', 640) 58 | tracker.set_property('tracker-height', 384) 59 | tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so') 60 | tracker.set_property('gpu-id', 0) 61 | tracker.set_property('enable-batch-process', 1) 62 | tracker.set_property('enable-past-frame', 1) 63 | tracker.set_property('ll-config-file', './tracker_config.yml') 64 | 65 | # Add Elemements to Pipielin 66 | pipeline.add(source) 67 | # pipeline.add(src_caps) 68 | pipeline.add(streammux) 69 | pipeline.add(pgie) 70 | pipeline.add(tracker) 71 | pipeline.add(convertor) 72 | pipeline.add(nvosd) 73 | pipeline.add(convertor2) 74 | pipeline.add(transform) 75 | pipeline.add(sink) 76 | 77 | sinkpad = streammux.get_request_pad("sink_0") 78 | if not sinkpad: 79 | sys.stderr.write(" Unable to get the sink pad of streammux") 80 | 81 | # Link the elements together: 82 | source.link(streammux) 83 | # src_caps.link(streammux) 84 | streammux.link(pgie) 85 | pgie.link(tracker) 86 | tracker.link(convertor) 87 | convertor.link(nvosd) 88 | nvosd.link(convertor2) 89 | convertor2.link(transform) 90 | transform.link(sink) 91 | 92 | # Create an event loop and feed gstreamer bus mesages to it 93 | loop = GObject.MainLoop() 94 | bus = pipeline.get_bus() 95 | bus.add_signal_watch() 96 | bus.connect ("message", bus_call, loop) 97 | 98 | #Feed tracker 99 | tracker_sinkpad = tracker.get_static_pad("sink") 100 | if not tracker_sinkpad: 101 | sys.stderr.write(" Unable to get sink pad of nvosd") 102 | 103 | 
tracker_sinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) 104 | 105 | # Start play back and listen to events 106 | pipeline.set_state(Gst.State.PLAYING) 107 | 108 | try: 109 | loop.run() 110 | except: 111 | pass 112 | 113 | 114 | # Cleanup 115 | pipeline.set_state(Gst.State.NULL) 116 | 117 | if __name__ == "__main__": 118 | sys.exit(main()) 119 | -------------------------------------------------------------------------------- /mkv-file-to-rtmp.py: -------------------------------------------------------------------------------- 1 | # 2 | # Publish video to Ant Server 3 | # 4 | import argparse 5 | import sys 6 | sys.path.append('../') 7 | 8 | import gi 9 | gi.require_version('Gst', '1.0') 10 | from gi.repository import GObject, Gst 11 | from common.is_aarch_64 import is_aarch64 12 | from common.bus_call import bus_call 13 | from common.create_element_or_error import create_element_or_error 14 | # gst-launch-1.0 -v filesrc location=../streamit-virtual-edge-appliance/storage/tests/concourse/1.MKV ! matroskademux ! h264parse ! flvmux ! 
def main():
    """Read a local MKV file and publish its H.264 video to an RTMP server.

    Equivalent pipeline:
        filesrc ! matroskademux ! h264parse ! flvmux ! rtmpsink
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        return False

    # Create GST Elements
    source = create_element_or_error("filesrc", "file-source")
    demuxer = create_element_or_error("matroskademux", "demuxer")
    parser = create_element_or_error("h264parse", "parser")
    muxer = create_element_or_error("flvmux", "muxer")
    sink = create_element_or_error("rtmpsink", "sink")

    # BUG FIX: the original guard read `not (source or demuxer or parseer or
    # muxer or sink)` -- `parseer` is an undefined name (NameError when the
    # short-circuit reaches it) and `or` made the check pass as soon as ONE
    # element was created.  Every element is required, hence `and`.
    if not (source and demuxer and parser and muxer and sink):
        return

    # Set Element Properties
    source.set_property('location', '../streamit-virtual-edge-appliance/storage/tests/concourse/1.MKV')
    sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/stream-test')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    for element in (source, demuxer, parser, muxer, sink):
        pipeline.add(element)

    # Link the elements together.
    # BUG FIX: matroskademux exposes its source pads dynamically ("sometimes"
    # pads), so the demuxer -> parser link must happen from the pad-added
    # callback; the original static demuxer.link(parser) could never succeed.
    print("Linking elements in the Pipeline")
    source.link(demuxer)

    def _on_demux_pad_added(demux, pad):
        # Only the video stream feeds the H.264 parser.
        if pad.get_name().startswith('video'):
            pad.link(parser.get_static_pad('sink'))

    demuxer.connect('pad-added', _on_demux_pad_added)
    parser.link(muxer)
    muxer.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except KeyboardInterrupt:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)

if __name__ == "__main__":
    sys.exit(main())
#
# Display an MKV file on the screen using the Nvidia EGL sink.
#
# Equivalent pipeline (from the original file's comment):
#   gst-launch-1.0 filesrc location=1.MKV ! matroskademux ! h264parse ! ... ! sink
#
import argparse
import sys
sys.path.append('../')

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.create_element_or_error import create_element_or_error

def main():
    """Decode an H.264 MKV file and render it with the Nvidia EGL sink.

    Pipeline: filesrc ! matroskademux ! h264parse ! nvv4l2decoder
              ! nvegltransform ! nveglglessink
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    # BUG FIX: the original created the source from the "filesink" factory (a
    # sink cannot produce data) and then set camera-only properties
    # (sensor-id, bufapi-version) on it.  File playback needs filesrc plus
    # demux / parse / decode before the EGL transform and sink.
    source = create_element_or_error("filesrc", "file-source")
    demuxer = create_element_or_error("matroskademux", "demuxer")
    parser = create_element_or_error("h264parse", "parser")
    decoder = create_element_or_error("nvv4l2decoder", "decoder")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "egl-overlay")

    # NOTE(review): path taken from the pipeline comment in the original file;
    # confirm the MKV location for your deployment.
    source.set_property('location', '1.MKV')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    for element in (source, demuxer, parser, decoder, transform, sink):
        pipeline.add(element)

    # Link the elements together.  matroskademux creates its source pads
    # dynamically, so the video pad is linked from the pad-added callback.
    print("Linking elements in the Pipeline")
    source.link(demuxer)

    def _on_demux_pad_added(demux, pad):
        if pad.get_name().startswith('video'):
            pad.link(parser.get_static_pad('sink'))

    demuxer.connect('pad-added', _on_demux_pad_added)
    parser.link(decoder)
    decoder.link(transform)
    transform.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except KeyboardInterrupt:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)

if __name__ == "__main__":
    sys.exit(main())
print("Creating Pipeline") 41 | pipeline = Gst.Pipeline() 42 | if not pipeline: 43 | sys.stderr.write("Unable to create Pipeline") 44 | 45 | # Create GST Source 46 | source = create_element_or_error("nvarguscamerasrc", "camera-source") 47 | caps = Gst.ElementFactory.make("capsfilter", "source-caps") 48 | caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)NV12, framerate=(fraction)30/1")) 49 | 50 | # Create Gst Threads 51 | tee = create_element_or_error("tee", "tee") 52 | streaming_queue = create_element_or_error("queue", "streaming_queue") 53 | recording_queue = create_element_or_error("queue", "recording_queue") 54 | display_queue = create_element_or_error("queue", "display_queue") 55 | 56 | # Create Gst Elements for Streaming Branch 57 | s_encoder = create_element_or_error("nvv4l2h264enc", "streaming-encoder") 58 | s_parser = create_element_or_error("h264parse", "streaming-parser") 59 | s_muxer = create_element_or_error("flvmux", "streaming-muxer") 60 | s_sink = create_element_or_error("rtmpsink", "streaming-sink") 61 | 62 | # Create Gst Elements for Recording Branch 63 | r_encoder = create_element_or_error('nvv4l2h264enc', 'recording-encoder') 64 | r_parser = create_element_or_error('h264parse', 'recording-parser') 65 | r_sink = create_element_or_error('splitmuxsink', 'recording-sink') 66 | 67 | # Create Gst Elements for Display Branch 68 | d_sink = create_element_or_error("nvoverlaysink", "display-sink") 69 | 70 | # Set Source Properties 71 | source.set_property('sensor-id', 0) 72 | source.set_property('saturation', 1.2) 73 | source.set_property('exposurecompensation', 1.2) 74 | source.set_property('wbmode', 0) 75 | 76 | # Set Streaming Properties 77 | s_sink.set_property('location', 'rtmp://media.streamit.link/LiveApp/streaming-test') 78 | 79 | # Set Display Properties 80 | d_sink.set_property('overlay', 1) 81 | d_sink.set_property('overlay-x', 0) 82 | d_sink.set_property('overlay-y', 0) 
83 | d_sink.set_property('overlay-w', 640) 84 | d_sink.set_property('overlay-h', 360) 85 | 86 | # Set Streaming Properties 87 | five_minutes = 900000000000 88 | r_sink.set_property('max-size-time', 30000000000) 89 | r_sink.connect('format-location', __location) 90 | 91 | # Add Elemements to Pipielin 92 | print("Adding elements to Pipeline") 93 | pipeline.add(source) 94 | pipeline.add(caps) 95 | pipeline.add(tee) 96 | pipeline.add(streaming_queue) 97 | pipeline.add(s_encoder) 98 | pipeline.add(s_parser) 99 | pipeline.add(s_muxer) 100 | pipeline.add(s_sink) 101 | pipeline.add(recording_queue) 102 | pipeline.add(r_encoder) 103 | pipeline.add(r_parser) 104 | pipeline.add(r_sink) 105 | pipeline.add(display_queue) 106 | pipeline.add(d_sink) 107 | 108 | # Link the elements together: 109 | print("Linking elements in the Pipeline") 110 | source.link(caps) 111 | caps.link(tee) 112 | 113 | # Streaming Queue 114 | streaming_queue.link(s_encoder) 115 | s_encoder.link(s_parser) 116 | s_parser.link(s_muxer) 117 | s_muxer.link(s_sink) 118 | 119 | # Recording Queue 120 | recording_queue.link(r_encoder) 121 | r_encoder.link(r_parser) 122 | r_parser.link(r_sink) 123 | 124 | # Display Queue 125 | display_queue.link(d_sink) 126 | 127 | # Get pad templates from source 128 | tee_src_pad_template = tee.get_pad_template("src_%u") 129 | 130 | # Get source to Streaming Queue 131 | tee_streaming_pad = tee.request_pad(tee_src_pad_template, None, None) 132 | streaming_queue_pad = streaming_queue.get_static_pad("sink") 133 | 134 | # Get source to Recording Queue 135 | tee_recording_pad = tee.request_pad(tee_src_pad_template, None, None) 136 | recording_queue_pad = recording_queue.get_static_pad("sink") 137 | 138 | # Get source to Display Queue 139 | tee_display_pad = tee.request_pad(tee_src_pad_template, None, None) 140 | display_queue_pad = display_queue.get_static_pad("sink") 141 | 142 | # Link sources 143 | if (tee_streaming_pad.link(streaming_queue_pad) != Gst.PadLinkReturn.OK or 
#
# Publish the camera video (with inference overlay) to an RTMP server and
# record it locally at the same time, using a tee with two queued branches.
#
import argparse
import sys
sys.path.append('./')
import datetime
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.create_element_or_error import create_element_or_error

def main():
    """Stream camera + nvinfer overlay to RTMP while recording H.265 to MP4.

    Topology:
        nvarguscamerasrc -> nvstreammux -> nvinfer -> nvvideoconvert -> nvdsosd
        -> nvvideoconvert -> tee -> [queue -> h264 enc -> flvmux -> rtmpsink]
                                  -> [queue -> h265 enc -> qtmux -> filesink]
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    # Create GST Source and inference chain
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")

    # Create Gst Threads
    tee = create_element_or_error("tee", "tee")
    streaming_queue = create_element_or_error("queue", "streaming_queue")
    recording_queue = create_element_or_error("queue", "recording_queue")

    # Create Gst Elements for Streaming Branch
    s_encoder = create_element_or_error("nvv4l2h264enc", "streaming-encoder")
    s_parser = create_element_or_error("h264parse", "streaming-parser")
    s_muxer = create_element_or_error("flvmux", "streaming-muxer")
    s_sink = create_element_or_error("rtmpsink", "streaming-sink")

    # Create Gst Elements for Recording Branch
    r_encoder = create_element_or_error('nvv4l2h265enc', 'encoder')
    r_parser = create_element_or_error('h265parse', 'parser')
    # BUG FIX: the original wrote the parsed H.265 elementary stream straight
    # into a filesink whose location ends in ".mp4" -- that file is not a
    # valid MP4 container.  qtmux wraps the stream into a real MP4.
    r_muxer = create_element_or_error('qtmux', 'recording-muxer')
    r_sink = create_element_or_error('filesink', 'sink')

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt")
    s_sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/streaming-test')
    r_encoder.set_property('bitrate', 8000000)
    r_sink.set_property('location', 'video_' + str(datetime.datetime.utcnow().date()) + '.mp4')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    for element in (source, streammux, pgie, convertor, nvosd, convertor2,
                    tee, streaming_queue, s_encoder, s_parser, s_muxer, s_sink,
                    recording_queue, r_encoder, r_parser, r_muxer, r_sink):
        pipeline.add(element)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(tee)

    # Streaming Queue
    streaming_queue.link(s_encoder)
    s_encoder.link(s_parser)
    s_parser.link(s_muxer)
    s_muxer.link(s_sink)

    # Recording Queue (parser now feeds the MP4 muxer, not the sink directly)
    recording_queue.link(r_encoder)
    r_encoder.link(r_parser)
    r_parser.link(r_muxer)
    r_muxer.link(r_sink)

    # Get pad templates from source
    tee_src_pad_template = tee.get_pad_template("src_%u")

    # Get source to Streaming Queue
    tee_streaming_pad = tee.request_pad(tee_src_pad_template, None, None)
    streaming_queue_pad = streaming_queue.get_static_pad("sink")

    # Get source to Recording Queue
    tee_recording_pad = tee.request_pad(tee_src_pad_template, None, None)
    recording_queue_pad = recording_queue.get_static_pad("sink")

    # Link tee branches
    if (tee_streaming_pad.link(streaming_queue_pad) != Gst.PadLinkReturn.OK or
            tee_recording_pad.link(recording_queue_pad) != Gst.PadLinkReturn.OK):
        print("ERROR: Tees could not be linked")
        sys.exit(1)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except KeyboardInterrupt:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)

if __name__ == "__main__":
    sys.exit(main())
create_element_or_error('h265parse', 'parser') 47 | # demuxer = create_element_or_error('matroskamux', 'matroxdemux') 48 | sink = create_element_or_error('splitmuxsink', 'sink') 49 | 50 | # Set Element Properties 51 | source.set_property('sensor-id', 0) 52 | caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)NV12, framerate=(fraction)30/1")) 53 | sink.set_property('max-size-time', 30000000000) 54 | sink.set_property('muxer', 'matroskamux') 55 | sink.connect('format-location', __location) 56 | 57 | # Add Elemements to Pipielin 58 | print("Adding elements to Pipeline") 59 | pipeline.add(source) 60 | pipeline.add(caps) 61 | pipeline.add(tee) 62 | pipeline.add(recording_queue) 63 | pipeline.add(encoder) 64 | pipeline.add(parser) 65 | # pipeline.add(demuxer) 66 | pipeline.add(sink) 67 | 68 | # Link the elements together: 69 | print("Linking elements in the Pipeline") 70 | source.link(caps) 71 | caps.link(tee) 72 | recording_queue.link(encoder) 73 | encoder.link(parser) 74 | # parser.link(demuxer) 75 | parser.link(sink) 76 | 77 | # Get pad templates from source 78 | tee_src_pad_template = tee.get_pad_template("src_%u") 79 | 80 | # Get source to Recording Queue 81 | tee_pad = tee.request_pad(tee_src_pad_template, None, None) 82 | queue_pad = recording_queue.get_static_pad("sink") 83 | 84 | if (tee_pad.link(queue_pad) != Gst.PadLinkReturn.OK): 85 | print("ERROR: Tee streaming could not be linked") 86 | sys.exit(1) 87 | 88 | # Create an event loop and feed gstreamer bus mesages to it 89 | loop = GObject.MainLoop() 90 | bus = pipeline.get_bus() 91 | bus.add_signal_watch() 92 | bus.connect ("message", bus_call, loop) 93 | 94 | 95 | # Start play back and listen to events 96 | print("Starting pipeline") 97 | pipeline.set_state(Gst.State.PLAYING) 98 | 99 | try: 100 | loop.run() 101 | except: 102 | pass 103 | 104 | # Cleanup 105 | pipeline.set_state(Gst.State.NULL) 106 | 107 | if __name__ == "__main__": 108 | 
sys.exit(main()) 109 | 110 | # gst-launch-1.0 nvarguscamerasrc ! 'video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)NV12, framerate=(fraction)30/1' ! nvv4l2h265enc ! h265parse ! matroskamux ! filesink location=test.mkv 111 | # gst-launch-1.0 nvarguscamerasrc ! 'video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)NV12, framerate=(fraction)30/1' ! nvv4l2h265enc ! h265parse ! splitmuxsink muxer=matroskamux location=test.mkv max-size-time=30000000000 -------------------------------------------------------------------------------- /record-to-mp4.py: -------------------------------------------------------------------------------- 1 | # 2 | # The folowing example records the video of the CSI Camera to a MP4 File, Encoded h265 3 | # 4 | # The Gstreamer pipeline representation of this code is: 5 | # gst-launch-1.0 nvarguscamerasrc ! nvv4l2h265enc bitrate=8000000 ! h265parse ! filesink location=1280.mp4 -e 6 | # 7 | # 8 | import argparse 9 | import sys 10 | sys.path.append('./../') 11 | import datetime 12 | import gi 13 | gi.require_version('Gst', '1.0') 14 | from gi.repository import GObject, Gst 15 | from common.is_aarch_64 import is_aarch64 16 | from common.bus_call import bus_call 17 | from common.create_element_or_error import create_element_or_error 18 | 19 | def main(): 20 | 21 | # Standard GStreamer initialization 22 | GObject.threads_init() 23 | Gst.init(None) 24 | 25 | # Create Pipeline Element 26 | print("Creating Pipeline") 27 | pipeline = Gst.Pipeline() 28 | if not pipeline: 29 | sys.stderr.write(" Unable to create Pipeline") 30 | 31 | # Create Source Element 32 | source = create_element_or_error('nvarguscamerasrc', 'camera-source') 33 | encoder = create_element_or_error('nvv4l2h265enc', 'encoder') 34 | parser = create_element_or_error('h265parse', 'parser') 35 | sink = create_element_or_error('filesink', 'sink') 36 | 37 | # Set Element Properties 38 | source.set_property('sensor-id', 0) 39 | 
#
# Re-stream a remote video to an RTMP server.
#
import argparse
import sys
sys.path.append('../')

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.create_element_or_error import create_element_or_error

def main():
    """Push a source video through an H.264 encoder into an RTMP sink."""
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        return False

    # Create GST Elements
    source = create_element_or_error("filesrc", "file-source")
    encoder = create_element_or_error("nvv4l2h264enc", "encoder")
    parser = create_element_or_error("h264parse", "parser")
    muxer = create_element_or_error("flvmux", "muxer")
    sink = create_element_or_error("rtmpsink", "sink")

    # BUG FIX: the original guard was `not (source or encoder or parseer or
    # muxer or sink)` -- `parseer` is an undefined name (NameError) and `or`
    # let the check pass with a single created element.
    if not (source and encoder and parser and muxer and sink):
        return

    # Set Element Properties
    # NOTE(review): filesrc reads local files only -- it cannot fetch an
    # http:// URL (souphttpsrc would be needed), and an encoder cannot consume
    # a containerized MP4 directly (a uridecodebin + nvvideoconvert stage is
    # normally required before nvv4l2h264enc).  Flagged for follow-up; the
    # location strings are left exactly as in the original.
    source.set_property('location', 'http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4')
    sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/stream-test')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    for element in (source, encoder, parser, muxer, sink):
        pipeline.add(element)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(encoder)
    encoder.link(parser)
    parser.link(muxer)
    muxer.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except KeyboardInterrupt:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)

if __name__ == "__main__":
    sys.exit(main())
from common.create_element_or_error import create_element_or_error

def main():
    """Serve the CSI camera with object detection over RTSP.

    Builds: nvarguscamerasrc -> nvstreammux -> nvinfer -> nvvideoconvert
    -> nvdsosd -> nvvideoconvert -> nvv4l2h265enc -> h265parse -> rtph265pay
    -> udpsink, then fronts the UDP stream with a GstRtspServer mount at
    rtsp://localhost:8554/streaming.
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write("Unable to create Pipeline")

    # Element construction -- (factory, instance-name) pairs kept identical
    # to the original pipeline.
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")
    # NOTE: this capsfilter is created but never added/linked (preserved as-is).
    caps = create_element_or_error("capsfilter", "filter-convertor-2")
    encoder = create_element_or_error("nvv4l2h265enc", "encoder")
    parser = create_element_or_error("h265parse", "h265-parser")
    rtppay = create_element_or_error("rtph265pay", "rtppay")
    sink = create_element_or_error("udpsink", "udpsink")

    # Camera feeds the stream muxer through the new buffer API.
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    encoder.set_property('insert-sps-pps', True)
    encoder.set_property('bitrate', 4000000)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt")

    rtppay.set_property('pt', 96)

    updsink_port_num = 5400

    sink.set_property('host', '127.0.0.1')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    # Register every pipeline member, then wire them in order.
    print("Adding elements to Pipeline")
    for element in (source, streammux, pgie, convertor, nvosd,
                    convertor2, encoder, parser, rtppay, sink):
        pipeline.add(element)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    print("Linking elements in the Pipeline")
    chain = (source, streammux, pgie, convertor, nvosd,
             convertor2, encoder, parser, rtppay, sink)
    for upstream, downstream in zip(chain, chain[1:]):
        upstream.link(downstream)

    # Bus watch drives bus_call on the GLib main loop.
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # RTSP front-end: re-serve the local UDP/RTP stream.
    rtsp_port_num = 8554

    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)

    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch( "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (updsink_port_num, 'H265'))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/streaming", factory)

    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/streaming ***\n\n" % rtsp_port_num)

    # Probe the OSD sink pad: by then each buffer carries full metadata.
    print('Create OSD Sink Pad')
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # Run until interrupted, then tear the pipeline down.
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    pipeline.set_state(Gst.State.NULL)

if __name__ == "__main__":
    sys.exit(main())
4000000) 42 | 43 | rtppay.set_property('pt', 96) 44 | updsink_port_num = 5400 45 | 46 | sink.set_property('host', '127.0.0.1') 47 | sink.set_property('port', updsink_port_num) 48 | sink.set_property('async', False) 49 | sink.set_property('sync', 1) 50 | 51 | # Add Elemements to Pipielin 52 | print("Adding elements to Pipeline") 53 | pipeline.add(source) 54 | pipeline.add(parser) 55 | pipeline.add(encoder) 56 | pipeline.add(rtppay) 57 | pipeline.add(sink) 58 | 59 | # Link the elements together: 60 | print("Linking elements in the Pipeline") 61 | source.link(encoder) 62 | encoder.link(parser) 63 | parser.link(rtppay) 64 | rtppay.link(sink) 65 | 66 | # Create an event loop and feed gstreamer bus mesages to it 67 | loop = GObject.MainLoop() 68 | bus = pipeline.get_bus() 69 | bus.add_signal_watch() 70 | bus.connect ("message", bus_call, loop) 71 | 72 | # Start streaming 73 | rtsp_port_num = 8554 74 | 75 | server = GstRtspServer.RTSPServer.new() 76 | server.props.service = "%d" % rtsp_port_num 77 | server.attach(None) 78 | 79 | factory = GstRtspServer.RTSPMediaFactory.new() 80 | factory.set_launch( "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (updsink_port_num, 'H265')) 81 | factory.set_shared(True) 82 | server.get_mount_points().add_factory("/streaming", factory) 83 | 84 | print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/streaming ***\n\n" % rtsp_port_num) 85 | 86 | # Start play back and listen to events 87 | print("Starting pipeline") 88 | pipeline.set_state(Gst.State.PLAYING) 89 | 90 | try: 91 | loop.run() 92 | except: 93 | pass 94 | 95 | 96 | # Cleanup 97 | pipeline.set_state(Gst.State.NULL) 98 | 99 | if __name__ == "__main__": 100 | sys.exit(main()) 101 | -------------------------------------------------------------------------------- /stream-to-rtmp-server.py: -------------------------------------------------------------------------------- 
#
# Publish video to Ant Media Server over RTMP.
#
import argparse
import sys
sys.path.append('../')

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.create_element_or_error import create_element_or_error


def main():
    """Stream the CSI camera to an RTMP server.

    Pipeline: nvarguscamerasrc -> nvv4l2h264enc -> h264parse -> flvmux -> rtmpsink

    Returns:
        1 when the pipeline cannot be created, otherwise None
        (interpreted as exit status 0 by sys.exit).
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create the top-level pipeline
    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        # BUG FIX: the original `return False` made sys.exit() report
        # success (False == 0); return a real non-zero exit status.
        return 1

    # Create GStreamer elements
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    encoder = create_element_or_error("nvv4l2h264enc", "encoder")
    parser = create_element_or_error("h264parse", "parser")
    muxer = create_element_or_error("flvmux", "muxer")
    sink = create_element_or_error("rtmpsink", "sink")

    # Set element properties
    source.set_property('sensor-id', 0)
    sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/stream-test')

    # Add elements to the pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(encoder)
    pipeline.add(parser)
    pipeline.add(muxer)
    pipeline.add(sink)

    # Link the elements together:
    # camera -> H.264 encoder -> parser -> FLV muxer -> RTMP sink
    print("Linking elements in the Pipeline")
    source.link(encoder)
    encoder.link(parser)
    parser.link(muxer)
    muxer.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except KeyboardInterrupt:
        # BUG FIX: the original bare `except:` silently swallowed every
        # exception (including SystemExit); only Ctrl-C stops us quietly.
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)


if __name__ == "__main__":
    sys.exit(main())
# ---------------------------------------------------------------------------
# stream-to-rtmp-tracker.py
#
# Publish video to Ant Media Server over RTMP while running nvinfer detection
# and nvtracker object tracking on the frames.
# ---------------------------------------------------------------------------
import argparse
import sys
sys.path.append('./')

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.create_element_or_error import create_element_or_error
import pyds

# Class ids emitted by the primary DeepStream 4-class detector.
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3

# Per-class object counters (currently not updated by the probe below).
object_counter = {
    PGIE_CLASS_ID_VEHICLE: 0,
    PGIE_CLASS_ID_PERSON: 0,
    PGIE_CLASS_ID_BICYCLE: 0,
    PGIE_CLASS_ID_ROADSIGN: 0
}


def _sink_pad_buffer_probe(pad, info, u_data):
    """Buffer probe on the OSD sink pad: inspect DeepStream batch metadata.

    Currently only prints the frame meta list; per-object iteration was left
    half-finished (commented out) by the original author.
    """
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer")
        # BUG FIX: a pad probe must return a Gst.PadProbeReturn value; the
        # original returned None here, which GStreamer does not accept.
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    frame_list = batch_meta.frame_meta_list

    print(frame_list)
    # TODO: walk frame_list -> obj_meta_list, fill object_counter and attach
    # display meta (a large block of commented-out placeholder code that did
    # this half-way was removed; see NvDsFrameMeta/NvDsObjectMeta in pyds).

    return Gst.PadProbeReturn.OK


def main():
    """Camera -> streammux -> nvinfer -> nvtracker -> OSD -> H.264 -> RTMP."""
    print('Tracker Example')

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        # BUG FIX: `return False` exits with status 0 (success).
        return 1

    # Create GStreamer elements
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")
    encoder = create_element_or_error("nvv4l2h264enc", "encoder")
    parser = create_element_or_error("h264parse", "parser")
    muxer = create_element_or_error("flvmux", "muxer")
    sink = create_element_or_error("rtmpsink", "sink")

    # Set element properties
    source.set_property('sensor-id', 0)
    # bufapi-version=True makes nvarguscamerasrc emit buffers that
    # nvstreammux understands.
    source.set_property('bufapi-version', True)

    encoder.set_property('insert-sps-pps', True)
    encoder.set_property('bitrate', 4000000)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('num-surfaces-per-frame', 1)
    # NOTE(review): batch-size 30 with a single camera looks unintentional —
    # 1 is the usual value for one source; confirm before changing.
    streammux.set_property('batch-size', 30)
    streammux.set_property('batched-push-timeout', 25000)

    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt")

    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('ll-config-file', '/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/tracker_config.yml')

    sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/streaming-test')

    # Add elements to the pipeline
    print("Adding elements to Pipeline")
    for element in (source, streammux, pgie, tracker, convertor, nvosd,
                    convertor2, encoder, parser, muxer, sink):
        pipeline.add(element)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux\n")

    # Link the elements together
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(encoder)
    encoder.link(parser)
    parser.link(muxer)
    muxer.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Attach the metadata probe on the OSD sink pad, where inference and
    # tracker metadata are both available.
    print('Create OSD Sink Pad')
    nvosd_sinkpad = nvosd.get_static_pad("sink")
    if not nvosd_sinkpad:
        sys.stderr.write("Unable to get sink pad of nvosd\n")

    nvosd_sinkpad.add_probe(Gst.PadProbeType.BUFFER, _sink_pad_buffer_probe, 0)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except KeyboardInterrupt:
        pass  # BUG FIX: was a bare `except:` that hid every error

    # Cleanup
    pipeline.set_state(Gst.State.NULL)


if __name__ == "__main__":
    sys.exit(main())
# ---------------------------------------------------------------------------
# stream-to-rtmp-with-inferance.py
#
# Publish video to Ant Media Server over RTMP with nvinfer detections
# summarised on-screen by nvdsosd.
# ---------------------------------------------------------------------------
import argparse
import sys
sys.path.append('./')

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.create_element_or_error import create_element_or_error
import pyds

# Class ids emitted by the primary DeepStream 4-class detector.
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3


def osd_sink_pad_buffer_probe(pad, info, u_data):
    """Count detections per frame and overlay a one-line summary via OSD."""
    frame_number = 0
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        # BUG FIX: pad probes must return a Gst.PadProbeReturn, not None.
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list

        # Tally every detection in this frame by class id.
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Draw one text label with the per-frame statistics.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        py_nvosd_text_params.display_text = "Frames: {} | Objects: {} | Vehicles: {} | Persons: {}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 12
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def main():
    """Camera -> capsfilter -> streammux -> nvinfer -> OSD -> H.264 -> RTMP."""
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        # BUG FIX: `return False` made sys.exit() report success.
        return 1

    # Create GStreamer elements
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    src_caps = create_element_or_error("capsfilter", "source-caps-definition")
    src_caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), width=(int)3264, height=(int)2464, framerate=30/1, format=(string)NV12"))

    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")

    encoder = create_element_or_error("nvv4l2h264enc", "encoder")
    parser = create_element_or_error("h264parse", "parser")
    muxer = create_element_or_error("flvmux", "muxer")
    sink = create_element_or_error("rtmpsink", "sink")

    # Set element properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    encoder.set_property('insert-sps-pps', True)
    encoder.set_property('bitrate', 4000000)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 720)
    streammux.set_property('height', 480)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt")
    sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/streaming-test')

    # Add elements to the pipeline
    print("Adding elements to Pipeline")
    for element in (source, src_caps, streammux, pgie, convertor, nvosd,
                    convertor2, encoder, parser, muxer, sink):
        pipeline.add(element)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux\n")

    # Link the elements together
    print("Linking elements in the Pipeline")
    source.link(src_caps)
    src_caps.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(encoder)
    encoder.link(parser)
    parser.link(muxer)
    muxer.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Attach the metadata probe to the OSD sink pad, where all inference
    # metadata is available.
    print('Create OSD Sink Pad')
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd\n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except KeyboardInterrupt:
        pass  # BUG FIX: was a bare `except:`

    # Cleanup
    pipeline.set_state(Gst.State.NULL)


if __name__ == "__main__":
    sys.exit(main())


# ---------------------------------------------------------------------------
# test-peoplenet.py
#
# Display the camera image on the screen with object detections.
# ---------------------------------------------------------------------------
import argparse
import sys
sys.path.append('../')

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.object_detection import osd_sink_pad_buffer_probe
from common.create_element_or_error import create_element_or_error
def main():
    """Run the CSI camera through nvinfer (YOLOv3 config) and display on the overlay sink."""
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline\n")

    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    sink = create_element_or_error("nvoverlaysink", "egl-overlay")

    # Set element properties
    source.set_property('sensor-id', 0)
    # BUG FIX: 'bufapi-version' was set twice in the original; once is enough.
    source.set_property('bufapi-version', True)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "./nv-inferance-config-files/config_infer_primary_yolov3.txt")
    # pgie.set_property('config-file-path', "./nv-inferance-config-files/config_infer_primary_trafficcamnet.txt")
    sink.set_property('sync', 0)

    # Add elements to the pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux\n")

    # Link the elements together
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Metadata probe left disabled by the original author; re-enable on the
    # OSD sink pad to inspect detections:
    # osdsinkpad = nvosd.get_static_pad("sink")
    # osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except KeyboardInterrupt:
        pass  # BUG FIX: was a bare `except:`

    # Cleanup
    pipeline.set_state(Gst.State.NULL)


if __name__ == "__main__":
    sys.exit(main())


# ---------------------------------------------------------------------------
# webrtc.py
#
# Publish a test stream over WebRTC via the Ant Media Server websocket
# signalling protocol.
# ---------------------------------------------------------------------------
import random
import ssl
import websockets
import asyncio
import os
import sys
import json
import argparse
# BUG FIX: `json` was imported twice in the original file; kept one import.

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
gi.require_version('GstWebRTC', '1.0')
from gi.repository import GstWebRTC
gi.require_version('GstSdp', '1.0')
from gi.repository import GstSdp
# Reference launch lines kept from the original author:
# gst-launch-1.0 nvarguscamerasrc sensor-id=0 ! nvv4l2h264enc ! h264parse ! flvmux ! rtmpsink location='rtmp://media.streamit.live/LiveApp/frank-edge live=1'
#
# The following (disabled) pipeline would publish the CSI camera instead of
# the test pattern:
# PIPELINE_DESC = '''
# nvarguscamerasrc ! nvvideoconvert ! queue ! vp8enc deadline=1 ! rtpvp8pay !
# queue ! application/x-rtp,media=video,encoding-name=VP8,payload=96 ! webrtcbin name=sendrecv
# '''

# Test pipeline: animated ball pattern, VP8-encoded, fed into webrtcbin.
PIPELINE_DESC = '''
videotestsrc is-live=true pattern=ball ! videoconvert ! queue ! vp8enc deadline=1 ! rtpvp8pay !
queue ! application/x-rtp,media=video,encoding-name=VP8,payload=96 ! webrtcbin name=sendrecv
'''

WEBSOCKET_URL = 'wss://media.streamit.live:5443/LiveApp/websocket?rtmpForward=undefined'

from websockets.version import version as wsv


class WebRTCClient:
    """Minimal WebRTC publisher speaking the Ant Media Server websocket protocol."""

    def __init__(self, id):
        self.id = id          # stream id to publish under
        self.conn = None      # signalling websocket connection
        self.pipe = None      # GStreamer pipeline
        self.webrtc = None    # webrtcbin element
        self.peer_id = None
        self.server = WEBSOCKET_URL

    async def connect(self):
        """Open the signalling websocket and announce the publish request."""
        print('Client Connect')
        sslctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
        self.conn = await websockets.connect(self.server, ssl=sslctx)
        await self.conn.send('{"command":"publish","streamId":"' + self.id + '", "token":"null","video":true,"audio":false}')

    def send_sdp_offer(self, offer):
        """Send the local SDP offer to the server as a takeConfiguration message."""
        print('Send SDP Offer')
        sdp = offer.sdp.as_text()
        # NOTE(review): driving self.conn from a fresh event loop only works
        # while the main signalling loop is blocked elsewhere — confirm
        # before reusing this pattern.
        loop = asyncio.new_event_loop()
        loop.run_until_complete(self.conn.send('{"command":"takeConfiguration", "streamId": "' + self.id + '", "type": "offer", "sdp": "' + sdp + '"}'))
        loop.close()

    def on_offer_created(self, promise, _, __):
        """Promise callback: apply the created offer locally, then send it."""
        print('Offer Created')
        promise.wait()
        reply = promise.get_reply()
        # See https://github.com/centricular/gstwebrtc-demos/issues/42
        offer = reply.get_value('offer')
        promise = Gst.Promise.new()
        self.webrtc.emit('set-local-description', offer, promise)
        promise.interrupt()
        self.send_sdp_offer(offer)

    def on_negotiation_needed(self, element):
        """webrtcbin signal handler: kick off offer creation."""
        print('Negotiation Needed')
        promise = Gst.Promise.new_with_change_func(self.on_offer_created, element, None)
        element.emit('create-offer', None, promise)

    def send_ice_candidate_message(self, _, mlineindex, candidate):
        """Forward a local ICE candidate to the server.

        BUG FIX: the original hand-built string was missing the comma
        between the "id" and "candidate" fields, producing invalid JSON;
        build the message with json.dumps instead.
        """
        data = json.dumps({
            "command": "takeCandidate",
            "streamId": self.id,
            "label": mlineindex,
            "id": str(mlineindex),
            "candidate": str(candidate),
        })
        loop = asyncio.new_event_loop()
        loop.run_until_complete(self.conn.send(data))
        loop.close()

    def on_incoming_decodebin_stream(self, _, pad):
        """Handle a decoded incoming stream (display scaffolding removed).

        The original carried a large commented-out block that would attach
        autovideosink/autoaudiosink branches; restore it from the
        gstwebrtc-demos sendrecv example if playback is needed.
        """
        print('Incoming Decodebin Stream')

    def on_incoming_stream(self, _, pad):
        """webrtcbin pad-added handler (decodebin attachment left disabled)."""
        print('on_incoming_stream')

    def start_pipeline(self):
        """Build the pipeline and wire up the webrtcbin signal handlers."""
        print('Creating WebRTC Pipeline')
        self.pipe = Gst.parse_launch(PIPELINE_DESC)
        self.webrtc = self.pipe.get_by_name('sendrecv')
        self.webrtc.connect('on-negotiation-needed', self.on_negotiation_needed)
        self.webrtc.connect('on-ice-candidate', self.send_ice_candidate_message)
        self.webrtc.connect('pad-added', self.on_incoming_stream)
        self.pipe.set_state(Gst.State.PLAYING)

    def notification(self, data):
        """Log server notifications."""
        if data['definition'] == 'publish_started':
            print('Publish Started')
        else:
            print(data['definition'])

    def take_candidate(self, data):
        """Apply a remote ICE candidate received from the server.

        BUG FIX: the original tested `data['candidate'] and data['label']`,
        which silently dropped candidates for m-line 0 because 0 is falsy;
        test for None explicitly instead.
        """
        if data.get('candidate') is not None and data.get('label') is not None:
            self.webrtc.emit('add-ice-candidate', data['label'], data['candidate'])

    def take_configuration(self, data):
        """Apply the remote SDP answer received from the server."""
        assert (self.webrtc)
        assert (data['type'] == 'answer')
        res, sdpmsg = GstSdp.SDPMessage.new()
        GstSdp.sdp_message_parse_buffer(bytes(data['sdp'].encode()), sdpmsg)
        answer = GstWebRTC.WebRTCSessionDescription.new(GstWebRTC.WebRTCSDPType.ANSWER, sdpmsg)
        promise = Gst.Promise.new()
        self.webrtc.emit('set-remote-description', answer, promise)
        promise.interrupt()

    def close_pipeline(self):
        """Tear down the GStreamer pipeline."""
        print('Close Pipeline')
        self.pipe.set_state(Gst.State.NULL)
        self.pipe = None
        self.webrtc = None

    async def loop(self):
        """Main signalling loop: dispatch incoming websocket commands."""
        print('Inititialized')
        assert self.conn
        async for message in self.conn:

            data = json.loads(message)

            print('Message: ' + data['command'])

            if data['command'] == 'start':
                self.start_pipeline()
            elif data['command'] == 'takeCandidate':
                self.take_candidate(data)
            elif data['command'] == 'takeConfiguration':
                self.take_configuration(data)
            elif data['command'] == 'notification':
                self.notification(data)
            elif data['command'] == 'error':
                print('Message: ' + data['definition'])

        self.close_pipeline()
        return 0

    async def stop(self):
        """Close the signalling connection."""
        if self.conn:
            await self.conn.close()
            self.conn = None


def check_plugins():
    """Verify all required GStreamer plugins are installed."""
    needed = ["opus", "vpx", "nice", "webrtc", "dtls", "srtp", "rtp",
              "rtpmanager", "videotestsrc", "audiotestsrc"]
    missing = list(filter(lambda p: Gst.Registry.get().find_plugin(p) is None, needed))
    if len(missing):
        print('Missing gstreamer plugins:', missing)
        return False
    return True


if __name__ == '__main__':
    Gst.init(None)
    if not check_plugins():
        sys.exit(1)
    client = WebRTCClient('frank-edge')
    loop = asyncio.get_event_loop()
    loop.run_until_complete(client.connect())
    res = loop.run_until_complete(client.loop())
    sys.exit(res)