├── README.md
├── common
│   ├── is_aarch_64.py
│   ├── utils.py
│   ├── bus_call.py
│   └── FPS.py
├── gstreamer_test_h264.py
├── gstreamer_test_h265.py
├── dstest3_pgie_config.txt
├── deepstream_rtsp_h264.py
├── deepstream_videos_h264.py
└── deepstream_rtsps_h264.py
/README.md:
--------------------------------------------------------------------------------
 1 | # DeepStream Python RTSP video (H.264) | GStreamer Python RTSP (H.264 / H.265)
 2 | 
 3 | DeepStream Python examples that play and run inference on RTSP H.264 streams.
 4 | 
 5 | GStreamer Python examples that play RTSP H.264 and H.265 streams.
 6 | 
 7 | common : helper modules copied from deepstream_python_apps => apps
 8 | 
 9 | dstest3_pgie_config.txt : primary inference (pgie) test config
10 | 
11 | deepstream_rtsp_h264.py : play one RTSP H.264 stream
12 | 
13 | deepstream_rtsps_h264.py : play multiple RTSP H.264 streams
14 | 
15 | deepstream_videos_h264.py : play multiple H.264 video files
16 | 
17 | gstreamer_test_h264.py : play one RTSP H.264 stream
18 | 
19 | gstreamer_test_h265.py : play one RTSP H.265 stream
20 | 
--------------------------------------------------------------------------------
/common/is_aarch_64.py:
--------------------------------------------------------------------------------
 1 | ################################################################################
 2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 3 | #
 4 | # Permission is hereby granted, free of charge, to any person obtaining a
 5 | # copy of this software and associated documentation files (the "Software"),
 6 | # to deal in the Software without restriction, including without limitation
 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense,
 8 | # and/or sell copies of the Software, and to permit persons to whom the
 9 | # Software is furnished to do so, subject to the following conditions:
10 | #
11 | # The above copyright notice and this permission notice shall be included in
12 | # all copies or substantial portions of the Software.
13 | #
14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | # DEALINGS IN THE SOFTWARE.
21 | ################################################################################
22 | 
23 | import platform
24 | import sys
25 | 
26 | 
27 | def is_aarch64():
28 |     return platform.uname()[4] == 'aarch64'
29 | 
30 | sys.path.append('/opt/nvidia/deepstream/deepstream/lib')
--------------------------------------------------------------------------------
/common/utils.py:
--------------------------------------------------------------------------------
 1 | ################################################################################
 2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | import ctypes 24 | import sys 25 | sys.path.append('/opt/nvidia/deepstream/deepstream/lib') 26 | 27 | def long_to_int(l): 28 | value = ctypes.c_int(l & 0xffffffff).value 29 | return value 30 | -------------------------------------------------------------------------------- /gstreamer_test_h264.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import gi 4 | gi.require_version('Gst', '1.0') 5 | # from gi.repository import Gst, GObject, GLib 6 | 7 | from gi.repository import GObject, Gst 8 | from gi.repository import GLib 9 | from common.bus_call import bus_call 10 | 11 | 12 | # rtspsrc的srcpad是随机衬垫,这里使用回调函数来连接衬垫。 13 | def on_pad_added(src, pad, des): 14 | vpad = des.get_static_pad("sink") 15 | pad.link(vpad) 16 | 17 | 18 | def main(rtsp): 19 | print(rstp) 20 | Gst.init(None) 21 | 22 | pipe = Gst.Pipeline() 23 | 24 | queue1 = Gst.ElementFactory.make("queue", "queue1") 25 | 26 | source = Gst.ElementFactory.make("rtspsrc", "src") 27 | source.set_property("short-header", "true") 28 | source.set_property("location", rstp) 29 | source.set_property("latency", 0) # 缓存时间毫秒 30 | source.connect("pad-added", on_pad_added, queue1) 31 | 32 | depay = Gst.ElementFactory.make("rtph264depay", "depay") 33 | 34 | decodebin = Gst.ElementFactory.make("avdec_h264", "decodea") 35 | 36 | sink = Gst.ElementFactory.make("xvimagesink", "sink") 37 | 38 | #添加元素 39 | pipe.add(source) 40 | pipe.add(depay) 41 | pipe.add(queue1) 42 | pipe.add(sink) 43 | pipe.add(decodebin) 44 | 45 | #拼接 46 | queue1.link(depay) 47 | depay.link(decodebin) 48 | decodebin.link(sink) 49 | 50 | 51 | #开始运行 52 | loop = GLib.MainLoop() 53 | bus = pipe.get_bus() 54 | bus.add_signal_watch() 55 | bus.connect("message", bus_call, loop) 56 | pipe.set_state(Gst.State.PLAYING) 57 | try: 58 | loop.run() 59 | except: 60 | pass 61 | # cleanup 62 | pipe.set_state(Gst.State.NULL) 63 | 64 | if __name__ == '__main__': 65 | #rstp = 'rtsp://admin:123456789a@192.168.2.71:554/cam/realmonitor?channel=1&subtype=0' 66 | rstp = 'rtsp://admin:123456789a@192.168.2.3:554/h264/ch1/main/av_stream' 67 | main(rstp) 68 | sys.exit() -------------------------------------------------------------------------------- /common/bus_call.py: -------------------------------------------------------------------------------- 
1 | ################################################################################ 2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | import gi 24 | import sys 25 | gi.require_version('Gst', '1.0') 26 | from gi.repository import GObject, Gst 27 | def bus_call(bus, message, loop): 28 | t = message.type 29 | if t == Gst.MessageType.EOS: 30 | sys.stdout.write("End-of-stream\n") 31 | loop.quit() 32 | elif t==Gst.MessageType.WARNING: 33 | err, debug = message.parse_warning() 34 | sys.stderr.write("Warning: %s: %s\n" % (err, debug)) 35 | elif t == Gst.MessageType.ERROR: 36 | err, debug = message.parse_error() 37 | sys.stderr.write("Error: %s: %s\n" % (err, debug)) 38 | loop.quit() 39 | return True 40 | -------------------------------------------------------------------------------- /gstreamer_test_h265.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import gi 4 | gi.require_version('Gst', '1.0') 5 | # from gi.repository import Gst, GObject, GLib 6 | 7 | from gi.repository import GObject, Gst 8 | from gi.repository import GLib 9 | from common.bus_call import bus_call 10 | 11 | 12 | # rtspsrc的srcpad是随机衬垫,这里使用回调函数来连接衬垫。 13 | def on_pad_added(src, pad, des): 14 | vpad = des.get_static_pad("sink") 15 | pad.link(vpad) 16 | 17 | 18 | def main(rtsp): 19 | print(rstp) 20 | Gst.init(None) 21 | 22 | pipe = Gst.Pipeline() 23 | 24 | queue1 = Gst.ElementFactory.make("queue", "queue1") 25 | 26 | source = Gst.ElementFactory.make("rtspsrc", "src") 27 | source.set_property("short-header", "true") 28 | source.set_property("location", rstp) 29 | source.set_property("latency", 0) # 缓存时间毫秒 30 | source.connect("pad-added", on_pad_added, queue1) 31 | 32 | depay = Gst.ElementFactory.make("rtph265depay", "depay") 33 | h265parser = Gst.ElementFactory.make("h265parse", "h265-parser") 34 | decodebin = Gst.ElementFactory.make("avdec_h265", "decodea") 35 | 36 | sink = Gst.ElementFactory.make("xvimagesink", "sink") 37 | 38 | #添加元素 39 | pipe.add(source) 40 | pipe.add(depay) 41 | pipe.add(h265parser) 42 | pipe.add(queue1) 43 | pipe.add(sink) 44 | pipe.add(decodebin) 45 | 46 | #拼接 47 | queue1.link(depay) 48 | depay.link(h265parser) 49 | h265parser.link(decodebin) 50 | decodebin.link(sink) 51 | 52 | #开始运行 53 | loop = GLib.MainLoop() 54 | 
bus = pipe.get_bus() 55 | bus.add_signal_watch() 56 | bus.connect("message", bus_call, loop) 57 | pipe.set_state(Gst.State.PLAYING) 58 | try: 59 | loop.run() 60 | except: 61 | pass 62 | # cleanup 63 | pipe.set_state(Gst.State.NULL) 64 | 65 | if __name__ == '__main__': 66 | # rstp = 'rtsp://admin:123456789a@192.168.2.71:554/cam/realmonitor?channel=1&subtype=0' 67 | rstp = 'rtsp://admin:123456789a@192.168.2.3:554/h265/ch1/main/av_stream' 68 | main(rstp) 69 | sys.exit() -------------------------------------------------------------------------------- /common/FPS.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | import time 24 | start_time=time.time() 25 | frame_count=0 26 | 27 | class GETFPS: 28 | def __init__(self,stream_id): 29 | global start_time 30 | self.start_time=start_time 31 | self.is_first=True 32 | global frame_count 33 | self.frame_count=frame_count 34 | self.stream_id=stream_id 35 | def get_fps(self): 36 | end_time=time.time() 37 | if(self.is_first): 38 | self.start_time=end_time 39 | self.is_first=False 40 | if(end_time-self.start_time>1): 41 | print("**********************FPS*****************************************") 42 | print("Fps of stream",self.stream_id,"is ", float(self.frame_count)/1.0) 43 | self.frame_count=0 44 | self.start_time=end_time 45 | else: 46 | self.frame_count=self.frame_count+1 47 | def print_data(self): 48 | print('frame_count=',self.frame_count) 49 | print('start_time=',self.start_time) 50 | 51 | -------------------------------------------------------------------------------- /dstest3_pgie_config.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | # Following properties are mandatory when engine files are not specified: 24 | # int8-calib-file(Only in INT8) 25 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 26 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 27 | # ONNX: onnx-file 28 | # 29 | # Mandatory properties for detectors: 30 | # num-detected-classes 31 | # 32 | # Optional properties for detectors: 33 | # cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) 34 | # custom-lib-path 35 | # parse-bbox-func-name 36 | # 37 | # Mandatory properties for classifiers: 38 | # classifier-threshold, is-classifier 39 | # 40 | # Optional properties for classifiers: 41 | # classifier-async-mode(Secondary mode only, Default=false) 42 | # 43 | # Optional properties in secondary mode: 44 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 45 | # input-object-min-width, input-object-min-height, input-object-max-width, 46 | # input-object-max-height 47 | # 48 | # Following properties are always recommended: 49 | # batch-size(Default=1) 50 | # 51 | # Other optional properties: 52 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 53 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 54 | # mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary), 55 | # custom-lib-path, network-mode(Default=0 i.e FP32) 56 | # 57 | # The values in the config file are overridden by values set through GObject 58 | # properties. 
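# Notes on the settings below:
# - net-scale-factor=0.0039215697906911373 is 1/255, i.e. input pixels are
#   scaled to the [0,1] range before inference.
# - network-mode=1 selects INT8, which is why int8-calib-file is supplied;
#   use 0 for FP32 or 2 for FP16 when no calibration file is available.
# - num-detected-classes=4 matches the bundled resnet10 Primary_Detector
#   sample (Car, Bicycle, Person, Roadsign) and its labels.txt.
# - model-engine-file is commented out, so TensorRT builds the engine on the
#   first run; point it at the generated .engine file afterwards to skip the
#   rebuild.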
59 | 60 | [property] 61 | gpu-id=0 62 | net-scale-factor=0.0039215697906911373 63 | model-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.caffemodel 64 | proto-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.prototxt 65 | #model-engine-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine 66 | labelfile-path=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/labels.txt 67 | int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/cal_trt.bin 68 | force-implicit-batch-dim=1 69 | batch-size=1 70 | process-mode=1 71 | model-color-format=0 72 | network-mode=1 73 | num-detected-classes=4 74 | interval=0 75 | gie-unique-id=1 76 | output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid 77 | 78 | [class-attrs-all] 79 | pre-cluster-threshold=0.2 80 | eps=0.2 81 | group-threshold=1 82 | -------------------------------------------------------------------------------- /deepstream_rtsp_h264.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import gi 4 | 5 | gi.require_version('Gst', '1.0') 6 | # from gi.repository import Gst, GObject, GLib 7 | 8 | from gi.repository import GObject, Gst 9 | from gi.repository import GLib 10 | from common.bus_call import bus_call 11 | from common.is_aarch_64 import is_aarch64 12 | from common.FPS import GETFPS 13 | import pyds 14 | 15 | fps_streams = {} 16 | 17 | 18 | # tiler_sink_pad_buffer_probe将提取OSD接收到的元数据,并更新绘制矩形的参数,对象信息等. 19 | def tiler_src_pad_buffer_probe(pad, info, u_data): 20 | gst_buffer = info.get_buffer() 21 | if not gst_buffer: 22 | print("无法获取GstBuffer") 23 | return 24 | 25 | # 从gst_buffer U缓冲区检索批处理元数据 26 | # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the 27 | # C gst_缓冲区的地址作为输入,通过哈希(gst_缓冲区)获得 28 | batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer)) 29 | l_frame = batch_meta.frame_meta_list 30 | 31 | while l_frame is not None: 32 | try: 33 | # 请注意,l_frame.data需要转换为pyds.NvDsFrameMeta 34 | # 演员由pyds完成。glist_get_nvds_frame_meta() 35 | # 强制转换还保留底层内存的所有权 36 | # 在C代码中,因此Python垃圾收集器将离开 37 | # 只有它。 38 | # frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data) 39 | frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data) 40 | except StopIteration as ex: 41 | print('异常') 42 | print(ex) 43 | break 44 | 45 | ''' 46 | print("Frame Number is ", frame_meta.frame_num) 47 | print("Source id is ", frame_meta.source_id) 48 | print("Batch id is ", frame_meta.batch_id) 49 | print("Source Frame Width ", frame_meta.source_frame_width) 50 | print("Source Frame Height ", frame_meta.source_frame_height) 51 | print("Num object meta ", frame_meta.num_obj_meta) 52 | ''' 53 | source_id = frame_meta.source_id 54 | batch_id = frame_meta.batch_id 55 | frame_number = frame_meta.frame_num # 帧序号 56 | 57 | l_obj = frame_meta.obj_meta_list # 检测结果 58 | num_rects = frame_meta.num_obj_meta # 检测数量 59 | # print('源ID='+str(source_id)+' ,批次ID='+str(batch_id)+' ,帧序号='+str(frame_number)+' ,检测数量='+str(num_rects)) 60 | 61 | while l_obj is not None: 62 | try: 63 | # Casting l_obj.data to pyds.NvDsObjectMeta 64 | obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data) 65 | except StopIteration as ex: 66 | print('异常') 67 | print(ex) 68 | break 69 | if obj_meta.class_id < 0: 70 | print('类ID错误:' + str(obj_meta.class_id) + ' ,不能小于0!') 71 | break 72 | cls_name = obj_meta.obj_label 73 | org_bbox_coords = obj_meta.detector_bbox_info.org_bbox_coords # 检测信息 height , left, top, width 74 | 
tracker_bbox_coords = obj_meta.tracker_bbox_info.org_bbox_coords 75 | clas_conf = round(obj_meta.confidence, 3) # 检测置信度 76 | tracker_conf = round(obj_meta.tracker_confidence, 3) # 跟踪置信度,dcf 跟踪才有置信度 77 | tracker_id_src = obj_meta.object_id # 跟踪ID 78 | print('obj_meta.object_id=', obj_meta.object_id) 79 | 80 | conf = clas_conf 81 | if clas_conf <= 0 and tracker_conf > 0: 82 | conf = tracker_conf 83 | box_x = round(org_bbox_coords.left, 0) 84 | box_y = round(org_bbox_coords.top, 0) 85 | box_w = round(org_bbox_coords.width, 0) 86 | box_h = round(org_bbox_coords.height, 0) 87 | if box_w <= 0 and tracker_bbox_coords.width > 0: 88 | box_x = round(tracker_bbox_coords.left, 0) 89 | box_y = round(tracker_bbox_coords.top, 0) 90 | box_w = round(tracker_bbox_coords.width, 0) 91 | box_h = round(tracker_bbox_coords.height, 0) 92 | 93 | msg = '源ID={},批次ID={},帧序号={} , 类id={}, 类名称={} , 置信度={},x={},y={},w={},h={}'.format(source_id, 94 | batch_id, 95 | frame_number, 96 | obj_meta.class_id, 97 | cls_name, 98 | conf, box_x, box_y, 99 | box_w, box_h) 100 | print(msg) 101 | 102 | rect_params = obj_meta.rect_params # 对象的位置参数 103 | box_color = [255, 255, 255, 255] 104 | rect_params.border_color.set(box_color[0], box_color[1], box_color[2], 105 | box_color[3]) # 指定检测边界框的边框颜色,{ r: 1.0 g: 0.0 b: 0.0 a: 1.0 } 106 | 107 | # mask_params= obj_meta.rect_params #保存对象的遮罩参数,此蒙版覆盖在对象上 108 | text_params = obj_meta.text_params # 保存描述对象的文本,该文本可以覆盖在标识对象的标准文本上 109 | display_text = '' 110 | display_text = cls_name + ' ' + str(conf) 111 | 112 | text_params.display_text = display_text 113 | 114 | # text_params.y_offset = text_params.y_offset -4 #12 115 | 116 | text_params.font_params.font_color.set(0.0, 0.0, 0.0, 1.0) # 设置字体颜色,{ r: 1.0 g: 0.0 b: 0.0 a: 1.0 } 117 | text_params.set_bg_clr = 1 # 设置背景填充 118 | bg_color = [255, 255, 255, 255] 119 | text_params.text_bg_clr.set(bg_color[0], bg_color[1], bg_color[2], 120 | bg_color[3]) # 设置背景颜色,{ r: 1.0 g: 0.0 b: 0.0 a: 1.0 } 121 | 122 | # 偏移一下 y轴 123 | if text_params.y_offset < 0: 124 | text_params.y_offset = 0 125 | 126 | try: 127 | l_obj = l_obj.next 128 | except StopIteration as ex: 129 | print('异常') 130 | print(ex) 131 | break 132 | 133 | """display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta) 134 | display_meta.num_labels = 1 135 | py_nvosd_text_params = display_meta.text_params[0] 136 | py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, vehicle_count, person) 137 | py_nvosd_text_params.x_offset = 10; 138 | py_nvosd_text_params.y_offset = 12; 139 | py_nvosd_text_params.font_params.font_name = "Serif" 140 | py_nvosd_text_params.font_params.font_size = 10 141 | py_nvosd_text_params.font_params.font_color.red = 1.0 142 | py_nvosd_text_params.font_params.font_color.green = 1.0 143 | py_nvosd_text_params.font_params.font_color.blue = 1.0 144 | py_nvosd_text_params.font_params.font_color.alpha = 1.0 145 | py_nvosd_text_params.set_bg_clr = 1 146 | py_nvosd_text_params.text_bg_clr.red = 0.0 147 | py_nvosd_text_params.text_bg_clr.green = 0.0 148 | py_nvosd_text_params.text_bg_clr.blue = 0.0 149 | py_nvosd_text_params.text_bg_clr.alpha = 1.0 150 | #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",vehicle_count,"Person_count=",person) 151 | pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)""" 152 | 153 | # msg = "帧序号={} , 检测数量={} , 未佩戴口罩计数={} , 正常佩戴口罩计数={} , 错误佩戴口罩={} ".format( 154 | # frame_number, num_rects, obj_counter[PGIE_CLASS_ID_1], 
obj_counter[PGIE_CLASS_ID_2], obj_counter[PGIE_CLASS_ID_3]) 155 | # print(msg) 156 | 157 | # Get frame rate through this probe 158 | fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps() 159 | try: 160 | l_frame = l_frame.next 161 | except StopIteration: 162 | break 163 | 164 | return Gst.PadProbeReturn.OK 165 | 166 | 167 | def on_pad_added(element, element_src_pad, data): 168 | print("In cb_newpad\n"); 169 | caps = element_src_pad.get_current_caps() 170 | str = caps.get_structure(0) 171 | name = str.get_name() 172 | depay_elem = data 173 | 174 | media = str.get_string("media") 175 | is_video = media == 'video' 176 | if 'x-rtp' in name and is_video is True: 177 | print('开始绑定RTSP') 178 | sinkpad = depay_elem.get_static_pad("sink") 179 | state = element_src_pad.link(sinkpad) 180 | if state != Gst.PadLinkReturn.OK: 181 | print('无法将depay加载程序链接到rtsp src') 182 | else: 183 | print('绑定RTSP成功') 184 | else: 185 | print('不符合不能绑定,get_name=', name, ' , media=', media) 186 | 187 | 188 | def main(rtsp): 189 | print(rstp) 190 | # Standard GStreamer initialization 191 | Gst.init(None) 192 | 193 | fps_streams['stream0'] = GETFPS(0) 194 | 195 | # Create gstreamer elements 196 | # Create Pipeline element that will form a connection of other elements 197 | print("Creating Pipeline \n ") 198 | pipeline = Gst.Pipeline() 199 | 200 | if not pipeline: 201 | sys.stderr.write(" Unable to create Pipeline \n") 202 | 203 | # Source element for reading from the file 204 | print("Creating Source \n ") 205 | source = Gst.ElementFactory.make("rtspsrc", "rtsp-source") 206 | if not source: 207 | sys.stderr.write(" Unable to create Source \n") 208 | source.set_property("short-header", "true") 209 | source.set_property("latency", 0) # 缓存时间毫秒 210 | depay = Gst.ElementFactory.make('rtph264depay', "depay") 211 | if not depay: 212 | sys.stderr.write(" Unable to create depayer \n") 213 | 214 | source.connect('pad-added', on_pad_added, depay) 215 | 216 | # Since the data format in the input file is elementary h264 stream, 217 | # we need a h264parser 218 | print("Creating H264Parser \n") 219 | h264parser = Gst.ElementFactory.make("h264parse", "h264-parser") 220 | if not h264parser: 221 | sys.stderr.write(" Unable to create h264 parser \n") 222 | 223 | # Use nvdec_h264 for hardware accelerated decode on GPU 224 | print("Creating Decoder \n") 225 | decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder") 226 | if not decoder: 227 | sys.stderr.write(" Unable to create Nvv4l2 Decoder \n") 228 | 229 | # decoder.set_property('mjpeg',1) 230 | 231 | # Create nvstreammux instance to form batches from one or more sources. 
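    # nvstreammux scales each connected source to the width/height set below,
    # assembles the frames into batched NVMM buffers and attaches the batch
    # metadata that the pad probe above walks; batch-size should match the
    # number of sources (1 here) and batched-push-timeout is how long, in
    # microseconds, to wait before pushing an incomplete batch downstream.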
232 | streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") 233 | if not streammux: 234 | sys.stderr.write(" Unable to create NvStreamMux \n") 235 | 236 | # Use nvinfer to run inferencing on decoder's output, 237 | # behaviour of inferencing is set through config file 238 | pgie = Gst.ElementFactory.make("nvinfer", "primary-inference") 239 | if not pgie: 240 | sys.stderr.write(" Unable to create pgie \n") 241 | 242 | # Use convertor to convert from NV12 to RGBA as required by nvosd 243 | nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor") 244 | if not nvvidconv: 245 | sys.stderr.write(" Unable to create nvvidconv \n") 246 | 247 | # Create OSD to draw on the converted RGBA buffer 248 | nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay") 249 | 250 | if not nvosd: 251 | sys.stderr.write(" Unable to create nvosd \n") 252 | 253 | # Finally render the osd output 254 | if is_aarch64(): 255 | transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform") 256 | 257 | print("Creating EGLSink \n") 258 | sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") 259 | if not sink: 260 | sys.stderr.write(" Unable to create egl sink \n") 261 | 262 | source.set_property('location', rtsp) 263 | streammux.set_property('width', 1280) 264 | streammux.set_property('height', 720) 265 | streammux.set_property('batch-size', 1) 266 | streammux.set_property('batched-push-timeout', 4000000) 267 | pgie.set_property('config-file-path', "dstest3_pgie_config.txt") 268 | 269 | print("Adding elements to Pipeline \n") 270 | pipeline.add(source) 271 | pipeline.add(depay) 272 | pipeline.add(h264parser) 273 | pipeline.add(decoder) 274 | pipeline.add(streammux) 275 | pipeline.add(pgie) 276 | pipeline.add(nvvidconv) 277 | pipeline.add(nvosd) 278 | pipeline.add(sink) 279 | if is_aarch64(): 280 | pipeline.add(transform) 281 | 282 | # we link the elements together 283 | # file-source -> h264-parser -> nvh264-decoder -> 284 | # nvinfer -> nvvidconv -> nvosd -> video-renderer 285 | print("Linking elements in the Pipeline \n") 286 | source.link(depay) 287 | depay.link(h264parser) 288 | h264parser.link(decoder) 289 | 290 | sinkpad = streammux.get_request_pad("sink_0") 291 | if not sinkpad: 292 | sys.stderr.write(" Unable to get the sink pad of streammux \n") 293 | srcpad = decoder.get_static_pad("src") 294 | if not srcpad: 295 | sys.stderr.write(" Unable to get source pad of decoder \n") 296 | srcpad.link(sinkpad) 297 | 298 | streammux.link(pgie) 299 | pgie.link(nvvidconv) 300 | nvvidconv.link(nvosd) 301 | if is_aarch64(): 302 | nvosd.link(transform) 303 | transform.link(sink) 304 | else: 305 | nvosd.link(sink) 306 | 307 | # 开始运行 308 | loop = GLib.MainLoop() 309 | bus = pipeline.get_bus() 310 | bus.add_signal_watch() 311 | bus.connect("message", bus_call, loop) 312 | 313 | tiler_src_pad = pgie.get_static_pad("src") 314 | if not tiler_src_pad: 315 | sys.stderr.write(" 无法获取src pad \n") 316 | else: 317 | tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0) 318 | 319 | pipeline.set_state(Gst.State.PLAYING) 320 | try: 321 | loop.run() 322 | except: 323 | pass 324 | # cleanup 325 | pipeline.set_state(Gst.State.NULL) 326 | 327 | 328 | if __name__ == '__main__': 329 | # rstp = 'rtsp://admin:123456789a@192.168.2.66:554/cam/realmonitor?channel=1&subtype=0' 330 | # rstp = 'rtsp://admin:123456789a@192.168.2.3:554/h264/ch1/sub/av_stream' 331 | rstp = 'rtsp://admin:123456789a@192.168.2.3:554/h264/ch1/main/av_stream' 332 | main(rstp) 333 | sys.exit() 334 | 
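# ------------------------------------------------------------------------
# Alternative source wiring (sketch only, not used by main() above): instead
# of hand-linking rtspsrc -> rtph264depay -> h264parse -> nvv4l2decoder, a
# uridecodebin can pick the depayloader/parser/decoder itself and its decoded
# pad can be linked straight to nvstreammux. The helper name
# build_uri_source() is illustrative; the elements and the NVMM check mirror
# NVIDIA's deepstream-test3 sample.
# ------------------------------------------------------------------------
def build_uri_source(pipeline, streammux, uri, index):
    uri_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin-%d" % index)
    if not uri_bin:
        sys.stderr.write(" Unable to create uridecodebin \n")
        return None
    uri_bin.set_property("uri", uri)

    def on_decoded_pad(decodebin, pad):
        caps = pad.get_current_caps() or pad.query_caps(None)
        structure = caps.get_structure(0)
        if not structure.get_name().startswith("video"):
            return  # ignore audio pads
        # nvstreammux expects NVMM (GPU) buffers from the decoder.
        if not caps.get_features(0).contains("memory:NVMM"):
            sys.stderr.write(" Decoded pad does not carry NVMM buffers \n")
            return
        sinkpad = streammux.get_request_pad("sink_%d" % index)
        if pad.link(sinkpad) != Gst.PadLinkReturn.OK:
            sys.stderr.write(" Failed to link decoded pad to nvstreammux \n")

    uri_bin.connect("pad-added", on_decoded_pad)
    pipeline.add(uri_bin)
    return uri_bin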
-------------------------------------------------------------------------------- /deepstream_videos_h264.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import gi 4 | 5 | gi.require_version('Gst', '1.0') 6 | # from gi.repository import Gst, GObject, GLib 7 | 8 | from gi.repository import GObject, Gst 9 | from gi.repository import GLib 10 | from common.bus_call import bus_call 11 | from common.is_aarch_64 import is_aarch64 12 | from common.FPS import GETFPS 13 | import pyds 14 | 15 | fps_streams = {} 16 | 17 | 18 | def prn_obj(obj): 19 | print(obj.__str__()) 20 | print('details:', dir(obj)) 21 | 22 | 23 | # tiler_sink_pad_buffer_probe将提取OSD接收到的元数据,并更新绘制矩形的参数,对象信息等. 24 | def tiler_src_pad_buffer_probe(pad, info, u_data): 25 | gst_buffer = info.get_buffer() 26 | if not gst_buffer: 27 | print("无法获取GstBuffer") 28 | return 29 | 30 | # 从gst_buffer U缓冲区检索批处理元数据 31 | # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the 32 | # C gst_缓冲区的地址作为输入,通过哈希(gst_缓冲区)获得 33 | batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer)) 34 | l_frame = batch_meta.frame_meta_list 35 | 36 | while l_frame is not None: 37 | try: 38 | # 请注意,l_frame.data需要转换为pyds.NvDsFrameMeta 39 | # 演员由pyds完成。glist_get_nvds_frame_meta() 40 | # 强制转换还保留底层内存的所有权 41 | # 在C代码中,因此Python垃圾收集器将离开 42 | # 只有它。 43 | # frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data) 44 | frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data) 45 | except StopIteration as ex: 46 | print('异常') 47 | print(ex) 48 | break 49 | 50 | ''' 51 | print("Frame Number is ", frame_meta.frame_num) 52 | print("Source id is ", frame_meta.source_id) 53 | print("Batch id is ", frame_meta.batch_id) 54 | print("Source Frame Width ", frame_meta.source_frame_width) 55 | print("Source Frame Height ", frame_meta.source_frame_height) 56 | print("Num object meta ", frame_meta.num_obj_meta) 57 | ''' 58 | source_id = frame_meta.source_id 59 | batch_id = frame_meta.batch_id 60 | frame_number = frame_meta.frame_num # 帧序号 61 | 62 | l_obj = frame_meta.obj_meta_list # 检测结果 63 | num_rects = frame_meta.num_obj_meta # 检测数量 64 | # print('源ID='+str(source_id)+' ,批次ID='+str(batch_id)+' ,帧序号='+str(frame_number)+' ,检测数量='+str(num_rects)) 65 | 66 | while l_obj is not None: 67 | try: 68 | # Casting l_obj.data to pyds.NvDsObjectMeta 69 | obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data) 70 | except StopIteration as ex: 71 | print('异常') 72 | print(ex) 73 | break 74 | if obj_meta.class_id < 0: 75 | print('类ID错误:' + str(obj_meta.class_id) + ' ,不能小于0!') 76 | break 77 | cls_name = obj_meta.obj_label 78 | org_bbox_coords = obj_meta.detector_bbox_info.org_bbox_coords # 检测信息 height , left, top, width 79 | tracker_bbox_coords = obj_meta.tracker_bbox_info.org_bbox_coords 80 | clas_conf = round(obj_meta.confidence, 3) # 检测置信度 81 | tracker_conf = round(obj_meta.tracker_confidence, 3) # 跟踪置信度,dcf 跟踪才有置信度 82 | tracker_id_src = obj_meta.object_id # 跟踪ID 83 | print('obj_meta.object_id=', obj_meta.object_id) 84 | 85 | conf = clas_conf 86 | if clas_conf <= 0 and tracker_conf > 0: 87 | conf = tracker_conf 88 | box_x = round(org_bbox_coords.left, 0) 89 | box_y = round(org_bbox_coords.top, 0) 90 | box_w = round(org_bbox_coords.width, 0) 91 | box_h = round(org_bbox_coords.height, 0) 92 | if box_w <= 0 and tracker_bbox_coords.width > 0: 93 | box_x = round(tracker_bbox_coords.left, 0) 94 | box_y = round(tracker_bbox_coords.top, 0) 95 | box_w = round(tracker_bbox_coords.width, 0) 96 | box_h = round(tracker_bbox_coords.height, 0) 97 | 98 | msg = 
'源ID={},批次ID={},帧序号={} , 类id={}, 类名称={} , 置信度={},x={},y={},w={},h={}'.format(source_id, 99 | batch_id, 100 | frame_number, 101 | obj_meta.class_id, 102 | cls_name, 103 | conf, box_x, box_y, 104 | box_w, box_h) 105 | print(msg) 106 | 107 | rect_params = obj_meta.rect_params # 对象的位置参数 108 | box_color = [255, 255, 255, 255] 109 | rect_params.border_color.set(box_color[0], box_color[1], box_color[2], 110 | box_color[3]) # 指定检测边界框的边框颜色,{ r: 1.0 g: 0.0 b: 0.0 a: 1.0 } 111 | 112 | # mask_params= obj_meta.rect_params #保存对象的遮罩参数,此蒙版覆盖在对象上 113 | text_params = obj_meta.text_params # 保存描述对象的文本,该文本可以覆盖在标识对象的标准文本上 114 | display_text = '' 115 | display_text = cls_name + ' ' + str(conf) 116 | 117 | text_params.display_text = display_text 118 | 119 | # text_params.y_offset = text_params.y_offset -4 #12 120 | 121 | text_params.font_params.font_color.set(0.0, 0.0, 0.0, 1.0) # 设置字体颜色,{ r: 1.0 g: 0.0 b: 0.0 a: 1.0 } 122 | text_params.set_bg_clr = 1 # 设置背景填充 123 | bg_color = [255, 255, 255, 255] 124 | text_params.text_bg_clr.set(bg_color[0], bg_color[1], bg_color[2], 125 | bg_color[3]) # 设置背景颜色,{ r: 1.0 g: 0.0 b: 0.0 a: 1.0 } 126 | 127 | # 偏移一下 y轴 128 | if text_params.y_offset < 0: 129 | text_params.y_offset = 0 130 | 131 | try: 132 | l_obj = l_obj.next 133 | except StopIteration as ex: 134 | print('异常') 135 | print(ex) 136 | break 137 | 138 | """display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta) 139 | display_meta.num_labels = 1 140 | py_nvosd_text_params = display_meta.text_params[0] 141 | py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, vehicle_count, person) 142 | py_nvosd_text_params.x_offset = 10; 143 | py_nvosd_text_params.y_offset = 12; 144 | py_nvosd_text_params.font_params.font_name = "Serif" 145 | py_nvosd_text_params.font_params.font_size = 10 146 | py_nvosd_text_params.font_params.font_color.red = 1.0 147 | py_nvosd_text_params.font_params.font_color.green = 1.0 148 | py_nvosd_text_params.font_params.font_color.blue = 1.0 149 | py_nvosd_text_params.font_params.font_color.alpha = 1.0 150 | py_nvosd_text_params.set_bg_clr = 1 151 | py_nvosd_text_params.text_bg_clr.red = 0.0 152 | py_nvosd_text_params.text_bg_clr.green = 0.0 153 | py_nvosd_text_params.text_bg_clr.blue = 0.0 154 | py_nvosd_text_params.text_bg_clr.alpha = 1.0 155 | #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",vehicle_count,"Person_count=",person) 156 | pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)""" 157 | 158 | # msg = "帧序号={} , 检测数量={} , 未佩戴口罩计数={} , 正常佩戴口罩计数={} , 错误佩戴口罩={} ".format( 159 | # frame_number, num_rects, obj_counter[PGIE_CLASS_ID_1], obj_counter[PGIE_CLASS_ID_2], obj_counter[PGIE_CLASS_ID_3]) 160 | # print(msg) 161 | 162 | # Get frame rate through this probe 163 | fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps() 164 | try: 165 | l_frame = l_frame.next 166 | except StopIteration: 167 | break 168 | 169 | return Gst.PadProbeReturn.OK 170 | 171 | 172 | def get_source(url, index, pipeline): 173 | # Source element for reading from the file 174 | print("Creating Source \n ") 175 | source = Gst.ElementFactory.make("filesrc", "file-source-" + str(index)) 176 | if not source: 177 | sys.stderr.write(" Unable to create Source \n") 178 | 179 | 180 | 181 | # Since the data format in the input file is elementary h264 stream, 182 | # we need a h264parser 183 | print("Creating H264Parser \n") 184 | h264parser = Gst.ElementFactory.make("h264parse", "h264-parser" + 
str(index)) 185 | if not h264parser: 186 | sys.stderr.write(" Unable to create h264 parser \n") 187 | 188 | # Use nvdec_h264 for hardware accelerated decode on GPU 189 | print("Creating Decoder \n") 190 | decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder" + str(index)) 191 | if not decoder: 192 | sys.stderr.write(" Unable to create Nvv4l2 Decoder \n") 193 | 194 | source.set_property('location', url) 195 | 196 | pipeline.add(source) 197 | 198 | pipeline.add(h264parser) 199 | pipeline.add(decoder) 200 | 201 | # file-source -> h264-parser -> nvh264-decoder -> 202 | source.link(h264parser) 203 | 204 | h264parser.link(decoder) 205 | 206 | return decoder 207 | 208 | 209 | def main(rtsps, width, height, tiler_rows, tiler_columns, TILED_OUTPUT_WIDTH, TILED_OUTPUT_HEIGHT, config_file_path): 210 | print(rtsps) 211 | 212 | for i in range(len(rtsps)): 213 | str_key = "stream{0}".format(i) 214 | fps_streams[str_key] = GETFPS(i) 215 | 216 | # Standard GStreamer initialization 217 | Gst.init(None) 218 | 219 | # Create gstreamer elements 220 | # Create Pipeline element that will form a connection of other elements 221 | print("Creating Pipeline \n ") 222 | pipeline = Gst.Pipeline() 223 | 224 | if not pipeline: 225 | sys.stderr.write(" Unable to create Pipeline \n") 226 | 227 | # Create nvstreammux instance to form batches from one or more sources. 228 | streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") 229 | if not streammux: 230 | sys.stderr.write(" Unable to create NvStreamMux \n") 231 | 232 | pipeline.add(streammux) 233 | 234 | for i in range(len(rtsps)): 235 | url = rtsps[i] 236 | decoder = get_source(url, i, pipeline) 237 | sinkpad = streammux.get_request_pad("sink_" + str(i)) 238 | if not sinkpad: 239 | sys.stderr.write(" Unable to get the sink pad of streammux \n") 240 | srcpad = decoder.get_static_pad("src") 241 | if not srcpad: 242 | sys.stderr.write(" Unable to get source pad of decoder \n") 243 | srcpad.link(sinkpad) 244 | 245 | # Use nvinfer to run inferencing on decoder's output, 246 | # behaviour of inferencing is set through config file 247 | pgie = Gst.ElementFactory.make("nvinfer", "primary-inference") 248 | if not pgie: 249 | sys.stderr.write(" Unable to create pgie \n") 250 | 251 | # Use convertor to convert from NV12 to RGBA as required by nvosd 252 | nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor") 253 | if not nvvidconv: 254 | sys.stderr.write(" Unable to create nvvidconv \n") 255 | 256 | tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler") 257 | if not tiler: 258 | sys.stderr.write(" Unable to create tiler \n") 259 | 260 | # Create OSD to draw on the converted RGBA buffer 261 | nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay") 262 | 263 | if not nvosd: 264 | sys.stderr.write(" Unable to create nvosd \n") 265 | 266 | nvosd.set_property('process-mode', 0) 267 | nvosd.set_property('display-text', 1) 268 | 269 | # Finally render the osd output 270 | if is_aarch64(): 271 | transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform") 272 | 273 | print("Creating EGLSink \n") 274 | # nveglglessink 275 | # nvoverlaysink 276 | sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") 277 | if not sink: 278 | sys.stderr.write(" Unable to create egl sink \n") 279 | 280 | streammux.set_property('width', width) 281 | streammux.set_property('height', height) 282 | streammux.set_property('batch-size', len(rtsps)) 283 | streammux.set_property('batched-push-timeout', 40000) 284 | # 
streammux.set_property('live-source', 1) 285 | pgie.set_property('config-file-path', config_file_path) 286 | 287 | tiler.set_property("rows", tiler_rows) 288 | tiler.set_property("columns", tiler_columns) 289 | tiler.set_property("width", TILED_OUTPUT_WIDTH) 290 | tiler.set_property("height", TILED_OUTPUT_HEIGHT) 291 | sink.set_property("qos",0) 292 | 293 | print("Adding elements to Pipeline \n") 294 | 295 | pipeline.add(pgie) 296 | pipeline.add(tiler) 297 | pipeline.add(nvvidconv) 298 | pipeline.add(nvosd) 299 | pipeline.add(sink) 300 | if is_aarch64(): 301 | pipeline.add(transform) 302 | 303 | # we link the elements together 304 | 305 | # nvinfer -> nvvidconv -> nvosd -> video-renderer 306 | print("Linking elements in the Pipeline \n") 307 | 308 | streammux.link(pgie) 309 | pgie.link(tiler) 310 | tiler.link(nvvidconv) 311 | nvvidconv.link(nvosd) 312 | if is_aarch64(): 313 | nvosd.link(transform) 314 | transform.link(sink) 315 | else: 316 | nvosd.link(sink) 317 | 318 | # 开始运行 319 | loop = GLib.MainLoop() 320 | bus = pipeline.get_bus() 321 | bus.add_signal_watch() 322 | bus.connect("message", bus_call, loop) 323 | 324 | tiler_src_pad = pgie.get_static_pad("src") 325 | if not tiler_src_pad: 326 | sys.stderr.write(" 无法获取src pad \n") 327 | else: 328 | tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0) 329 | 330 | pipeline.set_state(Gst.State.PLAYING) 331 | try: 332 | loop.run() 333 | except: 334 | pass 335 | # cleanup 336 | pipeline.set_state(Gst.State.NULL) 337 | 338 | 339 | if __name__ == '__main__': 340 | videos = [] 341 | videos.append('/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264') 342 | videos.append('/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264') 343 | videos.append('/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264') 344 | videos.append('/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264') 345 | width = 1280 346 | height = 720 347 | tiler_rows = 2 348 | tiler_columns = 2 349 | TILED_OUTPUT_WIDTH = 1800 350 | TILED_OUTPUT_HEIGHT = 1080 351 | config_file_path = 'dstest3_pgie_config.txt' 352 | main(videos, width, height, tiler_rows, tiler_columns, TILED_OUTPUT_WIDTH, TILED_OUTPUT_HEIGHT, config_file_path) 353 | sys.exit() 354 | -------------------------------------------------------------------------------- /deepstream_rtsps_h264.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import gi 4 | 5 | gi.require_version('Gst', '1.0') 6 | # from gi.repository import Gst, GObject, GLib 7 | 8 | from gi.repository import GObject, Gst 9 | from gi.repository import GLib 10 | from common.bus_call import bus_call 11 | from common.is_aarch_64 import is_aarch64 12 | from common.FPS import GETFPS 13 | import pyds 14 | 15 | fps_streams = {} 16 | 17 | 18 | 19 | # tiler_sink_pad_buffer_probe将提取OSD接收到的元数据,并更新绘制矩形的参数,对象信息等. 
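# The probe below is attached to pgie's src pad: for every buffer it walks
# NvDsBatchMeta -> NvDsFrameMeta -> NvDsObjectMeta, reads the detector /
# tracker bounding boxes and confidences, and restyles the OSD rectangle and
# label (border colour, display text, background) before nvdsosd draws them.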
20 | def tiler_src_pad_buffer_probe(pad, info, u_data): 21 | gst_buffer = info.get_buffer() 22 | if not gst_buffer: 23 | print("无法获取GstBuffer") 24 | return 25 | 26 | # 从gst_buffer U缓冲区检索批处理元数据 27 | # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the 28 | # C gst_缓冲区的地址作为输入,通过哈希(gst_缓冲区)获得 29 | batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer)) 30 | l_frame = batch_meta.frame_meta_list 31 | 32 | while l_frame is not None: 33 | try: 34 | # 请注意,l_frame.data需要转换为pyds.NvDsFrameMeta 35 | # 演员由pyds完成。glist_get_nvds_frame_meta() 36 | # 强制转换还保留底层内存的所有权 37 | # 在C代码中,因此Python垃圾收集器将离开 38 | # 只有它。 39 | # frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data) 40 | frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data) 41 | except StopIteration as ex: 42 | print('异常') 43 | print(ex) 44 | break 45 | 46 | ''' 47 | print("Frame Number is ", frame_meta.frame_num) 48 | print("Source id is ", frame_meta.source_id) 49 | print("Batch id is ", frame_meta.batch_id) 50 | print("Source Frame Width ", frame_meta.source_frame_width) 51 | print("Source Frame Height ", frame_meta.source_frame_height) 52 | print("Num object meta ", frame_meta.num_obj_meta) 53 | ''' 54 | source_id = frame_meta.source_id 55 | batch_id = frame_meta.batch_id 56 | frame_number = frame_meta.frame_num # 帧序号 57 | 58 | l_obj = frame_meta.obj_meta_list # 检测结果 59 | num_rects = frame_meta.num_obj_meta # 检测数量 60 | # print('源ID='+str(source_id)+' ,批次ID='+str(batch_id)+' ,帧序号='+str(frame_number)+' ,检测数量='+str(num_rects)) 61 | 62 | while l_obj is not None: 63 | try: 64 | # Casting l_obj.data to pyds.NvDsObjectMeta 65 | obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data) 66 | except StopIteration as ex: 67 | print('异常') 68 | print(ex) 69 | break 70 | if obj_meta.class_id < 0: 71 | print('类ID错误:' + str(obj_meta.class_id) + ' ,不能小于0!') 72 | break 73 | cls_name = obj_meta.obj_label 74 | org_bbox_coords = obj_meta.detector_bbox_info.org_bbox_coords # 检测信息 height , left, top, width 75 | tracker_bbox_coords = obj_meta.tracker_bbox_info.org_bbox_coords 76 | clas_conf = round(obj_meta.confidence, 3) # 检测置信度 77 | tracker_conf = round(obj_meta.tracker_confidence, 3) # 跟踪置信度,dcf 跟踪才有置信度 78 | tracker_id_src = obj_meta.object_id # 跟踪ID 79 | print('obj_meta.object_id=', obj_meta.object_id) 80 | 81 | conf = clas_conf 82 | if clas_conf <= 0 and tracker_conf > 0: 83 | conf = tracker_conf 84 | box_x = round(org_bbox_coords.left, 0) 85 | box_y = round(org_bbox_coords.top, 0) 86 | box_w = round(org_bbox_coords.width, 0) 87 | box_h = round(org_bbox_coords.height, 0) 88 | if box_w <= 0 and tracker_bbox_coords.width > 0: 89 | box_x = round(tracker_bbox_coords.left, 0) 90 | box_y = round(tracker_bbox_coords.top, 0) 91 | box_w = round(tracker_bbox_coords.width, 0) 92 | box_h = round(tracker_bbox_coords.height, 0) 93 | 94 | msg = '源ID={},批次ID={},帧序号={} , 类id={}, 类名称={} , 置信度={},x={},y={},w={},h={}'.format(source_id, 95 | batch_id, 96 | frame_number, 97 | obj_meta.class_id, 98 | cls_name, 99 | conf, box_x, box_y, 100 | box_w, box_h) 101 | print(msg) 102 | 103 | rect_params = obj_meta.rect_params # 对象的位置参数 104 | box_color = [255, 255, 255, 255] 105 | rect_params.border_color.set(box_color[0], box_color[1], box_color[2], 106 | box_color[3]) # 指定检测边界框的边框颜色,{ r: 1.0 g: 0.0 b: 0.0 a: 1.0 } 107 | 108 | # mask_params= obj_meta.rect_params #保存对象的遮罩参数,此蒙版覆盖在对象上 109 | text_params = obj_meta.text_params # 保存描述对象的文本,该文本可以覆盖在标识对象的标准文本上 110 | display_text = '' 111 | display_text = cls_name + ' ' + str(conf) 112 | 113 | text_params.display_text = display_text 114 | 115 | # 
text_params.y_offset = text_params.y_offset -4 #12 116 | 117 | text_params.font_params.font_color.set(0.0, 0.0, 0.0, 1.0) # 设置字体颜色,{ r: 1.0 g: 0.0 b: 0.0 a: 1.0 } 118 | text_params.set_bg_clr = 1 # 设置背景填充 119 | bg_color = [255, 255, 255, 255] 120 | text_params.text_bg_clr.set(bg_color[0], bg_color[1], bg_color[2], 121 | bg_color[3]) # 设置背景颜色,{ r: 1.0 g: 0.0 b: 0.0 a: 1.0 } 122 | 123 | # 偏移一下 y轴 124 | if text_params.y_offset < 0: 125 | text_params.y_offset = 0 126 | 127 | try: 128 | l_obj = l_obj.next 129 | except StopIteration as ex: 130 | print('异常') 131 | print(ex) 132 | break 133 | 134 | """display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta) 135 | display_meta.num_labels = 1 136 | py_nvosd_text_params = display_meta.text_params[0] 137 | py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, vehicle_count, person) 138 | py_nvosd_text_params.x_offset = 10; 139 | py_nvosd_text_params.y_offset = 12; 140 | py_nvosd_text_params.font_params.font_name = "Serif" 141 | py_nvosd_text_params.font_params.font_size = 10 142 | py_nvosd_text_params.font_params.font_color.red = 1.0 143 | py_nvosd_text_params.font_params.font_color.green = 1.0 144 | py_nvosd_text_params.font_params.font_color.blue = 1.0 145 | py_nvosd_text_params.font_params.font_color.alpha = 1.0 146 | py_nvosd_text_params.set_bg_clr = 1 147 | py_nvosd_text_params.text_bg_clr.red = 0.0 148 | py_nvosd_text_params.text_bg_clr.green = 0.0 149 | py_nvosd_text_params.text_bg_clr.blue = 0.0 150 | py_nvosd_text_params.text_bg_clr.alpha = 1.0 151 | #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",vehicle_count,"Person_count=",person) 152 | pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)""" 153 | 154 | # msg = "帧序号={} , 检测数量={} , 未佩戴口罩计数={} , 正常佩戴口罩计数={} , 错误佩戴口罩={} ".format( 155 | # frame_number, num_rects, obj_counter[PGIE_CLASS_ID_1], obj_counter[PGIE_CLASS_ID_2], obj_counter[PGIE_CLASS_ID_3]) 156 | # print(msg) 157 | 158 | # Get frame rate through this probe 159 | fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps() 160 | try: 161 | l_frame = l_frame.next 162 | except StopIteration: 163 | break 164 | 165 | return Gst.PadProbeReturn.OK 166 | 167 | 168 | def on_pad_added(element, element_src_pad, data): 169 | print("In cb_newpad\n"); 170 | caps = element_src_pad.get_current_caps() 171 | str = caps.get_structure(0) 172 | name = str.get_name() 173 | depay_elem = data 174 | 175 | media = str.get_string("media") 176 | is_video = media == 'video' 177 | if 'x-rtp' in name and is_video is True: 178 | print('开始绑定RTSP') 179 | sinkpad = depay_elem.get_static_pad("sink") 180 | state = element_src_pad.link(sinkpad) 181 | if state != Gst.PadLinkReturn.OK: 182 | print('无法将depay加载程序链接到rtsp src') 183 | else: 184 | print('绑定RTSP成功') 185 | else: 186 | print('不符合不能绑定,get_name=', name, ' , media=', media) 187 | 188 | 189 | def get_source(url, index, pipeline): 190 | # Source element for reading from the file 191 | print("Creating Source \n ") 192 | source = Gst.ElementFactory.make("rtspsrc", "rtsp-source-" + str(index)) 193 | if not source: 194 | sys.stderr.write(" Unable to create Source \n") 195 | source.set_property("short-header", "true") 196 | source.set_property('latency', 0) # 缓存毫秒 197 | depay = Gst.ElementFactory.make('rtph264depay', "depay" + str(index)) 198 | if not depay: 199 | sys.stderr.write(" Unable to create depayer \n") 200 | 201 | source.connect('pad-added', on_pad_added, depay) 202 | 
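    # rtspsrc creates its source pad only after the RTSP session has been
    # negotiated, so it cannot be linked statically here; on_pad_added()
    # links the new pad to this depayloader's sink pad once the caps report
    # an application/x-rtp video stream.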
203 | # Since the data format in the input file is elementary h264 stream, 204 | # we need a h264parser 205 | print("Creating H264Parser \n") 206 | h264parser = Gst.ElementFactory.make("h264parse", "h264-parser" + str(index)) 207 | if not h264parser: 208 | sys.stderr.write(" Unable to create h264 parser \n") 209 | 210 | # Use nvdec_h264 for hardware accelerated decode on GPU 211 | print("Creating Decoder \n") 212 | decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder" + str(index)) 213 | if not decoder: 214 | sys.stderr.write(" Unable to create Nvv4l2 Decoder \n") 215 | 216 | source.set_property('location', url) 217 | 218 | pipeline.add(source) 219 | pipeline.add(depay) 220 | pipeline.add(h264parser) 221 | pipeline.add(decoder) 222 | 223 | # file-source -> h264-parser -> nvh264-decoder -> 224 | source.link(depay) 225 | depay.link(h264parser) 226 | h264parser.link(decoder) 227 | 228 | return decoder 229 | 230 | 231 | def main(rtsps, width, height, tiler_rows, tiler_columns, TILED_OUTPUT_WIDTH, TILED_OUTPUT_HEIGHT, config_file_path): 232 | print(rtsps) 233 | 234 | for i in range(len(rtsps)): 235 | str_key = "stream{0}".format(i) 236 | fps_streams[str_key] = GETFPS(i) 237 | 238 | # Standard GStreamer initialization 239 | Gst.init(None) 240 | 241 | # Create gstreamer elements 242 | # Create Pipeline element that will form a connection of other elements 243 | print("Creating Pipeline \n ") 244 | pipeline = Gst.Pipeline() 245 | 246 | if not pipeline: 247 | sys.stderr.write(" Unable to create Pipeline \n") 248 | 249 | # Create nvstreammux instance to form batches from one or more sources. 250 | streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") 251 | if not streammux: 252 | sys.stderr.write(" Unable to create NvStreamMux \n") 253 | 254 | pipeline.add(streammux) 255 | 256 | for i in range(len(rtsps)): 257 | url = rtsps[i] 258 | decoder = get_source(url, i, pipeline) 259 | sinkpad = streammux.get_request_pad("sink_" + str(i)) 260 | if not sinkpad: 261 | sys.stderr.write(" Unable to get the sink pad of streammux \n") 262 | srcpad = decoder.get_static_pad("src") 263 | if not srcpad: 264 | sys.stderr.write(" Unable to get source pad of decoder \n") 265 | srcpad.link(sinkpad) 266 | 267 | # Use nvinfer to run inferencing on decoder's output, 268 | # behaviour of inferencing is set through config file 269 | pgie = Gst.ElementFactory.make("nvinfer", "primary-inference") 270 | if not pgie: 271 | sys.stderr.write(" Unable to create pgie \n") 272 | 273 | # Use convertor to convert from NV12 to RGBA as required by nvosd 274 | nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor") 275 | if not nvvidconv: 276 | sys.stderr.write(" Unable to create nvvidconv \n") 277 | 278 | tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler") 279 | if not tiler: 280 | sys.stderr.write(" Unable to create tiler \n") 281 | 282 | # Create OSD to draw on the converted RGBA buffer 283 | nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay") 284 | 285 | if not nvosd: 286 | sys.stderr.write(" Unable to create nvosd \n") 287 | 288 | nvosd.set_property('process-mode', 0) 289 | nvosd.set_property('display-text', 1) 290 | 291 | # Finally render the osd output 292 | if is_aarch64(): 293 | transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform") 294 | 295 | print("Creating EGLSink \n") 296 | # nveglglessink 297 | # nvoverlaysink 298 | sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") 299 | if not sink: 300 | sys.stderr.write(" Unable to create egl 
sink \n") 301 | 302 | streammux.set_property('width', width) 303 | streammux.set_property('height', height) 304 | streammux.set_property('batch-size', len(rtsps)) 305 | streammux.set_property('batched-push-timeout', 40000) 306 | streammux.set_property('live-source', 1) 307 | pgie.set_property('config-file-path', config_file_path) 308 | 309 | tiler.set_property("rows", tiler_rows) 310 | tiler.set_property("columns", tiler_columns) 311 | tiler.set_property("width", TILED_OUTPUT_WIDTH) 312 | tiler.set_property("height", TILED_OUTPUT_HEIGHT) 313 | # sink.set_property("qos",0) 314 | 315 | print("Adding elements to Pipeline \n") 316 | 317 | pipeline.add(pgie) 318 | pipeline.add(tiler) 319 | pipeline.add(nvvidconv) 320 | pipeline.add(nvosd) 321 | pipeline.add(sink) 322 | if is_aarch64(): 323 | pipeline.add(transform) 324 | 325 | # we link the elements together 326 | 327 | # nvinfer -> nvvidconv -> nvosd -> video-renderer 328 | print("Linking elements in the Pipeline \n") 329 | 330 | streammux.link(pgie) 331 | pgie.link(tiler) 332 | tiler.link(nvvidconv) 333 | nvvidconv.link(nvosd) 334 | if is_aarch64(): 335 | nvosd.link(transform) 336 | transform.link(sink) 337 | else: 338 | nvosd.link(sink) 339 | 340 | # 开始运行 341 | loop = GLib.MainLoop() 342 | bus = pipeline.get_bus() 343 | bus.add_signal_watch() 344 | bus.connect("message", bus_call, loop) 345 | 346 | tiler_src_pad = pgie.get_static_pad("src") 347 | if not tiler_src_pad: 348 | sys.stderr.write(" 无法获取src pad \n") 349 | else: 350 | tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0) 351 | 352 | pipeline.set_state(Gst.State.PLAYING) 353 | try: 354 | loop.run() 355 | except: 356 | pass 357 | # cleanup 358 | pipeline.set_state(Gst.State.NULL) 359 | 360 | 361 | if __name__ == '__main__': 362 | rstps = [] 363 | rstps.append('rtsp://admin:123456789a@192.168.2.66:554/cam/realmonitor?channel=1&subtype=0') 364 | rstps.append('rtsp://admin:123456789a@192.168.2.66:554/cam/realmonitor?channel=1&subtype=1') 365 | # rstps.append('rtsp://admin:123456789a@192.168.2.3:554/h264/ch1/main/av_stream') 366 | # rstps.append('rtsp://admin:123456789a@192.168.2.3:554/h264/ch1/sub/av_stream') 367 | width = 1280 368 | height = 720 369 | tiler_rows = 2 370 | tiler_columns = 2 371 | TILED_OUTPUT_WIDTH = 1800 372 | TILED_OUTPUT_HEIGHT = 1080 373 | config_file_path = 'dstest3_pgie_config.txt' 374 | main(rstps, width, height, tiler_rows, tiler_columns, TILED_OUTPUT_WIDTH, TILED_OUTPUT_HEIGHT, config_file_path) 375 | sys.exit() 376 | --------------------------------------------------------------------------------