GstPipeline:pipeline0/nvv4l2h265enc:h265-enc_file_0: Device is in streaming mode

Please provide complete information as applicable to your setup.

• DeepStream Version 6. 1
RTX 3060
All rtsp deepstream samples are working but I have one deepstream application that rise error when we run through rtsp stream

the stream is rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4
and the error is

*** DeepStream: Launched RTSP Streaming at rtsp://localhost:8554/camera-0 ***


Starting pipeline 

gstnvtracker: Loading low-level lib at deepstream/lib/libnvds_nvmultiobjecttracker.so
gstnvtracker: Batch processing is ON
gstnvtracker: Past frame output is OFF
[NvMultiObjectTracker] Initialized
0:00:01.544212381 915187      0x4782060 INFO                 nvinfer gstnvinfer.cpp:646:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1909> [UID = 1]: deserialized trt engine from :/home/experts-vision/Documents/Jetson-app/Jetson_client/models/yolov4_resnet18_epoch_070.etlt_b1_gpu0_int8.engine
WARNING: [TRT]: The getMaxBatchSize() function should not be used with an engine built from a network created with NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag. This function will always return 1.
INFO: ../nvdsinfer/nvdsinfer_model_builder.cpp:610 [Implicit Engine Info]: layers num: 5
0   INPUT  kFLOAT Input           3x384x1248      
1   OUTPUT kINT32 BatchedNMS      1               
2   OUTPUT kFLOAT BatchedNMS_1    200x4           
3   OUTPUT kFLOAT BatchedNMS_2    200             
4   OUTPUT kFLOAT BatchedNMS_3    200             

0:00:01.600341940 915187      0x4782060 INFO                 nvinfer gstnvinfer.cpp:646:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2012> [UID = 1]: Use deserialized engine model: /home/experts-vision/Documents/Jetson-app/Jetson_client/models/yolov4_resnet18_epoch_070.etlt_b1_gpu0_int8.engine
0:00:01.603473818 915187      0x4782060 INFO                 nvinfer gstnvinfer_impl.cpp:328:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:/home/experts-vision/Documents/Jetson-app/Jetson_client/config/models/nvinfer_config.txt sucessfully
Decodebin child added: source 

Decodebin child added: decodebin0 

Decodebin child added: rtpmp4gdepay0 

Decodebin child added: decodebin1 

Decodebin child added: aacparse0 

Decodebin child added: rtph264depay0 

Decodebin child added: h264parse0 

Decodebin child added: capsfilter0 

Decodebin child added: nvv4l2decoder0 

Decodebin child added: avdec_aac0 

In cb_newpad

gstname= audio/x-raw
In cb_newpad

gstname= video/x-raw
features= <Gst.CapsFeatures object at 0x7f0dc1d7ba00 (GstCapsFeatures at 0x7f0c70001a60)>
Error: gst-resource-error-quark: Could not get/set settings from/on resource. (13): gstv4l2object.c(3511): gst_v4l2_object_set_format_full (): /GstPipeline:pipeline0/nvv4l2h265enc:h265-enc_file_0:
Device is in streaming mode

FPS: 0.270821158421986
deepstream listener thread ended!!!
[NvMultiObjectTracker] De-initialized

I have used two encoders in two different pipelines one for rtsp streaming and other will save video the code is here below.

#!/usr/bin/env python3


import datetime
import sys
sys.path.append("../")
import os
from deepstream.utils.common import is_aarch64
from deepstream.lib import pyds
from utils.Message import Message
from utils.constants import Constants
import math
import time
import yaml
import gi
gi.require_version("Gst", "1.0")
gi.require_version("GstRtspServer", "1.0")
from gi.repository import Gst, GstRtspServer, GLib
import configparser
from threading import Thread





color_list = {"topsite\r":  (0, 0, 1),               
              "downside\r": (1, 0, 0),
              "helmet\r": (0.6, 0.2, 1),
              "downiside\r":  (1, 0, 0),
              "person\r": (0.2, 0.8, 0.2),
	      "animal\r": (0.2, 0.7, 0.2)}




count_frames = 0
last_time = time.perf_counter()
stopping_flag = False


def bus_call(bus, message, manager):
    global stopping_flag

    t = message.type
    if t == Gst.MessageType.EOS:
        print("End-of-stream\n")
        stopping_flag = True
        manager.loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        print("Warning: %s: %s\n" % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        stopping_flag = True
        print("Error: %s: %s\n" % (err, debug))
        manager.loop.quit()
    return True


def filesink_out_bin(pipeline, manager, index):
    print("creating filesink out bin")
    queue = Gst.ElementFactory.make("queue", f"queue_file_{index}")

    print("Creating nvvidconv1")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", f"convertor1_file_{index}")
    if not nvvidconv1:
        sys.stderr.write("Error: Unable to create nvvidconv1")

    nvv4l2h265enc = Gst.ElementFactory.make("nvv4l2h265enc", f"h265-enc_file_{index}")
    # nvv4l2h265enc.set_property("bufapi-version", "true")

    h265parse = Gst.ElementFactory.make("h265parse", f"h265-parse_file_{index}")

    print(f"saving file at {os.path.join(manager.video_output_dir, f'camera-{index}_video_xx.mp4')}")
    file_sink = Gst.ElementFactory.make("splitmuxsink", f"filesink_file_{index}")
    file_sink.set_property("location", os.path.join(manager.video_output_dir, f"camera-{index}_video_%d.mp4"))
    file_sink.set_property("max-size-time", 1 * 60_000_000_000)  # in minutes

    pipeline.add(queue)
    pipeline.add(nvvidconv1)
    pipeline.add(nvv4l2h265enc)
    pipeline.add(h265parse)
    pipeline.add(file_sink)

    queue.link(nvvidconv1)
    nvvidconv1.link(nvv4l2h265enc)
    nvv4l2h265enc.link(h265parse)
    h265parse.link(file_sink)

    sinkpad = queue.get_static_pad('sink')
    srcpad = queue.get_static_pad('src')
    if not sinkpad:
        sys.stderr.write(f"Error: Unable to create file sink pad for stream-{index}")

    if index > 0:
        srcpad.add_probe(Gst.PadProbeType.BUFFER, control_flow_raw_probe, manager)
    else:
        srcpad.add_probe(Gst.PadProbeType.BUFFER, control_flow_inferred_probe, manager)
    return sinkpad


def analytics_probe(pad, info, manager):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame:
        balconys = set()
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        l_obj = frame_meta.obj_meta_list
        while l_obj:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            l_user_meta = obj_meta.obj_user_meta_list
            while l_user_meta:
                try:
                    user_meta = pyds.NvDsUserMeta.cast(l_user_meta.data)
                    if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type("NVIDIA.DSANALYTICSOBJ.USER_META"):
                        user_meta_data = pyds.NvDsAnalyticsObjInfo.cast(user_meta.user_meta_data)
                        if user_meta_data.roiStatus:
                            for i in user_meta_data.roiStatus:
                                balconys.add(i)
                except StopIteration:
                    break

                try:
                    l_user_meta = l_user_meta.next
                except StopIteration:
                    break

            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        display_text = ""
        first = True
        for balcony in balconys:
            if not first:
                display_text += "\n"
            display_text += f"DANGER! humans in {balcony}"
            first = False
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]

        py_nvosd_text_params.display_text = display_text

        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 15
        py_nvosd_text_params.font_params.font_color.set(1.0, 0.0, 0.0, 1.0)

        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def model_postprocessing_probe(pad, info, manager):
    IOU_factor = 0.6
    invalid_confidence_threshold = 0.2
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        all_balconys = []
        l_obj = frame_meta.obj_meta_list
        while l_obj:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            if "balcony" in obj_meta.obj_label:
                all_balconys.append(obj_meta)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        all_balconys.sort(key=lambda x: x.confidence, reverse=True)
        filtered_balconys = []
        for balcony in all_balconys:
            dont_add = False
            center_of_balcony = (balcony.rect_params.left + balcony.rect_params.width / 2,
                                  balcony.rect_params.top + balcony.rect_params.height / 2)

            for filtered_balcony in filtered_balconys:
                center_of_filtered = (filtered_balcony.rect_params.left + filtered_balcony.rect_params.width / 2,
                                      filtered_balcony.rect_params.top + filtered_balcony.rect_params.height / 2)
                dx = center_of_balcony[0] - center_of_filtered[0]
                dy = center_of_balcony[1] - center_of_filtered[1]
                if abs(dx) < balcony.rect_params.width * IOU_factor and abs(dy) < balcony.rect_params.height * IOU_factor:
                    if filtered_balcony.obj_label == "topsite" and balcony.obj_label == "downside":
                        if filtered_balcony.confidence - balcony.confidence < invalid_confidence_threshold:
                            filtered_balconys.remove(filtered_balcony)
                            pyds.nvds_remove_obj_meta_from_frame(frame_meta, filtered_balcony)
                            filtered_balconys.append(balcony)
                            break

                    dont_add = True
                    break
            if dont_add:
                # print(f"removing object in frame {frame_meta.frame_num}")
                pyds.nvds_remove_obj_meta_from_frame(frame_meta, balcony)
            else:
                filtered_balconys.append(balcony)
            #
            # print(balcony.confidence)
            # print(balcony.rect_params.left, balcony.rect_params.top, balcony.rect_params.width, balcony.rect_params.height)
        # print("*"*50)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def osd_sink_pad_buffer_probe(pad, info, manager):
    global count_frames, last_time
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        # if frame_meta.pad_index == 0:
        count_frames += 1
        if time.perf_counter() - last_time >= 1:
            print(f"FPS: {count_frames / (time.perf_counter() - last_time)}")
            count_frames = 0
            last_time = time.perf_counter()
        l_obj = frame_meta.obj_meta_list
        while l_obj:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            color = color_list[obj_meta.obj_label.split("\\")[0]]
            rect_params = obj_meta.rect_params
            rect_params.border_color.set(*color, 1)

            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def control_flow_raw_probe(pad, info, manager):
    if manager.record_raw_file:
        return Gst.PadProbeReturn.OK
    return Gst.PadProbeReturn.DROP


def control_flow_inferred_probe(pad, info, manager):
    if manager.record_inferred_file:
        return Gst.PadProbeReturn.OK
    return Gst.PadProbeReturn.DROP


def deepstream_listener(manager):
    try:
        manager.clear_sink_flags_queue()
        while True:
            if stopping_flag:
                break
            if not manager.sink_flags_queue_is_empty():
                message = manager.get_sink_flags_queue()
                print(f"deepstream listener got {message}")
                if message.header == Constants.Messages.STOP_DEEPSTREAM or message.header == Constants.Messages.CAMERAS_CONFIG:
                    manager.pipeline.send_event(Gst.Event.new_eos())
                    break
                else:
                    print(message.data)
                    print(type(message.data))
                    sink_number = int(message.data)
                    if sink_number == 0:
                        manager.record_inferred_file = not manager.record_inferred_file
                    else:
                        manager.record_raw_file = not manager.record_raw_file
            else:
                time.sleep(1)
    except KeyboardInterrupt:
        manager.pipeline.send_event(Gst.Event.new_eos())

    print("deepstream listener thread ended!!!")




def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)

    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    print("gstname=", gstname)
    if gstname.find("video") != -1:
        # Link the decodebin pad only if decodebin has picked nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        print("features=", features)
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write(
                    "Failed to link decoder src pad to source bin ghost pad\n"
                )
        else:
            sys.stderr.write(
                " Error: Decodebin did not pick nvidia decoder plugin.\n")


def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)


def create_source_bin(index, uri):
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has beed created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(
        Gst.GhostPad.new_no_target(
            "src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin


def ds_main(manager):
    # Check input arguments
    input_sources_file = manager.config_data["input_sources"]
    with open(input_sources_file, 'r') as f:
        input_sources = yaml.safe_load(f)["input_streams"]
    batch_size = len(input_sources)

    primary_detector_config = os.path.join(manager.working_directory, manager.config_data['primary_detector'])
    tracker_config = os.path.join(manager.working_directory, manager.config_data['tracker_config'])
    analytics_config = os.path.join(manager.working_directory, manager.config_data['analytics_config_file'])

    # Standard GStreamer initialization
    Gst.init(None)
    output_dir = os.path.join(manager.program_output_dir, f"{int(datetime.datetime.utcnow().timestamp())}")
    os.makedirs(output_dir, exist_ok=True)
    manager.video_output_dir = output_dir
    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(batch_size):
        print("Creating source_bin ", i, " \n ")
        uri_name = input_sources[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
            print("its a live stream!!")
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)

        tee_pre_mux = Gst.ElementFactory.make("tee", f"tee_pre_mux_{i}")
        if not tee_pre_mux:
            sys.stderr.write("unable to create tee pre mux")
        pipeline.add(tee_pre_mux)
        tee_sink_pad = tee_pre_mux.get_static_pad("sink")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(tee_sink_pad)

        tee_raw_file_src_pad = tee_pre_mux.get_request_pad("src_0")
        filesink_sink_pad = filesink_out_bin(pipeline, manager, i+1)
        tee_raw_file_src_pad.link(filesink_sink_pad)


        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        tee_streammux_src_pad = tee_pre_mux.get_request_pad("src_1")
        tee_streammux_src_pad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating nvtracker \n ")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    print("Creating nvdsanalytics \n ")
    nvanalytics = Gst.ElementFactory.make("nvdsanalytics", "analytics")
    if not nvanalytics:
        manager.logger.info(" Unable to create nvanalytics \n")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    tee_post_osd = Gst.ElementFactory.make("tee", "tee_post_osd")
    if not tee_post_osd:
        sys.stderr.write("Unable to create tee_post_osd")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420")
    )

    encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
    print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property("bitrate", manager.config_data["encoder_bitrate"])
    if is_aarch64():
        encoder.set_property("preset-level", 1)
        encoder.set_property("insert-sps-pps", 1)
        #encoder.set_property("bufapi-version", 1)

    rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
    print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink
    udpsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")

    streammux.set_property("width", manager.config_data["streammux_width"])
    streammux.set_property("height", manager.config_data["streammux_height"])
    streammux.set_property("batch-size", batch_size)
    streammux.set_property("batched-push-timeout", manager.config_data["streammux_batch_push"])
    if is_live:
        streammux.set_property('live-source', 1)

    pgie.set_property("config-file-path", primary_detector_config)

    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != batch_size:
        print(
            "WARNING: Overriding infer-config batch-size",
            pgie_batch_size,
            " with number of sources ",
            batch_size,
            " \n",
        )
        pgie.set_property("batch-size", batch_size)

        # Set properties of tracker
    config = configparser.ConfigParser()
    config.read(tracker_config)
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
        if key == 'enable-past-frame':
            tracker_enable_past_frame = config.getint('tracker', key)
            tracker.set_property('enable_past_frame', tracker_enable_past_frame)


    nvanalytics.set_property("config-file", analytics_config)

    print("Adding elements to Pipeline \n")
    tiler_rows = int(math.sqrt(batch_size))
    tiler_columns = int(math.ceil((1.0 * batch_size) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width",  manager.config_data["tiler_width"])
    tiler.set_property("height", manager.config_data["tiler_height"])


    sink.set_property("host", "224.224.255.255")
    sink.set_property("port", udpsink_port_num)
    sink.set_property("async", False)
    sink.set_property("sync", 1)
    sink.set_property("qos", 0)
    print(f"******************** UDP out at 224.224.255.255:{udpsink_port_num} ********************")

    queue_1 = Gst.ElementFactory.make("queue", "optimization-queue-1")
    queue_2 = Gst.ElementFactory.make("queue", "optimization-queue-2")
    queue_3 = Gst.ElementFactory.make("queue", "optimization-queue-3")
    queue_4 = Gst.ElementFactory.make("queue", "optimization-queue-4")
    queue_5 = Gst.ElementFactory.make("queue", "optimization-queue-5")
    queue_6 = Gst.ElementFactory.make("queue", "optimization-queue-6")
    queue_7 = Gst.ElementFactory.make("queue", "optimization-queue-7")
    queue_8 = Gst.ElementFactory.make("queue", "optimization-queue-8")
    queue_9 = Gst.ElementFactory.make("queue", "optimization-queue-9")

    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(nvanalytics)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(tee_post_osd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    pipeline.add(queue_1)
    pipeline.add(queue_2)
    pipeline.add(queue_3)
    pipeline.add(queue_4)
    pipeline.add(queue_5)
    pipeline.add(queue_6)
    pipeline.add(queue_7)
    pipeline.add(queue_8)
    pipeline.add(queue_9)

    streammux.link(queue_1)
    queue_1.link(pgie)
    pgie.link(queue_2)
    queue_2.link(nvvidconv)
    # queue_2.link(nvanalytics)
    # nvanalytics.link(queue_3)
    nvvidconv.link(queue_3)
    queue_3.link(tracker)
    tracker.link(queue_4)
    queue_4.link(tiler)
    tiler.link(queue_5)
    queue_5.link(nvosd)
    nvosd.link(queue_6)
    queue_6.link(tee_post_osd)

    tee_osd_file_src_pad = tee_post_osd.get_request_pad("src_0")
    filesink_osd_sink_pad = filesink_out_bin(pipeline, manager, 0)
    tee_osd_file_src_pad.link(filesink_osd_sink_pad)

    tee_rtsp_src_pad = tee_post_osd.get_request_pad("src_1")
    nvvidcov_postosd_sink_pad = nvvidconv_postosd.get_static_pad("sink")
    tee_rtsp_src_pad.link(nvvidcov_postosd_sink_pad)

    nvvidconv_postosd.link(caps)
    caps.link(queue_7)
    queue_7.link(encoder)
    encoder.link(queue_8)
    queue_8.link(rtppay)
    rtppay.link(queue_9)
    queue_9.link(sink)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, manager)

    manager.loop = loop
    manager.pipeline = pipeline
    # Start streaming
    rtsp_port_num = 8554

    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)

    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        f'( udpsrc name=pay0 port={udpsink_port_num} buffer-size=524288 caps="application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)H265, payload=96 " )')
    factory.set_shared(True)

    server.get_mount_points().add_factory(f"/camera-{0}", factory)

    print(f"\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:{rtsp_port_num}/camera-{0} ***\n\n")


    queue_4_src_pad = queue_4.get_static_pad("src")
    if not queue_4_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        queue_4_src_pad.add_probe(Gst.PadProbeType.BUFFER, model_postprocessing_probe, manager)

    nvanalytics_src_pad = nvanalytics.get_static_pad("src")
    if not nvanalytics_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        nvanalytics_src_pad.add_probe(Gst.PadProbeType.BUFFER, analytics_probe, manager)

    osd_sink_pad = nvosd.get_static_pad("sink")
    if not osd_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        osd_sink_pad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, manager)

    t = Thread(target=deepstream_listener, args=(manager,))
    t.start()
    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except BaseException:
        pipeline.send_event(Gst.Event.new_eos())
        time.sleep(5)
        while loop.is_running():
            print("waiting for loop to exit")
            time.sleep(1)
    # cleanup
    t.join()
    pipeline.set_state(Gst.State.NULL)
    message = Message(header=Constants.Messages.DEEPSTREAM_ENDED)
    manager.put_message_for_main(message)

why the error came up?

work on Jetson smoothly.

You can refer to the link below: https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new. We can only support 1 enc in your platform. You can try to change 1 stream to software encode.

thank you @yuweiw
it mean i have to comment one pipeline so other will work.

ok it worked for rtsp streaming as I have commented this line in code

#osd_sink_pad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, manager)

1 Like

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.