Pass sink arguments

I want to understand, how to pass :

How can I specify RTSP streaming of DeepStream output?

You can enable remote display by adding an RTSP sink in the application configuration file. The sample configuration file source30_1080p_dec_infer-resnet_tiled_display_int8.txt has an example of this in the [sink2] section. You must set the enable flag to 1. Once you enable remote display, the application prints the RTSP URL, which you can open in any media player like VLC.

This whole thing inside a python file, instead of a .txt file.
Using the setproperty functions, is there a reference program for the same.

You man refer to this example: deepstream_python_apps/apps/deepstream-test1-rtsp-out at master · NVIDIA-AI-IOT/deepstream_python_apps (github.com)

gstname= video/x-raw
features= <Gst.CapsFeatures object at 0x7f5f8fd88ca0 (GstCapsFeatures at 0x7f5e680c6200)>
In cb_newpad

gstname= video/x-raw
features= <Gst.CapsFeatures object at 0x7f5f8fd88280 (GstCapsFeatures at 0x7f5e60058ca0)>
0:00:18.566600007 9929 0x1e6dd80 WARN nvinfer gstnvinfer.cpp:2300:gst_nvinfer_output_loop: error: Internal data stream error.
0:00:18.566616413 9929 0x1e6dd80 WARN nvinfer gstnvinfer.cpp:2300:gst_nvinfer_output_loop: error: streaming stopped, reason not-linked (-1)
Error: gst-stream-error-quark: Internal data stream error. (1): gstnvinfer.cpp(2300): gst_nvinfer_output_loop (): /GstPipeline:pipeline0/GstNvInfer:primary-inference:
streaming stopped, reason not-linked (-1)
Exiting app

This is the error that I am getting .

Is this from deepstream example or you program? Can you provide the command and full log?

It is the deepstream3.py where I made some changes from “How to create an rtsp sink with deepstream python program? - #17 by mdegans” to stream.

This is the entire python file.

#!/usr/bin/env python3

################################################################################

SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

SPDX-License-Identifier: Apache-2.0

Licensed under the Apache License, Version 2.0 (the “License”);

you may not use this file except in compliance with the License.

You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software

distributed under the License is distributed on an “AS IS” BASIS,

WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

See the License for the specific language governing permissions and

limitations under the License.

################################################################################

import sys
sys.path.append(‘…/’)
from pathlib import Path
import gi
import configparser
import argparse
gi.require_version(‘Gst’, ‘1.0’)
from gi.repository import GLib, Gst
from ctypes import *
import time
import sys
import math
import platform
from common.is_aarch_64 import is_aarch64
from common.osd import on_buffer as osd_sink_pad_buffer_probe

from common.bus_call import bus_call
from common.FPS import PERF_DATA

import pyds

no_display = False
silent = False
file_loop = False
perf_data = None

MAX_DISPLAY_LEN=64
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
MUXER_OUTPUT_WIDTH=1920
MUXER_OUTPUT_HEIGHT=1080
MUXER_BATCH_TIMEOUT_USEC=4000000
TILED_OUTPUT_WIDTH=1280
TILED_OUTPUT_HEIGHT=720
GST_CAPS_FEATURES_NVMM=“memory:NVMM”
OSD_PROCESS_MODE= 0
OSD_DISPLAY_TEXT= 1
pgie_classes_str= [“Vehicle”, “TwoWheeler”, “Person”,“RoadSign”]

pgie_src_pad_buffer_probe will extract metadata received on tiler sink pad

and update params for drawing rectangle, object information etc.

def pgie_src_pad_buffer_probe(pad,info,u_data):
frame_number=0
num_rects=0
got_fps = False
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.NvDsFrameMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break

    frame_number=frame_meta.frame_num
    l_obj=frame_meta.obj_meta_list
    num_rects = frame_meta.num_obj_meta
    obj_counter = {
    PGIE_CLASS_ID_VEHICLE:0,
    PGIE_CLASS_ID_PERSON:0,
    PGIE_CLASS_ID_BICYCLE:0,
    PGIE_CLASS_ID_ROADSIGN:0
    }
    while l_obj is not None:
        try: 
            # Casting l_obj.data to pyds.NvDsObjectMeta
            obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
        except StopIteration:
            break
        obj_counter[obj_meta.class_id] += 1
        try: 
            l_obj=l_obj.next
        except StopIteration:
            break
    if not silent:
        print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",obj_counter[PGIE_CLASS_ID_VEHICLE],"Person_count=",obj_counter[PGIE_CLASS_ID_PERSON])

    # Update frame rate through this probe
    stream_index = "stream{0}".format(frame_meta.pad_index)
    global perf_data
    perf_data.update_fps(stream_index)

    try:
        l_frame=l_frame.next
    except StopIteration:
        break

return Gst.PadProbeReturn.OK

def cb_newpad(decodebin, decoder_src_pad,data):
print(“In cb_newpad\n”)
caps=decoder_src_pad.get_current_caps()
if not caps:
caps = decoder_src_pad.query_caps()
gststruct=caps.get_structure(0)
gstname=gststruct.get_name()
source_bin=data
features=caps.get_features(0)

# Need to check if the pad created by the decodebin is for video and not
# audio.
print("gstname=",gstname)
if(gstname.find("video")!=-1):
    # Link the decodebin pad only if decodebin has picked nvidia
    # decoder plugin nvdec_*. We do this by checking if the pad caps contain
    # NVMM memory features.
    print("features=",features)
    if features.contains("memory:NVMM"):
        # Get the source bin ghost pad
        bin_ghost_pad=source_bin.get_static_pad("src")
        if not bin_ghost_pad.set_target(decoder_src_pad):
            sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
    else:
        sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")

def decodebin_child_added(child_proxy,Object,name,user_data):
print(“Decodebin child added:”, name, “\n”)
if(name.find(“decodebin”) != -1):
Object.connect(“child-added”,decodebin_child_added,user_data)

if "source" in name:
    source_element = child_proxy.get_by_name("source")
    if source_element.find_property('drop-on-latency') != None:
        Object.set_property("drop-on-latency", True)

def create_source_bin(index,uri):
print(“Creating source bin”)

# Create a source GstBin to abstract this bin's content from the rest of the
# pipeline
bin_name="source-bin-%02d" %index
print(bin_name)
nbin=Gst.Bin.new(bin_name)
if not nbin:
    sys.stderr.write(" Unable to create source bin \n")

# Source element for reading from the uri.
# We will use decodebin and let it figure out the container format of the
# stream and the codec and plug the appropriate demux and decode plugins.
if file_loop:
    # use nvurisrcbin to enable file-loop
    uri_decode_bin=Gst.ElementFactory.make("nvurisrcbin", "uri-decode-bin")
    uri_decode_bin.set_property("file-loop", 1)
else:
    uri_decode_bin=Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
if not uri_decode_bin:
    sys.stderr.write(" Unable to create uri decode bin \n")
# We set the input uri to the source element
uri_decode_bin.set_property("uri",uri)
# Connect to the "pad-added" signal of the decodebin which generates a
# callback once a new pad for raw data has beed created by the decodebin
uri_decode_bin.connect("pad-added",cb_newpad,nbin)
uri_decode_bin.connect("child-added",decodebin_child_added,nbin)

# We need to create a ghost pad for the source bin which will act as a proxy
# for the video decoder src pad. The ghost pad will not have a target right
# now. Once the decode bin creates the video decoder and generates the
# cb_newpad callback, we will set the ghost pad target to the video decoder
# src pad.
Gst.Bin.add(nbin,uri_decode_bin)
bin_pad=nbin.add_pad(Gst.GhostPad.new_no_target("src",Gst.PadDirection.SRC))
if not bin_pad:
    sys.stderr.write(" Failed to add ghost pad in source bin \n")
    return None
return nbin

def main(args, requested_pgie=None, config=None, disable_probe=False):
global perf_data
perf_data = PERF_DATA(len(args))

number_sources=len(args)

# Standard GStreamer initialization
Gst.init(None)

# Create gstreamer elements */
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
is_live = False

if not pipeline:
    sys.stderr.write(" Unable to create Pipeline \n")
print("Creating streamux \n ")

# Create nvstreammux instance to form batches from one or more sources.
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
    sys.stderr.write(" Unable to create NvStreamMux \n")

pipeline.add(streammux)
for i in range(number_sources):
    print("Creating source_bin ",i," \n ")
    uri_name=args[i]
    if uri_name.find("rtsp://") == 0 :
        is_live = True
    source_bin=create_source_bin(i, uri_name)
    if not source_bin:
        sys.stderr.write("Unable to create source bin \n")
    pipeline.add(source_bin)
    padname="sink_%u" %i
    sinkpad= streammux.get_request_pad(padname) 
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin \n")
    srcpad=source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin \n")
    srcpad.link(sinkpad)
queue1=Gst.ElementFactory.make("queue","queue1")
queue2=Gst.ElementFactory.make("queue","queue2")
queue3=Gst.ElementFactory.make("queue","queue3")
queue4=Gst.ElementFactory.make("queue","queue4")
queue5=Gst.ElementFactory.make("queue","queue5")
pipeline.add(queue1)
pipeline.add(queue2)
pipeline.add(queue3)
pipeline.add(queue4)
pipeline.add(queue5)

nvdslogger = None
transform = None

print("Creating Pgie \n ")
if requested_pgie != None and (requested_pgie == 'nvinferserver' or requested_pgie == 'nvinferserver-grpc') :
    pgie = Gst.ElementFactory.make("nvinferserver", "primary-inference")
elif requested_pgie != None and requested_pgie == 'nvinfer':
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
else:
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")

if not pgie:
    sys.stderr.write(" Unable to create pgie :  %s\n" % requested_pgie)

if disable_probe:
    # Use nvdslogger for perf measurement instead of probe function
    print ("Creating nvdslogger \n")
    nvdslogger = Gst.ElementFactory.make("nvdslogger", "nvdslogger")

print("Creating tiler \n ")
tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
if not tiler:
    sys.stderr.write(" Unable to create tiler \n")
print("Creating nvvidconv \n ")
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
    sys.stderr.write(" Unable to create nvvidconv \n")
print("Creating nvosd \n ")
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
    sys.stderr.write(" Unable to create nvosd \n")
nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
if not nvvidconv_postosd:
    sys.stderr.write(" Unable to create nvvidconv_postosd \n")
nvosd.set_property('process-mode',OSD_PROCESS_MODE)
nvosd.set_property('display-text',OSD_DISPLAY_TEXT)
"""
sink code
"""
# Make the h264 encoder
encoder = Gst.ElementFactory.make("nvv4l2h264enc", "h264-encoder")
if not encoder:
    sys.stderr.write(" Unable to create encoder")
encoder.set_property('bitrate', 4000000)
if is_aarch64():
    encoder.set_property('preset-level', 1)
    encoder.set_property('insert-sps-pps', 1)
    encoder.set_property('bufapi-version', 1)

# Make the payload-encode video into RTP packets
rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay-h264")
if not rtppay:
    sys.stderr.write(" Unable to create rtppay")

# Make the UDP sink
updsink_port_num = 5400
sink = Gst.ElementFactory.make("udpsink", "udpsink")
if not sink:
    sys.stderr.write(" Unable to create udpsink")

sink.set_property('host', '224.224.255.255')
sink.set_property('port', updsink_port_num)
sink.set_property('async', False)
sink.set_property('sync', 1)

streammux.set_property('width', 1920)
streammux.set_property('height', 1080)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
######################3
if no_display:
    print("Creating Fakesink \n")
    sink = Gst.ElementFactory.make("fakesink", "fakesink")
    sink.set_property('enable-last-sample', 0)
    sink.set_property('sync', 0)
else:
    if(is_aarch64()):
        print("Creating transform \n ")
        transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")

if not sink:
    sys.stderr.write(" Unable to create sink element \n")

if is_live:
    print("At least one of the sources is live")
    streammux.set_property('live-source', 1)

streammux.set_property('width', 1920)
streammux.set_property('height', 1080)
streammux.set_property('batch-size', number_sources)
streammux.set_property('batched-push-timeout', 4000000)
if requested_pgie == "nvinferserver" and config != None:
    pgie.set_property('config-file-path', config)
elif requested_pgie == "nvinferserver-grpc" and config != None:
    pgie.set_property('config-file-path', config)
elif requested_pgie == "nvinfer" and config != None:
    pgie.set_property('config-file-path', config)
else:
    pgie.set_property('config-file-path', "dstest3_pgie_config.txt")
pgie_batch_size=pgie.get_property("batch-size")
if(pgie_batch_size != number_sources):
    print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
    pgie.set_property("batch-size",number_sources)
tiler_rows=int(math.sqrt(number_sources))
tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows))
tiler.set_property("rows",tiler_rows)
tiler.set_property("columns",tiler_columns)
tiler.set_property("width", TILED_OUTPUT_WIDTH)
tiler.set_property("height", TILED_OUTPUT_HEIGHT)
sink.set_property("qos",0)

print("Adding elements to Pipeline \n")
pipeline.add(pgie)
if nvdslogger:
    pipeline.add(nvdslogger)
pipeline.add(tiler)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
if transform:
    pipeline.add(transform)
pipeline.add(sink)
pipeline.add(nvvidconv_postosd)
pipeline.add(caps)
pipeline.add(encoder)
pipeline.add(rtppay)
pipeline.add(sink)


print("Linking elements in the Pipeline \n")
streammux.link(queue1)
queue1.link(pgie)
pgie.link(queue2)
if nvdslogger:
    queue2.link(nvdslogger)
    nvdslogger.link(tiler)
else:
    queue2.link(tiler)
tiler.link(queue3)
queue3.link(nvvidconv)
nvvidconv.link(queue4)
queue4.link(nvosd)
nvosd.link(nvvidconv_postosd)
nvvidconv_postosd.link(caps)
caps.link(encoder)
encoder.link(rtppay)
if transform:
    nvosd.link(queue5)
    queue5.link(transform)
    transform.link(sink) # rtpay
else:
    nvosd.link(queue5)
    queue5.link(sink)   

# create an event loop and feed gstreamer bus mesages to it
loop = GLib.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)

encoder_name = "H264"
rtsp_port_num = 8554

server = GstRtspServer.RTSPServer.new()
server.props.service = "%d" % rtsp_port_num
server.attach(None)

factory = GstRtspServer.RTSPMediaFactory.new()
factory.set_launch(
    "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (
    updsink_port_num, encoder_name))
factory.set_shared(True)
server.get_mount_points().add_factory("/ds-test", factory)

print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)

osdsinkpad = nvosd.get_static_pad("sink")
if not osdsinkpad:
    sys.stderr.write(" Unable to get sink pad of nvosd \n")

osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

pgie_src_pad=pgie.get_static_pad("src")
if not pgie_src_pad:
    sys.stderr.write(" Unable to get src pad \n")
else:
    if not disable_probe:
        pgie_src_pad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0)
        # perf callback function to print fps every 5 sec
        GLib.timeout_add(5000, perf_data.perf_print_callback)

# List the sources
print("Now playing...")
for i, source in enumerate(args):
    print(i, ": ", source)

print("Starting pipeline \n")
# start play back and listed to events		
pipeline.set_state(Gst.State.PLAYING)
try:
    loop.run()
except:
    pass
# cleanup
print("Exiting app\n")
pipeline.set_state(Gst.State.NULL)

def parse_args():

parser = argparse.ArgumentParser(prog="deepstream_test_3",
                description="deepstream-test3 multi stream, multi model inference reference app")
parser.add_argument(
    "-i",
    "--input",
    help="Path to input streams",
    nargs="+",
    metavar="URIs",
    default=["a"],
    required=True,
)
parser.add_argument(
    "-c",
    "--configfile",
    metavar="config_location.txt",
    default=None,
    help="Choose the config-file to be used with specified pgie",
)
parser.add_argument(
    "-g",
    "--pgie",
    default=None,
    help="Choose Primary GPU Inference Engine",
    choices=["nvinfer", "nvinferserver", "nvinferserver-grpc"],
)
parser.add_argument(
    "--no-display",
    action="store_true",
    default=False,
    dest='no_display',
    help="Disable display of video output",
)
parser.add_argument(
    "--file-loop",
    action="store_true",
    default=False,
    dest='file_loop',
    help="Loop the input file sources after EOS",
)
parser.add_argument(
    "--disable-probe",
    action="store_true",
    default=False,
    dest='disable_probe',
    help="Disable the probe function and use nvdslogger for FPS",
)
parser.add_argument(
    "-s",
    "--silent",
    action="store_true",
    default=False,
    dest='silent',
    help="Disable verbose output",
)
# Check input arguments
if len(sys.argv) == 1:
    parser.print_help(sys.stderr)
    sys.exit(1)
args = parser.parse_args()

stream_paths = args.input
pgie = args.pgie
config = args.configfile
disable_probe = args.disable_probe
global no_display
global silent
global file_loop
no_display = args.no_display
silent = args.silent
file_loop = args.file_loop

if config and not pgie or pgie and not config:
    sys.stderr.write ("\nEither pgie or configfile is missing. Please specify both! Exiting...\n\n\n\n")
    parser.print_help()
    sys.exit(1)
if config:
    config_path = Path(config)
    if not config_path.is_file():
        sys.stderr.write ("Specified config-file: %s doesn't exist. Exiting...\n\n" % config)
        sys.exit(1)

print(vars(args))
return stream_paths, pgie, config, disable_probe

if name == ‘main’:
stream_paths, pgie, config, disable_probe = parse_args()
sys.exit(main(stream_paths, pgie, config, disable_probe))

Can deepstream_test3 run before your change is applied? To be sure the environment is working.

Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU)
• DeepStream Version
• JetPack Version (valid for Jetson only)
• TensorRT Version
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type( questions, new requirements, bugs)
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)
• The pipeline being used

it was working fine!

1 Like

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.