Create RTSP sink in DeepStream 5.0 SDK Python apps

How do I create an RTSP sink in the DeepStream 5.0 SDK Python apps? Is there any example?
I am using a Jetson Nano.

Hi,
Please take a look at

This is my main function; with it I am able to create the RTSP sink. The only problem is that when I capture its RTSP stream with OpenCV on another PC, I see a cropped output. @DaneLLL

def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    # for i in range(0, len(args) - 1):
    #     fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create GStreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the h264 encoder
    encoder = Gst.ElementFactory.make("nvv4l2h264enc", "h264-encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', 4000000)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payload-encode video into RTP packets
    rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay-h264")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")

    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "dstest_imagedata_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    encoder_name = "H264"
    rtsp_port_num = 8554

    # Start an RTSP server that serves the RTP/UDP stream produced by udpsink
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)

    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (
            updsink_port_num, encoder_name))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)

    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)

    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        print("Got SIGINT")
    finally:
        print("Cleaning up")
        pipeline.set_state(Gst.State.NULL)
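
On the other PC the stream can be read with OpenCV along these lines (a minimal sketch: the host IP is a placeholder, the port and /ds-test mount point match the RTSP server configured above, and it assumes OpenCV was built with GStreamer or FFmpeg support):

# Minimal client-side sketch: reading the RTSP stream with OpenCV on another PC.
# The host IP below is a placeholder; 8554 and /ds-test come from the server above.
import cv2

cap = cv2.VideoCapture("rtsp://192.168.1.10:8554/ds-test")
if not cap.isOpened():
    raise RuntimeError("Unable to open RTSP stream")

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow("ds-test", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()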

Hi,
Do you observe the cropped output when running the default deepstream-test1-rtsp-out sample? We have verified the default sample in each release. You should be able to refer to it and do further debugging.


I am running the Python examples and can't find an RTSP-out Python example.

Hi,
You may refer to our sample:
deepstream_python_apps/deepstream_test1_rtsp_out.py at master · NVIDIA-AI-IOT/deepstream_python_apps · GitHub
and apply it to your Python code.

I want to take in an RTSP stream on the Jetson Nano. What should the RTSP link that I pass look like? My camera has a username and password. @DaneLLL
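
For context, a typical RTSP URI with credentials looks like the sketch below; the username, password, IP address, port, and stream path are placeholders and the path is camera-specific (check the camera's manual), as is the script name:

# Hypothetical example: typical RTSP URI format for a camera that requires
# authentication. Replace username, password, IP, port and path with the
# values for your camera.
camera_uri = "rtsp://username:password@192.168.1.64:554/stream1"

# The URI is then passed to the app as a command-line argument, e.g.
#   python3 my_deepstream_app.py "rtsp://username:password@192.168.1.64:554/stream1"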

Hi,
Do you mean decoding an RTSP source on the Jetson Nano?