No screen appears to display video with inferences

• Hardware Platform (Jetson / GPU) Jetson Orin Nano
• DeepStream Version 7.0
• JetPack Version (valid for Jetson only) 6.0
• TensorRT Version 8.6.2

I am trying to replicate this pipeline in python code but it doesn’t display a video with the inferences on my screen, the gst-launch pipeline does.

gst-launch-1.0 filesrc location={video_file} ! qtdemux ! h264parse ! nvv4l2decoder ! mux.sink_0 nvstreammux name=mux batch-size=1 width=1920 height=1080  ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.yml ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=RGBA' ! nvdsosd ! nv3dsink

This is the code

import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

def main():
    # Initialize GStreamer
    Gst.init(None)

    # Create the pipeline
    pipeline = Gst.Pipeline.new("my-pipeline")

    # Create elements
    source = Gst.ElementFactory.make("filesrc", "file-source")
    demuxer = Gst.ElementFactory.make("qtdemux", "qt-demuxer")
    parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nv-decoder")
    streammux = Gst.ElementFactory.make("nvstreammux", "nv-streammux")
    nvinfer = Gst.ElementFactory.make("nvinfer", "nv-infer")
    converter = Gst.ElementFactory.make("nvvideoconvert", "nv-converter")
    caps = Gst.Caps.from_string("video/x-raw(memory:NVMM),format=RGBA")
    capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
    osd = Gst.ElementFactory.make("nvdsosd", "nv-onscreendisplay")
    sink = Gst.ElementFactory.make("nv3dsink", "nvvideo-renderer")

    # Check if all elements were created successfully
    if not all([source, demuxer, parser, decoder, streammux, nvinfer, converter, capsfilter, osd, sink]):
        sys.stderr.write("Error: Could not create all elements.\n")
        sys.exit(1)

    # Set element properties
    source.set_property("location", ".{video_file}")
    streammux.set_property("batch-size", 1)
    streammux.set_property("width", 1920)
    streammux.set_property("height", 1080)
    nvinfer.set_property("config-file-path", "model_config.txt")
    capsfilter.set_property("caps", caps)

    # Add elements to the pipeline
    pipeline.add(source)
    pipeline.add(demuxer)
    pipeline.add(parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(nvinfer)
    pipeline.add(converter)
    pipeline.add(capsfilter)
    pipeline.add(osd)
    pipeline.add(sink)

    # Link elements
    source.link(demuxer)
    parser.link(decoder)
    decoder.link(streammux)
    streammux.link(nvinfer)
    nvinfer.link(converter)
    converter.link(capsfilter)
    capsfilter.link(osd)
    osd.link(sink)

    # Connect demuxer's pad-added signal
    demuxer.connect("pad-added", on_pad_added, parser)

    # Start playing
    ret = pipeline.set_state(Gst.State.PLAYING)
    if ret == Gst.StateChangeReturn.FAILURE:
        sys.stderr.write("Error: Unable to set the pipeline to the playing state.\n")
        sys.exit(1)

    # Run the main loop
    bus = pipeline.get_bus()
    msg = bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.ERROR | Gst.MessageType.EOS)

    # Free resources
    pipeline.set_state(Gst.State.NULL)

def on_pad_added(demuxer, pad, parser):
    pad_name = pad.get_name()
    if pad_name.startswith("video"):
        demuxer.link(parser)

if __name__ == "__main__":
    main()

Can you spot a problem with the code?or the lack of on screen display might be related to something else?

1.If you put a *.h264 as input, qtdemux is no need.

2.The nvstreammux can not link to nvv4l2decoder directly in source code, this is called request pad in gstreamer.
https://gstreamer.freedesktop.org/documentation/plugin-development/advanced/request.html?gi-language=c

sinkpad = streammux.request_pad_simple("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")

You can refer to /opt/nvidia/deepstream/deepstream/sources/deepstream_python_apps/apps/deepstream-test1/deepstream_test_1.py.

1 Like

I’ve adapted my code to follow the same logic as deepstream-test 1 and now I get this error.

I also tried to run deepstream-test 1 and got the same error.

Any idea on how to fix this and why I am getting this error?

Edit1: I’ve seen a similar post with a similar error but in that case, I believe it was not connected to a display. The Jetson I am using is connected to a display.
Edit2: I am running it with the same video from deepstream-test 1.

How do you run test1.py, can you share the command line?

Generally speaking, it should be like the following. Did you input an mp4 file? test1.py does not support processing mp4 files

python3 deepstream_test_1.py /opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264
1 Like

Oh, that’s why then. I was using .mp4 files. I was using the jupyter notebooks provided here GitHub - NVIDIA-AI-IOT/deepstream_python_apps: DeepStream SDK Python bindings and sample applications . These were the inputs that I as using:

How can I adapt it to mp4 files?
Edit1: If I can make it function with other types of sources like rtsp streams, it is even better

Try to use uridecodebin, deepstream_test_3.py is an example showing usage.

python3 deepstreamp_test_3.py -i "file:///xxxxx or rtsp:///"

I rewrote my code and somehow it worked. Not sure why it didn’t before. Maybe the demux isn’t necessary, might do some tests without it later. Here it is for reference.

import sys
import os
import gi
gi.require_version("Gst","1.0")
from gi.repository import GLib, Gst


def main(args):
    #Inicializa GStreamer
    Gst.init(None)

    #Cria pipeline
    pipeline = Gst.Pipeline()

    #Cria elementos comuns
    streammux = Gst.ElementFactory.make("nvstreammux","stream-muxer")
    nvinfer = Gst.ElementFactory.make("nvinfer","primary-inference")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert","convertor")
    nvosd = Gst.ElementFactory.make("nvdsosd","onscreendisp")
    #sink = Gst.ElementFactory.make("nv3dsink","nv3d-sink")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")


    if not all ([pipeline,streammux,nvinfer,nvvidconv,nvosd,sink]):
        sys.stderr.write("Nao foi possivel criar todos os elementos")
        return -1
    
    #Configura elementos

    streammux.set_property('width',1920)
    streammux.set_property('height',1080)
    streammux.set_property('batch-size',1)
    streammux.set_property('batched-push-timeout',4000000)

    nvinfer.set_property('config-file-path',"model_config.txt")

    pipeline.add(streammux)
    pipeline.add(nvinfer)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    
    source = Gst.ElementFactory.make("filesrc","file-source-0")
    demuxer = Gst.ElementFactory.make("qtdemux","demuxer-0")
    parser = Gst.ElementFactory.make("h264parse","parser-0")
    decoder = Gst.ElementFactory.make("nvv4l2decoder","nv-decoder-0")

    if not all ([source,demuxer,parser,decoder]):
        sys.stderr.write("Nao foi possivel criar elementos do video")
        return -1
    
    source.set_property('location',args[1])

    pipeline.add(source)
    pipeline.add(demuxer)
    pipeline.add(parser)
    pipeline.add(decoder)

    #Linkando elementos
    source.link(demuxer)
    demuxer.connect("pad-added",on_pad_added,parser)
    parser.link(decoder)

    sinkpad = streammux.get_request_pad(f"sink_0")
    srcpad = decoder.get_static_pad("src")
    srcpad.link(sinkpad)

    streammux.link(nvinfer)
    nvinfer.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(sink)

    ret = pipeline.set_state(Gst.State.PLAYING)
    if ret == Gst.StateChangeReturn.FAILURE:
        sys.stderr.write("Nao foi possivel iniciar pipeline")
        return -1
    
    bus = pipeline.get_bus()
    msg = bus.timed_pop_filtered(
        Gst.CLOCK_TIME_NONE,
        Gst.MessageType.ERROR | Gst.MessageType.EOS
    )

    # Free resources
    pipeline.set_state(Gst.State.NULL)


def on_pad_added(src,new_pad,data):
        
    if new_pad.get_current_caps().get_structure(0).get_name().startswith("video/"):
        sink_pad = data.get_static_pad("sink")
        if not sink_pad.is_linked():
            new_pad.link(sink_pad)

if __name__ == '__main__':
    if len(sys.argv) < 2:
        sys.stderr.write("Usage: python3 script_name.py <stream1> <stream2> ...\n")
        sys.exit(1)
    sys.exit(main(sys.argv))

Now I just need to put my Yolo model in it. Seems complicated to do it as I seem to have to create my own custom bbox parsers and plugins. I might open a new request for help in the forum later if I am unable to do it. Anyways, thanks for the help!

If you just want deepstream pipeline with yolo model. Please refer to this repository.

This is contributed by community users

1 Like

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.