• Hardware Platform (Jetson / GPU) Jetson Orin Nano
• DeepStream Version 7.0
• JetPack Version (valid for Jetson only) 6.0
• TensorRT Version 8.6.2
I am trying to replicate this pipeline in python code but it doesn’t display a video with the inferences on my screen, the gst-launch pipeline does.
gst-launch-1.0 filesrc location={video_file} ! qtdemux ! h264parse ! nvv4l2decoder ! mux.sink_0 nvstreammux name=mux batch-size=1 width=1920 height=1080 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.yml ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=RGBA' ! nvdsosd ! nv3dsink
This is the code
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib
def main():
# Initialize GStreamer
Gst.init(None)
# Create the pipeline
pipeline = Gst.Pipeline.new("my-pipeline")
# Create elements
source = Gst.ElementFactory.make("filesrc", "file-source")
demuxer = Gst.ElementFactory.make("qtdemux", "qt-demuxer")
parser = Gst.ElementFactory.make("h264parse", "h264-parser")
decoder = Gst.ElementFactory.make("nvv4l2decoder", "nv-decoder")
streammux = Gst.ElementFactory.make("nvstreammux", "nv-streammux")
nvinfer = Gst.ElementFactory.make("nvinfer", "nv-infer")
converter = Gst.ElementFactory.make("nvvideoconvert", "nv-converter")
caps = Gst.Caps.from_string("video/x-raw(memory:NVMM),format=RGBA")
capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
osd = Gst.ElementFactory.make("nvdsosd", "nv-onscreendisplay")
sink = Gst.ElementFactory.make("nv3dsink", "nvvideo-renderer")
# Check if all elements were created successfully
if not all([source, demuxer, parser, decoder, streammux, nvinfer, converter, capsfilter, osd, sink]):
sys.stderr.write("Error: Could not create all elements.\n")
sys.exit(1)
# Set element properties
source.set_property("location", ".{video_file}")
streammux.set_property("batch-size", 1)
streammux.set_property("width", 1920)
streammux.set_property("height", 1080)
nvinfer.set_property("config-file-path", "model_config.txt")
capsfilter.set_property("caps", caps)
# Add elements to the pipeline
pipeline.add(source)
pipeline.add(demuxer)
pipeline.add(parser)
pipeline.add(decoder)
pipeline.add(streammux)
pipeline.add(nvinfer)
pipeline.add(converter)
pipeline.add(capsfilter)
pipeline.add(osd)
pipeline.add(sink)
# Link elements
source.link(demuxer)
parser.link(decoder)
decoder.link(streammux)
streammux.link(nvinfer)
nvinfer.link(converter)
converter.link(capsfilter)
capsfilter.link(osd)
osd.link(sink)
# Connect demuxer's pad-added signal
demuxer.connect("pad-added", on_pad_added, parser)
# Start playing
ret = pipeline.set_state(Gst.State.PLAYING)
if ret == Gst.StateChangeReturn.FAILURE:
sys.stderr.write("Error: Unable to set the pipeline to the playing state.\n")
sys.exit(1)
# Run the main loop
bus = pipeline.get_bus()
msg = bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.ERROR | Gst.MessageType.EOS)
# Free resources
pipeline.set_state(Gst.State.NULL)
def on_pad_added(demuxer, pad, parser):
pad_name = pad.get_name()
if pad_name.startswith("video"):
demuxer.link(parser)
if __name__ == "__main__":
main()
Can you spot a problem with the code?or the lack of on screen display might be related to something else?