Adapting DeepStream Python Tutorial for Raspberry Pi Camera on Jetson Nano

• Hardware Platform (Jetson / GPU) : Jetson nano
• DeepStream Version : 6.0
• JetPack Version (valid for Jetson only) : 4.6

Hello!
I’m working on adapting the tutorial Building Video AI Applications at the Edge on Jetson Nano to my Raspberry Pi camera. I managed to have a stream working with a source (nvarguscamerasrc), a video converter (nvvideoconvert) and an overlay (nvoverlaysink).

But once I add a muxer (nvstreammux) between my video converter and my inference element (nvinfer), I always get the following error:
Error: gst-stream-error-quark: Input buffer number of surfaces (0) must be equal to mux->num_surfaces_per_frame (1)
Set nvstreammux property num-surfaces-per-frame appropriately

I put my current code down there :

#!/usr/bin/env python3

import argparse
import sys

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst

from common.bus_call import bus_call

def main(args):
    """Build and run a DeepStream pipeline for a CSI (Raspberry Pi) camera.

    Pipeline: nvarguscamerasrc -> capsfilter (fixed NVMM mode) ->
    nvvideoconvert -> nvstreammux -> nvinfer -> nvvideoconvert ->
    nvdsosd -> nveglglessink.

    Args:
        args: command-line arguments (unused beyond script convention).

    Returns:
        None on a clean shutdown (sys.exit maps it to status 0),
        1 when an element could not be created or linked.
    """
    # Standard GStreamer initialization.  GObject.threads_init() is a no-op
    # on recent PyGObject but is kept for the older JetPack 4.x stack.
    print("Initializing GStreamer...\n")
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline...\n")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write("Unable to create Pipeline\n")
        return 1

    # Source element reading from the CSI camera via the Argus API.
    print("Creating Source...\n")
    source = Gst.ElementFactory.make("nvarguscamerasrc", "camera-source")
    if not source:
        sys.stderr.write("Unable to create Source\n")
        return 1
    # Sensor mode index of the camera module — TODO confirm mode 4 exists
    # for this sensor (list modes with `gst-inspect-1.0 nvarguscamerasrc`).
    source.set_property("sensor-mode", 4)

    # Pin the camera output to one fixed NVMM mode.  Without this caps
    # filter nvarguscamerasrc can negotiate a layout nvstreammux cannot
    # batch, which surfaces as:
    #   "Input buffer number of surfaces (0) must be equal to
    #    mux->num_surfaces_per_frame (1)"
    print("Creating capsfilter...\n")
    caps_filter = Gst.ElementFactory.make("capsfilter", "src-caps")
    if not caps_filter:
        sys.stderr.write("Unable to create capsfilter\n")
        return 1
    caps_filter.set_property(
        "caps",
        Gst.Caps.from_string(
            "video/x-raw(memory:NVMM), width=1280, height=720, "
            "framerate=30/1, format=NV12"))

    # Converter between the camera and the muxer.
    print("Creating nvvideoconvert (pre_nvvidconv)...\n")
    pre_nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not pre_nvvidconv:
        sys.stderr.write("Unable to create pre_nvvidconv\n")
        return 1

    # nvstreammux forms batches from one or more sources.
    print("Creating nvstreammux...\n")
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write("Unable to create NvStreamMux\n")
        return 1

    # Primary inference engine.
    print("Creating pgie...\n")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
        return 1  # was missing: continuing with pgie=None crashed later

    # Converter from NV12 to RGBA as required by nvdsosd.
    print("Creating nvvideoconvert (post_nvvidconv)...\n")
    post_nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor-1")
    if not post_nvvidconv:
        sys.stderr.write("Unable to create post_nvvidconv\n")
        return 1

    # On-screen display draws detection boxes/labels on the RGBA buffer.
    print("Creating nvdsosd (nvosd)...\n")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write("Unable to create nvosd\n")
        return 1

    # EGL sink renders the final annotated video.
    print("Creating EGLSink...\n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write("Unable to create egl sink\n")
        return 1

    # Streammux geometry must match the caps filter above.
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', 1)
    streammux.set_property('num-surfaces-per-frame', 1)  # Ensure it matches input
    streammux.set_property('batched-push-timeout', 4000000)
    streammux.set_property('live-source', 1)  # camera is a live source
    print("Streammux properties set: width=1280, height=720, batch-size=1, batched-push-timeout=4000000\n")

    pgie.set_property('config-file-path', "model_config.txt")

    # Set sync = false to avoid late frame drops at the display-sink.
    sink.set_property('sync', False)
    print("Sink sync property set to False\n")

    # Add elements to Pipeline.
    print("Adding elements to Pipeline...\n")
    for element in (source, caps_filter, pre_nvvidconv, streammux, pgie,
                    post_nvvidconv, nvosd, sink):
        pipeline.add(element)

    print("Linking elements in the Pipeline...\n")
    source.link(caps_filter)
    caps_filter.link(pre_nvvidconv)

    # nvstreammux sink pads are request pads named sink_%u; link
    # pre_nvvidconv's static src pad to sink_0.  Bail out on failure —
    # the original fell through and crashed on a None pad.
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write("Unable to get the sink pad of streammux\n")
        return 1
    print("Sink pad of streammux obtained\n")

    srcpad = pre_nvvidconv.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to get source pad of pre_nvvidconv\n")
        return 1

    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(post_nvvidconv)
    post_nvvidconv.link(nvosd)
    nvosd.link(sink)

    # Create an event loop and feed GStreamer bus messages to it so that
    # bus_call can stop the loop on ERROR / EOS.
    print("Starting event loop...\n")
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start playback and listen to events; always tear the pipeline down.
    print("Starting pipeline...\n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        pass  # Ctrl-C is a normal way to stop a live preview
    finally:
        pipeline.set_state(Gst.State.NULL)

# Script entry point.  (The stray markdown fence that had been fused onto
# the sys.exit line — a copy/paste artifact — made this a syntax error.)
if __name__ == '__main__':
    sys.exit(main(sys.argv))

This error seems to be caused by nvarguscamerasrc not outputting images. Please refer to this FAQ and adjust the Python code according to the gst-launch-1.0 pipeline.

There has been no update from you for a while, so we assume this is no longer an issue and are closing this topic. If you need further support, please open a new one. Thanks

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.