Question on a simple example for a webcam (python)

Happy New Year. I’m learning the Python API. I need to take a frame from the camera and access it so I can work with it via cv2 or pyds. I wrote a simple example, but I keep getting the error “Input buffer number of surfaces (0) must be equal to mux->num_surfaces_per_frame (1)”.

#!/usr/bin/env python


# gst-launch-1.0 nvarguscamerasrc sensor_id=0 ! \
#    'video/x-raw(memory:NVMM),width=3264, height=2464, framerate=21/1, format=NV12' ! \
#    nvvidconv flip-method=0 ! 'video/x-raw, width=3264, height=2464' ! \
#    nvvidconv ! nvegltransform ! nveglglessink


import sys, gi
sys.path.append("../")
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.bus_call import bus_call
from common.create_element_or_error import create_element_or_error
from common.FPS import GETFPS
import pyds


def osd_sink_pad_buffer_probe(pad, info, u_data):
    # Initialize the frame number to 0.
    frame_number = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={}".format(frame_number)

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def main():
    
    # Standard GStreamer initialization

    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()

    ############################################
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvideoconvert = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvideoconvert:
        sys.stderr.write(" Unable to create nvvideoconvert \n")
    
    nvstreammux = create_element_or_error("nvstreammux", "stream-muxer")
    nvstreammux.set_property('width', 3264)
    nvstreammux.set_property('height', 1848)
    nvstreammux.set_property('batched-push-timeout', 4000000)
    nvstreammux.set_property('batch-size', 1)
    nvstreammux.set_property('sync-inputs', 1)
    nvstreammux.set_property('num-surfaces-per-frame', 1)
    ############################################

    # Create Elements
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    caps = create_element_or_error("capsfilter", "source-caps")
    caps.set_property("caps", Gst.Caps.from_string("sensor_id=0"))


    convertor = create_element_or_error('nvvidconv', 'converter')
    caps = create_element_or_error("capsfilter", "convertor-caps")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), width=(int)3264, height=(int)1848, framerate=28/1, format=(string)NV12"))    
    
    
    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    if not nvvidconvsrc:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")


    convertors = create_element_or_error('nvvideoconvert', 'converters')
    capsConverter = create_element_or_error("capsfilter", "convertors-capsConverter")
    capsConverter.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), width=(int)3264, height=(int)1848, framerate=28/1, format=(string)RGBA"))


    videocrops  = create_element_or_error('videocrop', 'videocrops')
    videocrops.set_property('top', 100)
    videocrops.set_property('bottom', 100)
    videocrops.set_property('left', 50)
    videocrops.set_property('right', 150)



    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "egl-overlay")
    sink.set_property('sync', False)

    # Set Element Properties
    # source.set_property('sensor-id', 0)
    
    # Add elements to the pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(caps)
    pipeline.add(convertor)
    pipeline.add(capsConverter)
    pipeline.add(convertors)

    pipeline.add(nvvidconvsrc)

    pipeline.add(videocrops)

    pipeline.add(transform)
    pipeline.add(nvstreammux)
    pipeline.add(nvvideoconvert)
    pipeline.add(nvosd)

    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(caps)
    caps.link(convertor)
    convertor.link(capsConverter)
    capsConverter.link(convertors)
    #convertors.link(videocrop)
    convertors.link(nvvidconvsrc)

    # sinkpad = nvstreammux.get_request_pad("sink_0")
    # if not sinkpad:
    #      sys.stderr.write(" Unable to get the sink pad of streammux \n")
    # srcpad = nvvidconvsrc.get_static_pad("src")
    # if not srcpad:
    #      sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
    # srcpad.link(sinkpad)
    # nvstreammux.link(nvosd)

    nvvidconvsrc.link(nvosd)

    #nvvideoconvert.link(transform)
    nvosd.link(transform)
    transform.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    #osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass


    # Cleanup
    pipeline.set_state(Gst.State.NULL)

if __name__ == "__main__":
    sys.exit(main())

• Hardware Platform: Jetson
• DeepStream Version: 6.0
• JetPack Version: 4.6
• Issue Type: questions

Can you refer to the example below?

deepstream_python_apps/deepstream_test_1_usb.py at master · NVIDIA-AI-IOT/deepstream_python_apps · GitHub
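For reference, the relevant part of that example is roughly the pattern below: the capture branch is attached to a request sink pad of nvstreammux, and nvdsosd sits downstream of the mux, so the buffers reaching the OSD probe carry batch metadata. This is only a sketch with placeholder element variables (streammux, camera_convert, nvvideoconvert, nvosd), not a drop-in fix, and the resolution values are illustrative.

# Sketch of the streammux linking pattern (placeholder element names)
streammux = Gst.ElementFactory.make("nvstreammux", "stream-muxer")
streammux.set_property('width', 1280)
streammux.set_property('height', 720)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
pipeline.add(streammux)

# The upstream converter must feed a *request* sink pad of the muxer
sinkpad = streammux.get_request_pad("sink_0")
if not sinkpad:
    sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = camera_convert.get_static_pad("src")
if not srcpad:
    sys.stderr.write(" Unable to get source pad of camera_convert \n")
srcpad.link(sinkpad)

# Everything that relies on NvDsBatchMeta (nvdsosd and the pyds probe)
# goes after the muxer output
streammux.link(nvvideoconvert)
nvvideoconvert.link(nvosd)

With the muxer in the path like this, the OSD probe receives batched NVMM buffers, which is what pyds.gst_buffer_get_nvds_batch_meta() expects.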


Thank you for your reply. Can you tell me whether it is possible to insert a stub without a model into dstest1_pgie_config.txt? I am currently having problems with the timing of the models on the Xavier NX.

No.
