Testing USB cam with the deepstream-test1-usbcam app without a pgie config file

Please provide complete information as applicable to your setup.

• Hardware Platform Jetson Nano A01
• DeepStream Version 6.0.1
• JetPack Version 4.6.1
• TensorRT Version 8.2.1.9
• Camera Module arducam IMX477

Hi guys,
I am trying to run deepstream-test1-usbcam on my USB cam, but without any pgie detector, so I have commented out everything related to dstest1_pgie_config.txt.

This is the modified code I am using:

import sys
sys.path.append('../')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call

import pyds

PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3


#def on_pad_added(decodebin, pad, target):
    #"""
    #Callback function to dynamically link pads created by decodebin.
    #"""
    #sink_pad = target.get_static_pad("sink")
    #if not sink_pad.is_linked():
        #pad.link(sink_pad)


def osd_sink_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
    }
    num_rects=0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj=frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try: 
                l_obj=l_obj.next
            except StopIteration:
                break

        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK        


def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)



    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    if not caps_v4l2src:
        sys.stderr.write(" Unable to create v4l2src capsfilter \n")

    #decodebin = Gst.ElementFactory.make("decodebin", "decoder")
    #if not decodebin:
        #sys.stderr.write(" Unable to create decodebin \n")

print("Creating Video Converter \n")

    # Adding videoconvert -> nvvideoconvert as not all
    # raw formats are supported by nvvideoconvert;
    # Say YUYV is unsupported - which is the common
    # raw format for many logi usb cams
    # In case we have a camera with raw format supported in
    # nvvideoconvert, GStreamer plugins' capability negotiation
    # shall be intelligent enough to reduce compute by
    # videoconvert doing passthrough (TODO we need to confirm this)


    # videoconvert to make sure a superset of raw formats are supported
    vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
    if not vidconvsrc:
        sys.stderr.write(" Unable to create videoconvert \n")

    # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    if not nvvidconvsrc:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")

    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_vidconvsrc:
        sys.stderr.write(" Unable to create capsfilter \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on camera's output,
    # behaviour of inferencing is set through config file
    #pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    #if not pgie:
        #sys.stderr.write(" Unable to create pgie \n")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    print("Playing cam %s " %args[1])
    caps_v4l2src.set_property('caps', Gst.Caps.from_string("image/jpeg, framerate=60/1"))
    caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("jpegdec(memory:NVMM)"))
    source.set_property('device', args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    #pgie.set_property('config-file-path', "dstest1_pgie_config.txt")
    # Set sync = false to avoid late frame drops at the display-sink
    sink.set_property('sync', False)

 print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(caps_v4l2src)
    #pipeline.add(decodebin)
    pipeline.add(vidconvsrc)
    pipeline.add(nvvidconvsrc)
    pipeline.add(caps_vidconvsrc)
    pipeline.add(streammux)
    #pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # we link the elements together
    # v4l2src -> capsfilter -> videoconvert -> nvvideoconvert ->
    # capsfilter -> streammux -> nvvideoconvert -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(caps_v4l2src)
    caps_v4l2src.link(vidconvsrc)
    #caps_v4l2src.link(decodebin)
    #decodebin.connect("pad-added", on_pad_added, vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    nvvidconvsrc.link(caps_vidconvsrc)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = caps_vidconvsrc.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
    srcpad.link(sinkpad)
    streammux.link(nvvidconv)
    #pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    # Let's add a probe to get informed of the metadata generated. We add the
    # probe to the sink pad of the osd element, since by that time the buffer
    # will have all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    sys.exit(main(sys.argv))

deepstream-test1-usbcam.txt (9.9 KB)

and I am getting the following error:

Creating Pipeline 
 
Creating Source 
 
Creating Video Converter 

Creating EGLSink 

Playing cam /dev/video0 
Adding elements to Pipeline 

Linking elements in the Pipeline 

Starting pipeline 

Error: gst-stream-error-quark: Internal data stream error. (1): gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:pipeline0/GstV4l2Src:usb-cam-source:
streaming stopped, reason not-negotiated (-4)

I have also tried changing the EGLSink element to fakesink, but the error persists.

You can check whether my modification is correct or not. The goal is just to run the USB cam through the DeepStream app.

Apart from that, I have also tested the USB cam with a separate GStreamer pipeline:
gst-launch-1.0 v4l2src device=/dev/video0 ! image/jpeg, width=1920, height=1080, framerate=60/1 ! jpegdec ! videoconvert ! autovideosink

and it was working fine.
(Maybe you can suggest some other DeepStream test apps, since the goal is just to run the USB cam on DeepStream.)

Please feel free to ask for any relevant information. Thanks.

You can refer to this FAQ.

You can try the following pipeline, which uses nvvideoconvert to copy the data to GPU memory:

gst-launch-1.0 v4l2src device=/dev/video0 ! image/jpeg, width=1920, height=1080, framerate=60/1 ! jpegdec ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=NV12' ! mux.sink_0 nvstreammux name=mux width=1280 height=720 batch-size=1 ! fakesink
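(As a side note, the same pipeline string can also be run from Python with Gst.parse_launch for quicker experimenting. Below is a minimal sketch with a bus watch added, so that errors such as not-negotiated get printed instead of the pipeline appearing to stall; everything outside the pipeline string itself is illustrative only.)

import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

# Pipeline string taken from the gst-launch command above (shell quoting removed).
pipeline = Gst.parse_launch(
    "v4l2src device=/dev/video0 ! image/jpeg, width=1920, height=1080, framerate=60/1 "
    "! jpegdec ! nvvideoconvert ! video/x-raw(memory:NVMM),format=NV12 "
    "! mux.sink_0 nvstreammux name=mux width=1280 height=720 batch-size=1 ! fakesink"
)

loop = GLib.MainLoop()

def on_message(bus, message):
    # Print errors (e.g. not-negotiated) and stop the loop.
    if message.type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s\n%s\n" % (err, debug))
        loop.quit()
    elif message.type == Gst.MessageType.EOS:
        loop.quit()

bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect("message", on_message)

pipeline.set_state(Gst.State.PLAYING)
try:
    loop.run()
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)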

With this pipeline you mentioned:
gst-launch-1.0 v4l2src device=/dev/video0 ! image/jpeg, width=1920, height=1080, framerate=60/1 ! jpegdec ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=NV12' ! mux.sink_0 nvstreammux name=mux width=1280 height=720 batch-size=1 ! fakesink

the pipeline seems to hang and never produces any output:

Setting pipeline to PAUSED ...
Pipeline is live and does not need PREROLL ...
Setting pipeline to PLAYING ...
New clock: GstSystemClock

I have also tried reducing the resolution to 1280x720, according to these supported formats of my camera:

ioctl: VIDIOC_ENUM_FMT
	Index       : 0
	Type        : Video Capture
	Pixel Format: 'MJPG' (compressed)
	Name        : Motion-JPEG
		Size: Discrete 1920x1080
			Interval: Discrete 0.017s (60.000 fps)
		Size: Discrete 4032x3040
			Interval: Discrete 0.100s (10.000 fps)
		Size: Discrete 3840x2160
			Interval: Discrete 0.050s (20.000 fps)
		Size: Discrete 2592x1944
			Interval: Discrete 0.033s (30.000 fps)
		Size: Discrete 2560x1440
			Interval: Discrete 0.033s (30.000 fps)
		Size: Discrete 1600x1200
			Interval: Discrete 0.020s (50.000 fps)
		Size: Discrete 1280x960
			Interval: Discrete 0.010s (100.000 fps)
		Size: Discrete 1280x720
			Interval: Discrete 0.010s (100.000 fps)
		Size: Discrete 640x480
			Interval: Discrete 0.013s (80.000 fps)
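(For reference, that listing comes from v4l2-ctl; assuming v4l-utils is installed and the camera is /dev/video0, it can also be dumped from Python:)

import subprocess

# Print the camera's supported formats, resolutions and frame intervals.
print(subprocess.check_output(
    ["v4l2-ctl", "--device=/dev/video0", "--list-formats-ext"],
    universal_newlines=True))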

But it is still not working, although it works fine with the following pipeline:

gst-launch-1.0 v4l2src device=/dev/video0 ! image/jpeg, width=1920, height=1080, framerate=60/1 ! jpegdec ! videoconvert ! autovideosink

What I want to know is how I can modify my DeepStream test script according to this pipeline so that it works.

1. Does deepstream_test_1.py work properly? Make sure DeepStream is installed normally.

2. If deepstream_test_1.py works fine: since your camera outputs JPEG, software decoding may be too slow. I am not sure whether the Jetson Nano can decode a 60 fps JPEG stream. Try nvjpegdec (a small frame-rate check sketch follows after this list):

gst-launch-1.0 v4l2src device=/dev/video0 !  "image/jpeg, width=1920, height=1080, framerate=60/1"  !  nvjpegdec  ! nvvideoconvert ! "video/x-raw(memory:NVMM),format=NV12"  ! mux.sink_0 nvstreammux name=mux width=1280 height=720 batch-size=1 ! fakesink

3. If deepstream_test_1.py cannot work, it is usually an installation problem with JetPack/DeepStream.
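(Not from the original thread: a minimal Python sketch to check what frame rate the decoder actually delivers, assuming the device path and caps from the commands above. It counts buffers arriving at the fakesink and prints an approximate fps once per second; swap nvjpegdec for jpegdec to compare with software decoding.)

import time
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

# Same pipeline as above, with the fakesink named so a probe can be attached.
PIPELINE = (
    "v4l2src device=/dev/video0 ! image/jpeg, width=1920, height=1080, framerate=60/1 "
    "! nvjpegdec ! nvvideoconvert ! video/x-raw(memory:NVMM),format=NV12 "
    "! mux.sink_0 nvstreammux name=mux width=1280 height=720 batch-size=1 "
    "! fakesink name=sink"
)

pipeline = Gst.parse_launch(PIPELINE)
state = {"count": 0, "start": None}

def count_buffers(pad, info, data):
    # Count buffers reaching the sink and report an approximate fps every second.
    now = time.monotonic()
    if data["start"] is None:
        data["start"] = now
    data["count"] += 1
    elapsed = now - data["start"]
    if elapsed >= 1.0:
        print("approx. %.1f fps" % (data["count"] / elapsed))
        data["count"], data["start"] = 0, now
    return Gst.PadProbeReturn.OK

sinkpad = pipeline.get_by_name("sink").get_static_pad("sink")
sinkpad.add_probe(Gst.PadProbeType.BUFFER, count_buffers, state)

pipeline.set_state(Gst.State.PLAYING)
loop = GLib.MainLoop()
try:
    loop.run()
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)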

Yeah, the deepstream test1, test2 and test3 sample apps are working absolutely fine (on the sample video). The issue is with the deepstream-test1-usbcam app: when I try to run it with the USB cam, it throws the error I mentioned above.

Furthermore, the pipeline you mentioned with nvjpegdec is working fine:

gst-launch-1.0 v4l2src device=/dev/video0 !  "image/jpeg, width=1920, height=1080, framerate=60/1"  !  nvjpegdec  ! nvvideoconvert ! "video/x-raw(memory:NVMM),format=NV12"  ! mux.sink_0 nvstreammux name=mux width=1280 height=720 batch-size=1 ! fakesink

(At first it didn't work; then I unplugged the USB cam and replugged it, and now it's working fine.)

However, on other runs of the same pipeline, I get the following output with an error:

Setting pipeline to PAUSED ...
Pipeline is live and does not need PREROLL ...
Setting pipeline to PLAYING ...
New clock: GstSystemClock
ERROR: from element /GstPipeline:pipeline0/GstV4l2Src:v4l2src0: Failed to allocate required memory.
Additional debug info:
gstv4l2src.c(658): gst_v4l2src_decide_allocation (): /GstPipeline:pipeline0/GstV4l2Src:v4l2src0:
Buffer pool activation failed
Execution ended after 0:00:00.760126944
Setting pipeline to PAUSED ...
Setting pipeline to READY ...
Setting pipeline to NULL ...
Freeing pipeline ...

You can modify the deepstream-test1-usbcam.py source code according to this pipeline and it should work fine.

I'm not sure if it's similar to this topic; you can refer to it.

I have tried the methods from the posts you mentioned, but I still get this memory allocation issue.
Also, I modified the DeepStream script for my USB cam, but it still doesn't work. I want to know whether it is correct:

    caps_v4l2src.set_property('caps', Gst.Caps.from_string("image/jpeg, width=1920, height=1080, framerate=60/1"))
    caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM), format=NV12"))
    source.set_property('device', args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

Following is the error I am getting:

Creating Pipeline 
 
Creating Source 
 
Creating Video Converter 

Creating EGLSink 

Playing cam /dev/video0 
Adding elements to Pipeline 

Linking elements in the Pipeline 

Starting pipeline 


Using winsys: x11 
Error: gst-stream-error-quark: Internal data stream error. (1): gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:pipeline0/GstV4l2Src:usb-cam-source:
streaming stopped, reason not-linked (-1)

The command line above runs correctly, so you need to modify the pipeline in the code to be the same as in the gst-launch-1.0 command line; a rough sketch of that change is shown below.
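(A minimal sketch of what that modification could look like, reusing the element variable names from the script earlier in the thread — source, caps_v4l2src, nvvidconvsrc, caps_vidconvsrc, pipeline — and adding a jpegdec element inside main(); this is only an illustration of the idea, not the contents of the attached file.)

    # Sketch only: add a JPEG decoder and give both capsfilters valid caps, so the
    # code-built pipeline matches the working gst-launch command.
    # (nvjpegdec can be tried instead of jpegdec on Jetson.)
    jpegdec = Gst.ElementFactory.make("jpegdec", "jpeg-decoder")
    if not jpegdec:
        sys.stderr.write(" Unable to create jpegdec \n")

    caps_v4l2src.set_property('caps',
        Gst.Caps.from_string("image/jpeg, width=1920, height=1080, framerate=60/1"))
    caps_vidconvsrc.set_property('caps',
        Gst.Caps.from_string("video/x-raw(memory:NVMM), format=NV12"))

    pipeline.add(jpegdec)

    # v4l2src -> capsfilter (MJPEG) -> jpegdec -> nvvideoconvert -> capsfilter (NVMM) -> streammux
    # (the plain videoconvert element is no longer needed, since jpegdec already
    # outputs raw video that nvvideoconvert accepts)
    source.link(caps_v4l2src)
    caps_v4l2src.link(jpegdec)
    jpegdec.link(nvvidconvsrc)
    nvvidconvsrc.link(caps_vidconvsrc)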

Try the following code.

deepstream-test1-usbcam.py (9.4 KB)


This script works fine with my USB camera. Thanks!

Also, about that memory allocation issue: I should mention that I am using a USB 2.0 camera connector. Is there any way I can optimize the pipeline (for a USB 2.0 cam) to avoid this error? (I can start a new topic instead if you prefer.)

You can open a new topic, but this problem is driver related. You can try to discuss it in the Jetson forum.

