Deepstream test app 3, python, with usb camera

Hi all,

I was able to use my usb camera with the deepstream-test-app-1-usbcamera.py and have no issues. I wanted to try and create this with multiple camera streams and have modified the test app 3 by removing the uridecodebin (with the other 2 functions) to add the following:

def create_source_bin(index, uri):
    """Create a GstBin wrapping a raw-format V4L2 USB camera source.

    Chain inside the bin:
        v4l2src -> capsfilter (raw YUY2) -> videoconvert ->
        nvvideoconvert -> capsfilter (NVMM) -> ghost "src" pad

    Args:
        index: stream index, used to name the bin "source-bin-NN".
        uri:   V4L2 device node, e.g. "/dev/video0".

    Returns:
        The populated Gst.Bin, or None on any failure.
    """
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")
        return None

    usb_cam_source = Gst.ElementFactory.make("v4l2src", "source")
    if not usb_cam_source:
        sys.stderr.write(" Unable to create v4l2src \n")
        return None
    usb_cam_source.set_property("device", uri)

    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    if not caps_v4l2src:
        sys.stderr.write(" Unable to create v4l2src capsfilter \n")
        return None

    vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
    if not vidconvsrc:
        sys.stderr.write(" Unable to create videoconvert \n")
        return None

    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    if not nvvidconvsrc:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")
        return None

    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_vidconvsrc:
        sys.stderr.write(" Unable to create capsfilter \n")
        return None

    # BUG FIX: GStreamer's name for the V4L2 'YUYV' fourcc is "YUY2".
    # "format=YUYV" is not a valid video/x-raw format string, so caps
    # negotiation fails between v4l2src and the capsfilter, which is what
    # produces "streaming stopped, reason not-linked (-1)" at runtime.
    caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1,format=YUY2"))
    caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))

    print('adding element to source bin')
    Gst.Bin.add(nbin, usb_cam_source)
    Gst.Bin.add(nbin, caps_v4l2src)
    Gst.Bin.add(nbin, vidconvsrc)
    Gst.Bin.add(nbin, nvvidconvsrc)
    Gst.Bin.add(nbin, caps_vidconvsrc)

    print('linking elements in source bin')
    # Check every link result; a silently ignored link failure surfaces
    # later only as an opaque "not-linked" stream error.
    if not usb_cam_source.link(caps_v4l2src):
        sys.stderr.write(" Unable to link v4l2src to capsfilter \n")
        return None
    if not caps_v4l2src.link(vidconvsrc):
        sys.stderr.write(" Unable to link capsfilter to videoconvert \n")
        return None
    if not vidconvsrc.link(nvvidconvsrc):
        sys.stderr.write(" Unable to link videoconvert to nvvideoconvert \n")
        return None
    if not nvvidconvsrc.link(caps_vidconvsrc):
        sys.stderr.write(" Unable to link nvvideoconvert to capsfilter \n")
        return None

    # Expose the final element's src pad as the bin's "src" ghost pad so the
    # caller can link this bin straight into nvstreammux.
    pad = caps_vidconvsrc.get_static_pad("src")
    ghostpad = Gst.GhostPad.new("src", pad)
    bin_pad = nbin.add_pad(ghostpad)
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin

I have left the rest of the code within the test app3 the same. The issue that keeps popping up when I try and run this is this:

Error: gst-stream-error-quark: Internal data stream error. (1): gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:pipeline0/GstBin:source-bin-00/GstV4l2Src:source:
streaming stopped, reason not-linked (-1)

I’m not sure at all what I am doing wrong. If someone is able to explain to me what I am doing wrong, it would be much appreciated.

Thank you

Please provide complete information as applicable to your setup.

• Jetson Xavier NX dev kit
• DeepStream 5.0
• JetPack 4.4 (valid for Jetson only)

Can single camera work with your codes?

The error means the element link failed. Have you tried the “gst-launch-1.0” command before using your Python app?

Hi Fiona,
Thank you for replying. I’ve tried using one camera and it doesn’t work, and I’m not sure why. However, I’ve used a similar pipeline with another USB camera (MJPEG format), which works if I exclude the initial caps filter on the source. The issue now is that the output frame rate is very, very low, and I’m not quite sure why.

Do you know the USB camera features? The output resolution, the output formats,…

Hi Fiona,

Thank you for replying. The following is the current USB camera I’m using.

 ioctl: VIDIOC_ENUM_FMT
    	Index       : 0
    	Type        : Video Capture
    	Pixel Format: 'MJPG' (compressed)
    	Name        : Motion-JPEG
    		Size: Discrete 640x360
    			Interval: Discrete 0.004s (260.004 fps)
    		Size: Discrete 1280x720
    			Interval: Discrete 0.008s (120.000 fps)
    		Size: Discrete 1920x1080
    			Interval: Discrete 0.017s (60.000 fps)

I’ve modified the deepstream-test3 app’s create_source_bin function into the following:

    def create_source_bin(index, uri):
        """Create a GstBin wrapping an MJPEG V4L2 USB camera source.

        Chain inside the bin:
            v4l2src -> jpegdec -> nvvideoconvert ->
            capsfilter (NVMM, NV12) -> ghost "src" pad

        NOTE(review): jpegdec is a CPU (software) JPEG decoder; this is the
        likely cause of the low output frame rate. Jetson provides a
        hardware-accelerated decoder — consider replacing jpegdec with it
        (see the DeepStream plugin guide).

        Args:
            index: stream index, used to name the bin "source-bin-NN".
            uri:   V4L2 device node, e.g. "/dev/video0".

        Returns:
            The populated Gst.Bin, or None on any failure.
        """
        print("Creating source bin")

        # Create a source GstBin to abstract this bin's content from the rest of the
        # pipeline
        bin_name = "source-bin-%02d" % index
        print(bin_name)
        nbin = Gst.Bin.new(bin_name)
        if not nbin:
            sys.stderr.write(" Unable to create source bin \n")
            return None

        usb_cam_source = Gst.ElementFactory.make("v4l2src", "source")
        if not usb_cam_source:
            sys.stderr.write(" Unable to create v4l2src \n")
            return None
        usb_cam_source.set_property("device", uri)

        jpegDec = Gst.ElementFactory.make("jpegdec", "convertor_jpeg")
        if not jpegDec:
            sys.stderr.write(" Unable to create jpegDec \n")
            return None

        nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
        if not nvvidconvsrc:
            sys.stderr.write(" Unable to create Nvvideoconvert \n")
            return None

        caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
        if not caps_vidconvsrc:
            sys.stderr.write(" Unable to create capsfilter \n")
            return None

        caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM), format=NV12"))

        print('adding element to source bin')
        Gst.Bin.add(nbin, usb_cam_source)
        Gst.Bin.add(nbin, jpegDec)
        Gst.Bin.add(nbin, nvvidconvsrc)
        Gst.Bin.add(nbin, caps_vidconvsrc)

        print('linking elements in source bin')
        # Check every link result so a caps/negotiation problem is reported
        # here instead of surfacing later as "not-linked".
        if not usb_cam_source.link(jpegDec):
            sys.stderr.write(" Unable to link v4l2src to jpegdec \n")
            return None
        if not jpegDec.link(nvvidconvsrc):
            sys.stderr.write(" Unable to link jpegdec to nvvideoconvert \n")
            return None
        if not nvvidconvsrc.link(caps_vidconvsrc):
            sys.stderr.write(" Unable to link nvvideoconvert to capsfilter \n")
            return None

        # Expose the final element's src pad as the bin's "src" ghost pad so
        # the caller can link this bin straight into nvstreammux.
        pad = caps_vidconvsrc.get_static_pad("src")
        ghostpad = Gst.GhostPad.new("src", pad)
        bin_pad = nbin.add_pad(ghostpad)
        if not bin_pad:
            sys.stderr.write(" Failed to add ghost pad in source bin \n")
            return None
        return nbin

For reference here is the code for setting up the pipelines and the linking for the app:

def main(args):
    """Build and link the multi-source DeepStream pipeline.

    args is sys.argv-style: args[0] is the program name, args[1:] are the
    V4L2 device nodes (one source bin is created per device).

    Pipeline: source bins -> nvstreammux -> nvinfer -> nvmultistreamtiler
    -> nvvideoconvert -> nvdsosd -> (nvegltransform on aarch64) ->
    nveglglessink, with a queue between each stage.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    # One FPS counter per stream (GETFPS is defined elsewhere in the app).
    for i in range(0,len(args)-1):
        fps_streams["stream{0}".format(i)]=GETFPS(i)
    number_sources=len(args)-1

    # Standard GStreamer initialization
    # NOTE(review): GObject.threads_init() has been a no-op/deprecated in
    # recent PyGObject versions — confirm it is still needed here.
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    # Create one source bin per device and request a matching sink pad on
    # the muxer (sink_0, sink_1, ...).
    for i in range(number_sources):
        print("Creating source_bin ",i," \n ")
        usb_cam=args[i+1]
        # if uri_name.find("rtsp://") == 0 :
        #     is_live = True
        source_bin=create_source_bin(i, usb_cam)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname="sink_%u" %i
        sinkpad= streammux.get_request_pad(padname) 
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad=source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        # NOTE(review): the result of this pad link is not checked — a
        # failure here would only show up later as a stream error.
        srcpad.link(sinkpad)
    # One queue between each downstream stage to decouple the elements.
    queue1=Gst.ElementFactory.make("queue","queue1")
    queue2=Gst.ElementFactory.make("queue","queue2")
    queue3=Gst.ElementFactory.make("queue","queue3")
    queue4=Gst.ElementFactory.make("queue","queue4")
    queue5=Gst.ElementFactory.make("queue","queue5")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    print("Creating Pgie \n ")
    # Primary inference engine.
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Creating tiler \n ")
    # Composites the batched streams into a single tiled frame.
    tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    # On-screen display for bounding boxes / labels.
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvosd.set_property('process-mode',OSD_PROCESS_MODE)
    nvosd.set_property('display-text',OSD_DISPLAY_TEXT)
    # On Jetson (aarch64) the EGL sink needs an nvegltransform in front.
    if(is_aarch64()):
        print("Creating transform \n ")
        transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    # if is_live:
    #     print("Atleast one of the sources is live")
    #     streammux.set_property('live-source', 1)

    # Muxer output resolution and batching; batched-push-timeout is in
    # microseconds (4 s here).
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest3_pgie_config.txt")
    # Keep the inference batch size in sync with the number of sources,
    # overriding whatever the config file specified.
    pgie_batch_size=pgie.get_property("batch-size")
    if(pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
        pgie.set_property("batch-size",number_sources)
    # Arrange the tiler grid as close to square as possible.
    tiler_rows=int(math.sqrt(number_sources))
    tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows))
    tiler.set_property("rows",tiler_rows)
    tiler.set_property("columns",tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos",0)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    # streammux -> pgie -> tiler -> nvvidconv -> nvosd -> (transform) -> sink,
    # with a queue between each pair.
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tiler)
    tiler.link(queue3)
    queue3.link(nvvidconv)
    nvvidconv.link(queue4)
    queue4.link(nvosd)
    if is_aarch64():
        nvosd.link(queue5)
        queue5.link(transform)
        transform.link(sink)
    else:
        nvosd.link(queue5)
        queue5.link(sink)   
I think it’s more or less the same as the deepstream-test3 app. This whole pipeline works for the camera I’m using; however, I’m not sure why it gives such a low frame rate.

Thank you for your help.

1 Like

jpegdec is a software decoder; it will be relatively slow compared to a hardware-accelerated decoder.
Jetson has a hardware-accelerated decoder.


https://docs.nvidia.com/metropolis/deepstream/dev-guide/index.html#page/DeepStream%20Plugins%20Development%20Guide/deepstream_plugin_details.html#wwpID0E0GR0HA