Unable to run inference pipeline using csi camera source in deepstream python app

Hi,
I can read directly from camera(gstreamer pipeline in command line), but am unable to run inference pipeline using deepstream python test 2 app, sample deepstream python test app 2 located here https://github.com/NVIDIA-AI-IOT/deepstream_python_apps/blob/master/apps/deepstream-test2/deepstream_test_2.py.

I am using a custom pgie. The sample pgie config file was “dstest2_pgie_config.txt”. I am using nvarguscamerasrc to read from the CSI camera.

Gstreamer pipeline to test camera that works:

gst-launch-1.0 nvarguscamerasrc ! 'video/x-raw(memory:NVMM),width=1280, height=720, framerate=120/1, format=NV12' ! nvvidconv flip-method=0 ! 'video/x-raw,width=640, height=480' ! nvvidconv ! nvegltransform ! nveglglessink -e

The modified main function is as follows:

def main():
    """Build and run a DeepStream inference pipeline fed by a CSI camera.

    Pipeline: nvarguscamerasrc -> capsfilter (NVMM/NV12) -> nvvideoconvert
    -> capsfilter (NVMM, scaled) -> nvstreammux -> nvinfer -> nvvideoconvert
    -> nvdsosd -> (nvegltransform on aarch64) -> nveglglessink.

    Blocks in a GLib main loop until interrupted, then tears the pipeline down.
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element reading from the CSI camera via Argus
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("nvarguscamerasrc", "pi-cam-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    # Caps selecting the sensor mode (resolution / framerate / format)
    caps_picamsrc = Gst.ElementFactory.make("capsfilter", "picamsrc_caps")
    if not caps_picamsrc:
        sys.stderr.write(" Unable to create picamsrc capsfilter \n")

    print("Creating Video Converter \n")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")

    # Caps for the scaled output fed into the stream muxer
    caps_nvvidconv1 = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_nvvidconv1:
        sys.stderr.write(" Unable to create capsfilter \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on camera's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2")
    if not nvvidconv2:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output; EGL transform is needed on Jetson only
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    # bufapi-version=True makes nvarguscamerasrc emit DeepStream-compatible
    # NVMM buffers; without it nvstreammux rejects the stream
    # ("Internal data stream error" / "does not support raw buffers").
    source.set_property('bufapi-version', True)

    caps_picamsrc.set_property('caps', Gst.Caps.from_string('video/x-raw(memory:NVMM),width=1280, height=720, framerate=120/1, format=NV12'))
    # The scaled caps must stay in NVMM memory: nvstreammux cannot consume
    # raw system-memory buffers, so 'video/x-raw' here would fail at runtime.
    caps_nvvidconv1.set_property('caps', Gst.Caps.from_string('video/x-raw(memory:NVMM),width=640, height=480'))

    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "detectnetv2_resnet10.txt")
    # Set sync = false to avoid late frame drops at the display-sink
    sink.set_property('sync', False)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(caps_picamsrc)
    pipeline.add(nvvidconv1)
    pipeline.add(caps_nvvidconv1)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv2)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    print("Linking elements in the Pipeline \n")
    source.link(caps_picamsrc)
    caps_picamsrc.link(nvvidconv1)
    nvvidconv1.link(caps_nvvidconv1)
    # nvstreammux exposes request sink pads (sink_%u); link the converter's
    # src pad to sink_0 explicitly.
    sinkpad = streammux.get_request_pad("sink_0")
    srcpad = caps_nvvidconv1.get_static_pad("src")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv2)
    nvvidconv2.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Probe on nvosd's sink pad: by then buffers carry the full metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # Ctrl-C: fall through to cleanup.
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)

The error I receive is as follows:

Creating Pipeline
Creating Source
Creating Video Converter
Creating EGLSink
Adding elements to Pipeline
Linking elements in the Pipeline
Starting pipeline
Using winsys: x11
Creating LL OSD context new
Creating LL OSD context new
GST_ARGUS: Creating output stream
CONSUMER: Waiting until producer is connected...
GST_ARGUS: Available Sensor modes :
GST_ARGUS: 3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 3264 x 1848 FR = 28.000001 fps Duration = 35714284 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1920 x 1080 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1280 x 720 FR = 59.999999 fps Duration = 16666667 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;
GST_ARGUS: Running with following settings:
   Camera index = 0
   Camera mode  = 4
   Output Stream W = 1280 H = 720
   seconds to Run    = 0
   Frame Rate = 120.000005
GST_ARGUS: PowerService: requested_clock_Hz=24192000
GST_ARGUS: Setup Complete, Starting captures for 0 seconds
GST_ARGUS: Starting repeat capture requests.
CONSUMER: Producer has connected; continuing.
Error: gst-stream-error-quark: Internal data stream error. (1): gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:pipeline0/GstNvArgusCameraSrc:pi-cam-source:
streaming stopped, reason error (-5)
GST_ARGUS: Cleaning up
(python3:19692): GStreamer-CRITICAL **: 18:53:09.834: gst_mini_object_set_qdata: assertion 'object != NULL' failed
CONSUMER: Done Success
GST_ARGUS: Done Success
GST_ARGUS:
PowerServiceHwVic::cleanupResources
1 Like

Hi
Sorry for late reply!
Could you run “export GST_DEBUG=*:4” before running your application to capture more debug log?
Another experiment you could try to narrow down the issue, add “fakesink” after caps_picamsrc, nvvidconv1 … nvosd respectively to find out which plugin cause this issue.

Thanks!

Below is a working main function. Please note that image flip is currently not supported. The nvvidconv plugin is deprecated and not fully compatible with other deepstream plugins due to buffer format differences. This will be addressed in an upcoming release of the SDK.

def main(args):
    """Build and run the deepstream-test2 pipeline from a CSI camera source.

    Pipeline: nvarguscamerasrc -> nvvideoconvert -> capsfilter (NVMM)
    -> nvstreammux -> pgie -> nvtracker -> sgie1 -> sgie2 -> sgie3
    -> nvvideoconvert -> nvdsosd -> (nvegltransform on aarch64)
    -> nveglglessink.

    Note: image flip is currently not supported by this pipeline.
    Blocks in a GLib main loop until interrupted, then tears the pipeline down.
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element reading from the CSI camera via Argus
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("nvarguscamerasrc", "src-elem")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    # Converter to scale the image
    nvvidconv_src = Gst.ElementFactory.make("nvvideoconvert", "convertor_src")
    if not nvvidconv_src:
        sys.stderr.write(" Unable to create nvvidconv_src \n")

    # Caps for NVMM and resolution scaling
    caps_nvvidconv_src = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_nvvidconv_src:
        sys.stderr.write(" Unable to create capsfilter \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie1:
        sys.stderr.write(" Unable to make sgie1 \n")

    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    # Bug fix: this previously re-checked sgie1, so a failed sgie2 creation
    # was never reported.
    if not sgie2:
        sys.stderr.write(" Unable to make sgie2 \n")

    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    if not sgie3:
        sys.stderr.write(" Unable to make sgie3 \n")

    # Converter to RGBA as required by nvdsosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output; EGL transform is needed on Jetson only
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    # bufapi-version=True makes nvarguscamerasrc emit DeepStream-compatible
    # NVMM buffers that nvstreammux can consume.
    source.set_property('bufapi-version', True)

    caps_nvvidconv_src.set_property('caps', Gst.Caps.from_string('video/x-raw(memory:NVMM), width=1280, height=720'))

    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    # Set properties of pgie and sgie
    pgie.set_property('config-file-path', "dstest2_pgie_config.txt")
    sgie1.set_property('config-file-path', "dstest2_sgie1_config.txt")
    sgie2.set_property('config-file-path', "dstest2_sgie2_config.txt")
    sgie3.set_property('config-file-path', "dstest2_sgie3_config.txt")

    # Set properties of tracker from its config file. Only known keys are
    # forwarded; each maps to the configparser getter matching its type.
    # (GLib canonicalizes '_' and '-' in property names, so the dashed
    # names below are equivalent to the underscored ones.)
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')

    tracker_getters = {
        'tracker-width': config.getint,
        'tracker-height': config.getint,
        'gpu-id': config.getint,
        'll-lib-file': config.get,
        'll-config-file': config.get,
        'enable-batch-process': config.getint,
    }
    for key in config['tracker']:
        getter = tracker_getters.get(key)
        if getter is not None:
            tracker.set_property(key, getter('tracker', key))

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(nvvidconv_src)
    pipeline.add(caps_nvvidconv_src)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie1)
    pipeline.add(sgie2)
    pipeline.add(sgie3)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # we link the elements together
    print("Linking elements in the Pipeline \n")
    source.link(nvvidconv_src)
    nvvidconv_src.link(caps_nvvidconv_src)

    # nvstreammux exposes request sink pads (sink_%u); link explicitly.
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = caps_nvvidconv_src.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of source \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie1)
    sgie1.link(sgie2)
    sgie2.link(sgie3)
    sgie3.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Lets add probe to get informed of the meta data generated, we add probe to
    # the sink pad of the osd element, since by that time, the buffer would have
    # had got all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")

    # start play back and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # Ctrl-C: fall through to cleanup.
        pass

    # cleanup
    pipeline.set_state(Gst.State.NULL)
2 Likes

Thank you so much zhliunycm2 it really helped me.

@pritam, glad we can help. Thanks for using DeepStream.

Hey! Could you please help regarding the CSI camera with DeepStream? I am unable to flip the camera image; it is upside down. I know that when we test the camera from the terminal we just add ‘flip-method=0 !’. Do you know how to add this feature in the DeepStream Python app? Thanks!

Hi 4ever_fun, flip is still not supported in DS at this moment. We will add it in a future release.

@zhliunycm2, I have tested your code on a Jetson Nano with an IMX219 camera. The problem is very serious frame drop. When I use the gst-launch-1.0 command, it works perfectly. I will open a separate issue about this problem.

Creating Pipeline

Creating Source

Creating Video Converter

Creating EGLSink

Unknown or legacy key specified ‘is-classifier’ for group [property]
Warn: ‘threshold’ parameter has been deprecated. Use ‘pre-cluster-threshold’ instead.
Adding elements to Pipeline

Linking elements in the Pipeline

Starting pipeline

Using winsys: x11
0:00:00.282837455 21633 0x33d3b400 INFO nvinfer gstnvinfer.cpp:619:gst_nvinfer_logger: NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::buildModel() <nvdsinfer_context_impl.cpp:1715> [UID = 1]: Trying to create engine from model files
Loading pre-trained weights…
Loading weights of yolov3-tiny complete!
Total Number of weights read : 8858734
Loading pre-trained weights…
Loading weights of yolov3-tiny complete!
Total Number of weights read : 8858734
Building Yolo network…
layer inp_size out_size weightPtr
(0) conv-bn-leaky 3 x 416 x 416 16 x 416 x 416 496
(1) maxpool 16 x 416 x 416 16 x 208 x 208 496
(2) conv-bn-leaky 16 x 208 x 208 32 x 208 x 208 5232
(3) maxpool 32 x 208 x 208 32 x 104 x 104 5232
(4) conv-bn-leaky 32 x 104 x 104 64 x 104 x 104 23920
(5) maxpool 64 x 104 x 104 64 x 52 x 52 23920
(6) conv-bn-leaky 64 x 52 x 52 128 x 52 x 52 98160
(7) maxpool 128 x 52 x 52 128 x 26 x 26 98160
(8) conv-bn-leaky 128 x 26 x 26 256 x 26 x 26 394096
(9) maxpool 256 x 26 x 26 256 x 13 x 13 394096
(10) conv-bn-leaky 256 x 13 x 13 512 x 13 x 13 1575792
(11) maxpool 512 x 13 x 13 512 x 13 x 13 1575792
(12) conv-bn-leaky 512 x 13 x 13 1024 x 13 x 13 6298480
(13) conv-bn-leaky 1024 x 13 x 13 256 x 13 x 13 6561648
(14) conv-bn-leaky 256 x 13 x 13 512 x 13 x 13 7743344
(15) conv-linear 512 x 13 x 13 255 x 13 x 13 7874159
(16) yolo 255 x 13 x 13 255 x 13 x 13 7874159
(17) route - 256 x 13 x 13 7874159
(18) conv-bn-leaky 256 x 13 x 13 128 x 13 x 13 7907439
INFO: [TRT]: mm1_19: broadcasting input0 to make tensors conform, dims(input0)=[1,26,13][NONE] dims(input1)=[128,13,13][NONE].
INFO: [TRT]: mm2_19: broadcasting input1 to make tensors conform, dims(input0)=[128,26,13][NONE] dims(input1)=[1,13,26][NONE].
(19) upsample 128 x 13 x 13 128 x 26 x 26 -
(20) route - 384 x 26 x 26 7907439
(21) conv-bn-leaky 384 x 26 x 26 256 x 26 x 26 8793199
(22) conv-linear 256 x 26 x 26 255 x 26 x 26 8858734
(23) yolo 255 x 26 x 26 255 x 26 x 26 8858734
Output yolo blob names :
yolo_17
yolo_24
Total number of yolo layers: 49
Building yolo network complete!
Building the TensorRT Engine…
INFO: [TRT]: mm1_19: broadcasting input0 to make tensors conform, dims(input0)=[1,26,13][NONE] dims(input1)=[128,13,13][NONE].
INFO: [TRT]: mm2_19: broadcasting input1 to make tensors conform, dims(input0)=[128,26,13][NONE] dims(input1)=[1,13,26][NONE].
INFO: [TRT]: Some tactics do not have sufficient workspace memory to run. Increasing workspace size may increase performance, please check verbose output.
INFO: [TRT]: Detected 1 inputs and 2 output network tensors.
Building complete!
0:01:14.648207057 21633 0x33d3b400 INFO nvinfer gstnvinfer.cpp:619:gst_nvinfer_logger: NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::buildModel() <nvdsinfer_context_impl.cpp:1748> [UID = 1]: serialize cuda engine to file: /opt/nvidia/deepstream/deepstream-5.0/sources/deepstream_python_apps/apps/model_b1_gpu0_fp32.engine successfully
INFO: [Implicit Engine Info]: layers num: 3
0 INPUT kFLOAT data 3x416x416
1 OUTPUT kFLOAT yolo_17 255x13x13
2 OUTPUT kFLOAT yolo_24 255x26x26

0:01:15.159124571 21633 0x33d3b400 INFO nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus: [UID 1]: Load new model:config_infer_primary_yoloV3_tiny.txt sucessfully
Error: gst-stream-error-quark: NvStreamMux does not suppport raw buffers. Use nvvideoconvert before NvStreamMux to convert to NVMM buffers (5): /dvs/git/dirty/git-master_linux/deepstream/sdk/src/gst-plugins/gst-nvmultistream/gstnvstreammux.c(954): gst_nvstreammux_sink_event (): /GstPipeline:pipeline0/GstNvStreamMux:Stream-muxer

(python3:21633): GStreamer-CRITICAL **: 12:24:04.703: gst_mini_object_set_qdata: assertion ‘object != NULL’ failed
GST_ARGUS: Creating output stream
CONSUMER: Waiting until producer is connected…
GST_ARGUS: Available Sensor modes :
GST_ARGUS: 3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 3264 x 1848 FR = 28.000001 fps Duration = 35714284 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1920 x 1080 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1640 x 1232 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1280 x 720 FR = 59.999999 fps Duration = 16666667 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: Running with following settings:
Camera index = 0
Camera mode = 5
Output Stream W = 1280 H = 720
seconds to Run = 0
Frame Rate = 120.000005
GST_ARGUS: Setup Complete, Starting captures for 0 seconds
GST_ARGUS: Starting repeat capture requests.
CONSUMER: Producer has connected; continuing.
GST_ARGUS: Cleaning up
nvbuf_utils: dmabuf_fd -1 mapped entry NOT found
nvbuf_utils: Can not get HW buffer from FD… Exiting…
CONSUMER: Done Success
GST_ARGUS: Done Success
WARNING Argus: 2 client objects still exist during shutdown:
546870305856 (0x7f1c001bd8)
547570278064 (0x7f28001d60)