Internal data stream error while running CSI camera on deepstream-test 2 application

Hi,
I am running deepstream test-2 application on jetson nano and I made some changes in that for CSI camera but getting error like:

Creating Pipeline 
 
Creating Source 
 
Creating Video Converter 

Creating EGLSink 

Adding elements to Pipeline 

Linking elements in the Pipeline 

Starting pipeline 

Using winsys: x11 
Creating LL OSD context new
gstnvtracker: Loading low-level lib at /opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_klt.so
gstnvtracker: Optional NvMOT_RemoveStreams not implemented
gstnvtracker: Batch processing is OFF
Creating LL OSD context new
GST_ARGUS: Creating output stream
CONSUMER: Waiting until producer is connected...
GST_ARGUS: Available Sensor modes :
GST_ARGUS: 3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 3264 x 1848 FR = 28.000001 fps Duration = 35714284 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1920 x 1080 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1280 x 720 FR = 59.999999 fps Duration = 16666667 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: Running with following settings:
   Camera index = 0 
   Camera mode  = 4 
   Output Stream W = 1280 H = 720 
   seconds to Run    = 0 
   Frame Rate = 120.000005 
GST_ARGUS: PowerService: requested_clock_Hz=24192000
GST_ARGUS: Setup Complete, Starting captures for 0 seconds
GST_ARGUS: Starting repeat capture requests.
CONSUMER: Producer has connected; continuing.
Error: gst-stream-error-quark: Internal data stream error. (1): gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:pipeline0/GstNvArgusCameraSrc:pi-cam-source:
streaming stopped, reason error (-5)
GST_ARGUS: Cleaning up
CONSUMER: Done Success
GST_ARGUS: Done Success
XIO:  fatal IO error 9 (Bad file descriptor) on X server "�G0"
      after 18 requests (18 known processed) with 0 events remaining.
GST_ARGUS: 
PowerServiceHwVic::cleanupResources

Code is :

def main(args):
    # Check input arguments
    # if len(args) != 2:
    #     sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
    #     sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("nvarguscamerasrc", "src_elem")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    caps_picamsrc = Gst.ElementFactory.make("capsfilter", "src_cap_filter")
    if not caps_picamsrc:
        sys.stderr.write(" Unable to create picamsrc capsfilter \n")

# Video convertor
    print("Creating Video Converter \n")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")
    
    caps_nvvidconv1 = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_nvvidconv1:
        sys.stderr.write(" Unable to create capsfilter \n")

# Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "converto2r")
    if not nvvidconv2:
        sys.stderr.write(" Unable to create nvvidconv \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")

print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nvoverlaysink", "sink_sub_bin_sink1")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    caps_picamsrc.set_property('caps', Gst.Caps.from_string('video/x-raw(memory:NVMM),width=1280, height=720, framerate=21/1, format=NV12'))
    caps_nvvidconv1.set_property('caps', Gst.Caps.from_string('video/x-raw,width=960, height=616'))

#print("Playing file %s " %args[1])
    #source.set_property('location', args[1])
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', 32)
    streammux.set_property('batched-push-timeout', 4000000)

    #Set properties of pgie and sgie
    pgie.set_property('config-file-path', "Resnet_10_head.txt")

    #sink.set_property('sync', False)

#Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width' :
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height' :
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id' :
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file' :
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file' :
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process' :
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(caps_picamsrc)
    pipeline.add(nvvidconv1)
    pipeline.add(caps_nvvidconv1)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(nvvidconv2)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # we link the elements together
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvvidconv -> nvosd -> video-renderer

    print("Linking elements in the Pipeline \n")
    source.link(caps_picamsrc)
    caps_picamsrc.link(nvvidconv1)
    nvvidconv1.link(caps_nvvidconv1)
    sinkpad = streammux.get_request_pad("sink_0")
    srcpad = caps_nvvidconv1.get_static_pad("src")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(tracker)

    tracker.link(nvvidconv2)
    nvvidconv2.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

# create and event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    # Lets add probe to get informed of the meta data generated, we add probe to
    # the sink pad of the osd element, since by that time, the buffer would have
    # had got all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

print("Starting pipeline \n")
    
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
      loop.run()
    except:
      pass

    # cleanup
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    sys.exit(main(sys.argv))

please help me out

Hi
Sorry for a late reply, i see another post from you which can open the CSI camera, so seems you solve the problem? please let me know, if you still have problem, please update with your latest issues regarding this topic.

hello amycao,
I am still having the same issue I had just change the linking order in pipeline that’s why i was able to run camera but unable to get detection or unable to draw line.
Please help me out.

similar issue with https://devtalk.nvidia.com/default/topic/1071110/deepstream-sdk/unable-to-run-inference-pipeline-using-csi-camera-source-in-deepstream-python-app/post/5435300/#5435300, seems your issue solved, right?