pyds.get_nvds_buf_surface segmentation fault

Description

pyds.get_nvds_buf_surface segmentation fault & "Failed to configure caps" errors

Environment

TensorRT Version: 7.2.3
GPU Type: GTX 1050 Ti
Nvidia Driver Version: 465.27
CUDA Version: 10.2
Operating System + Version: Ubuntu 18.04
Python Version (if applicable): Python 3.6
DeepStream Version: 5.0

Issue

When I call get_nvds_buf_surface from my Python code, I get Segmentation fault (core dumped).
I think the pipeline itself is correct, because I have tested it on a Jetson board.
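The call itself happens inside a GStreamer pad probe, which I have not pasted here. For reference, a minimal sketch of the usual access pattern (modeled on the deepstream-imagedata-multistream sample, not my exact probe) looks like this:

import numpy as np
import pyds
from gi.repository import Gst


def tiler_sink_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        # get_nvds_buf_surface expects the frame to already be RGBA and,
        # on x86/dGPU, to live in CUDA unified memory.
        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
        frame_copy = np.array(n_frame, copy=True, order='C')  # copy the frame out as a numpy array
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK

In that sample the probe is attached to the tiler's sink pad via tiler.get_static_pad("sink").add_probe(Gst.PadProbeType.BUFFER, ...).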
Here is my pipeline code:

import sys
import math
import configparser

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

# is_aarch64() and create_source_bin() are defined elsewhere in the full script (not shown here).

MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
TILED_OUTPUT_WIDTH = 1920
TILED_OUTPUT_HEIGHT = 1080
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
OSD_PROCESS_MODE = 0
OSD_DISPLAY_TEXT = 1


def make_pipeline_components(args, number_sources, batch_size):
    pipeline_components = dict()

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")

    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    else:
        streammux.set_property('width', MUXER_OUTPUT_WIDTH)
        streammux.set_property('height', MUXER_OUTPUT_HEIGHT)
        streammux.set_property('batch-size', number_sources)
        streammux.set_property('batched-push-timeout', 4000000)

        # if is_live:
        #     print("Atleast one of the sources is live")
        #     streammux.set_property('live-source', 1)
        pipeline_components['streammux'] = streammux

    # make pgie component
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")

    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    else:
        pgie.set_property('config-file-path', "config/pgie_config.txt")
        pgie_batch_size = pgie.get_property("batch-size")
        #pgie.set_property('model-engine-file', f'model/yangsan.etlt_b{batch_size}_gpu0_fp16.engine')
        if pgie_batch_size != number_sources:
            print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
                  number_sources, " \n")
            pgie.set_property("batch-size", number_sources)
        pipeline_components['pgie'] = pgie

    
    print("Creating Tracker \n ")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")

    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    else:
        config = configparser.ConfigParser()
        config.read('config/tracker_config.txt')
        config.sections()

        for key in config['tracker']:
            if key == 'tracker-width':
                tracker_width = config.getint('tracker', key)
                tracker.set_property('tracker-width', tracker_width)
            if key == 'tracker-height':
                tracker_height = config.getint('tracker', key)
                tracker.set_property('tracker-height', tracker_height)
            if key == 'gpu-id':
                tracker_gpu_id = config.getint('tracker', key)
                tracker.set_property('gpu_id', tracker_gpu_id)
            if key == 'll-lib-file':
                tracker_ll_lib_file = config.get('tracker', key)
                tracker.set_property('ll-lib-file', tracker_ll_lib_file)
            if key == 'll-config-file':
                tracker_ll_config_file = config.get('tracker', key)
                tracker.set_property('ll-config-file', tracker_ll_config_file)
            if key == 'enable-batch-process':
                tracker_enable_batch_process = config.getint('tracker', key)
                tracker.set_property('enable_batch_process', tracker_enable_batch_process)
        pipeline_components['tracker'] = tracker

    # nvvideoconvert + RGBA capsfilter pair so the frame data can be read in the probe
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    filter1.set_property("caps", caps1)
    pipeline_components['n1'] = nvvidconv1
    pipeline_components['f1'] = filter1

    # make tiler component
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")

    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    else:
        tiler_rows = int(math.sqrt(number_sources))
        tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
        tiler.set_property("rows", tiler_rows)
        tiler.set_property("columns", tiler_columns)
        tiler.set_property("width", TILED_OUTPUT_WIDTH)
        tiler.set_property("height", TILED_OUTPUT_HEIGHT)
        pipeline_components['tiler'] = tiler

    # make nvvidconv component
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")

    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    else:
        pipeline_components['nvvidconv'] = nvvidconv

    # make nvosd component
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    else:
        nvosd.set_property('process-mode', OSD_PROCESS_MODE)
        nvosd.set_property('display-text', OSD_DISPLAY_TEXT)
        pipeline_components['nvosd'] = nvosd

    if args.port is not None:
        # codec = "H264"
        bitrate = 4000000
        nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
        if not nvvidconv_postosd:
            sys.stderr.write(" Unable to create nvvidconv_postosd \n")

        pipeline_components['nvvidconv_postosd'] = nvvidconv_postosd

        # Create a caps filter
        caps = Gst.ElementFactory.make("capsfilter", "filter")
        caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

        pipeline_components['caps'] = caps

        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")

        if not encoder:
            sys.stderr.write(" Unable to create encoder")
        encoder.set_property('bitrate', bitrate)
        if is_aarch64():
            encoder.set_property('preset-level', 1)
            encoder.set_property('insert-sps-pps', 1)
            encoder.set_property('bufapi-version', 1)

        pipeline_components['encoder'] = encoder
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
        if not rtppay:
            sys.stderr.write(" Unable to create rtppay")

        pipeline_components['rtppay'] = rtppay

    if is_aarch64():
        # make transform component
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")

        if not transform:
            sys.stderr.write(" Unable to create transform \n")
        else:
            pipeline_components['transform'] = transform

    
    # Make the UDP sink
    updsink_port_num = args.port
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")

    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 0)

    sink.set_property("qos", 0) 
    pipeline_components['sink'] = sink

    print("Creating Analytics - line crossing")
    analytics = Gst.ElementFactory.make("nvdsanalytics", "analytics")

    if not analytics:
        sys.stderr.write(" Unable to create analytics \n")
    else:
        analytics.set_property('config-file', args.analytics)
        pipeline_components['analytics'] = analytics

    return pipeline_components


def make_pipeline(args, pipeline_components, number_sources, input_source):
    # Create GStreamer elements.
    # The Pipeline element will contain and connect all the other elements.
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    pipeline.add(pipeline_components['streammux'])

    # source pipeline
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = input_source[i]
        # if uri_name.find("rtsp://") == 0:
        #     is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = pipeline_components['streammux'].get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Adding elements to Pipeline \n")
    pipeline.add(pipeline_components['pgie'])
    pipeline.add(pipeline_components['tracker'])
    pipeline.add(pipeline_components['analytics'])
    pipeline.add(pipeline_components['n1'])
    pipeline.add(pipeline_components['f1'])
    pipeline.add(pipeline_components['tiler'])
    pipeline.add(pipeline_components['nvvidconv'])
    pipeline.add(pipeline_components['nvosd'])
    
    pipeline.add(pipeline_components['nvvidconv_postosd'])
    pipeline.add(pipeline_components['caps'])
    pipeline.add(pipeline_components['encoder'])
    pipeline.add(pipeline_components['rtppay'])

    if is_aarch64():
        pipeline.add(pipeline_components['transform'])

    pipeline.add(pipeline_components['sink'])

    return pipeline


def link_components(args, pipeline_components):
    print("Linking elements in the Pipeline \n")
    pipeline_components['streammux'].link(pipeline_components['pgie'])

    pipeline_components['pgie'].link(pipeline_components['tracker'])
    pipeline_components['tracker'].link(pipeline_components['n1'])
    pipeline_components['n1'].link(pipeline_components['f1'])
    pipeline_components['f1'].link(pipeline_components['analytics'])


    pipeline_components['analytics'].link(pipeline_components['tiler'])

    
    pipeline_components['tiler'].link(pipeline_components['nvvidconv'])
    pipeline_components['nvvidconv'].link(pipeline_components['nvosd'])

    
    pipeline_components['nvosd'].link(pipeline_components['nvvidconv_postosd'])
    pipeline_components['nvvidconv_postosd'].link(pipeline_components['caps'])
    pipeline_components['caps'].link(pipeline_components['encoder'])
    pipeline_components['encoder'].link(pipeline_components['rtppay'])
    pipeline_components['rtppay'].link(pipeline_components['sink'])

    return pipeline_components
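A note on memory type: the pipeline above converts to RGBA for frame access (nvvidconv1 + filter1) but never sets nvbuf-memory-type, and on x86/dGPU pyds.get_nvds_buf_surface only supports unified memory. Purely as an illustration (assuming import pyds at module level and using the element variables from make_pipeline_components above), setting it would look roughly like this:

if not is_aarch64():
    # Only relevant on dGPU/x86; Jetson uses surface-array memory by default.
    mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)  # enum value 3; use the literal if your pyds build lacks the enum
    streammux.set_property("nvbuf-memory-type", mem_type)
    nvvidconv1.set_property("nvbuf-memory-type", mem_type)  # the RGBA convertor
    nvvidconv.set_property("nvbuf-memory-type", mem_type)
    tiler.set_property("nvbuf-memory-type", mem_type)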

I'm trying to run inference in the NGC DeepStream 5.0 container for dGPU (tag 5.0.1-20.09-base).
Also, the video doesn't display anything.
Here is the error log with GST_DEBUG=3:

Creating Pgie 
 
Warning: 'input-dims' parameter has been deprecated. Use 'infer-dims' instead.
Warn: 'threshold' parameter has been deprecated. Use 'pre-cluster-threshold' instead.
Creating Tracker 
 
Creating tiler 
 
Creating nvvidconv 
 
Creating nvosd 
 
Creating EGLSink 

Creating Analytics - line crossing
Creating Pipeline 
 
Creating streamux 
 
Creating source_bin  0  
 
Creating source bin
source-bin-00
Adding elements to Pipeline 

Linking elements in the Pipeline 

['adult', 'child']
Now playing...
0 :  file:///workspace/its-final/daycare2.mp4
Starting pipeline 

gstnvtracker: Loading low-level lib at /opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so
gstnvtracker: Batch processing is ON
gstnvtracker: Past frame output is OFF
[NvDCF] Initialized
WARNING: ../nvdsinfer/nvdsinfer_func_utils.cpp:36 [TRT]: Current optimization profile is: 0. Please ensure there are no enqueued operations pending in this context prior to switching profiles
0:00:01.337936877  5392      0xc3b84f0 INFO                 nvinfer gstnvinfer.cpp:619:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1701> [UID = 1]: deserialized trt engine from :/workspace/its-final/model/yangsan.etlt_b1_gpu0_fp32.engine
INFO: ../nvdsinfer/nvdsinfer_model_builder.cpp:685 [Implicit Engine Info]: layers num: 3
0   INPUT  kFLOAT input_1         3x544x960       
1   OUTPUT kFLOAT output_bbox/BiasAdd 8x34x60         
2   OUTPUT kFLOAT output_cov/Sigmoid 2x34x60         

0:00:01.338012528  5392      0xc3b84f0 INFO                 nvinfer gstnvinfer.cpp:619:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:1805> [UID = 1]: Use deserialized engine model: /workspace/its-final/model/yangsan.etlt_b1_gpu0_fp32.engine
0:00:01.338882939  5392      0xc3b84f0 INFO                 nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:config/pgie_config.txt sucessfully
Decodebin child added: urisourcebin0 

0:00:01.340004639  5392      0xc3b84f0 WARN                 basesrc gstbasesrc.c:3583:gst_base_src_start_complete:<source> pad not activated yet
0:00:01.340174012  5392      0xc3b84f0 WARN                 basesrc gstbasesrc.c:3583:gst_base_src_start_complete:<source> pad not activated yet
0:00:01.344189070  5392 0x7f58780505e0 WARN                 qtdemux qtdemux_types.c:233:qtdemux_type_get: unknown QuickTime node type pasp
0:00:01.344207680  5392 0x7f58780505e0 WARN                 qtdemux qtdemux_types.c:233:qtdemux_type_get: unknown QuickTime node type sgpd
0:00:01.344215490  5392 0x7f58780505e0 WARN                 qtdemux qtdemux_types.c:233:qtdemux_type_get: unknown QuickTime node type sbgp
0:00:01.344221109  5392 0x7f58780505e0 WARN                 qtdemux qtdemux_types.c:233:qtdemux_type_get: unknown QuickTime node type keys
0:00:01.344226083  5392 0x7f58780505e0 WARN                 qtdemux qtdemux_types.c:233:qtdemux_type_get: unknown QuickTime node type ....
0:00:01.344237756  5392 0x7f58780505e0 WARN                 qtdemux qtdemux.c:3031:qtdemux_parse_trex:<qtdemux0> failed to find fragment defaults for stream 1
0:00:01.344274070  5392 0x7f58780505e0 WARN                 qtdemux qtdemux.c:3031:qtdemux_parse_trex:<qtdemux0> failed to find fragment defaults for stream 2
0:00:01.345534476  5392 0x7f58780505e0 FIXME             decodebin3 gstdecodebin3.c:1098:update_requested_selection:<decodebin3-0> Implement EXPOSE_ALL_MODE
0:00:01.346261108  5392 0x7f58780505e0 FIXME             decodebin3 gstdecodebin3-parse.c:420:parsebin_buffer_probe:<decodebin3-0> Re-use existing input streams if/when possible
0:00:01.346474594  5392 0x7f5878057e80 FIXME             decodebin3 gstdecodebin3.c:1450:get_output_for_slot:<decodebin3-0> emit autoplug-continue
0:00:01.346490081  5392 0x7f5878057e80 FIXME             decodebin3 gstdecodebin3.c:1453:get_output_for_slot:<decodebin3-0> Handle EXPOSE_ALL_MODE
In cb_newpad

video/x-raw(memory:NVMM), width=(int)[ 1, 2147483647 ], height=(int)[ 1, 2147483647 ], framerate=(fraction)[ 0/1, 2147483647/1 ]
0:00:01.348362658  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:3035:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.348374648  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2921:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat MJPG
0:00:01.348380264  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:3035:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.348385054  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2927:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat MJPG
0:00:01.348414076  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:3035:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.348435276  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2921:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat MPG4
0:00:01.348442623  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:3035:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.348463225  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2927:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat MPG4
0:00:01.348474004  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:3035:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.348478884  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2921:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat H265
0:00:01.348487101  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:3035:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.348495061  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2927:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat H265
0:00:01.348513249  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:3035:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.348521896  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2921:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat H264
0:00:01.348527457  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:3035:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.348535564  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2927:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat H264
0:00:01.348771168  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:3035:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:src> Unable to try format: Unknown error -1
0:00:01.348780970  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2921:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:src> Could not probe minimum capture size for pixelformat NM12
0:00:01.348786774  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:3035:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:src> Unable to try format: Unknown error -1
0:00:01.348795512  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2927:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:src> Could not probe maximum capture size for pixelformat NM12
0:00:01.348807053  5392 0x7f5878057e80 WARN                    v4l2 gstv4l2object.c:2372:gst_v4l2_object_add_interlace_mode:0x7f5870010c50 Failed to determine interlace mode
0:00:01.348889803  5392 0x7f5878057de0 FIXME             decodebin3 gstdecodebin3.c:1450:get_output_for_slot:<decodebin3-0> emit autoplug-continue
0:00:01.348915372  5392 0x7f5878057de0 FIXME             decodebin3 gstdecodebin3.c:1453:get_output_for_slot:<decodebin3-0> Handle EXPOSE_ALL_MODE
In cb_newpad

audio/x-raw, layout=(string)interleaved, format=(string)F32LE
0:00:01.477570339  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.477663126  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.477769888  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.477798621  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.477895257  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.477935599  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.478030313  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.478056919  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.478139761  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.478184755  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.478275657  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

0:00:01.478302765  5392 0x7f5878057e80 ERROR                   v4l2 gstv4l2object.c:2074:gst_v4l2_object_get_interlace_mode: Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git

cuGraphicsGLRegisterBuffer failed with error(219) gst_eglglessink_cuda_init texture = 1
0:00:01.599539127  5392      0x3235000 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2784:gst_eglglessink_configure_caps:<nvvideo-renderer> Cuda Init failed
0:00:01.599550928  5392      0x3235000 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2795:gst_eglglessink_configure_caps:<nvvideo-renderer> Configuring caps failed
0:00:01.599597273  5392 0x7f5878057e80 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.599649779  5392 0x7f5878057e80 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.599657083  5392 0x7f5878057e80 WARN                GST_PADS gstpad.c:4226:gst_pad_peer_query:<onscreendisplay:src> could not send sticky events
0:00:01.600570133  5392 0x7f5878057e80 WARN            v4l2videodec gstv4l2videodec.c:1614:gst_v4l2_video_dec_decide_allocation:<nvv4l2decoder0> Duration invalid, not setting latency
0:00:01.600600524  5392 0x7f5878057e80 WARN          v4l2bufferpool gstv4l2bufferpool.c:1057:gst_v4l2_buffer_pool_start:<nvv4l2decoder0:pool:src> Uncertain or not enough buffers, enabling copy threshold
0:00:01.600807432  5392 0x7f5870021850 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.600994738  5392 0x7f5870021a30 WARN          v4l2bufferpool gstv4l2bufferpool.c:1503:gst_v4l2_buffer_pool_dqbuf:<nvv4l2decoder0:pool:src> Driver should never set v4l2_buffer.field to ANY
0:00:01.601123774  5392 0x7f5870021a30 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.601149402  5392 0x7f5870021a30 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.601172751  5392 0x7f5870021a30 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.601242332  5392 0x7f5870021850 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.601258790  5392 0x7f5870021850 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.601267289  5392 0x7f5870021850 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.601344682  5392 0x7f5870021850 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.601359348  5392 0x7f5870021850 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.601366274  5392 0x7f5870021850 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.769412728  5392      0x3234e80 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.774342712  5392      0x3234e80 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.779613618  5392      0x3234e80 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.799766918  5392      0x3234e80 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
0:00:01.814857378  5392      0x3234e80 ERROR          nveglglessink ext/eglgles/gsteglglessink.c:2840:gst_eglglessink_setcaps:<nvvideo-renderer> Failed to configure caps
Segmentation fault (core dumped)  <- this is the point where pyds.get_nvds_buf_surface() is called

Hi, this looks like a DeepStream issue. We recommend you raise it on the respective forum.

Thanks!