Deepstream-imagedata-multistream.py (Segmentation Fault)

I am getting a segmentation fault after the first frames of both video sources are loaded. I am using a Jetson Orin Nano.

deepstream_imagedata-multistream.py:

import sys

sys.path.append('../')
import gi
import configparser

gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
from ctypes import *
import time
import math
import platform
from common.platform_info import PlatformInfo
from common.bus_call import bus_call
from common.FPS import PERF_DATA
import numpy as np
import pyds
import cv2
import os
from os import path

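# Per-stream counters, keyed by "stream_<pad_index>"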
perf_data = None
frame_count = {}
saved_count = {}

MAX_DISPLAY_LEN = 64
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
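# Timeout in microseconds for nvstreammux to form a batch (~33 ms, one frame at 30 fps)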
MUXER_BATCH_TIMEOUT_USEC = 33000
TILED_OUTPUT_WIDTH = 1920
TILED_OUTPUT_HEIGHT = 1080
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
pgie_classes_str = ["Vehicle", "TwoWheeler", "Person", "RoadSign"]

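# Detections whose confidence falls strictly between these bounds are treated
# as borderline (possible false positives) and trigger saving annotated frames.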
MIN_CONFIDENCE = 0.3
MAX_CONFIDENCE = 0.4

# tiler_sink_pad_buffer_probe will extract metadata received on the tiler sink pad
# and update params for drawing rectangles, object information, etc.
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return Gst.PadProbeReturn.OK

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            # Periodically check for objects with borderline confidence value that may be false positive detections.
            # If such detections are found, annotate the frame with bboxes and confidence value.
            # Save the annotated frame to file.
            if saved_count["stream_{}".format(frame_meta.pad_index)] % 30 == 0 and (
                    MIN_CONFIDENCE < obj_meta.confidence < MAX_CONFIDENCE):
                if is_first_obj:
                    is_first_obj = False
                    # Getting Image data using nvbufsurface
                    # the input should be address of buffer and batch_id
                    n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
                    n_frame = draw_bounding_boxes(n_frame, obj_meta, obj_meta.confidence)
                    # convert python array into numpy array format in the copy mode.
                    frame_copy = np.array(n_frame, copy=True, order='C')
                    # convert the array into cv2 default color format
                    frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)
                    if platform_info.is_integrated_gpu():
                        # If Jetson, since the buffer is mapped to CPU for retrieval,
                        # it must also be unmapped. The unmap call should be made after
                        # operations on the original array are complete; the original
                        # array cannot be accessed after this call.
                        pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)

                save_image = True

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        print("Frame Number=", frame_number, "Number of Objects=", num_rects, "Vehicle_count=",
              obj_counter[PGIE_CLASS_ID_VEHICLE], "Person_count=", obj_counter[PGIE_CLASS_ID_PERSON])
        # update frame rate through this probe
        stream_index = "stream{0}".format(frame_meta.pad_index)
        global perf_data
        perf_data.update_fps(stream_index)
        if save_image:
            img_path = "{}/stream_{}/frame_{}.jpg".format(folder_name, frame_meta.pad_index, frame_number)
            cv2.imwrite(img_path, frame_copy)
        saved_count["stream_{}".format(frame_meta.pad_index)] += 1
        print(saved_count)
        try:
            l_frame = l_frame.next
            print("l_frame: ",l_frame)
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


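# Draw corner-style bounding-box lines and a class/confidence label on the frame
# with OpenCV; `image` is the RGBA array returned by pyds.get_nvds_buf_surface().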
def draw_bounding_boxes(image, obj_meta, confidence):
    confidence = '{0:.2f}'.format(confidence)
    rect_params = obj_meta.rect_params
    top = int(rect_params.top)
    left = int(rect_params.left)
    width = int(rect_params.width)
    height = int(rect_params.height)
    obj_name = pgie_classes_str[obj_meta.class_id]
    # image = cv2.rectangle(image, (left, top), (left + width, top + height), (0, 0, 255, 0), 2, cv2.LINE_4)
    color = (0, 0, 255, 0)
    w_percents = int(width * 0.05) if width > 100 else int(width * 0.1)
    h_percents = int(height * 0.05) if height > 100 else int(height * 0.1)
    linetop_c1 = (left + w_percents, top)
    linetop_c2 = (left + width - w_percents, top)
    image = cv2.line(image, linetop_c1, linetop_c2, color, 6)
    linebot_c1 = (left + w_percents, top + height)
    linebot_c2 = (left + width - w_percents, top + height)
    image = cv2.line(image, linebot_c1, linebot_c2, color, 6)
    lineleft_c1 = (left, top + h_percents)
    lineleft_c2 = (left, top + height - h_percents)
    image = cv2.line(image, lineleft_c1, lineleft_c2, color, 6)
    lineright_c1 = (left + width, top + h_percents)
    lineright_c2 = (left + width, top + height - h_percents)
    image = cv2.line(image, lineright_c1, lineright_c2, color, 6)
    # Note that on some systems cv2.putText erroneously draws horizontal lines across the image
    image = cv2.putText(image, obj_name + ',C=' + str(confidence), (left - 10, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 0, 255, 0), 2)
    return image


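# "pad-added" callback for uridecodebin: once the decoder exposes its video src
# pad, set it as the target of the source bin's ghost pad.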
def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)

    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    if (gstname.find("video") != -1):
        # Link the decodebin pad only if decodebin has picked nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")


def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)

    if not platform_info.is_integrated_gpu() and name.find("nvv4l2decoder") != -1:
        # Use CUDA unified memory in the pipeline so frames can be easily accessed on CPU in Python.
        # 0: NVBUF_MEM_CUDA_DEVICE, 1: NVBUF_MEM_CUDA_PINNED, 2: NVBUF_MEM_CUDA_UNIFIED
        # Dont use direct macro here like NVBUF_MEM_CUDA_UNIFIED since nvv4l2decoder uses a
        # different enum internally
        Object.set_property("cudadec-memtype", 2)

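    # For live sources, drop buffers that arrive late instead of queueing them,
    # when the underlying source element exposes the drop-on-latency property.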
    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property('drop-on-latency') != None:
            Object.set_property("drop-on-latency", True)

def create_source_bin(index, uri):
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin


def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" % args[0])
        sys.exit(1)

    global perf_data
    perf_data = PERF_DATA(len(args) - 2)
    number_sources = len(args) - 2

    global folder_name
    folder_name = args[-1]
    if path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)

    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    global platform_info
    platform_info = PlatformInfo()
    # Standard GStreamer initialization
    Gst.init(None)

    # Create GStreamer elements.
    # Create Pipeline element that will form a connection of other elements.
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.request_pad_simple(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if True:
        sink = Gst.ElementFactory.make("fakesink", "fakesink")
        if not sink:
            sys.stderr.write(" Unable to create fakesink \n")
    elif platform_info.is_integrated_gpu():
        print("Creating nv3dsink \n")
        sink = Gst.ElementFactory.make("fakesink", "fakesink")
        # sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink")
        if not sink:
            sys.stderr.write(" Unable to create nv3dsink \n")
    else:
        if platform_info.is_platform_aarch64():
            print("Creating nv3dsink \n")
            sink = Gst.ElementFactory.make("fakesink", "fakesink")

            # sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink")
        else:
            print("Creating EGLSink \n")
            sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', MUXER_BATCH_TIMEOUT_USEC)
    pgie.set_property('config-file-path', "dstest_imagedata_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    sink.set_property("sync", 0)
    sink.set_property("qos", 0)


    if not platform_info.is_integrated_gpu():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        if platform_info.is_wsl():
            # OpenCV functions like cv2.line and cv2.putText cannot access
            # NVBUF_MEM_CUDA_UNIFIED memory on WSL systems and segfault. Use
            # NVBUF_MEM_CUDA_PINNED memory for such use cases on WSL. Here,
            # nvvidconv1's buffer is used in the tiler sink pad probe and the
            # cv2 operations are done on it.
            print("using nvbuf_mem_cuda_pinned memory for nvvidconv1\n")
            vc_mem_type = int(pyds.NVBUF_MEM_CUDA_PINNED)
            nvvidconv1.set_property("nvbuf-memory-type", vc_mem_type)
        else:
            nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(sink)

    # create an event loop and feed GStreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
        # perf callback function to print fps every 5 sec
        GLib.timeout_add(5000, perf_data.perf_print_callback)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)


if __name__ == '__main__':
    sys.exit(main(sys.argv))

Terminal:

velens@ubuntu:/opt/nvidia/deepstream/deepstream-6.4/sources/deepstream_python_apps/apps/deepstream-imagedata-multistream$ sudo python3 deepstream_imagedata-multistream.py file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/yoga.mp4 file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/fisheye_dist.mp4 new_frames
Frames will be saved in  new_frames
Creating Pipeline 
 
Creating streammux 
 
Creating source_bin  0  
 
Creating source bin
source-bin-00
Creating source_bin  1  
 
Creating source bin
source-bin-01
Creating Pgie 
 
Creating nvvidconv1 
 
Creating filter1 
 
Creating tiler 
 
Creating nvvidconv 
 
Creating nvosd 
 
Is it Integrated GPU? : 1
Adding elements to Pipeline 

Linking elements in the Pipeline 

Now playing...
1 :  file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/yoga.mp4
2 :  file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/fisheye_dist.mp4
Starting pipeline 

0:00:06.389595934  7804 0xaaab56512310 INFO                 nvinfer gstnvinfer.cpp:682:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:2092> [UID = 1]: deserialized trt engine from :/opt/nvidia/deepstream/deepstream-6.4/samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b2_gpu0_int8.engine
INFO: [Implicit Engine Info]: layers num: 3
0   INPUT  kFLOAT input_1         3x544x960       
1   OUTPUT kFLOAT output_bbox/BiasAdd 16x34x60        
2   OUTPUT kFLOAT output_cov/Sigmoid 4x34x60         

0:00:06.784423949  7804 0xaaab56512310 INFO                 nvinfer gstnvinfer.cpp:682:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2195> [UID = 1]: Use deserialized engine model: /opt/nvidia/deepstream/deepstream-6.4/samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b2_gpu0_int8.engine
0:00:06.800524542  7804 0xaaab56512310 INFO                 nvinfer gstnvinfer_impl.cpp:328:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:dstest_imagedata_config.txt sucessfully
Decodebin child added: source 

Decodebin child added: decodebin0 

Decodebin child added: source 

Decodebin child added: decodebin1 


**PERF:  {'stream0': 0.0, 'stream1': 0.0} 

Decodebin child added: qtdemux0 

Decodebin child added: qtdemux1 

Decodebin child added: multiqueue1 

Decodebin child added: multiqueue0 

Decodebin child added: h264parse0 

Decodebin child added: h264parse1 

Decodebin child added: capsfilter1 

Decodebin child added: capsfilter0 

Decodebin child added: aacparse0 

Decodebin child added: nvv4l2decoder0 

Decodebin child added: avdec_aac0 

Opening in BLOCKING MODE 
Decodebin child added: nvv4l2decoder1 

NvMMLiteOpen : Block : BlockType = 261 
NvMMLiteBlockCreate : Block : BlockType = 261 
Opening in BLOCKING MODE 
NvMMLiteOpen : Block : BlockType = 261 
NvMMLiteBlockCreate : Block : BlockType = 261 
In cb_newpad

In cb_newpad

In cb_newpad

Frame Number= 0 Number of Objects= 4 Vehicle_count= 4 Person_count= 0
{'stream_0': 0, 'stream_1': 1}
l_frame:  None
Segmentation fault

Could you run it under the gdb tool and attach the stack information?

$ gdb --args <your command>
(gdb) bt

I am attaching the gdb output here, but it shows nothing:

velens@ubuntu:/opt/nvidia/deepstream/deepstream-6.4/sources/deepstream_python_apps/apps/deepstream-imagedata-multistream$ sudo gdb --args python deepstream_imagedata-multistream.py file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/sample_720p.h264  file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/sample_720p.h264 new_frames
GNU gdb (Ubuntu 12.1-0ubuntu1~22.04) 12.1
Copyright (C) 2022 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Type "show copying" and "show warranty" for details.
This GDB was configured as "aarch64-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<https://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
    <http://www.gnu.org/software/gdb/documentation/>.

For help, type "help".
Type "apropos word" to search for commands related to "word"...
Reading symbols from python...
(No debugging symbols found in python)
(gdb) bt
No stack.
(gdb) 

Sorry, I missed one step. You can check whether the program runs by using the r command first.

$ gdb --args <your command>
(gdb) r
(gdb) bt
velens@ubuntu:/opt/nvidia/deepstream/deepstream-6.4/sources/deepstream_python_apps/apps/deepstream-imagedata-multistream$ sudo gdb --args python deepstream_imagedata-multistream.py file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/sample_720p.mp4 file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/sample_1080p_h264.mp4 new_frames
GNU gdb (Ubuntu 12.1-0ubuntu1~22.04) 12.1
Copyright (C) 2022 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Type "show copying" and "show warranty" for details.
This GDB was configured as "aarch64-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<https://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
    <http://www.gnu.org/software/gdb/documentation/>.

For help, type "help".
Type "apropos word" to search for commands related to "word"...
Reading symbols from python...
(No debugging symbols found in python)
(gdb) r
Starting program: /usr/bin/python deepstream_imagedata-multistream.py file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/sample_720p.mp4 file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/sample_1080p_h264.mp4 new_frames
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib/aarch64-linux-gnu/libthread_db.so.1".
[New Thread 0xfffff21cf120 (LWP 8147)]
[New Thread 0xfffff19bf120 (LWP 8148)]
[New Thread 0xffffef1af120 (LWP 8149)]
[New Thread 0xffffec99f120 (LWP 8150)]
[New Thread 0xffffea18f120 (LWP 8151)]
Frames will be saved in  new_frames
Creating Pipeline 
 
Creating streammux 
 
[New Thread 0xffffd463a120 (LWP 8153)]
Creating source_bin  0  
 
Creating source bin
source-bin-00
Creating source_bin  1  
 
Creating source bin
source-bin-01
Creating Pgie 
 
Creating nvvidconv1 
 
Creating filter1 
 
Creating tiler 
 
Creating nvvidconv 
 
Creating nvosd 
 
Is it Integrated GPU? : 1
Creating nv3dsink 

Adding elements to Pipeline 

Linking elements in the Pipeline 

Now playing...
1 :  file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/sample_720p.mp4
2 :  file:///opt/nvidia/deepstream/deepstream-6.4/samples/streams/sample_1080p_h264.mp4
Starting pipeline 

[New Thread 0xffff91ccf120 (LWP 8154)]
[New Thread 0xffff91282120 (LWP 8155)]
WARNING: [TRT]: Using an engine plan file across different models of devices is not recommended and is likely to affect performance or even cause errors.
0:00:07.606734879  8145 0xaaaaf6ab9940 INFO                 nvinfer gstnvinfer.cpp:682:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:2092> [UID = 1]: deserialized trt engine from :/opt/nvidia/deepstream/deepstream-6.4/samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b2_gpu0_int8.engine
INFO: [Implicit Engine Info]: layers num: 3
0   INPUT  kFLOAT input_1         3x544x960       
1   OUTPUT kFLOAT output_bbox/BiasAdd 16x34x60        
2   OUTPUT kFLOAT output_cov/Sigmoid 4x34x60         

0:00:08.003457148  8145 0xaaaaf6ab9940 INFO                 nvinfer gstnvinfer.cpp:682:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2195> [UID = 1]: Use deserialized engine model: /opt/nvidia/deepstream/deepstream-6.4/samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b2_gpu0_int8.engine
[New Thread 0xffff77fff120 (LWP 8156)]
[New Thread 0xffff777ef120 (LWP 8157)]
[New Thread 0xffff76fdf120 (LWP 8158)]
0:00:08.074142194  8145 0xaaaaf6ab9940 INFO                 nvinfer gstnvinfer_impl.cpp:328:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:dstest_imagedata_config.txt sucessfully
Decodebin child added: source 

Decodebin child added: decodebin0 

[New Thread 0xffff767cf120 (LWP 8159)]
[New Thread 0xffff75fbf120 (LWP 8160)]
Decodebin child added: source 

Decodebin child added: decodebin1 

[New Thread 0xffff757af120 (LWP 8161)]

**PERF:  {'stream0': 0.0, 'stream1': 0.0} 

Decodebin child added: qtdemux0 
Decodebin child added: qtdemux1 


[New Thread 0xffff74f9f120 (LWP 8162)]
[New Thread 0xffff5ffff120 (LWP 8163)]
Decodebin child added: multiqueue0 

Decodebin child added: multiqueue1 

[New Thread 0xffff5f7ef120 (LWP 8164)]
[New Thread 0xffff5efdf120 (LWP 8165)]
Decodebin child added: h264parse0 

Decodebin child added: h264parse1 

Decodebin child added: capsfilter1 

Decodebin child added: capsfilter0 

[New Thread 0xffff5e7cf120 (LWP 8166)]
[New Thread 0xffff5dfbf120 (LWP 8167)]
Decodebin child added: aacparse1 

Decodebin child added: aacparse0 

Decodebin child added: avdec_aac0 

Decodebin child added: avdec_aac1 

[Detaching after vfork from child process 8168]
[Detaching after vfork from child process 8171]
Decodebin child added: nvv4l2decoder0 
Decodebin child added: nvv4l2decoder1 


Opening in BLOCKING MODE 
NvMMLiteOpen : Block : BlockType = 261 
Opening in BLOCKING MODE 
[New Thread 0xffff43476120 (LWP 8174)]
NvMMLiteOpen : Block : BlockType = 261 
[New Thread 0xffff42c66120 (LWP 8175)]
[New Thread 0xffff42456120 (LWP 8176)]
NvMMLiteBlockCreate : Block : BlockType = 261 
[New Thread 0xffff4163d120 (LWP 8177)]
[New Thread 0xffff40e2d120 (LWP 8178)]
[New Thread 0xffff4061d120 (LWP 8179)]
[New Thread 0xffff3fe0d120 (LWP 8180)]
NvMMLiteBlockCreate : Block : BlockType = 261 
[New Thread 0xffff3f5fd120 (LWP 8181)]
In cb_newpad

In cb_newpad

In cb_newpad

In cb_newpad

[New Thread 0xffff3eb6f120 (LWP 8182)]
[New Thread 0xffff3e35f120 (LWP 8183)]
[New Thread 0xffff3db4f120 (LWP 8184)]
[New Thread 0xffff3cc2f120 (LWP 8187)]
[New Thread 0xffff1f4fc120 (LWP 8189)]
[New Thread 0xffff1ecdc120 (LWP 8191)]
[New Thread 0xffff1f0ec120 (LWP 8190)]
[New Thread 0xffff1e8cc120 (LWP 8192)]
[New Thread 0xffff1e4bc120 (LWP 8193)]
Frame Number= 0 Number of Objects= 13 Vehicle_count= 8 Person_count= 5
Frame Number= 0 Number of Objects= 14 Vehicle_count= 9 Person_count= 5

Thread 10 "python" received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0xffff77fff120 (LWP 8156)]
0x0000ffffd8e9779c in NvBufSurfTransformAsync () from /usr/lib/aarch64-linux-gnu/nvidia/libnvbufsurftransform.so.1.0.0
(gdb) bt
#0  0x0000ffffd8e9779c in NvBufSurfTransformAsync () at /usr/lib/aarch64-linux-gnu/nvidia/libnvbufsurftransform.so.1.0.0
#1  0x0000ffff98bf0aac [PAC] in  () at /usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_multistreamtiler.so
#2  0x0000ffff98bf22e4 [PAC] in  () at /usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_multistreamtiler.so
#3  0x0000ffff98bee40c [PAC] in  () at /usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_multistreamtiler.so
#4  0x0000ffffe3f1b8b4 [PAC] in  () at /lib/aarch64-linux-gnu/libgstbase-1.0.so.0
#5  0x0000ffffe3f1ac5c in  () at /lib/aarch64-linux-gnu/libgstbase-1.0.so.0
#6  0x0000fffff62e4978 in  () at /lib/aarch64-linux-gnu/libgstreamer-1.0.so.0
#7  0x0000fffff62e7bb8 in  () at /lib/aarch64-linux-gnu/libgstreamer-1.0.so.0
#8  0x0000fffff62e7fe8 in gst_pad_push () at /lib/aarch64-linux-gnu/libgstreamer-1.0.so.0
#9  0x0000ffffe3f1ad6c in  () at /lib/aarch64-linux-gnu/libgstbase-1.0.so.0
#10 0x0000fffff62e4978 in  () at /lib/aarch64-linux-gnu/libgstreamer-1.0.so.0
#11 0x0000fffff62e7bb8 in  () at /lib/aarch64-linux-gnu/libgstreamer-1.0.so.0
#12 0x0000fffff62e7fe8 in gst_pad_push () at /lib/aarch64-linux-gnu/libgstreamer-1.0.so.0
#13 0x0000ffffe3f1ad6c in  () at /lib/aarch64-linux-gnu/libgstbase-1.0.so.0
#14 0x0000fffff62e4978 in  () at /lib/aarch64-linux-gnu/libgstreamer-1.0.so.0
#15 0x0000fffff62e7bb8 in  () at /lib/aarch64-linux-gnu/libgstreamer-1.0.so.0
#16 0x0000fffff62e7fe8 in gst_pad_push () at /lib/aarch64-linux-gnu/libgstreamer-1.0.so.0
#17 0x0000ffffd3c87714 in  () at /usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_infer.so
#18 0x0000fffff72a8064 in g_thread_proxy (data=0xaaaab1fc6300) at ../glib/gthread.c:831
#19 0x0000fffff7d5d5c8 in start_thread (arg=0x0) at ./nptl/pthread_create.c:442
#20 0x0000fffff7dc5edc in thread_start () at ../sysdeps/unix/sysv/linux/aarch64/clone.S:79
(gdb) 


Just from your log below:

WARNING: [TRT]: Using an engine plan file across different models of devices is not recommended and is likely to affect performance or even cause errors.

Are you using an engine file that was generated on a different device? Could you remove that engine file and regenerate it on the device you are using?
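
For reference, here is a minimal sketch of that cleanup, assuming the engine path shown in your log above (adjust ENGINE_PATH to whatever model-engine-file your dstest_imagedata_config.txt points at):

import os

# Remove the stale TensorRT engine so nvinfer rebuilds it for this device on
# the next startup. ENGINE_PATH is the engine reported in the log above;
# adjust it to match the model-engine-file entry in your nvinfer config.
ENGINE_PATH = ("/opt/nvidia/deepstream/deepstream-6.4/samples/models/"
               "Primary_Detector/resnet18_trafficcamnet.etlt_b2_gpu0_int8.engine")

if os.path.exists(ENGINE_PATH):
    os.remove(ENGINE_PATH)
    print("Removed stale engine:", ENGINE_PATH)
else:
    print("No engine file at:", ENGINE_PATH)

The first run after removal will take noticeably longer while TensorRT rebuilds the engine for the current GPU.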

There has been no update from you for a while, so we are assuming this is no longer an issue and are closing this topic. If you need further support, please open a new one. Thanks.

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.