Core dump when saving frames

Please provide complete information as applicable to your setup.

• Hardware Platform: RTX 3050
• DeepStream Version: 7.0
• TensorRT Version: 8.6
• NVIDIA GPU Driver Version: 550.120
• Issue Type: questions
• How to reproduce the issue?

Hello NVIDIA team,
I am trying to follow the deepstream-imagedata-multistream example to save frames. Below is my pipeline:

streammux -> pgie -> tracker -> nvvidconv1 -> filter1 -> tiler -> nvvidconv -> osd -> sink

When I run the pipeline, it shows:

root@seer102:/app/repo/deepstream_python# python3 DS_fake.py -s "/dev/video0" -c config/config_infer_primary_yoloV8_pose.txt
2025-01-14 10:05:31,617 - DeepStreamPipeline - INFO - DeepStream Pipeline Starting...
2025-01-14 10:05:31,639 - DeepStreamPipeline - INFO - Configuration:
2025-01-14 10:05:31,650 - DeepStreamPipeline - INFO - Source: /dev/video0
2025-01-14 10:05:31,660 - DeepStreamPipeline - INFO - Config Infer: config/config_infer_primary_yoloV8_pose.txt
2025-01-14 10:05:31,670 - DeepStreamPipeline - INFO - Streammux Width: 1920
2025-01-14 10:05:31,680 - DeepStreamPipeline - INFO - Streammux Height: 1080
2025-01-14 10:05:31,691 - DeepStreamPipeline - INFO - GPU ID: 0
2025-01-14 10:05:31,701 - DeepStreamPipeline - INFO - FPS Measurement Interval: 5
2025-01-14 10:05:31,762 - DeepStreamPipeline - INFO - Platform: x86
/app/repo/deepstream_python/DS_fake.py:889: DeprecationWarning: Gst.Element.get_request_pad is deprecated
  sinkpad = streammux.get_request_pad(f'sink_{i}')
Creating nvosd 
 
2025-01-14 10:05:32,235 - DeepStreamPipeline - INFO - 
=== Configuration Info ===
2025-01-14 10:05:32,246 - DeepStreamPipeline - INFO - SOURCE: /dev/video0
2025-01-14 10:05:32,256 - DeepStreamPipeline - INFO - CONFIG_INFER: config/config_infer_primary_yoloV8_pose.txt
2025-01-14 10:05:32,266 - DeepStreamPipeline - INFO - STREAMMUX_WIDTH: 1920
2025-01-14 10:05:32,276 - DeepStreamPipeline - INFO - STREAMMUX_HEIGHT: 1080
2025-01-14 10:05:32,286 - DeepStreamPipeline - INFO - GPU_ID: 0
2025-01-14 10:05:32,297 - DeepStreamPipeline - INFO - PERF_MEASUREMENT_INTERVAL_SEC: 5
2025-01-14 10:05:32,307 - DeepStreamPipeline - INFO - JETSON: FALSE
2025-01-14 10:05:32,317 - DeepStreamPipeline - INFO - ========================

gstnvtracker: Loading low-level lib at /opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
[NvMultiObjectTracker] Initialized
0:00:05.188982753 107901 0x56e6f2ce2e90 INFO                 nvinfer gstnvinfer.cpp:682:gst_nvinfer_logger:<pgie> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:2095> [UID = 1]: deserialized trt engine from :/app/repo/deepstream_python/weights/yolov8m-pose.onnx_b1_gpu0_fp32.engine
WARNING: [TRT]: The getMaxBatchSize() function should not be used with an engine built from a network created with NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag. This function will always return 1.
INFO: ../nvdsinfer/nvdsinfer_model_builder.cpp:612 [Implicit Engine Info]: layers num: 2
0   INPUT  kFLOAT input           3x640x640       
1   OUTPUT kFLOAT output          8400x56         

0:00:05.292210304 107901 0x56e6f2ce2e90 INFO                 nvinfer gstnvinfer.cpp:682:gst_nvinfer_logger:<pgie> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2198> [UID = 1]: Use deserialized engine model: /app/repo/deepstream_python/weights/yolov8m-pose.onnx_b1_gpu0_fp32.engine
0:00:05.297797011 107901 0x56e6f2ce2e90 INFO                 nvinfer gstnvinfer_impl.cpp:343:notifyLoadModelStatus:<pgie> [UID 1]: Load new model:config/config_infer_primary_yoloV8_pose.txt sucessfully
2025-01-14 10:05:36,930 - DeepStreamPipeline - INFO - 

Segmentation fault (core dumped)

Below is my code:

import os
import sys

import cv2
import numpy as np
import pyds

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

def tiler_sink_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        logger.error("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK

    # Retrieve batch metadata from the gst_buffer
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    if not batch_meta:
        logger.error("Unable to get batch_meta from GstBuffer")
        return Gst.PadProbeReturn.OK

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num

        # map the frame buffer surface as a NumPy-accessible array
        try:
            n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
        except Exception as e:
            logger.error(f"Failed to get buffer surface: {e}")
            return Gst.PadProbeReturn.OK

        # copy the frame data into a NumPy array and convert RGBA -> BGR
        frame_copy = np.array(n_frame, copy=True, order='C')
        frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGR)

        # build the output path
        img_path = os.path.join("saved_frames", f"frame_{frame_number}.jpg")

        # save the image
        try:
            cv2.imwrite(img_path, frame_copy)
            logger.info(f"Saved frame {frame_number} to {img_path}")
        except Exception as e:
            logger.error(f"Failed to save frame {frame_number}: {e}")

        if platform_info.is_integrated_gpu():
            try:
                pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            except Exception as e:
                logger.error(f"Failed to unmap buffer surface: {e}")

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def main():
    global logger
    logger = setup_logger()
    
    logger.info("DeepStream Pipeline Starting...")
    Gst.init(None)

    # get the list of input sources
    sources = SOURCE.split(";")
    num_sources = len(sources)


    platform_info = PlatformInfo()
    
    # log the configuration
    logger.info(f"Configuration:")
    logger.info(f"Source: {SOURCE}")
    logger.info(f"Config Infer: {CONFIG_INFER}")
    logger.info(f"Streammux Width: {STREAMMUX_WIDTH}")
    logger.info(f"Streammux Height: {STREAMMUX_HEIGHT}")
    logger.info(f"GPU ID: {GPU_ID}")
    logger.info(f"FPS Measurement Interval: {PERF_MEASUREMENT_INTERVAL_SEC}")
    logger.info(f"Platform: {'Jetson' if is_aarch64() else 'x86'}")

    loop = GLib.MainLoop()  
    pipeline = Gst.Pipeline() 

    if not pipeline:
        logger.error('Failed to create pipeline')
        sys.exit(1)

    # create and config streammux
    streammux = Gst.ElementFactory.make('nvstreammux', 'nvstreammux')
    if not streammux:
        logger.error('Failed to create nvstreammux\n')
        sys.exit(1)

    # config streammux
    streammux.set_property('batch-size', num_sources)
    streammux.set_property('width', STREAMMUX_WIDTH)
    streammux.set_property('height', STREAMMUX_HEIGHT)
    streammux.set_property('batched-push-timeout', 4000000)
    streammux.set_property('live-source', 0)
    streammux.set_property('attach-sys-ts', 1)
    

    pipeline.add(streammux)

    # create a source bin for each source
    for i, source_uri in enumerate(sources):
        if source_uri.startswith('/dev/video'):
            source_bin = create_usb_source_bin(i, source_uri, streammux)
        else:
            source_bin = create_uridecode_bin(i, source_uri, streammux)

        if not source_bin:
            logger.error(f'Failed to create source_bin {i}')
            sys.exit(1)

        pipeline.add(source_bin)

        # USB cameras need to be linked to streammux manually
        if source_uri.startswith('/dev/video'):
            srcpad = source_bin.get_static_pad('src')
            sinkpad = streammux.get_request_pad(f'sink_{i}')
            # sinkpad = streammux.request_pad(f'sink_{i}')
            if srcpad.link(sinkpad) != Gst.PadLinkReturn.OK:
                logger.error(f'Failed to link source {i} to streammux')
                sys.exit(1)

    # create tiler 
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        logger.error("ERROR: Unable to create tiler\n")
        sys.exit(1)

    # config tiler
    tiles = num_sources
    if tiles <= 1:
        rows = cols = 1
    elif tiles <= 2:
        rows = 1
        cols = 2
    elif tiles <= 4:
        rows = cols = 2
    elif tiles <= 6:
        rows = 2
        cols = 3
    elif tiles <= 9:
        rows = cols = 3
    else:
        rows = cols = 4

    tiler.set_property('rows', rows)
    tiler.set_property('columns', cols)
    tiler.set_property('width', STREAMMUX_WIDTH)
    tiler.set_property('height', STREAMMUX_HEIGHT)

    # create the other GStreamer elements
    pgie = Gst.ElementFactory.make('nvinfer', 'pgie')
    if not pgie:
        logger.error('Failed to create nvinfer\n')
        sys.exit(1)

    tracker = Gst.ElementFactory.make('nvtracker', 'nvtracker')
    if not tracker:
        logger.error('Failed to create nvtracker\n')
        sys.exit(1)

    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
        sys.exit(1)

    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")

    filter1.set_property("caps", caps1)

    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")

    osd = Gst.ElementFactory.make('nvdsosd', 'nvdsosd')
    if not osd:
        logger.error('Failed to create nvdsosd\n')
        sys.exit(1)

    sink = None
    # if is_aarch64():
    #     sink = Gst.ElementFactory.make('nv3dsink', 'nv3dsink')
    #     if not sink:
    #         logger.error('Failed to create nv3dsink\n')
    #         sys.exit(1)
    # else:
    #     sink = Gst.ElementFactory.make('nveglglessink', 'nveglglessink')
    #     if not sink:
    #         logger.error('Failed to create nveglglessink\n')
    #         sys.exit(1)

    # change to fakesink
    sink = Gst.ElementFactory.make('fakesink', 'sink')
    if not sink:
        logger.error('Failed to create fakesink\n')
        sys.exit(1)

    # log configuration info
    logger.info('\n=== Configuration Info ===')
    logger.info(f'SOURCE: {SOURCE}')
    logger.info(f'CONFIG_INFER: {CONFIG_INFER}')
    logger.info(f'STREAMMUX_WIDTH: {STREAMMUX_WIDTH}')
    logger.info(f'STREAMMUX_HEIGHT: {STREAMMUX_HEIGHT}')
    logger.info(f'GPU_ID: {GPU_ID}')
    logger.info(f'PERF_MEASUREMENT_INTERVAL_SEC: {PERF_MEASUREMENT_INTERVAL_SEC}')
    logger.info(f'JETSON: {"TRUE" if is_aarch64() else "FALSE"}')
    logger.info('========================\n')

    # set properties of elements
    pgie.set_property('config-file-path', CONFIG_INFER)
    pgie.set_property('qos', 0)

    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 384)
    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so')
    tracker.set_property('ll-config-file',
                         '/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml')
    tracker.set_property('display-tracking-id', 1)
    tracker.set_property('qos', 0)

    osd.set_property('process-mode', int(pyds.MODE_GPU))
    osd.set_property('qos', 0)

    sink.set_property('async', 0)
    sink.set_property('sync', 1)
    sink.set_property('qos', 0)

    if not platform_info.is_integrated_gpu():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        if platform_info.is_wsl():
            #opencv functions like cv2.line and cv2.putText is not able to access NVBUF_MEM_CUDA_UNIFIED memory
            #in WSL systems due to some reason and gives SEGFAULT. Use NVBUF_MEM_CUDA_PINNED memory for such
            #usecases in WSL. Here, nvvidconv1's buffer is used in tiler sink pad probe and cv2 operations are
            #done on that.
            print("using nvbuf_mem_cuda_pinned memory for nvvidconv1\n")
            vc_mem_type = int(pyds.NVBUF_MEM_CUDA_PINNED)
            nvvidconv1.set_property("nvbuf-memory-type", vc_mem_type)
        else:
            nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    # mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
    # streammux.set_property("nvbuf-memory-type", mem_type)
    # nvvidconv.set_property("nvbuf-memory-type", mem_type)
    # nvvidconv1.set_property("nvbuf-memory-type", mem_type)
    # tiler.set_property("nvbuf-memory-type", mem_type)


    if tracker.find_property('enable_batch_process') is not None:
        tracker.set_property('enable_batch_process', 1)

    if tracker.find_property('enable_past_frame') is not None:
        tracker.set_property('enable_past_frame', 1)

    if not is_aarch64():
        streammux.set_property('nvbuf-memory-type', 0)
        streammux.set_property('gpu_id', GPU_ID)
        pgie.set_property('gpu_id', GPU_ID)
        tracker.set_property('gpu_id', GPU_ID)
        nvvidconv1.set_property('nvbuf-memory-type', 0)
        nvvidconv1.set_property('gpu_id', GPU_ID)
        osd.set_property('gpu_id', GPU_ID)

    # add elements to pipeline 
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(osd)
    pipeline.add(sink)

    # link elements
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(osd)
    osd.link(sink)

    # set up the bus
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', bus_call, loop)

    tracker_src_pad = tracker.get_static_pad('src')
    if not tracker_src_pad:
        logger.error('Failed to get tracker src pad\n')
        sys.exit(1)
    else:
        tracker_src_pad.add_probe(Gst.PadProbeType.BUFFER, tracker_src_pad_buffer_probe, 0)

    
    # add tiler_sink_pad_buffer_probe
    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
    
    # run the pipeline
    pipeline.set_state(Gst.State.PLAYING)

    logger.info('\n')

    try:
        loop.run()
    except KeyboardInterrupt:
        pass
    finally:
        
        data_sender.close()
        pipeline.set_state(Gst.State.NULL)

Could you help take a look at it?

Many many thanks!

My bad, I set nvbuf-memory-type twice: after configuring NVBUF_MEM_CUDA_UNIFIED for the dGPU path, the later is_aarch64() block overwrote it with 0 on streammux and nvvidconv1, which is why get_nvds_buf_surface() crashed in the probe.
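
For anyone hitting the same thing, here is a minimal sketch of the corrected memory settings (variable names match the code above); on a dGPU the buffers reaching the tiler sink pad probe must stay in CUDA unified memory, so the x86 block should only set gpu_id and must not reset nvbuf-memory-type back to 0:

    # dGPU: keep CUDA unified memory so pyds.get_nvds_buf_surface() can map
    # the frame for NumPy/OpenCV access in the tiler sink pad probe
    if not platform_info.is_integrated_gpu():
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    # x86: only set gpu_id; do NOT overwrite nvbuf-memory-type with 0 here,
    # otherwise get_nvds_buf_surface() segfaults on the non-unified buffer
    if not is_aarch64():
        streammux.set_property('gpu_id', GPU_ID)
        pgie.set_property('gpu_id', GPU_ID)
        tracker.set_property('gpu_id', GPU_ID)
        nvvidconv1.set_property('gpu_id', GPU_ID)
        osd.set_property('gpu_id', GPU_ID)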

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.