Deepstream Memory Leak

• Jetson Xavier NX
• deepstream 5.1
• JetPack Version 4.5.1

While using deepstream connected to a gst camera feed I get the following error after approximately 20 minutes:

PosixMemMap:71 [12] mmap failed
nvbufsurface: NvBufSurfaceMap function failed
nvbufsurface: mapping of buffer (0) failed
nvbufsurface: error in mapping
get_nvds_buf_Surface: Failed to map buffer to CPU
Segmentation fault (core dumped)

I stripped down my program to minimize any outside issues as the following which still produces the error:

def startVideoStream(self):
    """Build and run the DeepStream capture/inference pipeline.

    Topology:
        v4l2src -> capsfilter(MJPEG 640x480) -> jpegdec -> nvvideoconvert ->
        capsfilter(NVMM/RGBA) -> nvstreammux -> nvinfer -> nvtracker ->
        nvmultistreamtiler -> fakesink

    Blocks in ``GObject.MainLoop.run()`` until the stream stops or errors,
    then returns the pipeline to the NULL state.
    """
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from camera
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    if not caps_v4l2src:
        sys.stderr.write(" Unable to create v4l2src capsfilter \n")

    print("Creating Video Converter \n")

    # The camera emits MJPEG, so decode it to raw video first; jpegdec runs
    # on the CPU (nvjpegdec would be the hardware-accelerated alternative).
    print("Creating jpegdec \n")
    decoder = Gst.ElementFactory.make("jpegdec", "jpeg-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create jpegdec Decoder \n")

    # Caps filter forcing the nvvideoconvert output into NVMM RGBA memory,
    # which is what nvstreammux expects on its sink pads.
    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_vidconvsrc:
        sys.stderr.write(" Unable to create capsfilter \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on decoder's output;
    # behaviour of inferencing is set through config file.
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Tiler used in place of nvdsosd; lays batched frames out in a grid.
    nvosd = Gst.ElementFactory.make("nvmultistreamtiler", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    print("Creating EGLSink \n")
    # fakesink: frames are consumed by the pad probe, nothing is rendered.
    sink = Gst.ElementFactory.make("fakesink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    print("Playing cam /dev/video0")
    source.set_property('device', "/dev/video0")
    source.set_property('io-mode', 2)
    caps_v4l2src.set_property('caps', Gst.Caps.from_string("image/jpeg,width=640,height=480,framerate=30/1"))
    caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM),format=RGBA"))
    streammux.set_property('width', self.resolution.width)
    streammux.set_property('height', self.resolution.height)
    streammux.set_property('live-source', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    nvosd.set_property('rows', 1)
    nvosd.set_property('columns', 1)
    nvosd.set_property('width', 640)
    nvosd.set_property('height', 480)
    # Set sync = false to avoid late frame drops at the display-sink
    sink.set_property('sync', False)

    # Set properties of pgie
    pgie.set_property('config-file-path', "/home/drone1/Documents/GroundControl2/ConfigFiles/dstest2_pgie_config.txt")

    # Set properties of tracker from its config file.
    # (Fixed: ConfigParser() takes no path argument — the file must be
    # loaded with config.read().)
    config = configparser.ConfigParser()
    config.read('/home/drone1/Documents/GroundControl2/ConfigFiles/dstest2_tracker_config.txt')

    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
        if key == 'enable-past-frame':
            tracker_enable_past_frame = config.getint('tracker', key)
            tracker.set_property('enable_past_frame', tracker_enable_past_frame)

    print("Adding elements to Pipeline \n")
    # Every element must be added to the pipeline before it can be linked.
    for element in (source, caps_v4l2src, decoder, nvvidconv,
                    caps_vidconvsrc, streammux, pgie, tracker, nvosd, sink):
        pipeline.add(element)

    print("Linking elements in the Pipeline \n")
    self.link_element(source, caps_v4l2src)
    self.link_element(caps_v4l2src, decoder)
    self.link_element(decoder, nvvidconv)
    self.link_element(nvvidconv, caps_vidconvsrc)

    # nvstreammux sink pads are request pads; one per input stream.
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = caps_vidconvsrc.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    # NOTE(review): link_element is passed Pad objects here but Element
    # objects everywhere else — confirm the helper handles both, otherwise
    # use srcpad.link(sinkpad) directly.
    self.link_element(srcpad, sinkpad)
    self.link_element(streammux, pgie)
    self.link_element(pgie, tracker)
    self.link_element(tracker, nvosd)
    self.link_element(nvosd, sink)

    # Create an event loop and feed gstreamer bus messages to it.
    loop = GObject.MainLoop()

    bus = pipeline.get_bus()
    # add_signal_watch() is required for the "message" signal to be emitted;
    # without it bus_call never runs and EOS/errors go unnoticed.
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Add probe on the sink pad of the tiler element; by that time the
    # buffer has all inference/tracker metadata attached.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, self.osd_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    # Start playback and listen to events until the loop is quit.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except Exception:
        pass

    # cleanup
    pipeline.set_state(Gst.State.NULL)

def osd_sink_pad_buffer_probe(self, pad, info, u_data):
    """Pad probe that walks DeepStream batch metadata on each buffer.

    Counts detected objects per class and maps the frame image into CPU
    memory via ``pyds.get_nvds_buf_surface``.

    Args:
        pad: the GstPad the probe is attached to (unused).
        info: GstPadProbeInfo carrying the GstBuffer.
        u_data: opaque user data supplied at add_probe time (unused).

    Returns:
        Gst.PadProbeReturn.OK so the buffer continues downstream.
    """
    meta_data_list = []
    # Per-class object counter; dict.get avoids needing the class-id
    # constants pre-initialized.
    obj_counter = {}

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return Gst.PadProbeReturn.OK

    # Retrieve batch metadata from the gst_buffer.
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Cast to pyds.NvDsFrameMeta; the cast keeps ownership of the
            # underlying memory in the C code, so the Python garbage
            # collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        num_rects = frame_meta.num_obj_meta
        obj_meta = None
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            if obj_meta is not None:
                obj_counter[obj_meta.class_id] = obj_counter.get(obj_meta.class_id, 0) + 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Getting image data using nvbufsurface: the input is the buffer
        # address and the batch_id of this frame.
        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
        # NOTE(review): get_nvds_buf_surface() mmap()s the NvBufSurface into
        # CPU memory on every call. Without a matching unmap, the mappings
        # accumulate until mmap fails ("PosixMemMap ... mmap failed",
        # as reported in this thread). On DeepStream >= 6.0.1 call
        #   pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
        # once done with n_frame; DS 5.1 exposes no unmap API, so avoid
        # mapping every frame there — TODO confirm against the installed
        # pyds version.

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK

VideoProcessDeep (11.3 KB)

I was wondering if there is a way to clear the buffer to prevent this issue from happening or perhaps a workaround for the issue?


ModuleNotFoundError: No module named ‘SystemConstants’

It seems something is missing from what you shared — could you attach the missing module?

My mistake, I’ve removed the dependency. See the new attachment below.

VideoProcessDeep (11.2 KB)
dstest2_pgie_config.txt (3.3 KB)
dstest2_tracker_config.txt (1.8 KB)

Sorry for the long delay.
Is this still an issue?

It is when I have an MJPEG camera as the source and my gst stream is configured for that input format. If I use a camera compatible with the deepstream USB example the memory issue does not present itself. Unfortunately the input device I need to use only outputs the MJPEG format.

I still cannot run your app, but I managed to run the pipeline from your Python script and it works. By the way, I commented out this line:
`wx.core import DataFormat` — is there anything wrong with that?

gst-launch-1.0 v4l2src device="/dev/video0" io-mode=2 ! “image/jpeg, width=640,height=480,framerate=25/1” ! jpegdec ! nvvideoconvert ! “video/x-raw(memory:NVMM),width=640,height=480,format=RGBA” ! m.sink_0 nvstreammux name=m width=640 height=480 batch-size=1 live-source=1 batched-push-timeout=4000000 ! nvinfer config-file-path=dstest2_pgie_config.txt ! nvdsosd ! nvegltransform ! nveglglessink

nvidia@nvidia-desktop:/opt/nvidia/deepstream/deepstream-6.0/sources/deepstream_python_apps/apps/tmp$ python3
Creating Pipeline

Creating Source

Creating Video Converter

Creating jpegdec

Creating EGLSink

Playing cam /dev/video0
Adding elements to Pipeline

Linking elements in the Pipeline

src could not be linked to sink_0
Starting pipeline

gstnvtracker: Loading low-level lib at /opt/nvidia/deepstream/deepstream-6.0/lib/
gstnvtracker: Batch processing is ON
gstnvtracker: Past frame output is ON
[NvMultiObjectTracker] Initialized
0:00:00.346014866 31284 0x2f1b4ef0 WARN nvinfer gstnvinfer.cpp:635:gst_nvinfer_logger: NvDsInferContext[UID 1]: Warning from NvDsInferContextImpl::initialize() <nvdsinfer_context_impl.cpp:1161> [UID = 1]: Warning, OpenCV has been deprecated. Using NMS for clustering instead of cv::groupRectangles with topK = 20 and NMS Threshold = 0.5
^C^C^C^C0:00:04.932692129 31284 0x2f1b4ef0 INFO nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger: NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1900> [UID = 1]: deserialized trt engine from :/opt/nvidia/deepstream/deepstream-6.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine
INFO: [Implicit Engine Info]: layers num: 3
0 INPUT kFLOAT input_1 3x368x640
1 OUTPUT kFLOAT conv2d_bbox 16x23x40
2 OUTPUT kFLOAT conv2d_cov/Sigmoid 4x23x40

0:00:04.932871620 31284 0x2f1b4ef0 INFO nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger: NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2004> [UID = 1]: Use deserialized engine model: /opt/nvidia/deepstream/deepstream-6.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine
0:00:05.107933616 31284 0x2f1b4ef0 INFO nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus: [UID 1]: Load new model:dstest2_pgie_config.txt sucessfully
Traceback (most recent call last):
File “”, line 294, in
File “”, line 221, in startVideoStream

I’m not sure why the script won’t operate on your machine but I did identify that if I remove the following line from the script:

n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)

I no longer get the error. Is there a way to release the memory associated with the command after operations are performed on the image?

I tried:

del n_frame

However the issue persisted so I’m assuming the object continues to exist outside my usage.

Sorry for the late reply.
I cannot run your Python app, so I modified the Python test1-usb sample app for a USB MJPG-format camera to mimic your case; it is attached for your reference. I cannot reproduce the issue. Please let me know if you have any further problems. (13.8 KB)

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.