Cannot allocate memory in static TLS block

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU) nano
• DeepStream Version 7.0
• JetPack Version (valid for Jetson only) 6.0
• TensorRT Version8.6.2.3-1+cuda12.2
• NVIDIA GPU Driver Version (valid for GPU only) NVIDIA-SMI 540.2.0
• Issue Type( questions, new requirements, bugs)

How to save face ROI ?

Original Code : DeepStream-Yolo-Face/deepstream.py at master · marcoslucianops/DeepStream-Yolo-Face · GitHub

when I modify def tracker_src_pad_buffer_probe : it return :

gstnvtracker: Loading low-level lib at /opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
gstnvtracker: Failed to open low-level lib at /opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
dlopen error: /lib/aarch64-linux-gnu/libgomp.so.1: cannot allocate memory in static TLS block
gstnvtracker: Failed to initilaize low level lib.

import cv2
import numpy as np

def tracker_src_pad_buffer_probe(pad, info, user_data):
buf = info.get_buffer()
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buf))

l_frame = batch_meta.frame_meta_list
while l_frame:
    try:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
    except StopIteration:
        break

    current_index = frame_meta.source_id

    l_obj = frame_meta.obj_meta_list
    while l_obj:
        try:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
        except StopIteration:
            break

        # Process each object here
        parse_face_from_meta(frame_meta, obj_meta)
        set_custom_bbox(obj_meta)

        # Save ROI (Region of Interest) as an image
        rect_params = obj_meta.rect_params
        top = max(int(rect_params.top), 0)
        left = max(int(rect_params.left), 0)
        width = max(int(rect_params.width), 0)
        height = max(int(rect_params.height), 0)

        # Get the frame data
        surface = pyds.get_nvds_buf_surface(hash(buf), frame_meta.batch_id)
        frame_image = np.array(surface, copy=True, order='C')

        # Crop the face ROI from the frame
        face_roi = frame_image[top:top+height, left:left+width]

        # Define the directory and save path
        save_dir = '/home/paymentinapp/DeepStream-Yolo-Face-master/roi'
        os.makedirs(save_dir, exist_ok=True)
        save_path = os.path.join(save_dir, f"face_{frame_meta.frame_num}_{obj_meta.object_id}.jpg")

        # Save the face ROI
        cv2.imwrite(save_path, face_roi)

        try:
            l_obj = l_obj.next
        except StopIteration:
            break

    fps_streams['stream{0}'.format(current_index)].get_fps()

    try:
        l_frame = l_frame.next
    except StopIteration:
        break

return Gst.PadProbeReturn.OK

This is not a DeepStreamd issue, it may have something to do with your board environment. You can refer to cv2-cannot-allocate-memory-in-static-tls-block and try that first.

Getting error : File “deepstream.py”, line 178, in tracker_src_pad_buffer_probe
n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
RuntimeError: get_nvds_buf_Surface: Currently we only support RGBA color Format

Even tried to convert into RGBA color Format but its still in YUV420 format after conversion.

Code is taken from : deepstream_python_apps/apps/deepstream-imagedata-multistream/deepstream_imagedata-multistream.py at master · NVIDIA-AI-IOT/deepstream_python_apps · GitHub

def tracker_src_pad_buffer_probe(pad, info, user_data):
buf = info.get_buffer()
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buf))

gst_buffer = info.get_buffer()
if not gst_buffer:
    print("Unable to get GstBuffer ")
    return
l_frame = batch_meta.frame_meta_list
while l_frame:
    try:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
    except StopIteration:
        break

    current_index = frame_meta.source_id

    l_obj = frame_meta.obj_meta_list
    while l_obj:
        try:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
        except StopIteration:
            break

        parse_face_from_meta(frame_meta, obj_meta)
        set_custom_bbox(obj_meta)

        # Get bounding box coordinates
        # rect_params = obj_meta.rect_params
        # top = max(int(rect_params.top), 0)
        # left = max(int(rect_params.left), 0)
        # width = max(int(rect_params.width), 0)
        # height = max(int(rect_params.height), 0)

            
        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
        # Annotate the frame with bounding boxes
        #n_frame = draw_bounding_boxes(n_frame, obj_meta, obj_meta.confidence)
        # Convert python array into numpy array format
        frame_copy = np.array(n_frame, copy=True, order='C')
        # Convert the array into cv2 default color format
        frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)
        
        # # If Jetson, unmap the buffer after processing
        # if platform_info.is_integrated_gpu():
        #     pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)

        save_image = True
    
        try:
            l_obj = l_obj.next
        except StopIteration:
            break
    
    img_path = "{}/stream_{}/frame_{}.jpg".format(folder_name, frame_meta.pad_index, frame_meta.frame_num)
    # Create directory if it doesn't exist
    os.makedirs(os.path.dirname(img_path), exist_ok=True)
    # Save the image
    cv2.imwrite(img_path, frame_copy)
    print(f"Saved image to {img_path}")

    fps_streams['stream{0}'.format(current_index)].get_fps()

    try:
        l_frame = l_frame.next
    except StopIteration:
        break

return Gst.PadProbeReturn.OK

It’s useless to add conversion in the probe function. You need to transform that with nvvideoconvert plugin before the pgie plugin. Please refer to Add nvvidconv1 and filter1 to convert the frames to RGBA.

I already saw your that comments in existing posts. Reflected changes in main but it does not work : it return error

Traceback (most recent call last):
File “deepstream.py”, line 165, in tracker_src_pad_buffer_probe
n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
RuntimeError: get_nvds_buf_Surface: Currently we only support RGBA color Format
Saved image to /home/paymentinapp/DeepStream-Yolo-Face-master/roi/stream_0/frame_86.jpg

How to save roi ?

def main():
Gst.init(None)

loop = GLib.MainLoop()

# Create the pipeline
pipeline = Gst.Pipeline()
if not pipeline:
    sys.stderr.write('ERROR: Failed to create pipeline\n')
    sys.exit(1)

# Create and add the streammux element
streammux = Gst.ElementFactory.make('nvstreammux', 'nvstreammux')
if not streammux:
    sys.stderr.write('ERROR: Failed to create nvstreammux\n')
    sys.exit(1)
pipeline.add(streammux)

# Create and add the source_bin
source_bin = create_uridecode_bin(0, SOURCE, streammux)
if not source_bin:
    sys.stderr.write('ERROR: Failed to create source_bin\n')
    sys.exit(1)
pipeline.add(source_bin)

# Create and add the primary inference element (pgie)
pgie = Gst.ElementFactory.make('nvinfer', 'pgie')
if not pgie:
    sys.stderr.write('ERROR: Failed to create nvinfer\n')
    sys.exit(1)

# Create and add the tracker element
tracker = Gst.ElementFactory.make('nvtracker', 'nvtracker')
if not tracker:
    sys.stderr.write('ERROR: Failed to create nvtracker\n')
    sys.exit(1)

# Create and add the nvvideoconvert element
converter = Gst.ElementFactory.make('nvvideoconvert', 'nvvideoconvert')
if not converter:
    sys.stderr.write('ERROR: Failed to create nvvideoconvert\n')
    sys.exit(1)
pipeline.add(converter)

# Create and add the capsfilter element
capsfilter = Gst.ElementFactory.make('capsfilter', 'capsfilter')
if not capsfilter:
    sys.stderr.write('ERROR: Failed to create capsfilter\n')
    sys.exit(1)
capsfilter.set_property('caps', Gst.Caps.from_string('video/x-raw(memory:NVMM), format=RGBA'))
pipeline.add(capsfilter)

# Create and add the on-screen display element (osd)
osd = Gst.ElementFactory.make('nvdsosd', 'nvdsosd')
if not osd:
    sys.stderr.write('ERROR: Failed to create nvdsosd\n')
    sys.exit(1)

# Create and add the sink element
sink = None
if is_aarch64():
    sink = Gst.ElementFactory.make('nv3dsink', 'nv3dsink')
    if not sink:
        sys.stderr.write('ERROR: Failed to create nv3dsink\n')
        sys.exit(1)
else:
    sink = Gst.ElementFactory.make('nveglglessink', 'nveglglessink')
    if not sink:
        sys.stderr.write('ERROR: Failed to create nveglglessink\n')
        sys.exit(1)
sink.set_property('async', 0)
sink.set_property('sync', 0)
sink.set_property('qos', 0)
pipeline.add(sink)

# Set properties for elements
streammux.set_property('batch-size', STREAMMUX_BATCH_SIZE)
streammux.set_property('batched-push-timeout', 25000)
streammux.set_property('width', STREAMMUX_WIDTH)
streammux.set_property('height', STREAMMUX_HEIGHT)
streammux.set_property('enable-padding', 0)
streammux.set_property('live-source', 1)
streammux.set_property('attach-sys-ts', 1)

pgie.set_property('config-file-path', CONFIG_INFER)
pgie.set_property('qos', 0)

tracker.set_property('tracker-width', 640)
tracker.set_property('tracker-height', 384)
tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so')
tracker.set_property('ll-config-file', '/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml')
tracker.set_property('display-tracking-id', 1)
tracker.set_property('qos', 0)

osd.set_property('process-mode', int(pyds.MODE_GPU))
osd.set_property('qos', 0)

if 'file://' in SOURCE:
    streammux.set_property('live-source', 0)

if tracker.find_property('enable_batch_process') is not None:
    tracker.set_property('enable_batch_process', 1)

if tracker.find_property('enable_past_frame') is not None:
    tracker.set_property('enable_past_frame', 1)

if not is_aarch64():
    streammux.set_property('nvbuf-memory-type', 0)
    streammux.set_property('gpu_id', GPU_ID)
    pgie.set_property('gpu_id', GPU_ID)
    tracker.set_property('gpu_id', GPU_ID)
    converter.set_property('nvbuf-memory-type', 0)
    converter.set_property('gpu_id', GPU_ID)
    osd.set_property('gpu_id', GPU_ID)

# Add elements to the pipeline
pipeline.add(pgie)
pipeline.add(tracker)
pipeline.add(converter)
pipeline.add(capsfilter)
pipeline.add(osd)
pipeline.add(sink)

# Link the elements in the pipeline
streammux.link(pgie)
pgie.link(tracker)
tracker.link(converter)
converter.link(capsfilter)
capsfilter.link(osd)
osd.link(sink)

# Set up bus message handling
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', bus_call, loop)

# Add buffer probe to tracker's src pad
tracker_src_pad = tracker.get_static_pad('src')
if not tracker_src_pad:
    sys.stderr.write('ERROR: Failed to get tracker src pad\n')
    sys.exit(1)
else:
    tracker_src_pad.add_probe(Gst.PadProbeType.BUFFER, tracker_src_pad_buffer_probe, 0)

# Start the pipeline
pipeline.set_state(Gst.State.PLAYING)

sys.stdout.write('\n')

try:
    loop.run()
except:
    pass

# Clean up
pipeline.set_state(Gst.State.NULL)

sys.stdout.write('\n')

Do you have any solution for this ? or do you suggest any video ? Github Link ?

This new issue is duplicated with the 300762.

Thank you very much. Sir. Problem Solved . Its extracted frame successfully inside def osd_sink_pad_buffer_probe(pad, info, u_data): Your guidance truly helped solve the problem successfully.

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.