How to save face ROI?

Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) nano
• DeepStream Version 7.0
• JetPack Version (valid for Jetson only) 6.0
• TensorRT Version 8.6.2.3-1+cuda12.2
• NVIDIA GPU Driver Version (valid for GPU only) NVIDIA-SMI 540.2.0
• Issue Type (questions, new requirements, bugs)

How do I save the face ROI in DeepStream-Yolo-Face/deepstream.c at master · marcoslucianops/DeepStream-Yolo-Face · GitHub?

When we import OpenCV to save the ROI, it returns an error.

What does face ROI mean? If you want to save the detected faces, refer to this sample:

/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-image-meta-test

nvds_obj_enc_process is used to save the objects or frames you are interested in.

Thank you, sir, for your reply. I want to save the face ROI. Requesting a solution for the Python code: DeepStream-Yolo-Face/deepstream.py at master · marcoslucianops/DeepStream-Yolo-Face · GitHub

Refer to the sample at /opt/nvidia/deepstream/deepstream/sources/deepstream_python_apps/apps/deepstream-imagedata-multistream

Modify the function tracker_src_pad_buffer_probe following the pattern of tiler_sink_pad_buffer_probe in deepstream_imagedata-multistream.py.
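In that sample, the frame is mapped with pyds.get_nvds_buf_surface, copied into a numpy array, converted from RGBA, and saved with OpenCV. A minimal sketch of that pattern applied to a face bounding box is below; save_object_crop and save_dir are illustrative names, not part of the sample, and the buffer at the probed pad must already be RGBA.

import os
import cv2
import numpy as np
import pyds

def save_object_crop(gst_buffer, frame_meta, obj_meta, save_dir):
    # Map the batched frame as a numpy RGBA array; this only works when the
    # buffer at the probed pad is already in RGBA format.
    n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)

    rect = obj_meta.rect_params
    top, left = max(int(rect.top), 0), max(int(rect.left), 0)
    width, height = int(rect.width), int(rect.height)

    # Copy the crop out of the mapped surface and convert RGBA -> BGR for OpenCV
    crop = np.array(n_frame[top:top + height, left:left + width], copy=True, order='C')
    crop = cv2.cvtColor(crop, cv2.COLOR_RGBA2BGR)

    cv2.imwrite(os.path.join(save_dir, f'face_{frame_meta.frame_num}_{obj_meta.object_id}.jpg'), crop)

Such a helper would be called from inside the object loop of the pad probe, once per detected face.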

Thank you, sir. I have already done that, but I am still getting an error (shown after the code below). Here is my code:

def tracker_src_pad_buffer_probe(pad, info, user_data):
    buf = info.get_buffer()
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buf))

    l_frame = batch_meta.frame_meta_list
    while l_frame:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        current_index = frame_meta.source_id

        l_obj = frame_meta.obj_meta_list
        while l_obj:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            # Process each object here
            parse_face_from_meta(frame_meta, obj_meta)
            set_custom_bbox(obj_meta)

            # Bounding box of the detected face
            rect_params = obj_meta.rect_params
            top = max(int(rect_params.top), 0)
            left = max(int(rect_params.left), 0)
            width = max(int(rect_params.width), 0)
            height = max(int(rect_params.height), 0)

            # Get the frame data (mapped as an RGBA numpy array)
            surface = pyds.get_nvds_buf_surface(hash(buf), frame_meta.batch_id)

            # Crop the face ROI (Region of Interest) and convert RGBA -> BGR for OpenCV
            face_roi = surface[top:top + height, left:left + width].copy()
            face_roi = cv2.cvtColor(face_roi, cv2.COLOR_RGBA2BGR)

            save_path = os.path.join(save_dir, f"face_{frame_meta.frame_num}_{obj_meta.object_id}.jpg")

            # Save the face ROI
            cv2.imwrite(save_path, face_roi)

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        fps_streams['stream{0}'.format(current_index)].get_fps()

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK



def main():
    Gst.init(None)

    loop = GLib.MainLoop()

    # Create the pipeline
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write('ERROR: Failed to create pipeline\n')
        sys.exit(1)

    # Create and add the streammux element
    streammux = Gst.ElementFactory.make('nvstreammux', 'nvstreammux')
    if not streammux:
        sys.stderr.write('ERROR: Failed to create nvstreammux\n')
        sys.exit(1)
    pipeline.add(streammux)

    # Create and add the source_bin
    source_bin = create_uridecode_bin(0, SOURCE, streammux)
    if not source_bin:
        sys.stderr.write('ERROR: Failed to create source_bin\n')
        sys.exit(1)
    pipeline.add(source_bin)

    # Create the primary inference element (pgie)
    pgie = Gst.ElementFactory.make('nvinfer', 'pgie')
    if not pgie:
        sys.stderr.write('ERROR: Failed to create nvinfer\n')
        sys.exit(1)

    # Create the tracker element
    tracker = Gst.ElementFactory.make('nvtracker', 'nvtracker')
    if not tracker:
        sys.stderr.write('ERROR: Failed to create nvtracker\n')
        sys.exit(1)

    # Create and add the nvvideoconvert element
    converter = Gst.ElementFactory.make('nvvideoconvert', 'nvvideoconvert')
    if not converter:
        sys.stderr.write('ERROR: Failed to create nvvideoconvert\n')
        sys.exit(1)
    pipeline.add(converter)

    # Create and add the capsfilter element
    capsfilter = Gst.ElementFactory.make('capsfilter', 'capsfilter')
    if not capsfilter:
        sys.stderr.write('ERROR: Failed to create capsfilter\n')
        sys.exit(1)
    capsfilter.set_property('caps', Gst.Caps.from_string('video/x-raw(memory:NVMM), format=RGBA'))
    pipeline.add(capsfilter)

    # Create the on-screen display element (osd)
    osd = Gst.ElementFactory.make('nvdsosd', 'nvdsosd')
    if not osd:
        sys.stderr.write('ERROR: Failed to create nvdsosd\n')
        sys.exit(1)

    # Create and add the sink element
    sink = None
    if is_aarch64():
        sink = Gst.ElementFactory.make('nv3dsink', 'nv3dsink')
        if not sink:
            sys.stderr.write('ERROR: Failed to create nv3dsink\n')
            sys.exit(1)
    else:
        sink = Gst.ElementFactory.make('nveglglessink', 'nveglglessink')
        if not sink:
            sys.stderr.write('ERROR: Failed to create nveglglessink\n')
            sys.exit(1)
    sink.set_property('async', 0)
    sink.set_property('sync', 0)
    sink.set_property('qos', 0)
    pipeline.add(sink)

    # Set properties for elements
    streammux.set_property('batch-size', STREAMMUX_BATCH_SIZE)
    streammux.set_property('batched-push-timeout', 25000)
    streammux.set_property('width', STREAMMUX_WIDTH)
    streammux.set_property('height', STREAMMUX_HEIGHT)
    streammux.set_property('enable-padding', 0)
    streammux.set_property('live-source', 1)
    streammux.set_property('attach-sys-ts', 1)

    pgie.set_property('config-file-path', CONFIG_INFER)
    pgie.set_property('qos', 0)

    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 384)
    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so')
    tracker.set_property('ll-config-file', '/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml')
    tracker.set_property('display-tracking-id', 1)
    tracker.set_property('qos', 0)

    osd.set_property('process-mode', int(pyds.MODE_GPU))
    osd.set_property('qos', 0)

    if 'file://' in SOURCE:
        streammux.set_property('live-source', 0)

    if tracker.find_property('enable_batch_process') is not None:
        tracker.set_property('enable_batch_process', 1)

    if tracker.find_property('enable_past_frame') is not None:
        tracker.set_property('enable_past_frame', 1)

    if not is_aarch64():
        streammux.set_property('nvbuf-memory-type', 0)
        streammux.set_property('gpu_id', GPU_ID)
        pgie.set_property('gpu_id', GPU_ID)
        tracker.set_property('gpu_id', GPU_ID)
        converter.set_property('nvbuf-memory-type', 0)
        converter.set_property('gpu_id', GPU_ID)
        osd.set_property('gpu_id', GPU_ID)

    # Add the remaining elements to the pipeline
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(osd)

    # Link the elements in the pipeline
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(converter)
    converter.link(capsfilter)
    capsfilter.link(osd)
    osd.link(sink)

    # Set up bus message handling
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', bus_call, loop)

    # Add buffer probe to tracker's src pad
    tracker_src_pad = tracker.get_static_pad('src')
    if not tracker_src_pad:
        sys.stderr.write('ERROR: Failed to get tracker src pad\n')
        sys.exit(1)
    else:
        tracker_src_pad.add_probe(Gst.PadProbeType.BUFFER, tracker_src_pad_buffer_probe, 0)

    # Start the pipeline
    pipeline.set_state(Gst.State.PLAYING)

    sys.stdout.write('\n')

    try:
        loop.run()
    except:
        pass

    # Clean up
    pipeline.set_state(Gst.State.NULL)

    sys.stdout.write('\n')

The error:

File “deepstream.py”, line 178, in tracker_src_pad_buffer_probe
n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
RuntimeError: get_nvds_buf_Surface: Currently we only support RGBA color Format.

This error log tells the reason: pyds.get_nvds_buf_surface only supports RGBA buffers, and the tracker src pad is upstream of the nvvideoconvert and capsfilter that convert to RGBA. Move the probe function to the nvdsosd sink pad.
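For example, the probe registration in main() could be changed to something like the sketch below, keeping the existing callback unchanged and only attaching it to a different pad:

# Attach the buffer probe to the nvdsosd sink pad instead of the tracker src pad.
# At this point the buffers have already passed through nvvideoconvert and the
# RGBA capsfilter, so pyds.get_nvds_buf_surface can map them.
osd_sink_pad = osd.get_static_pad('sink')
if not osd_sink_pad:
    sys.stderr.write('ERROR: Failed to get nvdsosd sink pad\n')
    sys.exit(1)
osd_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tracker_src_pad_buffer_probe, 0)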

Thank you very much, sir. Problem solved. The frame is extracted successfully inside def osd_sink_pad_buffer_probe(pad, info, u_data). I have received your guidance twice on different forum threads, and it truly helped solve the problem.
