BBox filtering in deepstream6

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU)
Jetson Nano Developer Kit
• DeepStream Version
6.0
• JetPack Version (valid for Jetson only)
32.7.4-20230608212426

I have created code as follows to detect some classes out of 7 class, I want to filter some of them and see the only choosen one bboxes and label from many search I achieved my goal but in the terminal I gave strage behavior as follows:

code:

#!/usr/bin/env python3
import sys
import gi
gi.require_version('Gst', '1.0')
import numpy as np
import cv2
import pyds
import datetime
import os

sys.path.insert(0, '/path/')
from gi.repository import GLib, Gst, GObject
from path.utils.common.is_aarch_64 import is_aarch64
from path.utils.common.bus_call import bus_call
sys.path.append('/opt/nvidia/deepstream/deepstream-6.0/lib')
data = None
saved_count = {}
fps_streams = {}
sended = []
new_folder_name = None
MIN_CONFIDENCE = 0.3
MAX_CONFIDENCE = 0.4

folder_name = "/path/"
if not os.path.exists(folder_name):
    os.makedirs(folder_name)
else:
    i = 2
    new_folder_name = f"{folder_name}{i}"
    while os.path.exists(new_folder_name):
        i += 1
        new_folder_name = f"{folder_name}{i}"
    os.makedirs(new_folder_name)

def osd_sink_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            l_obj = frame_meta.obj_meta_list
            save_image = False
            is_first_obj = True
            temp_obj = []
            while l_obj:
                try:
                    obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                    dis_meta = pyds.NvDsDisplayMeta.cast(l_obj.data)
                except StopIteration:
                    break

                l_tracker_object_coordinates = obj_meta.tracker_bbox_info.org_bbox_coords

                if obj_meta.obj_label == "oos" :
                    temp_obj.append(obj_meta.object_id)
                    if is_first_obj:
                        is_first_obj = False
                        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
                        l_tracker_object_coordinates = obj_meta.tracker_bbox_info.org_bbox_coords

                        frame_copy = np.array(n_frame, copy=True, order='C')
                        frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)
                        scale_x = 640 / 640
                        scale_y = 480 / 640
                        bbox_x = int(l_tracker_object_coordinates.left * scale_x)
                        bbox_y = int(l_tracker_object_coordinates.top * scale_y)
                        x1 = bbox_x
                        y1 = bbox_y
                        x2 = int(bbox_x + l_tracker_object_coordinates.width * scale_x)
                        y2 = int(bbox_y + l_tracker_object_coordinates.height * scale_y)
                        image = cv2.rectangle(frame_copy, (x1, y1), (x2, y2), (0, 255, 255), 2)
                    else:
                        scale_x = 640 / 640
                        scale_y = 480 / 640
                        bbox_x = int(l_tracker_object_coordinates.left * scale_x)
                        bbox_y = int(l_tracker_object_coordinates.top * scale_y)
                        x1 = bbox_x
                        y1 = bbox_y
                        x2 = int(bbox_x + l_tracker_object_coordinates.width * scale_x)
                        y2 = int(bbox_y + l_tracker_object_coordinates.height * scale_y)
                        image = cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 255), 2)
                    save_image = True
                else:
                    obj_meta.rect_params.border_width = 0
                    obj_meta.obj_label = ['oos']

                try:
                    l_obj = l_obj.next
                except StopIteration:
                    break

            stream_index = "stream{0}".format(frame_meta.pad_index)
            timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
            kontrol = all(eleman in sended for eleman in temp_obj)

            if kontrol:
                pass
            else:
                for eleman in temp_obj:
                    if eleman not in sended:
                        sended.append(eleman)
                if len(sended) > 60:
                    del sended[0]
                b_string = "_".join(map(str, temp_obj))
                img_path = "{}stream-{}-time-{}_{}.jpg".format(
                    "/path/" + new_folder_name.split("/")[-1] + "/",
                    frame_meta.pad_index,
                    timestamp,
                    b_string,
                )
                cv2.imwrite(img_path, image)

            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK

def main(args):
    global data
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    if not caps_v4l2src:
        sys.stderr.write(" Unable to create v4l2src capsfilter \n")

    print("Creating Video Converter \n")

    vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")

    print("Playing cam %s " % args[1])
    caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1, width=640, height=480"))
    caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
    source.set_property('device', args[1])

    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 384)
    tracker.set_property('gpu_id', 0)
    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvmultiobjecttracker.so')
    tracker.set_property('ll-config-file', 'trackers/config_tracker_IOU.yml')
    tracker.set_property('enable_batch_process', 1)
    tracker.set_property('enable_past_frame', 0)

    streammux.set_property('width', 640)
    streammux.set_property('height', 640)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "/path/utils/pgie_yolov7_tiny_config.txt")
    sink.set_property('sync', False)

    tiler.set_property("rows", 1)
    tiler.set_property("columns", 1)
    tiler.set_property("width", 640)
    tiler.set_property("height", 480)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(caps_v4l2src)
    pipeline.add(vidconvsrc)
    pipeline.add(nvvidconvsrc)
    pipeline.add(caps_vidconvsrc)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    print("Linking elements in the Pipeline \n")
    source.link(caps_v4l2src)
    caps_v4l2src.link(vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    nvvidconvsrc.link(caps_vidconvsrc)

    srcpad = caps_vidconvsrc.get_static_pad("src")
    sinkpad = streammux.get_request_pad("sink_0")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)

    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    else:
        osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass

    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    sys.exit(main(sys.argv))

terminal o/p:

Invoked with: <pyds.NvDsObjectMeta object at 0x7f7845f5e0>, ['']
Traceback (most recent call last):
  File "camera_open_wo_ros.py", line 87, in osd_sink_pad_buffer_probe
    obj_meta.obj_label = ["oos"]
TypeError: (): incompatible function arguments. The following argument types are supported:
    1. (arg0: pyds.NvDsObjectMeta, arg1: str) -> None

Invoked with: <pyds.NvDsObjectMeta object at 0x7f7844a420>, ['']
Traceback (most recent call last):
  File "camera_open_wo_ros.py", line 87, in osd_sink_pad_buffer_probe
    obj_meta.obj_label = ["oos"]
TypeError: (): incompatible function arguments. The following argument types are supported:
    1. (arg0: pyds.NvDsObjectMeta, arg1: str) -> None

how can I solve this without touching my purpose?

1.I think modifying obj_label is not a correct approach, obj_label is set by the detector.
Can you share your reasons?

2.The type of obj_meta.obj_label is str, only obj_meta.obj_label = 'oos' can be executed by python vm.

In that case , what can I do to see only one class label out of 6 class and its bboxes in osd?
In the link obj_label is defined an array.

Ok, filtering the target object by obj_meta.obj_label can solve your problem, but you don’t need to modify it.

You misunderstood the documentation. ‘str’ is an array of characters. You can check the type by

print(f'type of label: {type(obj_meta.obj_label)}')

But if I don’t use this, their label names located in the left upper corner of the bbox will not be removed, I tried combinations as follows:

                    obj_meta.obj_label = 'oos'

                    obj_meta.obj_label = None

                    obj_meta.obj_label = ''

all of them gave same terminal o/p that bothers me:

but if it’s not a crucial for the camera or create wrong behavior it’s okay.

obj_label is used to identify the detector result. If you want to erase the osd label, use the following code

obj_meta.text_params.display_text = ''

it just creates black boxes in the frames, that’s not what I want, if it’s not a problem I will tun back to my solution.

I don’t think this conflicts with your requirements.

This is to erase the label in the upper left corner.

The black boxes are unexpected. However, since you are using Jetson Nano, I can’t do anything about it. The new version should work normally.

As a supplement, add filter-out-class-ids=class-id to your nvinfer configuration file, The output will be filter to the fixed class-id.

There is no update from you for a period, assuming this is not an issue anymore. Hence we are closing this topic. If need further support, please open a new one. Thanks

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.