Color returned in deepstream-test2

• Hardware Platform (Jetson / GPU) - GeForce RTX 2080 Ti
• DeepStream Version - 6.2
• TensorRT Version - 8.5.2
• NVIDIA GPU Driver Version (valid for GPU only) - 535.54.03

I made a back to back script for vehicle-license plate and person-face, for vehicle i want to get the color too, so i used sgie config from deepstream-test2 and i got always 0, 8, 16 or a long integer, for example:

Color: 0 for tracking id: 16
Color: 8 for tracking id: 12
Color: 139710991171592 for tracking id: 13
Color: 16 for tracking id: 3
Color: 0 for tracking id: 2
Color: 16 for tracking id: 1
Color: 0 for tracking id: 4
Color: 16 for tracking id: 10
Color: 16 for tracking id: 7
Color: 121009 for tracking id: 8
Color: 16 for tracking id: 6
Color: 0 for tracking id: 5
Color: 8 for tracking id: 9

this is my method:

def osd_sink_pad_buffer_probe(pad, info, u_data):
    # This time we maintain two separate counters for primary and secondary objects
    primary_obj_counter = {
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_VEHICLE: 0,
    }
    secondary_obj_counter = {
        SGIE_CLASS_ID_FACE: 0,
        SGIE_CLASS_ID_LP: 0,
    }

    frame_number = 0

    # global frame_number
    global SECOND_DETECTOR_IS_SECONDARY
    
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return Gst.PadProbeReturn.OK

    # Retrieve batch metadata from the gst_buffer
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)

        l_obj = frame_meta.obj_meta_list
        source_id = frame_meta.source_id
        frame_number = frame_meta.frame_num

        while l_obj is not None:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            
            if obj_meta.unique_component_id == PRIMARY_DETECTOR_UID:
                if obj_meta.class_id in [PGIE_CLASS_ID_PERSON, PGIE_CLASS_ID_VEHICLE]:
                    # print(obj_meta.object_id)
                    primary_obj_counter[obj_meta.class_id] += 1

                    if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE:
                        vehicle_meta = pyds.NvDsVehicleObject.cast(l_obj)
                        print('Color:', vehicle_meta.color, 'for tracking id:', obj_meta.object_id)
            
            elif obj_meta.unique_component_id == SECONDARY_DETECTOR_UID:
                if obj_meta.class_id in [SGIE_CLASS_ID_FACE, SGIE_CLASS_ID_LP]:
                    secondary_obj_counter[obj_meta.class_id] += 1
                    
                    # A doua conditie este pentru ca detecteaza uneori gresit(poate gasi fata la oglinda masinii, de ex)
                    # print(obj_meta.parent)
                    if obj_meta.class_id == 0 and obj_meta.parent.class_id == 2:
                        print('Face detected at tracking id:', obj_meta.parent.object_id)
                    
                    if obj_meta.class_id == 1 and obj_meta.parent.class_id == 0:
                        print('License plate detected at tracking id:', obj_meta.parent.object_id)
                    # print(obj_meta.parent)
                    # if SECOND_DETECTOR_IS_SECONDARY == 1:
                        # print(obj_meta.parent)
                    #     print("{} found for parent object id={}".format(sgie_classes_str[obj_meta.class_id], obj_meta.parent.object_id))

            l_obj = l_obj.next
        # l_frame = l_frame.next

        print("Source ID:", source_id,
            "Frame Number:", frame_number, 
            "Person Count:", primary_obj_counter[PGIE_CLASS_ID_PERSON], 
            "Vehicle Count:", primary_obj_counter[PGIE_CLASS_ID_VEHICLE],
            "Face Count:", secondary_obj_counter[SGIE_CLASS_ID_FACE],
            "License Plate Count:", secondary_obj_counter[SGIE_CLASS_ID_LP])
        
        # frame_number += 1
        # Update frame rate through this probe
        # stream_index = "stream{0}".format(frame_meta.pad_index)
        # global perf_data
        # perf_data.update_fps(stream_index)
        
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK

I also tried: vehicle_meta = pyds.NvDsVehicleObject.cast(l_obj.data), but returns 0 for every vehicle

NvDsVehicleObject is used for msgconv.

If you want got the label from sgie.

You can replace this function in deepstream_test_2.py with the following code

def osd_sink_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    #Intiallizing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
    }
    num_rects=0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj=frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
                l_class = obj_meta.classifier_meta_list
                while l_class is not None:
                    try:
                        class_meta = pyds.NvDsClassifierMeta.cast(l_class.data)
                        if class_meta is not None:
                            if class_meta.unique_component_id == 2:
                                l_label = class_meta.label_info_list
                                while l_label is not None:
                                    try:
                                        label = pyds.NvDsLabelInfo.cast(l_label.data)
                                        if label is not None:
                                            #if label.label_id == 0 and label.result_class_id == 1:
                                            print("color label {} ".format(label.result_label))
                                    except StopIteration:
                                        break
                                    try:
                                        l_label = l_label.next
                                    except StopIteration:
                                        break
                    except StopIteration:
                        break
                    try:
                        l_class = l_class.next
                    except StopIteration:
                        break
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try: 
                l_obj=l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break
    #past traking meta data
    if(past_tracking_meta[0]==1):
        l_user=batch_meta.batch_user_meta_list
        while l_user is not None:
            try:
                # Note that l_user.data needs a cast to pyds.NvDsUserMeta
                # The casting is done by pyds.NvDsUserMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone
                user_meta=pyds.NvDsUserMeta.cast(l_user.data)
            except StopIteration:
                break
            if(user_meta and user_meta.base_meta.meta_type==pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META):
                try:
                    # Note that user_meta.user_meta_data needs a cast to pyds.NvDsPastFrameObjBatch
                    # The casting is done by pyds.NvDsPastFrameObjBatch.cast()
                    # The casting also keeps ownership of the underlying memory
                    # in the C code, so the Python garbage collector will leave
                    # it alone
                    pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast(user_meta.user_meta_data)
                except StopIteration:
                    break
                for trackobj in pyds.NvDsPastFrameObjBatch.list(pPastFrameObjBatch):
                    print("streamId=",trackobj.streamID)
                    print("surfaceStreamID=",trackobj.surfaceStreamID)
                    for pastframeobj in pyds.NvDsPastFrameObjStream.list(trackobj):
                        print("numobj=",pastframeobj.numObj)
                        print("uniqueId=",pastframeobj.uniqueId)
                        print("classId=",pastframeobj.classId)
                        print("objLabel=",pastframeobj.objLabel)
                        for objlist in pyds.NvDsPastFrameObjList.list(pastframeobj):
                            print('frameNum:', objlist.frameNum)
                            print('tBbox.left:', objlist.tBbox.left)
                            print('tBbox.width:', objlist.tBbox.width)
                            print('tBbox.top:', objlist.tBbox.top)
                            print('tBbox.right:', objlist.tBbox.height)
                            print('confidence:', objlist.confidence)
                            print('age:', objlist.age)
            try:
                l_user=l_user.next
            except StopIteration:
                break
    return Gst.PadProbeReturn.OK	

it works, but i have an issue if i use sgie for color and model type(as in deepstream-test2), i always got undetected for model and sometimes the model type instead of color

Source ID: 0 Frame Number: 89 Person Count: 8 Vehicle Count: 11 Face Count: 2 License Plate Count: 5
License plate detected for vehicle with tracking id: 15, color: hyundai, model: undetected
License plate detected for vehicle with tracking id: 15, color: hyundai, model: undetected
License plate detected for vehicle with tracking id: 13, color: grey, model: undetected
License plate detected for vehicle with tracking id: 9, color: silver, model: undetected
License plate detected for vehicle with tracking id: 33, color: silver, model: undetected

This is my code:

def osd_sink_pad_buffer_probe(pad, info, u_data):
    # This time we maintain two separate counters for primary and secondary objects
    primary_obj_counter = {
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_VEHICLE: 0,
    }
    secondary_obj_counter = {
        SGIE_CLASS_ID_FACE: 0,
        SGIE_CLASS_ID_LP: 0,
    }

    frame_number = 0

    # global frame_number
    global SECOND_DETECTOR_IS_SECONDARY
    
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return Gst.PadProbeReturn.OK

    # Retrieve batch metadata from the gst_buffer
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)

        l_obj = frame_meta.obj_meta_list
        source_id = frame_meta.source_id
        frame_number = frame_meta.frame_num

        while l_obj is not None:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            l_class = obj_meta.classifier_meta_list
            
            if obj_meta.unique_component_id == PRIMARY_DETECTOR_UID:
                if obj_meta.class_id in [PGIE_CLASS_ID_PERSON, PGIE_CLASS_ID_VEHICLE]:
                    # print(obj_meta.object_id)
                    primary_obj_counter[obj_meta.class_id] += 1

                    # if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE and l_class is not None:
                    #     class_meta = pyds.NvDsClassifierMeta.cast(l_class.data)
                    #     l_label = class_meta.label_info_list
                    #     label = pyds.NvDsLabelInfo.cast(l_label.data)
                    #     color = label.result_label
                        
            elif obj_meta.unique_component_id == SECONDARY_DETECTOR_UID:
                if obj_meta.class_id in [SGIE_CLASS_ID_FACE, SGIE_CLASS_ID_LP]:
                    secondary_obj_counter[obj_meta.class_id] += 1
                    
                    # A doua conditie este pentru ca detecteaza uneori gresit(poate gasi fata la oglinda masinii, de ex)
                    if obj_meta.class_id == 0 and obj_meta.parent.class_id == 2:
                        print('Face detected at tracking id:', obj_meta.parent.object_id)
                    
                    if obj_meta.class_id == 1 and obj_meta.parent.class_id == 0:
                        l_class_parent = obj_meta.parent.classifier_meta_list

                        if l_class_parent is not None:
                            class_meta_parent = pyds.NvDsClassifierMeta.cast(l_class_parent.data)
                            l_label_parent = class_meta_parent.label_info_list
                            
                            label_color = pyds.NvDsLabelInfo.cast(l_label_parent.data)
                            color = label_color.result_label if label_color.result_label else 'undetected'

                            if l_label_parent.next is not None:
                                label_model = pyds.NvDsLabelInfo.cast(l_label_parent.next.data)
                                model_type = label_model.result_label if label_model.result_label else 'undetected'
                            else:
                                model_type = 'undetected'

                        print('License plate detected for vehicle with tracking id: {}, color: {}, model: {}'.format(obj_meta.parent.object_id, color, model_type))

            l_obj = l_obj.next

        print("Source ID:", source_id,
            "Frame Number:", frame_number, 
            "Person Count:", primary_obj_counter[PGIE_CLASS_ID_PERSON], 
            "Vehicle Count:", primary_obj_counter[PGIE_CLASS_ID_VEHICLE],
            "Face Count:", secondary_obj_counter[SGIE_CLASS_ID_FACE],
            "License Plate Count:", secondary_obj_counter[SGIE_CLASS_ID_LP])
        
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie1)
    # pipeline.add(trackersec)
    pipeline.add(sgie2)
    pipeline.add(sgie3)
    pipeline.add(tiler)
    pipeline.add(nvvideoconvert)
    pipeline.add(nvosd)
    pipeline.add(sink)

    # We link elements in the following order:
    # sourcebin -> streammux -> nvinfer -> nvtracker -> nvdsanalytics ->
    # nvtiler -> nvvideoconvert -> nvdsosd -> (if aarch64, transform ->) sink
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie1)
    # sgie1.link(trackersec)
    sgie1.link(sgie2)
    sgie2.link(sgie3)
    sgie3.link(tiler)
    # sgie2.link(sgie3)
    # sgie2.link(tiler)
    tiler.link(nvvideoconvert)
    nvvideoconvert.link(nvosd)
    nvosd.link(sink)

In deepstream-test2, there is only one pgie, and the results of all sgie are stored in classifier_meta_list.

If you want to get carcolor and carmaker at the same time.Just like

 if class_meta.unique_component_id == 2:
   pass
elif class_meta.unique_component_id == 3:
   pass

2 and 3 respectively represent the id of sgie. that is gie-unique-id=2 and gie-unique-id=3 in the configuration file

Below is the full code.

def osd_sink_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    #Intiallizing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
    }
    num_rects=0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj=frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
                l_class = obj_meta.classifier_meta_list
                while l_class is not None:
                    try:
                        class_meta = pyds.NvDsClassifierMeta.cast(l_class.data)
                        if class_meta is not None:
                            if class_meta.unique_component_id == 2:
                                l_label = class_meta.label_info_list
                                while l_label is not None:
                                    try:
                                        label = pyds.NvDsLabelInfo.cast(l_label.data)
                                        if label is not None:
                                            #if label.label_id == 0 and label.result_class_id == 1:
                                            print("color {} ".format(label.result_label))
                                    except StopIteration:
                                        break
                                    try:
                                        l_label = l_label.next
                                    except StopIteration:
                                        break
                            elif class_meta.unique_component_id == 3:
                                l_label = class_meta.label_info_list
                                while l_label is not None:
                                    try:
                                        label = pyds.NvDsLabelInfo.cast(l_label.data)
                                        if label is not None:
                                            #if label.label_id == 0 and label.result_class_id == 1:
                                            print("car make {} ".format(label.result_label))
                                    except StopIteration:
                                        break
                                    try:
                                        l_label = l_label.next
                                    except StopIteration:
                                        break
                    except StopIteration:
                        break
                    try:
                        l_class = l_class.next
                    except StopIteration:
                        break
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try: 
                l_obj=l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break
    #past traking meta data
    if(past_tracking_meta[0]==1):
        l_user=batch_meta.batch_user_meta_list
        while l_user is not None:
            try:
                # Note that l_user.data needs a cast to pyds.NvDsUserMeta
                # The casting is done by pyds.NvDsUserMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone
                user_meta=pyds.NvDsUserMeta.cast(l_user.data)
            except StopIteration:
                break
            if(user_meta and user_meta.base_meta.meta_type==pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META):
                try:
                    # Note that user_meta.user_meta_data needs a cast to pyds.NvDsPastFrameObjBatch
                    # The casting is done by pyds.NvDsPastFrameObjBatch.cast()
                    # The casting also keeps ownership of the underlying memory
                    # in the C code, so the Python garbage collector will leave
                    # it alone
                    pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast(user_meta.user_meta_data)
                except StopIteration:
                    break
                for trackobj in pyds.NvDsPastFrameObjBatch.list(pPastFrameObjBatch):
                    print("streamId=",trackobj.streamID)
                    print("surfaceStreamID=",trackobj.surfaceStreamID)
                    for pastframeobj in pyds.NvDsPastFrameObjStream.list(trackobj):
                        print("numobj=",pastframeobj.numObj)
                        print("uniqueId=",pastframeobj.uniqueId)
                        print("classId=",pastframeobj.classId)
                        print("objLabel=",pastframeobj.objLabel)
                        for objlist in pyds.NvDsPastFrameObjList.list(pastframeobj):
                            print('frameNum:', objlist.frameNum)
                            print('tBbox.left:', objlist.tBbox.left)
                            print('tBbox.width:', objlist.tBbox.width)
                            print('tBbox.top:', objlist.tBbox.top)
                            print('tBbox.right:', objlist.tBbox.height)
                            print('confidence:', objlist.confidence)
                            print('age:', objlist.age)
            try:
                l_user=l_user.next
            except StopIteration:
                break
    return Gst.PadProbeReturn.OK	

it cant detect both color and model type, somehow model type overwrites color, this is a part of my terminal:

License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 46, color: brown, model: undetected
License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 46, color: brown, model: undetected
License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 46, color: brown, model: undetected
License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 46, color: brown, model: undetected
License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 46, color: brown, model: undetected
License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 40, color: orange, model: undetected
License plate detected for vehicle with tracking id: 34, color: undetected, model: volkswagen

and my code:

def osd_sink_pad_buffer_probe(pad, info, u_data):
    # This time we maintain two separate counters for primary and secondary objects
    primary_obj_counter = {
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_VEHICLE: 0,
    }
    secondary_obj_counter = {
        SGIE_CLASS_ID_FACE: 0,
        SGIE_CLASS_ID_LP: 0,
    }

    frame_number = 0

    # global frame_number
    global SECOND_DETECTOR_IS_SECONDARY
    
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return Gst.PadProbeReturn.OK

    # Retrieve batch metadata from the gst_buffer
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)

        l_obj = frame_meta.obj_meta_list
        source_id = frame_meta.source_id
        frame_number = frame_meta.frame_num

        while l_obj is not None:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            l_class = obj_meta.classifier_meta_list
            
            if obj_meta.unique_component_id == PRIMARY_DETECTOR_UID:
                if obj_meta.class_id in [PGIE_CLASS_ID_PERSON, PGIE_CLASS_ID_VEHICLE]:
                    # print(obj_meta.object_id)
                    primary_obj_counter[obj_meta.class_id] += 1

                    # if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE and l_class is not None:
                    #     class_meta = pyds.NvDsClassifierMeta.cast(l_class.data)
                    #     l_label = class_meta.label_info_list
                    #     label = pyds.NvDsLabelInfo.cast(l_label.data)
                    #     color = label.result_label
                        
            elif obj_meta.unique_component_id == SECONDARY_DETECTOR_UID:
                if obj_meta.class_id in [SGIE_CLASS_ID_FACE, SGIE_CLASS_ID_LP]:
                    secondary_obj_counter[obj_meta.class_id] += 1
                    
                    # A doua conditie este pentru ca detecteaza uneori gresit(poate gasi fata la oglinda masinii, de ex)
                    if obj_meta.class_id == 0 and obj_meta.parent.class_id == 2:
                        print('Face detected at tracking id:', obj_meta.parent.object_id)
                    
                    if obj_meta.class_id == 1 and obj_meta.parent.class_id == 0:
                        l_class_parent = obj_meta.parent.classifier_meta_list
                        
                        color = 'undetected'
                        model_type = 'undetected'

                        if l_class_parent is not None:
                            class_meta_parent = pyds.NvDsClassifierMeta.cast(l_class_parent.data)

                            if class_meta_parent.unique_component_id == 3:
                                l_label_parent = class_meta_parent.label_info_list
                                label_color = pyds.NvDsLabelInfo.cast(l_label_parent.data)
                                color = label_color.result_label if label_color.result_label else 'undetected'

                            if class_meta_parent.unique_component_id == 4:
                                l_label_parent = class_meta_parent.label_info_list
                                label_model_type = pyds.NvDsLabelInfo.cast(l_label_parent.data)
                                model_type = label_model_type.result_label if label_model_type.result_label else 'undetected'

                        print('License plate detected for vehicle with tracking id: {}, color: {}, model: {}'.format(obj_meta.parent.object_id, color, model_type))

            l_obj = l_obj.next

        # print("Source ID:", source_id,
        #     "Frame Number:", frame_number, 
        #     "Person Count:", primary_obj_counter[PGIE_CLASS_ID_PERSON], 
        #     "Vehicle Count:", primary_obj_counter[PGIE_CLASS_ID_VEHICLE],
        #     "Face Count:", secondary_obj_counter[SGIE_CLASS_ID_FACE],
        #     "License Plate Count:", secondary_obj_counter[SGIE_CLASS_ID_LP])
        
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK

Maybe there are some errors in your pipeline or configuration.

I tried to use two sgies with back to back,It can work normally.

Here is my code, you can refer to it.
back-to-back.tar.gz (7.7 KB)

I tried your code and configs, but have same issue, cant detect both color and type, example from terminal:

License plate detected for vehicle with tracking id: 138, color: silver, model: undetected
License plate detected for vehicle with tracking id: 141, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 135, color: white, model: undetected
License plate detected for vehicle with tracking id: 138, color: silver, model: undetected
License plate detected for vehicle with tracking id: 141, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 135, color: white, model: undetected
License plate detected for vehicle with tracking id: 138, color: silver, model: undetected
License plate detected for vehicle with tracking id: 141, color: undetected, model: volkswagen
License plate detected for vehicle with tracking id: 135, color: white, model: undetected
License plate detected for vehicle with tracking id: 138, color: silver, model: undetected
License plate detected for vehicle with tracking id: 141, color: undetected, model: volkswagen

if both are detected, it prints only model and color remains undetected, because gie-unique-id is 3 and ignores 2

This is my code also:

#!/usr/bin/env python3

################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

import sys
sys.path.append('../')
import gi
import configparser
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib
from gi.repository import GLib
from ctypes import *
import time
import sys
import math
import random
import platform
from common.is_aarch_64 import is_aarch64
import argparse
from common.FPS import PERF_DATA
from common.bus_call import bus_call

import pyds

perf_data = None

OSD_PROCESS_MODE = 0
OSD_DISPLAY_TEXT = 1
MAX_DISPLAY_LEN=64
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
SGIE_CLASS_ID_FACE = 0
SGIE_CLASS_ID_LP = 1
PRIMARY_DETECTOR_UID = 1
SECONDARY_DETECTOR_UID = 2
SECOND_DETECTOR_IS_SECONDARY = 1
MUXER_OUTPUT_WIDTH=1920
MUXER_OUTPUT_HEIGHT=1080
MUXER_BATCH_TIMEOUT_USEC=4000000
TILED_OUTPUT_WIDTH=1280
TILED_OUTPUT_HEIGHT=720
GPU_ID = 0
MAX_NUM_SOURCES = 1
SINK_ELEMENT = "nveglglessink"
PGIE_CONFIG_FILE = "dstest2_pgie_config.txt"
TRACKER_CONFIG_FILE = "dstest2_tracker_config2.txt"

SGIE1_CONFIG_FILE = "dstest2_sgie1_config_license_face.txt"
SGIE2_CONFIG_FILE = "dstest2_sgie1_config.txt"
SGIE3_CONFIG_FILE = "dstest2_sgie2_config.txt"

CONFIG_GPU_ID = "gpu-id"
CONFIG_GROUP_TRACKER = "tracker"
CONFIG_GROUP_TRACKER_WIDTH = "tracker-width"
CONFIG_GROUP_TRACKER_HEIGHT = "tracker-height"
CONFIG_GROUP_TRACKER_LL_CONFIG_FILE = "ll-config-file"
CONFIG_GROUP_TRACKER_LL_LIB_FILE = "ll-lib-file"
CONFIG_GROUP_TRACKER_ENABLE_BATCH_PROCESS = "enable-batch-process"

g_num_sources = 0
g_source_id_list = [0] * MAX_NUM_SOURCES
g_eos_list = [False] * MAX_NUM_SOURCES
g_source_enabled = [False] * MAX_NUM_SOURCES
g_source_bin_list = [None] * MAX_NUM_SOURCES

# pgie_classes_str= ["Vehicle", "TwoWheeler", "Person","RoadSign"]
# sgie_classes_str = ["Face", "License Plate"]
# frame_number = 0

uri = ""

loop = None
pipeline = None
streammux = None
sink = None
pgie = None
sgie1 = None
sgie2 = None
sgie3 = None
nvvideoconvert = None
nvosd = None
tiler = None
tracker = None
trackersec = None

def osd_sink_pad_buffer_probe(pad, info, u_data):
    # This time we maintain two separate counters for primary and secondary objects
    primary_obj_counter = {
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_VEHICLE: 0,
    }
    secondary_obj_counter = {
        SGIE_CLASS_ID_FACE: 0,
        SGIE_CLASS_ID_LP: 0,
    }

    frame_number = 0
    
    # global frame_number
    global SECOND_DETECTOR_IS_SECONDARY
    
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return Gst.PadProbeReturn.OK

    # Retrieve batch metadata from the gst_buffer
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)

        l_obj = frame_meta.obj_meta_list
        source_id = frame_meta.source_id
        frame_number = frame_meta.frame_num

        while l_obj is not None:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            l_class = obj_meta.classifier_meta_list
            
            if obj_meta.unique_component_id == PRIMARY_DETECTOR_UID:
                if obj_meta.class_id in [PGIE_CLASS_ID_PERSON, PGIE_CLASS_ID_VEHICLE]:
                    # print(obj_meta.object_id)
                    primary_obj_counter[obj_meta.class_id] += 1

                    # if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE and l_class is not None:
                    #     class_meta = pyds.NvDsClassifierMeta.cast(l_class.data)
                    #     l_label = class_meta.label_info_list
                    #     label = pyds.NvDsLabelInfo.cast(l_label.data)
                    #     color = label.result_label
                        
            elif obj_meta.unique_component_id == SECONDARY_DETECTOR_UID:
                if obj_meta.class_id in [SGIE_CLASS_ID_FACE, SGIE_CLASS_ID_LP]:
                    secondary_obj_counter[obj_meta.class_id] += 1
                    
                    # A doua conditie este pentru ca detecteaza uneori gresit(poate gasi fata la oglinda masinii, de ex)
                    # if obj_meta.class_id == 0 and obj_meta.parent.class_id == 2:
                    #     print('Face detected at tracking id:', obj_meta.parent.object_id)
                    
                    if obj_meta.class_id == 1 and obj_meta.parent.class_id == 0:
                        l_class_parent = obj_meta.parent.classifier_meta_list
                        
                        color = 'undetected'
                        model_type = 'undetected'

                        if l_class_parent is not None:
                            class_meta_parent = pyds.NvDsClassifierMeta.cast(l_class_parent.data)

                            if class_meta_parent.unique_component_id == 3:
                                l_label_parent = class_meta_parent.label_info_list
                                label_color = pyds.NvDsLabelInfo.cast(l_label_parent.data)
                                color = label_color.result_label if label_color.result_label else 'undetected'
                                
                            if class_meta_parent.unique_component_id == 4:
                                l_label_parent = class_meta_parent.label_info_list
                                label_model_type = pyds.NvDsLabelInfo.cast(l_label_parent.data)
                                model_type = label_model_type.result_label if label_model_type.result_label else 'undetected'

                            # l_class_parent = l_class_parent.next

                        print('License plate detected for vehicle with tracking id: {}, color: {}, model: {}'.format(obj_meta.parent.object_id, color, model_type))

            l_obj = l_obj.next

        # print("Source ID:", source_id,
        #     "Frame Number:", frame_number, 
        #     "Person Count:", primary_obj_counter[PGIE_CLASS_ID_PERSON], 
        #     "Vehicle Count:", primary_obj_counter[PGIE_CLASS_ID_VEHICLE],
        #     "Face Count:", secondary_obj_counter[SGIE_CLASS_ID_FACE],
        #     "License Plate Count:", secondary_obj_counter[SGIE_CLASS_ID_LP])
        
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK

def cb_newpad(decodebin, decoder_src_pad, data):
    """
    The function is called when a new pad is created by the decodebin. 
    The function checks if the new pad is for video and not audio. 
    If the new pad is for video, the function checks if the pad caps contain NVMM memory features. 
    If the pad caps contain NVMM memory features, the function links the decodebin pad to the source bin
    ghost pad. 
    If the pad caps do not contain NVMM memory features, the function prints an error message.
    :param decodebin: The decodebin element that is creating the new pad
    :param decoder_src_pad: The source pad created by the decodebin element
    :param data: This is the data that was passed to the callback function. In this case, it is the
    source_bin
    """
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)

    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    print("gstname=", gstname)
    if gstname.find("video") != -1:
        # Link the decodebin pad only if decodebin has picked nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        print("features=", features)
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write(
                    "Failed to link decoder src pad to source bin ghost pad\n"
                )
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")


def decodebin_child_added(child_proxy, Object, name, user_data):
    """
    If the child added to the decodebin is another decodebin, connect to its child-added signal. If the
    child added is a source, set its drop-on-latency property to True.
    
    :param child_proxy: The child element that was added to the decodebin
    :param Object: The object that emitted the signal
    :param name: The name of the element that was added
    :param user_data: This is a pointer to the data that you want to pass to the callback function
    """
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)

    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property("drop-on-latency") != None:
            Object.set_property("drop-on-latency", True)


def create_source_bin(index, uri):
    """
    It creates a GstBin, adds a uridecodebin to it, and connects the uridecodebin's pad-added signal to
    a callback function
    
    :param index: The index of the source bin
    :param uri: The URI of the video file to be played
    :return: A bin with a uri decode bin and a ghost pad.
    """
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has beed created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin


def make_element(element_name, i):
    """
    Creates a Gstreamer element with unique name
    Unique name is created by adding element type and index e.g. `element_name-i`
    Unique name is essential for all the element in pipeline otherwise gstreamer will throw exception.
    :param element_name: The name of the element to create
    :param i: the index of the element in the pipeline
    :return: A Gst.Element object
    """
    element = Gst.ElementFactory.make(element_name, element_name)
    if not element:
        sys.stderr.write(" Unable to create {0}".format(element_name))
    element.set_property("name", "{0}-{1}".format(element_name, str(i)))
    return element


def main(args, requested_pgie=None, config=None, disable_probe=False):
    global g_num_sources
    global g_source_bin_list
    global uri

    global loop
    global pipeline
    global streammux
    global sink
    global pgie
    global sgie1
    global sgie2
    global sgie3
    global nvvideoconvert
    global nvosd
    global tiler
    global tracker
    global trackersec

    input_sources = args
    number_sources = len(input_sources)
    global perf_data
    perf_data = PERF_DATA(number_sources)

    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = input_sources[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    # queue1 = Gst.ElementFactory.make("queue", "queue1")
    # pipeline.add(queue1)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating nvtracker \n ")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    
    print("Creating nvtracker2 \n ")
    trackersec = Gst.ElementFactory.make("nvtracker", "trackersec")
    if not trackersec:
        sys.stderr.write(" Unable to create tracker \n")

    print("Creating sgie1 \n ")
    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie1:
        sys.stderr.write(" Unable to make sgie1 \n")

    print("Creating sgie2 \n ")
    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    if not sgie2:
        sys.stderr.write(" Unable to make sgie2 \n")

    print("Creating sgie3 \n ")
    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    if not sgie3:
        sys.stderr.write(" Unable to make sgie3 \n")

    print("Creating tiler \n ")
    tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvideoconvert = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvideoconvert:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    
    if is_aarch64():
        print("Creating nv3dsink \n")
        sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink")
        if not sink:
            sys.stderr.write(" Unable to create nv3dsink \n")
    else:
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")
    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    print("Creating nvstreamdemux \n ")
    nvstreamdemux = Gst.ElementFactory.make("nvstreamdemux", "nvstreamdemux")
    if not nvstreamdemux:
        sys.stderr.write(" Unable to create nvstreamdemux \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property("live-source", 1)

    streammux.set_property("width", 960)
    streammux.set_property("height", 540)
    streammux.set_property("batch-size", number_sources)
    streammux.set_property("batched-push-timeout", 4000000)
 
    pgie.set_property("config-file-path", PGIE_CONFIG_FILE)
    pgie.set_property("unique-id", PRIMARY_DETECTOR_UID)
    pgie_batch_size = pgie.get_property("batch-size")
 
    if pgie_batch_size != number_sources:
        print(
            "WARNING: Overriding infer-config batch-size",
            pgie_batch_size,
            " with number of sources ",
            number_sources,
            " \n",
        )
        pgie.set_property("batch-size", number_sources)

    sgie1.set_property('config-file-path', SGIE1_CONFIG_FILE)
    sgie1.set_property("unique-id", SECONDARY_DETECTOR_UID)
    sgie1.set_property("process-mode", 2 if SECOND_DETECTOR_IS_SECONDARY else 1)

    sgie2.set_property('config-file-path', SGIE2_CONFIG_FILE)
    sgie2.set_property("unique-id", 3)
    sgie2.set_property("process-mode", 2)
    
    sgie3.set_property('config-file-path', SGIE3_CONFIG_FILE)
    sgie3.set_property("unique-id", 4)
    sgie3.set_property("process-mode", 2)

    config = configparser.ConfigParser()
    config.read(TRACKER_CONFIG_FILE)
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width' :
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
            trackersec.set_property('tracker-width', tracker_width)
        if key == 'tracker-height' :
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
            trackersec.set_property('tracker-height', tracker_height)
        if key == 'gpu-id' :
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
            trackersec.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file' :
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
            trackersec.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file' :
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
            trackersec.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process' :
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
            trackersec.set_property('enable_batch_process', tracker_enable_batch_process)
    
    tiler_rows=int(math.sqrt(number_sources))
    tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows))
    tiler.set_property("rows",tiler_rows)
    tiler.set_property("columns",tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    #Set gpu IDs of tiler, nvvideoconvert, and nvosd
    tiler.set_property("gpu_id", GPU_ID)
    nvvideoconvert.set_property("gpu_id", GPU_ID)
    nvosd.set_property("gpu_id", GPU_ID)

    #Set gpu ID of sink if not aarch64
    if(not is_aarch64()):
        sink.set_property("gpu_id", GPU_ID)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie1)
    # pipeline.add(trackersec)
    pipeline.add(sgie2)
    pipeline.add(sgie3)
    pipeline.add(tiler)
    pipeline.add(nvvideoconvert)
    pipeline.add(nvosd)
    pipeline.add(sink)

    # We link elements in the following order:
    # sourcebin -> streammux -> nvinfer -> nvtracker -> nvdsanalytics ->
    # nvtiler -> nvvideoconvert -> nvdsosd -> (if aarch64, transform ->) sink
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie1)
    # sgie1.link(trackersec)
    sgie1.link(sgie2)
    sgie2.link(sgie3)
    sgie3.link(tiler)
    # sgie2.link(sgie3)
    # sgie2.link(tiler)
    tiler.link(nvvideoconvert)
    nvvideoconvert.link(nvosd)
    nvosd.link(sink)

    print("Linking elements in the Pipeline \n")
    sink.set_property("sync", 0)
    sink.set_property("qos",0)
    # create an event loop and feed gstreamer bus mesages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    pgie_src_pad = tiler.get_static_pad("sink")
    if not pgie_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        pgie_src_pad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
        # perf callback function to print fps every 5 sec
        GLib.timeout_add(5000, perf_data.perf_print_callback)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(input_sources):
        print(i, ": ", source)

    print("Starting pipeline \n")
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

def parse_args():
    parser = argparse.ArgumentParser(prog="deepstream_demux_multi_in_multi_out.py", 
        description="deepstream-demux-multi-in-multi-out takes multiple URI streams as input" \
            "and uses `nvstreamdemux` to split batches and output separate buffer/streams")
    parser.add_argument(
        "-i",
        "--input",
        help="Path to input streams",
        nargs="+",
        metavar="URIs",
        default=["a"],
        required=True,
    )

    args = parser.parse_args()
    stream_paths = args.input
    return stream_paths

if __name__ == "__main__":
    stream_paths = parse_args()
    sys.exit(main(stream_paths))

What is the value of opera-on-gie-id in the files dstest2_sgie1_config.txt and dstest2_sgie2_config.txt?

It should be 1 if not changed. This means that both sgie2 and sgie3 will operate on pgie’s metadata.

However in your code, you are dealing with metadata on the secondary detector.

My sample code gives the correct sample。

both are operate-on-gie-id=1

[property]
gpu-id=0
net-scale-factor=1
model-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/resnet18.caffemodel
proto-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/resnet18.prototxt
model-engine-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/resnet18.caffemodel_b16_gpu0_int8.engine
mean-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/mean.ppm
labelfile-path=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/labels.txt
int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarColor/cal_trt.bin
force-implicit-batch-dim=1
batch-size=16
# 0=FP32 and 1=INT8 mode
network-mode=1
input-object-min-width=64
input-object-min-height=64
process-mode=2
model-color-format=1
gpu-id=0
gie-unique-id=3
operate-on-gie-id=1
operate-on-class-ids=0
is-classifier=1
output-blob-names=predictions/Softmax
classifier-async-mode=1
classifier-threshold=0.51
process-mode=2
#scaling-filter=0
#scaling-compute-hw=0
[property]
gpu-id=0
net-scale-factor=1
model-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/resnet18.caffemodel
proto-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/resnet18.prototxt
model-engine-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/resnet18.caffemodel_b16_gpu0_int8.engine
mean-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/mean.ppm
labelfile-path=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/labels.txt
int8-calib-file=/opt/nvidia/deepstream/deepstream/samples/models/Secondary_CarMake/cal_trt.bin
force-implicit-batch-dim=1
batch-size=16
# 0=FP32 and 1=INT8 mode
network-mode=1
input-object-min-width=64
input-object-min-height=64
process-mode=2
model-color-format=1
gpu-id=0
gie-unique-id=4
operate-on-gie-id=1
operate-on-class-ids=0
is-classifier=1
output-blob-names=predictions/Softmax
classifier-async-mode=1
classifier-threshold=0.51
process-mode=2
#scaling-filter=0
#scaling-compute-hw=0

I think its from pipeline

if obj_meta.unique_component_id == PRIMARY_DETECTOR_UID:
    if obj_meta.class_id in [PGIE_CLASS_ID_PERSON, PGIE_CLASS_ID_VEHICLE]:
        # print(obj_meta.object_id)
        primary_obj_counter[obj_meta.class_id] += 1

        # if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE and l_class is not None:
        #     class_meta = pyds.NvDsClassifierMeta.cast(l_class.data)
        #     l_label = class_meta.label_info_list
        #     label = pyds.NvDsLabelInfo.cast(l_label.data)
        #     color = label.result_label
        
        l_class = obj_meta.classifier_meta_list

        color = 'undetected'
        model_type = 'undetected'

        if l_class is not None:
            while l_class is not None:
                try:
                    class_meta = pyds.NvDsClassifierMeta.cast(l_class.data)

                    if class_meta.unique_component_id == 3:
                        l_label_parent = class_meta.label_info_list
                        label_color = pyds.NvDsLabelInfo.cast(l_label_parent.data)
                        color = label_color.result_label if label_color.result_label else 'undetected'

                    if class_meta.unique_component_id == 4:
                        l_label_parent = class_meta.label_info_list
                        label_model_type = pyds.NvDsLabelInfo.cast(l_label_parent.data)
                        model_type = label_model_type.result_label if label_model_type.result_label else 'undetected'
                except 
                    StopIteration:
                    break
                try:
                    l_class = l_class.next
                except StopIteration:
                    break
            print(' color: {}, model: {}'.format(color, model_type))
        
elif obj_meta.unique_component_id == SECONDARY_DETECTOR_UID:
    if obj_meta.class_id in [SGIE_CLASS_ID_FACE, SGIE_CLASS_ID_LP]:
        secondary_obj_counter[obj_meta.class_id] += 1
    
    # A doua conditie este pentru ca detecteaza uneori gresit(poate gasi fata la oglinda masinii, de ex)
    if obj_meta.class_id == 0 and obj_meta.parent.class_id == 2:
        print('Face detected at tracking id:', obj_meta.parent.object_id)

Replace your code in the corresponding place

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.