Unable to get result of secondary1-nvinference-engine and secondary2-nvinference-engine on each frame

Here is my sample code

    obj_meta2 = pyds.glist_get_nvds_object_meta(l_obj.data)
    l_classifier = obj_meta2.classifier_meta_list
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    display_meta.num_labels = 1
    tracker_id = obj_meta2.object_id
    print("================  frame_meta.frame_num  ======================", frame_meta.frame_num)
    print("================  tracker_id  ======================", tracker_id)
    if l_classifier is not None:
        # Only the first classifier meta in the list is read here.
        print("Step 6 ", l_classifier.data)
        classifier_meta = pyds.glist_get_nvds_classifier_meta(l_classifier.data)
        l_label = classifier_meta.label_info_list
        uid = classifier_meta.unique_component_id
        num_labels = classifier_meta.num_labels
        label_info = pyds.glist_get_nvds_label_info(l_label.data)
        classifier_class = label_info.result_class_id
        num_classes = label_info.num_classes
        label_id = label_info.label_id
        result_prob = label_info.result_prob

        print("1 l_label         :", l_label)
        print("1 uid             :", uid)
        print("1 num_labels      :", num_labels)
        print("1 label_info      :", label_info)
        print("1 classifier_class:", classifier_class)
        print("1 num_classes     :", num_classes)
        print("1 label_id        :", label_id)
        print("1 result_prob     :", result_prob)

@thakur.sandeep.srs

Could you please give us clearer information about what you are working with, such as:

  1. what task you are using the DeepStream pipeline for,
  2. what inference models you are dealing with,
  3. what configuration you have already done, and finally
  4. what problems you have encountered?

Thank you very much.

Thanks @ersheng
My problem is that I am not getting the results of both classifiers on a single frame.

Below is my GStreamer pipeline creation and linking:

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create GStreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):

        #os.mkdir("/root/Smarg/Gateway_11May/Gateway_11May/Images/stream_"+str(i))
        self.frame_count["stream_"+str(i)]=0
        self.saved_count["stream_"+str(i)]=0
        
        print("Creating source_bin ",i," \n ")
        uri_name=args[i]
        if uri_name.find("rtsp://") == 0 :
            is_live = True
        source_bin=self.create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname="sink_%u" %i
        sinkpad= streammux.get_request_pad(padname) 
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad=source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Create the tracker to follow detected objects across frames
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie1:
        sys.stderr.write(" Unable to make sgie1 \n")

    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    if not sgie2:
        sys.stderr.write(" Unable to make sgie2 \n")

    print("Creating tiler \n ")
    tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
       sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvvidconv1 \n ")
    #nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if(is_aarch64()):
        print("Creating transform \n ")
        transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")


    codec = "H264"
    bitrate=4000000
    # Make the encoder
    
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)
   
    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")
    
    # Make the UDP sink
    updsink_port_num = 5401
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)
    nvosd.set_property('process-mode', 1)



    # print("Creating EGLSink \n")
    # sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    # #sink = Gst.ElementFactory.make("nvoverlaysink", "nvvideo-renderer")
    # if not sink:
    #     sys.stderr.write(" Unable  to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 960)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size',number_sources )
    streammux.set_property('batched-push-timeout', 4000000)
    
    print(os.getcwd())
    
    pgie.set_property('config-file-path', "../config/people_config.txt")
    
    sgie1.set_property('config-file-path', "../config/abc.txt")
    sgie2.set_property('config-file-path', "../config/xyz.txt")
    #pgie.set_property('interval', self.interval)
    #pgie.set_property('interval', 10)
    #sgie1.set_property('secondary-reinfer-interval', 10)
    pgie_batch_size=pgie.get_property("batch-size")
    if(pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
        pgie.set_property("batch-size",number_sources)
    tiler_rows=int(math.sqrt(number_sources))
    tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows))
    tiler.set_property("rows",tiler_rows)
    tiler.set_property("columns",tiler_columns)
    tiler.set_property("width", self.TILED_OUTPUT_WIDTH)
    tiler.set_property("height", self.TILED_OUTPUT_HEIGHT)
    #sink.set_property("sync", 0)

    #Set properties of tracker
    config = configparser.ConfigParser()
    config.read('../config/mask_nomask_tracker_config.txt')
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width' :
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height' :
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id' :
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file' :
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file' :
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process' :
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)


    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        print("Adding elements to Pipeline----------------",int(pyds.NVBUF_MEM_CUDA_UNIFIED))
        #mem_type = int(pyds.NVBUF_MEM_CUDA_DEVICE)
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        #mem_type = 3
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")

print(“Adding elements to Pipeline \n”)
pipeline.add(pgie)
pipeline.add(tracker)
pipeline.add(sgie1)
pipeline.add(sgie2)

    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    #new added
    pipeline.add(nvosd)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")

    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie2)
    sgie2.link(sgie1)
    pgie.link(nvvidconv)
    nvvidconv.link(filter1)
    filter1.link(tiler)
    tiler.link(nvosd)
    nvosd.link(nvvidconv1)
    nvvidconv1.link(encoder)
    

    encoder.link(rtppay)
    if is_aarch64():
        rtppay.link(transform)
        transform.link(sink)
    else:
        rtppay.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    rtsp_port_num = 8554
    
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch( "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)




    
    # tiler_src_pad=pgie.get_static_pad("src")
    # if not tiler_src_pad:
    #     sys.stderr.write(" Unable to get src pad \n")
    # else:
    #     tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, self.tiler_src_pad_buffer_probe, 0)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get sink pad of tiler \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, self.tiler_sink_pad_buffer_probe, 0)

    # osdsinkpad = nvosd.get_static_pad("sink")
    # if not osdsinkpad:
    #     sys.stderr.write(" Unable to get sink pad of nvosd \n")
    # osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, self.osd_sink_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if (i != 0):
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)

The pipeline is wrong. For example, pgie's src pad is linked twice (`pgie.link(tracker)` and then `pgie.link(nvvidconv)`, so the second link fails), and sgie1 is never linked to anything downstream, which breaks the chain after the secondary classifiers. Can you refer to the DeepStream User Guide and sample codes to get the correct plugin information and pipeline samples? A corrected linking sketch follows the links below.

https://docs.nvidia.com/metropolis/deepstream/dev-guide/index.html


https://gstreamer.freedesktop.org/documentation/tutorials/basic/debugging-tools.html
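
For comparison, here is a minimal sketch of a linear linking order in the style of the deepstream_test_2 sample, reusing the element names from the code above (a sketch under those assumptions, not a verified drop-in fix; each element's src pad is linked exactly once):

    # one linear chain: mux -> detector -> tracker -> both classifiers -> display path
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie1)
    sgie1.link(sgie2)
    sgie2.link(nvvidconv)
    nvvidconv.link(filter1)
    filter1.link(tiler)
    tiler.link(nvosd)
    nvosd.link(nvvidconv1)
    nvvidconv1.link(encoder)
    encoder.link(rtppay)
    if is_aarch64():
        rtppay.link(transform)
        transform.link(sink)
    else:
        rtppay.link(sink)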