How to pass variables to the next frame?

• Hardware Platform (Jetson / GPU)
Jetson Xavier NX
• DeepStream Version
DeepStream 6.0
• JetPack Version (valid for Jetson only)
4.6.1-b110
• TensorRT Version
8.2.1.8

Hi, I am trying to implement a people counter by combining a centroid tracker with DeepStream via the Python bindings (based on deepstream_test_1_usb.py).

However, it seems that every frame starts from the beginning of the osd_sink_pad_buffer_probe function, and the variables from the previous frame are not carried over. I would be glad if you could tell me how to pass variables to the next frame. Here is the code (I am modifying the osd_sink_pad_buffer_probe function):

#!/usr/bin/env python3

################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

import sys
sys.path.append('../')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call

import pyds
import cv2, dlib
import numpy as np

from mylib.centroidtracker import CentroidTracker
from mylib.trackableobject import TrackableObject


PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_FACE = 3



def osd_sink_pad_buffer_probe(pad,info,u_data):

    ####################
    # counter preparation
    ####################
    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=20)
    trackers = []
    trackableObjects = {}
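    # NOTE: everything initialized in this function -- the tracker, the
    # trackableObjects dict, the counters below -- is re-created on every
    # probe invocation, so no state survives from one frame to the next.
    # This is the behaviour described in the question above.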

    rects = []

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalDown = 0
    totalUp = 0
    x = []
    empty=[]
    empty1=[]

    

    check = False

    frame_number=0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_FACE:0
    }
    num_rects=0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break


        print(check)
        if check:
            print(to)

        frame_number=frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj=frame_meta.obj_meta_list

        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break



            ################################
            # counting and face recognizing
            ################################
            obj_ucid = obj_meta.unique_component_id
            obj_id = obj_meta.class_id
            obj_label = obj_meta.obj_label
            confidence = obj_meta.confidence
            x_left = int(obj_meta.rect_params.left)
            x_right = int(obj_meta.rect_params.left + obj_meta.rect_params.width)
            y_top = int(obj_meta.rect_params.top)
            y_bottom = int(obj_meta.rect_params.top + obj_meta.rect_params.height)
            coordinates = (y_top, x_right, y_bottom, x_left)

            if obj_label == "Person":
                rects.append(coordinates)


            obj_counter[obj_meta.class_id] += 1
            try: 
                l_obj=l_obj.next
            except StopIteration:
                break

 
        # Acquire a display meta object for this frame. The memory ownership
        # remains in the C code so downstream plugins can still access it;
        # otherwise the garbage collector would claim it when this probe exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        # frame dimensions, used for the counting line below
        W = frame_meta.source_frame_width
        H = frame_meta.source_frame_height

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        print("rects", rects)
        print("objects", objects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            print("objectID", objectID, "centroid", centroid)
            to = trackableObjects.get(objectID, None)
            

            # if there is no existing trackable object, create one
            print("to0", to)  # Why None?
            if to is None:
                to = TrackableObject(objectID, centroid)
                print("to1", to)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                print("to.counted", to.counted)
                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        empty.append(totalUp)
                        to.counted = True
                        print("totalUp", totalUp)

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        print("totalDown", totalDown)
                        empty1.append(totalDown)


                        to.counted = True

                    x = []
                    # compute the sum of total people inside
                    x.append(len(empty1) - len(empty))
                    print("Total people inside:", x)


            # store the trackable object in our dictionary
            print("trackableObjectID", objectID)
            print(trackableObjects)
            trackableObjects[objectID] = to
            print("to2", to)
            print("\n")

            # draw both the ID of the object and the centroid of the object
            # on the output frame. Note that num_circles and num_labels stay
            # at 1 here, so each object in this loop overwrites the previous
            # object's circle and label.
            display_meta.num_circles = 1
            py_nvosd_circle_params = display_meta.circle_params[display_meta.num_circles-1]
            py_nvosd_circle_params.xc = centroid[1]
            py_nvosd_circle_params.yc = centroid[0]
            py_nvosd_circle_params.radius = 10
            py_nvosd_circle_params.bg_color.set(1.0, 0.0, 0.0, 1.0)
            py_nvosd_circle_params.circle_color.set(1.0, 1.0, 1.0, 1.0)

            display_meta.num_labels = 1
            py_nvosd_text_params = display_meta.text_params[0]
            py_nvosd_text_params.display_text =  "ID {}".format(objectID)
            py_nvosd_text_params.x_offset = centroid[1]
            py_nvosd_text_params.y_offset = centroid[0]
             # Font , font-color and font-size
            py_nvosd_text_params.font_params.font_name = "Serif"
            py_nvosd_text_params.font_params.font_size = 30
            # set(red, green, blue, alpha); set to White
            py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)


        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        display_meta.num_lines = 1
        py_nvosd_line_params = display_meta.line_params[0]
        py_nvosd_line_params.line_width = 4
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        py_nvosd_line_params.x1 = 0
        py_nvosd_line_params.y1 = H // 2
        py_nvosd_line_params.x2 = W
        py_nvosd_line_params.y2 = H // 2

        check = True
        

        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    if not caps_v4l2src:
        sys.stderr.write(" Unable to create v4l2src capsfilter \n")


    print("Creating Video Converter \n")

    # Adding videoconvert -> nvvideoconvert as not all
    # raw formats are supported by nvvideoconvert;
    # Say YUYV is unsupported - which is the common
    # raw format for many logi usb cams
    # In case we have a camera with raw format supported in
    # nvvideoconvert, GStreamer plugins' capability negotiation
    # shall be intelligent enough to reduce compute by
    # videoconvert doing passthrough (TODO we need to confirm this)


    # videoconvert to make sure a superset of raw formats are supported
    vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
    if not vidconvsrc:
        sys.stderr.write(" Unable to create videoconvert \n")

    # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    if not nvvidconvsrc:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")

    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_vidconvsrc:
        sys.stderr.write(" Unable to create capsfilter \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on camera's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    print("Playing cam %s " %args[1])
    caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
    caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
    source.set_property('device', args[1])
    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "shelltus_edge_pgie_config.txt")
    # Set sync = false to avoid late frame drops at the display-sink
    sink.set_property('sync', False)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(caps_v4l2src)
    pipeline.add(vidconvsrc)
    pipeline.add(nvvidconvsrc)
    pipeline.add(caps_vidconvsrc)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # we link the elements together
    # v4l2src -> nvvideoconvert -> mux -> 
    # nvinfer -> nvvideoconvert -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(caps_v4l2src)
    caps_v4l2src.link(vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    nvvidconvsrc.link(caps_vidconvsrc)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = caps_vidconvsrc.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    # Let's add a probe to get informed of the metadata generated. We add
    # the probe to the sink pad of the osd element, since by that time the
    # buffer will have received all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)


    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    sys.exit(main(sys.argv))


What kind of “variables of the previous frame” do you need?

@Fiona.Chen , thank you for replying.
I want to pass the variable “to”, in which the trackableObjects are stored.

Do you mean you want to get some “variables” from previous frame in osd_sink_pad_buffer_probe()?

The probe function is triggered for every GstBuffer. You can take the variable from the current frame and store it in a global data structure, so that when the function is triggered again by the next frame, the stored variable is from the previous frame.

You can design your app according to your requirement. It has nothing to do with DeepStream.
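For reference, here is a minimal, self-contained sketch of that idea in plain Python (no DeepStream required): state that must survive between frames lives at module level, and the callback only reads and updates it. The buffer_probe function and state dict below are illustrative stand-ins for the real probe, not part of the bindings.

# Module-level state is created once and persists across callback calls.
state = {
    "frame_count": 0,         # persists across calls
    "trackableObjects": {},   # objectID -> tracking history, persists too
}

def buffer_probe(pad, info, u_data):
    # Called once per buffer. Because `state` lives at module level,
    # this call sees whatever the previous call stored in it.
    state["frame_count"] += 1
    rects = []  # per-frame scratch data is still created locally
    print("probe call", state["frame_count"],
          "objects carried over:", len(state["trackableObjects"]))
    return None  # the real probe would return Gst.PadProbeReturn.OK

# Simulate three buffers arriving:
for _ in range(3):
    buffer_probe(None, None, None)

Applied to the code above, that would mean moving ct, trackableObjects, totalUp and totalDown out of osd_sink_pad_buffer_probe to module level (with a global declaration for the names that get rebound inside the probe).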


Thank you very much. I am not sure about “storing variables in a global data structure”…
Could you show me a reference, if possible? I am sorry that this question does not relate to DeepStream itself.

This is a matter of Python coding skills. Maybe something like this: Python Multithreading Tutorial: Concurrency and Parallelism | Toptal.

Please refer to Python resources.
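As an alternative to module-level globals, a mutable state object can be handed to the probe through the user-data argument of add_probe(): the literal 0 passed in the code above can be any Python object, and GStreamer hands the same object back on every invocation. A sketch under that assumption, with a hypothetical CounterState helper:

class CounterState:
    """Everything that must survive from one frame to the next."""
    def __init__(self):
        self.trackableObjects = {}  # objectID -> TrackableObject
        self.totalUp = 0
        self.totalDown = 0

def osd_sink_pad_buffer_probe(pad, info, u_data):
    # GStreamer hands back the same u_data object on every call, so
    # fields updated here are visible when the next buffer arrives.
    u_data.totalUp += 1  # stand-in for the real counting logic
    print("totalUp so far:", u_data.totalUp)
    return None  # the real probe returns Gst.PadProbeReturn.OK

# Demonstration without a pipeline -- the same state object persists:
state = CounterState()
osd_sink_pad_buffer_probe(None, None, state)  # totalUp so far: 1
osd_sink_pad_buffer_probe(None, None, state)  # totalUp so far: 2

# In main(), the literal 0 would become the state object:
#   osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, state)

This avoids globals entirely and keeps all cross-frame state in one place.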


Thank you very much. I will learn it. I appreciate your support.
And I will try deepstream-occupancy-analysis as well.
