Timeoverlay in sample Python DeepStream app

• Hardware Platform: dGPU
• DeepStream Version: 6.0.1
• NVIDIA GPU Driver Version: 510.85.02
• Issue Type: Questions

I want to implement TimeOverlay and ClockOverlay in the sample Python DeepStream app, i.e. in the deepstream_imagedata-multistream.py file. Please help me implement ClockOverlay and TimeOverlay in the demo Python app.
Thanks in advance.

Do you have a specific technical question regarding the implementation?

I want to ask where in the pipeline I can add the ClockOverlay element and how to link it in the Python demo app.
Also, please provide some links to implementations of ClockOverlay in Python.

I have implemented it like this, but it's not working:

import sys

sys.path.append('../')
import gi
import configparser

gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
from ctypes import *
import time
import sys
import math
import platform
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.FPS import PERF_DATA
import numpy as np
import pyds
import cv2
import os
import os.path
from os import path

perf_data = None
frame_count = {}
saved_count = {}
global PGIE_CLASS_ID_VEHICLE
PGIE_CLASS_ID_VEHICLE = 0
global PGIE_CLASS_ID_PERSON
PGIE_CLASS_ID_PERSON = 2

MAX_DISPLAY_LEN = 64
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 4000000
TILED_OUTPUT_WIDTH = 1920
TILED_OUTPUT_HEIGHT = 1080
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
pgie_classes_str = ["Vehicle", "TwoWheeler", "Person", "RoadSign"]

MIN_CONFIDENCE = 0.3
MAX_CONFIDENCE = 0.4


# tiler_sink_pad_buffer_probe will extract metadata received on the tiler sink pad
# and update params for drawing rectangles, object information, etc.
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            # Periodically check for objects with borderline confidence value that may be false positive detections.
            # If such detections are found, annotate the frame with bboxes and confidence value.
            # Save the annotated frame to file.
            if saved_count["stream_{}".format(frame_meta.pad_index)] % 30 == 0 and (
                    MIN_CONFIDENCE < obj_meta.confidence < MAX_CONFIDENCE):
                if is_first_obj:
                    is_first_obj = False
                    # Getting Image data using nvbufsurface
                    # the input should be address of buffer and batch_id
                    n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
                    n_frame = draw_bounding_boxes(n_frame, obj_meta, obj_meta.confidence)
                    # convert python array into numpy array format in the copy mode.
                    frame_copy = np.array(n_frame, copy=True, order='C')
                    # convert the array into cv2 default color format
                    frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)


                save_image = True

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        print("Frame Number=", frame_number, "Number of Objects=", num_rects, "Vehicle_count=",
              obj_counter[PGIE_CLASS_ID_VEHICLE], "Person_count=", obj_counter[PGIE_CLASS_ID_PERSON])
        # update frame rate through this probe
        stream_index = "stream{0}".format(frame_meta.pad_index)
        global perf_data
        perf_data.update_fps(stream_index)
        if save_image:
            img_path = "{}/stream_{}/frame_{}.jpg".format(folder_name, frame_meta.pad_index, frame_number)
            cv2.imwrite(img_path, frame_copy)
        saved_count["stream_{}".format(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def draw_bounding_boxes(image, obj_meta, confidence):
    confidence = '{0:.2f}'.format(confidence)
    rect_params = obj_meta.rect_params
    top = int(rect_params.top)
    left = int(rect_params.left)
    width = int(rect_params.width)
    height = int(rect_params.height)
    obj_name = pgie_classes_str[obj_meta.class_id]
    # image = cv2.rectangle(image, (left, top), (left + width, top + height), (0, 0, 255, 0), 2, cv2.LINE_4)
    color = (0, 0, 255, 0)
    w_percents = int(width * 0.05) if width > 100 else int(width * 0.1)
    h_percents = int(height * 0.05) if height > 100 else int(height * 0.1)
    linetop_c1 = (left + w_percents, top)
    linetop_c2 = (left + width - w_percents, top)
    image = cv2.line(image, linetop_c1, linetop_c2, color, 6)
    linebot_c1 = (left + w_percents, top + height)
    linebot_c2 = (left + width - w_percents, top + height)
    image = cv2.line(image, linebot_c1, linebot_c2, color, 6)
    lineleft_c1 = (left, top + h_percents)
    lineleft_c2 = (left, top + height - h_percents)
    image = cv2.line(image, lineleft_c1, lineleft_c2, color, 6)
    lineright_c1 = (left + width, top + h_percents)
    lineright_c2 = (left + width, top + height - h_percents)
    image = cv2.line(image, lineright_c1, lineright_c2, color, 6)
    # Note that on some systems cv2.putText erroneously draws horizontal lines across the image
    image = cv2.putText(image, obj_name + ',C=' + str(confidence), (left - 10, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 0, 255, 0), 2)
    return image


def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)

    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    if (gstname.find("video") != -1):
        # Link the decodebin pad only if decodebin has picked nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")


def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)

    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property('drop-on-latency') != None:
            Object.set_property("drop-on-latency", True)

def create_source_bin(index, uri):
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin

def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" % args[0])
        sys.exit(1)

    global perf_data
    perf_data = PERF_DATA(len(args) - 2)
    number_sources = len(args) - 2

    global folder_name
    folder_name = args[-1]
    if path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)

    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

    ##CLOCKOVERLAY
    clockOverlay = Gst.ElementFactory.make("clockoverlay")
    clockOverlay.set_property("text","HAPPPY SKANJAKDNSDKJCNSDCKASKSCLAKC")
    clockOverlay.set_property("valign", "bottom")
    clockOverlay.set_property("halign", "right")
    clockOverlay.set_property("shaded-background", True)

    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    # sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    sink = Gst.ElementFactory.make("xvimagesink", "fakesink")

    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_imagedata_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    sink.set_property("sync", 0)
    sink.set_property("qos", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    # pipeline.add(timeOverlay)
    # CLOCKOVERLAY
    pipeline.add(clockOverlay)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        # nvosd.link(sink)
        # CLOCKOVERLAY
        nvosd.link(clockOverlay)
        clockOverlay.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
        # perf callback function to print fps every 5 sec
        GLib.timeout_add(5000, perf_data.perf_print_callback)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)


if __name__ == '__main__':
    sys.exit(main(sys.argv))

There is no demo for this plugin in our examples. The plugin may not support the NVMM memory type, so you can try adding an nvvideoconvert plugin before and after it.
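For illustration, a minimal sketch of that suggestion against the sample's main() above, reusing its pipeline, nvosd, and sink variables; the conv_sys/caps_sys/conv_nvmm names are made up for this sketch:

# Sketch only: clockoverlay works on system-memory video/x-raw buffers,
# so sandwich it between two nvvideoconvert elements.
conv_sys = Gst.ElementFactory.make("nvvideoconvert", "conv_sys")
caps_sys = Gst.ElementFactory.make("capsfilter", "caps_sys")
# No memory:NVMM feature in the caps => plain system memory for the overlay.
caps_sys.set_property("caps", Gst.Caps.from_string("video/x-raw, format=RGBA"))
clock_overlay = Gst.ElementFactory.make("clockoverlay", "clock-overlay")
clock_overlay.set_property("valignment", "bottom")
clock_overlay.set_property("halignment", "right")
clock_overlay.set_property("shaded-background", True)
conv_nvmm = Gst.ElementFactory.make("nvvideoconvert", "conv_nvmm")

for elem in (conv_sys, caps_sys, clock_overlay, conv_nvmm):
    pipeline.add(elem)

# Instead of nvosd.link(sink):
nvosd.link(conv_sys)
conv_sys.link(caps_sys)
caps_sys.link(clock_overlay)
clock_overlay.link(conv_nvmm)
conv_nvmm.link(sink)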

I have implemented it like this, but it is also not working. Please check whether I have implemented it correctly.
Please also suggest what I should try to change next to make it work.

import sys

sys.path.append('../')
import gi
import configparser

gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
from ctypes import *
import time
import sys
import math
import platform
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.FPS import PERF_DATA
import numpy as np
import pyds
import cv2
import os
import os.path
from os import path

perf_data = None
frame_count = {}
saved_count = {}
global PGIE_CLASS_ID_VEHICLE
PGIE_CLASS_ID_VEHICLE = 0
global PGIE_CLASS_ID_PERSON
PGIE_CLASS_ID_PERSON = 2

MAX_DISPLAY_LEN = 64
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
MUXER_OUTPUT_WIDTH = 1920
MUXER_OUTPUT_HEIGHT = 1080
MUXER_BATCH_TIMEOUT_USEC = 4000000
TILED_OUTPUT_WIDTH = 1920
TILED_OUTPUT_HEIGHT = 1080
GST_CAPS_FEATURES_NVMM = "memory:NVMM"
pgie_classes_str = ["Vehicle", "TwoWheeler", "Person", "RoadSign"]

MIN_CONFIDENCE = 0.3
MAX_CONFIDENCE = 0.4


# tiler_sink_pad_buffer_probe will extract metadata received on the tiler sink pad
# and update params for drawing rectangles, object information, etc.
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            # Periodically check for objects with borderline confidence value that may be false positive detections.
            # If such detections are found, annotate the frame with bboxes and confidence value.
            # Save the annotated frame to file.
            if saved_count["stream_{}".format(frame_meta.pad_index)] % 30 == 0 and (
                    MIN_CONFIDENCE < obj_meta.confidence < MAX_CONFIDENCE):
                if is_first_obj:
                    is_first_obj = False
                    # Getting Image data using nvbufsurface
                    # the input should be address of buffer and batch_id
                    n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
                    n_frame = draw_bounding_boxes(n_frame, obj_meta, obj_meta.confidence)
                    # convert python array into numpy array format in the copy mode.
                    frame_copy = np.array(n_frame, copy=True, order='C')
                    # convert the array into cv2 default color format
                    frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)
                    # cv2.imwrite(f"{frame_number}.jpg",frame_copy)


                save_image = True

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        print("Frame Number=", frame_number, "Number of Objects=", num_rects, "Vehicle_count=",
              obj_counter[PGIE_CLASS_ID_VEHICLE], "Person_count=", obj_counter[PGIE_CLASS_ID_PERSON])
        # update frame rate through this probe
        stream_index = "stream{0}".format(frame_meta.pad_index)
        global perf_data
        perf_data.update_fps(stream_index)
        if save_image:
            img_path = "{}/stream_{}/frame_{}.jpg".format(folder_name, frame_meta.pad_index, frame_number)
            cv2.imwrite(img_path, frame_copy)
        saved_count["stream_{}".format(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK


def draw_bounding_boxes(image, obj_meta, confidence):
    confidence = '{0:.2f}'.format(confidence)
    rect_params = obj_meta.rect_params
    top = int(rect_params.top)
    left = int(rect_params.left)
    width = int(rect_params.width)
    height = int(rect_params.height)
    obj_name = pgie_classes_str[obj_meta.class_id]
    # image = cv2.rectangle(image, (left, top), (left + width, top + height), (0, 0, 255, 0), 2, cv2.LINE_4)
    color = (0, 0, 255, 0)
    w_percents = int(width * 0.05) if width > 100 else int(width * 0.1)
    h_percents = int(height * 0.05) if height > 100 else int(height * 0.1)
    linetop_c1 = (left + w_percents, top)
    linetop_c2 = (left + width - w_percents, top)
    image = cv2.line(image, linetop_c1, linetop_c2, color, 6)
    linebot_c1 = (left + w_percents, top + height)
    linebot_c2 = (left + width - w_percents, top + height)
    image = cv2.line(image, linebot_c1, linebot_c2, color, 6)
    lineleft_c1 = (left, top + h_percents)
    lineleft_c2 = (left, top + height - h_percents)
    image = cv2.line(image, lineleft_c1, lineleft_c2, color, 6)
    lineright_c1 = (left + width, top + h_percents)
    lineright_c2 = (left + width, top + height - h_percents)
    image = cv2.line(image, lineright_c1, lineright_c2, color, 6)
    # Note that on some systems cv2.putText erroneously draws horizontal lines across the image
    image = cv2.putText(image, obj_name + ',C=' + str(confidence), (left - 10, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 0, 255, 0), 2)
    return image


def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)

    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    if (gstname.find("video") != -1):
        # Link the decodebin pad only if decodebin has picked nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")


def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)

    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property('drop-on-latency') != None:
            Object.set_property("drop-on-latency", True)

def create_source_bin(index, uri):
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.

    timeoverlay = Gst.ElementFactory.make("timeoverlay")
    timeoverlay.set_property("valignment", "center")
    timeoverlay.set_property("halignment", "center")
    nbin.add(timeoverlay)
    pad = timeoverlay.get_static_pad("video_sink")
    ghostpad = Gst.GhostPad.new("sink", pad)
    nbin.add_pad(ghostpad)
    # videosink = Gst.ElementFactory.make("fakesink")
    # nbin.add(videosink)
    # timeoverlay.link(videosink)
    timeoverlay.link(uri_decode_bin)
    # uri_decode_bin.link(timeoverlay)

    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin

def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" % args[0])
        sys.exit(1)

    global perf_data
    perf_data = PERF_DATA(len(args) - 2)
    number_sources = len(args) - 2

    global folder_name
    folder_name = args[-1]
    if path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)

    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

    # #HAPPY CHANGES

    clockOverlay = Gst.ElementFactory.make("clockoverlay")
    clockOverlay.set_property("text","HAPPPY SKANJAKDNSDKJCNSDCKASKSCLAKC")
    clockOverlay.set_property("valignment", "bottom")
    clockOverlay.set_property("halignment", "right")
    clockOverlay.set_property("shaded-background", True)

    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    # sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    sink = Gst.ElementFactory.make("fakesink", "fakesink")

    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_imagedata_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ",
              number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    sink.set_property("sync", 0)
    sink.set_property("qos", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    # pipeline.add(timeOverlay)
    pipeline.add(clockOverlay)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    
    # SEE HERE
    else:
        # nvosd.link(sink)
        nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2") 
        nvvidconv3 = Gst.ElementFactory.make("nvvideoconvert", "convertor3")
        pipeline.add(nvvidconv2)
        pipeline.add(nvvidconv3)
        nvosd.link(nvvidconv2)
        nvvidconv2.link(clockOverlay)
        clockOverlay.link(nvvidconv3)
        nvvidconv3.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
        # perf callback function to print fps every 5 sec
        GLib.timeout_add(5000, perf_data.perf_print_callback)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)


if __name__ == '__main__':
    sys.exit(main(sys.argv))

Could you add GST_DEBUG=3 at the beginning of your command line to enable logging, and attach the log for us?
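For example (assuming the script and arguments used elsewhere in this thread; the file path is a placeholder):

GST_DEBUG=3 python3 deepstream_imagedata-multistream.py file:///path/to/timer.mp4 frames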

This is the output after setting GST_DEBUG=3:

Frames will be saved in  frames
Creating Pipeline 
 
Creating streamux 
 
Creating source_bin  0  
 
Creating source bin
source-bin-00
Creating Pgie 
 
Creating nvvidconv1 
 
Creating filter1 
 
Creating tiler 
 
Creating nvvidconv 
 
Creating nvosd 
 
Creating EGLSink 

Adding elements to Pipeline 

Linking elements in the Pipeline 

Now playing...
1 :  file:///opt/nvidia/deepstream/deepstream-6.0/sources/deepstream_python_apps/apps/deepstream-imagedata-multistream/timer.mp4
Starting pipeline 

0:00:01.118410512 75437      0x3423b80 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1900> [UID = 1]: deserialized trt engine from :/opt/nvidia/deepstream/deepstream-6.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine
INFO: ../nvdsinfer/nvdsinfer_model_builder.cpp:610 [Implicit Engine Info]: layers num: 3
0   INPUT  kFLOAT input_1         3x368x640       
1   OUTPUT kFLOAT conv2d_bbox     16x23x40        
2   OUTPUT kFLOAT conv2d_cov/Sigmoid 4x23x40         

0:00:01.118459759 75437      0x3423b80 INFO                 nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger:<primary-inference> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2004> [UID = 1]: Use deserialized engine model: /opt/nvidia/deepstream/deepstream-6.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine
0:00:01.119112471 75437      0x3423b80 INFO                 nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<primary-inference> [UID 1]: Load new model:dstest_imagedata_config.txt sucessfully
0:00:01.119672492 75437      0x3423b80 WARN                 basesrc gstbasesrc.c:3583:gst_base_src_start_complete:<source> pad not activated yet
Decodebin child added: source 

Decodebin child added: decodebin0 

0:00:01.120825600 75437      0x3423b80 WARN                 basesrc gstbasesrc.c:3583:gst_base_src_start_complete:<source> pad not activated yet
Decodebin child added: qtdemux0 

0:00:01.125946563 75437 0x7f5ee00794f0 WARN                 qtdemux qtdemux_types.c:233:qtdemux_type_get: unknown QuickTime node type gsst
0:00:01.125982240 75437 0x7f5ee00794f0 WARN                 qtdemux qtdemux_types.c:233:qtdemux_type_get: unknown QuickTime node type gstd
0:00:01.125997710 75437 0x7f5ee00794f0 WARN                 qtdemux qtdemux.c:3031:qtdemux_parse_trex:<qtdemux0> failed to find fragment defaults for stream 1
0:00:01.126085527 75437 0x7f5ee00794f0 WARN                 qtdemux qtdemux.c:3031:qtdemux_parse_trex:<qtdemux0> failed to find fragment defaults for stream 2
Decodebin child added: multiqueue0 

Decodebin child added: h264parse0 

Decodebin child added: capsfilter0 

Decodebin child added: aacparse0 

Decodebin child added: avdec_aac0 

Decodebin child added: nvv4l2decoder0 

0:00:01.140322295 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140335672 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2937:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat MJPG
0:00:01.140342211 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140346856 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2943:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat MJPG
0:00:01.140358208 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140362871 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2937:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat MPG4
0:00:01.140366868 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140371521 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2943:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat MPG4
0:00:01.140382282 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140387053 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2937:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat MPG2
0:00:01.140391107 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140395447 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2943:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat MPG2
0:00:01.140403588 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140408065 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2937:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat H265
0:00:01.140413235 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140417596 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2943:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat H265
0:00:01.140424799 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140429297 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2937:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat VP90
0:00:01.140433337 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140437450 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2943:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat VP90
0:00:01.140443722 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140448324 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2937:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat VP80
0:00:01.140452516 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140456987 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2943:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat VP80
0:00:01.140464237 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140468844 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2937:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe minimum capture size for pixelformat H264
0:00:01.140472960 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:sink> Unable to try format: Unknown error -1
0:00:01.140477484 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2943:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:sink> Could not probe maximum capture size for pixelformat H264
0:00:01.140744186 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:src> Unable to try format: Unknown error -1
0:00:01.140752419 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2937:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:src> Could not probe minimum capture size for pixelformat NM12
0:00:01.140757058 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:3051:gst_v4l2_object_get_nearest_size:<nvv4l2decoder0:src> Unable to try format: Unknown error -1
0:00:01.140761918 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2943:gst_v4l2_object_probe_caps_for_format:<nvv4l2decoder0:src> Could not probe maximum capture size for pixelformat NM12
0:00:01.140768124 75437 0x7f5ed4011590 WARN                    v4l2 gstv4l2object.c:2388:gst_v4l2_object_add_interlace_mode:0x7f5ed8018380 Failed to determine interlace mode
In cb_newpad

In cb_newpad

0:00:01.254448169 75437 0x7f5ed4011590 WARN            v4l2videodec gstv4l2videodec.c:1685:gst_v4l2_video_dec_decide_allocation:<nvv4l2decoder0> Duration invalid, not setting latency
0:00:01.254478288 75437 0x7f5ed4011590 WARN          v4l2bufferpool gstv4l2bufferpool.c:1065:gst_v4l2_buffer_pool_start:<nvv4l2decoder0:pool:src> Uncertain or not enough buffers, enabling copy threshold
0:00:01.254866959 75437 0x7f5ed8328c00 WARN          v4l2bufferpool gstv4l2bufferpool.c:1512:gst_v4l2_buffer_pool_dqbuf:<nvv4l2decoder0:pool:src> Driver should never set v4l2_buffer.field to ANY
Frame Number= 0 Number of Objects= 3 Vehicle_count= 0 Person_count= 3
Frame Number= 1 Number of Objects= 3 Vehicle_count= 0 Person_count= 3
Frame Number= 2 Number of Objects= 3 Vehicle_count= 0 Person_count= 3
Frame Number= 3 Number of Objects= 3 Vehicle_count= 0 Person_count= 3
Frame Number= 4 Number of Objects= 3 Vehicle_count= 0 Person_count= 3

The log doesn't show any obvious errors. Could you first use the gst-launch-1.0 CLI to play your pipeline? For example: https://forums.developer.nvidia.com/t/gstreamer-issue-with-adding-timeoverlay-on-rtmp-stream/108155.

Here is the command with gst-launch
gst-launch-1.0 uridecodebin uri=file:///workspace/timer.mp4 ! nvvideoconvert ! timeoverlay ! nvvideoconvert ! autovideosink

This command works, but when I implement it in the Python code it does not. The pipeline runs fine, but the output is not overlaid.

Also, please tell me how to connect it with nvstreammux in a gst-launch command.

This is how I have used it in the deepstream sample app.

def create_source_bin(index, uri):
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    timeoverlay = Gst.ElementFactory.make("timeoverlay")

    nvvideoconv1 = Gst.ElementFactory.make("nvvideoconvert","nvvideoconv1")

    nvvideoconv2 = Gst.ElementFactory.make("nvvideoconvert","nvvideoconv2")
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)

    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin, uri_decode_bin)
    Gst.Bin.add(nbin, nvvideoconv1)
    Gst.Bin.add(nbin, nvvideoconv2)
    Gst.Bin.add(nbin, timeoverlay)
    uri_decode_bin.link(nvvideoconv1)
    nvvideoconv1.link(timeoverlay)
    timeoverlay.link(nvvideoconv2)
    srcpad = nvvideoconv2.get_static_pad("src")
    if not srcpad:
        logger.error("Unable to create src pad filter")
    print("Inside SOurce bin")
    ghost_pad_src = Gst.GhostPad.new("src", srcpad)
    Gst.Pad.set_active(ghost_pad_src, True)
    Gst.Element.add_pad(nbin, ghost_pad_src)
    # bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    # if not bin_pad:
    #     sys.stderr.write(" Failed to add ghost pad in source bin \n")
    #     return None
    return nbin

Thanks in Advance

You can refer to the following example:

gst-launch-1.0 -v uridecodebin uri=xxx ! queue ! nvstreammux0.sink_0 nvstreammux name=nvstreammux0 batch-size=1 batched-push-timeout=-1 width=1920 height=1080 live-source=1 ! queue ! nvinfer config-file-path=xxx model-engine-file=xxx ! queue ! nvvideoconvert ! video/x-raw,format=RGBA ! videoconvert ! fakesink silent=false uridecodebin uri=xxx ! queue ! nvstreammux0.sink_1

Is this run with gst-launch-1.0? Could you attach your timer.mp4 file?
Also, you can try to use the nvdsosd plugin to draw the text or other overlays on the video.

No, I ran it in the Python DeepStream app.

Can you tell me how to draw a clock overlay using nvdsosd, and also provide the gst-launch command using uridecodebin, nvstreammux, and nvdsosd?

Thanks in advance.

You can refer to our open-source code deepstream_test_1.py:
In the osd_sink_pad_buffer_probe function, you can learn how to draw text on the video via py_nvosd_text_params. You can get the timestamp and set it to py_nvosd_text_params.display_text.
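A minimal sketch of that approach, modeled on deepstream_test_1.py's probe (it assumes the Gst and pyds imports already present in the app; the timestamp format is illustrative):

from datetime import datetime

def osd_sink_pad_buffer_probe(pad, info, u_data):
    # Draw the current wall-clock time on every frame via NvDsDisplayMeta.
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        # Acquire display meta from the batch pool and fill one text label.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        text_params = display_meta.text_params[0]
        text_params.display_text = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        text_params.x_offset = 10
        text_params.y_offset = 12
        text_params.font_params.font_name = "Serif"
        text_params.font_params.font_size = 14
        text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)  # white text
        text_params.set_bg_clr = 1
        text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)  # black background
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK

# Attach it to the nvdsosd sink pad, e.g.:
# osd_sink_pad = nvosd.get_static_pad("sink")
# osd_sink_pad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)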

I have tried it; it now displays the timestamp, but it also draws the bounding boxes, which I don't want. Please tell me how to stop it from displaying the bounding boxes and labels so that only the TIMESTAMP is shown.

Thanks in advance

You can try to set the bbox border alpha value to 0, like below:

obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)  # alpha
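For context, a sketch of where that line sits: it must run for every detected object, i.e. inside the object loop of a buffer probe (the border_width line is an extra assumption, not part of the reply above):

# Inside the per-frame loop of a buffer probe:
l_obj = frame_meta.obj_meta_list
while l_obj is not None:
    obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
    obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)  # alpha = 0
    obj_meta.rect_params.border_width = 0  # assumption: also collapse the border
    try:
        l_obj = l_obj.next
    except StopIteration:
        break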

No, it didn't work; setting the alpha to zero had no effect.
This is how my probe looks:

def osd_sink_buffer_probe(pad, info):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        logger.error("Unable to get GstBuffer.\n")
        return Gst.PadProbeReturn.DROP
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    for l_frame in pyds_iterate_from(batch_meta.frame_meta_list):
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        # frame_number = frame_meta.frame_num
        # num_rects = frame_meta.num_obj_meta

        # for l_obj in pyds_iterate_from(frame_meta.obj_meta_list):
            # obj_meta_1 = pyds.NvDsObjectMeta.cast(l_obj.data)
            # obj_meta_1.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)
        # Acquire display meta from the pool to draw custom text
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1

        py_nvosd_rect_params = display_meta.rect_params[0]
        py_nvosd_rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)  # alpha set to zero
        py_nvosd_text_params = display_meta.text_params[0]

        from datetime import datetime
        # py_nvosd_rect_params.border_width = 0
        py_nvosd_text_params.display_text = f"{datetime.now()}"
        py_nvosd_text_params.set_bg_clr = 0
        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 20
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        # print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)


    return Gst.PadProbeReturn.OK


It's weird. Could you run our demo code deepstream_test_1.py first? You can change the following code to check whether the bbox changes:

obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0) -> obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 1.0)

If the bbox is not shown after the change, it may be a problem with your environment. You can try updating DeepStream to the latest version.
