Visualization bug when using preprocessing and metamux

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU): NVIDIA RTX 6000 Ada Generation
• DeepStream Version: 7.0.0
• JetPack Version (valid for Jetson only): n/a
• TensorRT Version: 8.6.1.6
• NVIDIA GPU Driver Version (valid for GPU only): 535.183.01
• Issue Type (questions, new requirements, bugs): Bug

Hello,

we would kindly like to ask for help with an issue in the following DeepStream pipeline.

As can be seen in the pipeline drawing below, the pipeline incorporates a metamux element that combines a detection branch with a simple pass-through of the input video sources. This is a minimal representation of a larger pipeline with multiple parallel inferences. The detection branch includes a preprocessing element and a tracking element.

Observing the pipeline's output reveals the issue: detections from the first source are sporadically drawn on the output of the second source and vice versa. This behaviour can be seen in the video attached below.

Please note that when the preprocessing is disabled via

enable=0

in the corresponding configuration file, the issue does not appear.
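
To make the cross-over measurable beyond visual inspection, a buffer probe on the OSD sink pad can log which source each frame's metadata belongs to. This is only a minimal sketch, assuming the pyds bindings from deepstream_python_apps are installed (the probe name is ours):

import pyds

def osd_sink_probe(pad, info, u_data):
    # Log source/pad index and object count per batched frame so that
    # detections attached to the wrong stream also show up in the console.
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        print(f"frame={frame_meta.frame_num} source_id={frame_meta.source_id} "
              f"pad_index={frame_meta.pad_index} objects={frame_meta.num_obj_meta}")
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK

# attached after the elements are created:
# osd.get_static_pad("sink").add_probe(Gst.PadProbeType.BUFFER, osd_sink_probe, 0)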

Below we have attached the pipeline's implementation in Python and the configuration files for the preprocessing and metamux elements.

We would be very happy to receive assistance in solving this issue. Thank you for your time!

Demo video of the issue:

In case the video does not work, here are two consecutive frames showing the issue:


Pipeline drawing:
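
In text form (reconstructed from the linking code below), the pipeline is:

source_0_bin ─┐
              ├─► streammux ─► queue ─► streamdemux ─┬─► tee_source_0
source_1_bin ─┘                                      └─► tee_source_1

each tee ─┬─► streammux_detect ─► queue ─► preprocess ─► queue ─► pgie ─► queue ─► tracker ─► queue ─► metamux (sink_1)
          └─► streammux_skip ──► queue ──────────────────────────────────────────────────────────────► metamux (sink_0)

metamux ─► queue ─► multistreamtiler ─► queue ─► videoconvert ─► queue ─► osd ─► queue ─► eglglessink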

Pipeline implementation:

import sys

import gi
gi.require_version("Gst", "1.0")
from gi.repository import GLib, Gst

# taken from deepstream-test3
def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    if not caps:
        caps = decoder_src_pad.query_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)
    print("gstname=", gstname)
    if gstname.find("video") != -1:
        print("features=", features)
        if features.contains("memory:NVMM"):
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write("Error: Decodebin did not pick nvidia decoder plugin.\n")


# taken from deepstream-test3
def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)
    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property("drop-on-latency") is not None:
            Object.set_property("drop-on-latency", True)


# taken from deepstream-test3
def create_source_bin(index, uri):
    print("Creating source bin")
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    uri_decode_bin.set_property("uri", uri)
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin


if __name__ == "__main__":
    Gst.init(None)
    pipeline = Gst.Pipeline()

    # ====================================================
    # CREATE ELEMENTS FROM LEFT TO RIGHT
    # ====================================================

    source_0_bin = create_source_bin(0, "file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4")
    source_1_bin = create_source_bin(1, "file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_push.mov")
    
    streammux = Gst.ElementFactory.make("nvstreammux", "streammux")
    streammux.set_property("width", 1920)
    streammux.set_property("height", 1080)
    streammux.set_property("batch-size", 2)

    queue_streammux_to_streamdemux = Gst.ElementFactory.make("queue", "queue_streammux_to_streamdemux")

    streamdemux = Gst.ElementFactory.make("nvstreamdemux", "streamdemux")

    tee_source_0 = Gst.ElementFactory.make("tee", "tee_source_0")
    tee_source_1 = Gst.ElementFactory.make("tee", "tee_source_1")

    streammux_skip = Gst.ElementFactory.make("nvstreammux", "streammux_skip")
    streammux_skip.set_property("width", 1920)
    streammux_skip.set_property("height", 1080)
    streammux_skip.set_property("batch-size", 2)

    streammux_detect = Gst.ElementFactory.make("nvstreammux", "streammux_detect")
    streammux_detect.set_property("width", 1920)
    streammux_detect.set_property("height", 1080)
    streammux_detect.set_property("batch-size", 2)
    
    queue_streammux_skip_to_metamux = Gst.ElementFactory.make("queue", "queue_streammux_skip_to_metamux")
    queue_streammux_detect_to_preprocess = Gst.ElementFactory.make("queue", "queue_streammux_detect_to_preprocess")

    preprocess = Gst.ElementFactory.make("nvdspreprocess", "preprocess")
    preprocess.set_property("config-file", "config_preprocess.txt")

    queue_preprocess_to_pgie = Gst.ElementFactory.make("queue", "queue_preprocess_to_pgie")

    pgie = Gst.ElementFactory.make("nvinfer", "pgie")
    pgie.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt")
    if preprocess.get_property("enable"):
        pgie.set_property("input-tensor-meta", 1)

    queue_pgie_to_tracker = Gst.ElementFactory.make("queue", "queue_pgie_to_tracker")
    
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    tracker.set_property("tracker-width", 960)
    tracker.set_property("tracker-height", 544)
    tracker.set_property("ll-lib-file", "/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so")
    tracker.set_property("ll-config-file",
                        "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml")
    if preprocess.get_property("enable"):
        tracker.set_property("input-tensor-meta", 1)
        tracker.set_property("tensor-meta-gie-id", 1)

    queue_tracker_to_metamux = Gst.ElementFactory.make("queue", "queue_tracker_to_metamux")

    metamux = Gst.ElementFactory.make("nvdsmetamux", "metamux")
    metamux.set_property("config-file", "config_metamux.txt")

    queue_metamux_to_multistreamtiler = Gst.ElementFactory.make("queue", "queue_metamux_to_multistreamtiler")

    multistreamtiler = Gst.ElementFactory.make("nvmultistreamtiler", "multistreamtiler")
    multistreamtiler.set_property("rows", 2)
    multistreamtiler.set_property("columns", 1)
    multistreamtiler.set_property("width", 960)
    multistreamtiler.set_property("height", 1080)

    queue_multistreamtiler_to_videoconvert = Gst.ElementFactory.make("queue", "queue_multistreamtiler_to_videoconvert")

    videoconvert = Gst.ElementFactory.make("nvvideoconvert", "videoconvert")

    queue_videoconvert_to_osd = Gst.ElementFactory.make("queue", "queue_videoconvert_to_osd")

    osd = Gst.ElementFactory.make("nvdsosd", "osd")

    queue_osd_to_eglglessink = Gst.ElementFactory.make("queue", "queue_osd_to_eglglessink")

    eglglessink = Gst.ElementFactory.make("nveglglessink", "eglglessink")
    eglglessink.set_property("sync", 1)

    # ====================================================
    # ADD ELEMENTS TO PIPELINE FROM LEFT TO RIGHT
    # ====================================================

    pipeline.add(source_0_bin)
    pipeline.add(source_1_bin)
    pipeline.add(streammux)
    pipeline.add(queue_streammux_to_streamdemux)
    pipeline.add(streamdemux)
    pipeline.add(tee_source_0)
    pipeline.add(tee_source_1)
    pipeline.add(streammux_detect)
    pipeline.add(streammux_skip)
    pipeline.add(queue_streammux_skip_to_metamux)
    pipeline.add(queue_streammux_detect_to_preprocess)
    pipeline.add(preprocess)
    pipeline.add(queue_preprocess_to_pgie)
    pipeline.add(pgie)
    pipeline.add(queue_pgie_to_tracker)
    pipeline.add(tracker)
    pipeline.add(queue_tracker_to_metamux)
    pipeline.add(metamux)
    pipeline.add(queue_metamux_to_multistreamtiler)
    pipeline.add(multistreamtiler)
    pipeline.add(queue_multistreamtiler_to_videoconvert)
    pipeline.add(videoconvert)
    pipeline.add(queue_videoconvert_to_osd)
    pipeline.add(osd)
    pipeline.add(queue_osd_to_eglglessink)
    pipeline.add(eglglessink)

    # ====================================================
    # LINK ELEMENTS FROM LEFT TO RIGHT
    # ====================================================

    # sources to streammux
    source_0_bin_src_pad = source_0_bin.get_static_pad("src")
    source_1_bin_src_pad = source_1_bin.get_static_pad("src")
    streammux_sink_pad_0 = streammux.request_pad_simple("sink_0")
    streammux_sink_pad_1 = streammux.request_pad_simple("sink_1")
    source_0_bin_src_pad.link(streammux_sink_pad_0)
    source_1_bin_src_pad.link(streammux_sink_pad_1)

    # streammux to streamdemux
    streammux.link(queue_streammux_to_streamdemux)
    queue_streammux_to_streamdemux.link(streamdemux)

    # streamdemux to tees
    streamdemux_src_pad_0 = streamdemux.request_pad_simple("src_0")
    streamdemux_src_pad_1 = streamdemux.request_pad_simple("src_1")
    tee_source_0_sink_pad = tee_source_0.get_static_pad("sink")
    tee_source_1_sink_pad = tee_source_1.get_static_pad("sink")
    streamdemux_src_pad_0.link(tee_source_0_sink_pad)
    streamdemux_src_pad_1.link(tee_source_1_sink_pad)

    # tees to streammuxer (skip and detect)
    tee_source_0_src_pad_0 = tee_source_0.request_pad_simple("src_0")
    tee_source_0_src_pad_1 = tee_source_0.request_pad_simple("src_1")
    tee_source_1_src_pad_0 = tee_source_1.request_pad_simple("src_0")
    tee_source_1_src_pad_1 = tee_source_1.request_pad_simple("src_1")
    streammux_skip_sink_pad_0 = streammux_skip.request_pad_simple("sink_0")
    streammux_skip_sink_pad_1 = streammux_skip.request_pad_simple("sink_1")
    streammux_detect_sink_pad_0 = streammux_detect.request_pad_simple("sink_0")
    streammux_detect_sink_pad_1 = streammux_detect.request_pad_simple("sink_1")
    tee_source_0_src_pad_0.link(streammux_detect_sink_pad_0)
    tee_source_0_src_pad_1.link(streammux_skip_sink_pad_0)
    tee_source_1_src_pad_0.link(streammux_detect_sink_pad_1)
    tee_source_1_src_pad_1.link(streammux_skip_sink_pad_1)

    # streammux_skip to metamux
    streammux_skip.link(queue_streammux_skip_to_metamux)
    metamux_sink_pad_0 = metamux.request_pad_simple("sink_0")
    queue_streammux_skip_to_metamux_src_pad = queue_streammux_skip_to_metamux.get_static_pad("src")
    queue_streammux_skip_to_metamux_src_pad.link(metamux_sink_pad_0)

    # streammux_detect to preprocess
    streammux_detect.link(queue_streammux_detect_to_preprocess)
    queue_streammux_detect_to_preprocess.link(preprocess)

    # preprocess to pgie
    preprocess.link(queue_preprocess_to_pgie)
    queue_preprocess_to_pgie.link(pgie)

    # pgie to tracker
    pgie.link(queue_pgie_to_tracker)
    queue_pgie_to_tracker.link(tracker)

    # tracker to metamux
    tracker.link(queue_tracker_to_metamux)
    metamux_sink_pad_1 = metamux.request_pad_simple("sink_1")
    queue_tracker_to_metamux_src_pad = queue_tracker_to_metamux.get_static_pad("src")
    queue_tracker_to_metamux_src_pad.link(metamux_sink_pad_1)
    
    # metamux to multistreamtiler
    metamux.link(queue_metamux_to_multistreamtiler)
    queue_metamux_to_multistreamtiler.link(multistreamtiler)

    # multistreamtiler to videoconvert
    multistreamtiler.link(queue_multistreamtiler_to_videoconvert)
    queue_multistreamtiler_to_videoconvert.link(videoconvert)

    # videoconvert to osd
    videoconvert.link(queue_videoconvert_to_osd)
    queue_videoconvert_to_osd.link(osd)

    # osd to eglglessink.
    osd.link(queue_osd_to_eglglessink)
    queue_osd_to_eglglessink.link(eglglessink)

    loop = GLib.MainLoop()
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except Exception as e:
        print(e)

    pipeline.set_state(Gst.State.NULL)
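
(Note: the script above installs no bus watch, so it never leaves loop.run() on EOS or errors. When running it standalone, a minimal watch along the usual GLib pattern can be added right after the main loop is created:)

def on_bus_message(bus, message, loop):
    # Quit the main loop on pipeline errors or end-of-stream so the
    # script can reach the final set_state(Gst.State.NULL).
    if message.type == Gst.MessageType.ERROR:
        err, dbg = message.parse_error()
        sys.stderr.write(f"Error: {err}: {dbg}\n")
        loop.quit()
    elif message.type == Gst.MessageType.EOS:
        loop.quit()

bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect("message", on_bus_message, loop)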

Configuration preprocessing:

[property]
enable=1
    # list of component gie-id for which tensor is prepared
target-unique-ids=1
    # 0=NCHW, 1=NHWC, 2=CUSTOM
network-input-order=0
    # 0=process on objects 1=process on frames
process-on-frame=1
    #uniquely identify the metadata generated by this element
unique-id=1
    # gpu-id to be used
gpu-id=0
    # if enabled maintain the aspect ratio while scaling
maintain-aspect-ratio=1
    # if enabled pad symmetrically with maintain-aspect-ratio enabled
symmetric-padding=1
    # processing width/height at which the image is scaled
processing-width=960
processing-height=544
    # max buffer in scaling buffer pool
scaling-buf-pool-size=6
    # max buffer in tensor buffer pool
tensor-buf-pool-size=6
    # tensor shape based on network-input-order
network-input-shape= 8;3;544;960
    # 0=RGB, 1=BGR, 2=GRAY
network-color-format=0
    # 0=FP32, 1=UINT8, 2=INT8, 3=UINT32, 4=INT32, 5=FP16
tensor-data-type=0
    # tensor name same as input layer name
tensor-name=input_1
    # 0=NVBUF_MEM_DEFAULT 1=NVBUF_MEM_CUDA_PINNED 2=NVBUF_MEM_CUDA_DEVICE 3=NVBUF_MEM_CUDA_UNIFIED
scaling-pool-memory-type=0
    # 0=NvBufSurfTransformCompute_Default 1=NvBufSurfTransformCompute_GPU 2=NvBufSurfTransformCompute_VIC
scaling-pool-compute-hw=0
    # Scaling Interpolation method
    # 0=NvBufSurfTransformInter_Nearest 1=NvBufSurfTransformInter_Bilinear 2=NvBufSurfTransformInter_Algo1
    # 3=NvBufSurfTransformInter_Algo2 4=NvBufSurfTransformInter_Algo3 5=NvBufSurfTransformInter_Algo4
    # 6=NvBufSurfTransformInter_Default
scaling-filter=0
    # custom library .so path having custom functionality
custom-lib-path=/opt/nvidia/deepstream/deepstream/lib/gst-plugins/libcustom2d_preprocess.so
    # custom tensor preparation function name having predefined input/outputs
    # check the default custom library nvdspreprocess_lib for more info
custom-tensor-preparation-function=CustomTensorPreparation

[user-configs]
   # Below parameters get used when using default custom library nvdspreprocess_lib
   # network scaling factor
pixel-normalization-factor=0.003921568
   # mean file path in ppm format
#mean-file=
   # array of offsets for each channel
#offsets=

[group-0]
src-ids=0;1
custom-input-transformation-function=CustomAsyncTransformation
process-on-roi=1
roi-params-src-0=0;0;1920;1080
roi-params-src-1=0;0;1920;1080

Configuration Metamux:

[property]
enable=1
# sink pad whose data will be passed to the src pad.
active-pad=sink_0
# pts tolerance in microseconds; the default is 60 ms (60000).
pts-tolerance=60000

[user-configs]

[group-0]
# src-ids-model-<model unique ID>=<source ids>
# mux all sources if this is not set.
# src-ids-model-1=0
# src-ids-model-2=1

Could you try this with the new nvstreammux?
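
For reference: the new nvstreammux is enabled by exporting USE_NEW_NVSTREAMMUX=yes in the environment before launching the script. Since the new mux performs no scaling or padding, it has no width/height properties, so (as a sketch of the required change) each of the three muxer setups above reduces to:

streammux = Gst.ElementFactory.make("nvstreammux", "streammux")
streammux.set_property("batch-size", 2)
# further tuning goes into a config file via the "config-file-path"
# property instead of individual element properties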

Using the new nvstreammux solved the issue.
Thank you for your fast reply!
