Please provide complete information as applicable to your setup.
System 1 (dGPU):
• Hardware Platform (Jetson / GPU) NVIDIA RTX 6000 Ada Generation
• DeepStream Version 7.0.0
• JetPack Version (valid for Jetson only)
• TensorRT Version 8.6.1.6
• NVIDIA GPU Driver Version (valid for GPU only) 535.183.01
• Issue Type (questions, new requirements, bugs) Bug
System 2 (Jetson):
• Hardware Platform (Jetson / GPU) Jetson Orin NX 16GB
• DeepStream Version 7.0.0
• JetPack Version (valid for Jetson only) 6.0
• TensorRT Version 8.6.2.3
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type (questions, new requirements, bugs) Bug
Hello,
below you'll find a detection pipeline featuring an nvdspreprocess element and an nvtracker element.
This pipeline runs without errors on System 1 (dGPU) as specified above.
However, running this pipeline on System 2 (Jetson) throws the following exception:
!![Exception] [NvMOTContext::processFrame()] motFrame->bufferList[i]->colorFormat != m_Config.perTransformBatchConfig[i].colorFormat
An exception occurred. [NvMOTContext::processFrame()] motFrame->bufferList[i]->colorFormat != m_Config.perTransformBatchConfig[i].colorFormat
gstnvtracker: Low-level tracker lib returned error 1
0:00:05.788979232 19188 0xaaaaf6c92060 WARN nvinfer gstnvinfer.cpp:2420:gst_nvinfer_output_loop:<pgie> error: Internal data stream error.
0:00:05.789019296 19188 0xaaaaf6c92060 WARN nvinfer gstnvinfer.cpp:2420:gst_nvinfer_output_loop:<pgie> error: streaming stopped, reason error (-5)
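The colorFormat in this message is compared inside the low-level tracker lib, so to gather more information it may help to print what is actually negotiated on the tracker's sink pad on both systems. A minimal one-shot probe sketch, assuming the tracker element from the implementation below:

def print_tracker_caps(pad, info):
    # one-shot probe: print the caps of the first buffer reaching the tracker
    caps = pad.get_current_caps()
    if caps:
        print("tracker sink caps:", caps.to_string())
    return Gst.PadProbeReturn.REMOVE

tracker.get_static_pad("sink").add_probe(Gst.PadProbeType.BUFFER, print_tracker_caps)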
When the "input-tensor-meta" property of the nvtracker element is set to 0 via
tracker.set_property("input-tensor-meta", 0)
the exception is not thrown.
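For reference, the failing and working property combinations are:

# fails on the Jetson (works on the dGPU):
pgie.set_property("input-tensor-meta", 1)
tracker.set_property("input-tensor-meta", 1)
tracker.set_property("tensor-meta-gie-id", 1)

# workaround that avoids the exception on the Jetson:
tracker.set_property("input-tensor-meta", 0)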
How can this issue be resolved?
Thank you for your time!
Drawing of pipeline (image omitted; textual layout):
source_bin (uridecodebin) → nvstreammux → queue → nvdspreprocess → queue → nvinfer (pgie) → queue → nvtracker → queue → nvmultistreamtiler → queue → nvvideoconvert → queue → nvdsosd → queue → nveglglessink
Implementation of pipeline:
import os
import sys
import gi
gi.require_version("Gst", "1.0")
from gi.repository import GLib, Gst
# taken from deepstream-test3
def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    if not caps:
        caps = decoder_src_pad.query_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)
    print("gstname=", gstname)
    if gstname.find("video") != -1:
        print("features=", features)
        if features.contains("memory:NVMM"):
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")
# taken from deepstream-test3
def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)
    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property("drop-on-latency") is not None:
            Object.set_property("drop-on-latency", True)
# taken from deepstream-test3
def create_source_bin(index, uri):
    print("Creating source bin")
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    uri_decode_bin.set_property("uri", uri)
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin
if __name__ == "__main__":
    os.putenv("USE_NEW_NVSTREAMMUX", "yes")  # opt in to the new nvstreammux
    Gst.init(None)
    pipeline = Gst.Pipeline()

    # ====================================================
    # CREATE ELEMENTS FROM LEFT TO RIGHT
    # ====================================================
    source_bin = create_source_bin(0, "file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4")
    streammux = Gst.ElementFactory.make("nvstreammux", "streammux")
    streammux.set_property("batch-size", 1)
    queue_streammux_to_preprocess = Gst.ElementFactory.make("queue", "queue_streammux_to_preprocess")
    preprocess = Gst.ElementFactory.make("nvdspreprocess", "preprocess")
    preprocess.set_property("config-file", "config_preprocess.txt")
    queue_preprocess_to_pgie = Gst.ElementFactory.make("queue", "queue_preprocess_to_pgie")
    pgie = Gst.ElementFactory.make("nvinfer", "pgie")
    pgie.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt")
    if preprocess.get_property("enable"):
        # consume the input tensors prepared by nvdspreprocess instead of
        # letting nvinfer do its own scaling/conversion
        pgie.set_property("input-tensor-meta", 1)
    queue_pgie_to_tracker = Gst.ElementFactory.make("queue", "queue_pgie_to_tracker")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    tracker.set_property("tracker-width", 960)
    tracker.set_property("tracker-height", 544)
    tracker.set_property("ll-lib-file", "/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so")
    tracker.set_property("ll-config-file",
                         "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml")
    if preprocess.get_property("enable"):
        # consume the tensors of the nvdspreprocess element with unique-id=1;
        # this is the combination that triggers the exception on the Jetson
        tracker.set_property("input-tensor-meta", 1)
        tracker.set_property("tensor-meta-gie-id", 1)
    queue_tracker_to_multistreamtiler = Gst.ElementFactory.make("queue", "queue_tracker_to_multistreamtiler")
    multistreamtiler = Gst.ElementFactory.make("nvmultistreamtiler", "multistreamtiler")
    multistreamtiler.set_property("rows", 1)
    multistreamtiler.set_property("columns", 1)
    multistreamtiler.set_property("width", 1280)
    multistreamtiler.set_property("height", 720)
    queue_multistreamtiler_to_videoconvert = Gst.ElementFactory.make("queue", "queue_multistreamtiler_to_videoconvert")
    videoconvert = Gst.ElementFactory.make("nvvideoconvert", "videoconvert")
    queue_videoconvert_to_osd = Gst.ElementFactory.make("queue", "queue_videoconvert_to_osd")
    osd = Gst.ElementFactory.make("nvdsosd", "osd")
    queue_osd_to_eglglessink = Gst.ElementFactory.make("queue", "queue_osd_to_eglglessink")
    eglglessink = Gst.ElementFactory.make("nveglglessink", "eglglessink")
    eglglessink.set_property("sync", 1)

    # ====================================================
    # ADD ELEMENTS TO PIPELINE FROM LEFT TO RIGHT
    # ====================================================
    pipeline.add(source_bin)
    pipeline.add(streammux)
    pipeline.add(queue_streammux_to_preprocess)
    pipeline.add(preprocess)
    pipeline.add(queue_preprocess_to_pgie)
    pipeline.add(pgie)
    pipeline.add(queue_pgie_to_tracker)
    pipeline.add(tracker)
    pipeline.add(queue_tracker_to_multistreamtiler)
    pipeline.add(multistreamtiler)
    pipeline.add(queue_multistreamtiler_to_videoconvert)
    pipeline.add(videoconvert)
    pipeline.add(queue_videoconvert_to_osd)
    pipeline.add(osd)
    pipeline.add(queue_osd_to_eglglessink)
    pipeline.add(eglglessink)

    # ====================================================
    # LINK ELEMENTS FROM LEFT TO RIGHT
    # ====================================================
    # source to streammux
    source_bin_src_pad = source_bin.get_static_pad("src")
    streammux_sink_pad = streammux.request_pad_simple("sink_0")
    source_bin_src_pad.link(streammux_sink_pad)
    # streammux to preprocess
    streammux.link(queue_streammux_to_preprocess)
    queue_streammux_to_preprocess.link(preprocess)
    # preprocess to pgie
    preprocess.link(queue_preprocess_to_pgie)
    queue_preprocess_to_pgie.link(pgie)
    # pgie to tracker
    pgie.link(queue_pgie_to_tracker)
    queue_pgie_to_tracker.link(tracker)
    # tracker to multistreamtiler
    tracker.link(queue_tracker_to_multistreamtiler)
    queue_tracker_to_multistreamtiler.link(multistreamtiler)
    # multistreamtiler to videoconvert
    multistreamtiler.link(queue_multistreamtiler_to_videoconvert)
    queue_multistreamtiler_to_videoconvert.link(videoconvert)
    # videoconvert to osd
    videoconvert.link(queue_videoconvert_to_osd)
    queue_videoconvert_to_osd.link(osd)
    # osd to eglglessink
    osd.link(queue_osd_to_eglglessink)
    queue_osd_to_eglglessink.link(eglglessink)

    loop = GLib.MainLoop()
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except Exception as e:
        print(e)
    pipeline.set_state(Gst.State.NULL)
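Side note: the script has no bus watch, so the messages above were taken from the GStreamer log. A minimal handler (an addition for debugging, not part of the failing setup) that stops the loop on the same error would be attached before loop.run():

def on_bus_message(bus, message, loop):
    # stop the main loop on ERROR or EOS instead of relying on the GStreamer log
    if message.type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        print("ERROR:", err, debug)
        loop.quit()
    elif message.type == Gst.MessageType.EOS:
        loop.quit()

bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect("message", on_bus_message, loop)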
Configuration file of nvdspreprocess-element:
[property]
enable=1
# list of component gie-id for which tensor is prepared
target-unique-ids=1
# 0=NCHW, 1=NHWC, 2=CUSTOM
network-input-order=0
# 0=process on objects 1=process on frames
process-on-frame=1
#uniquely identify the metadata generated by this element
unique-id=1
# gpu-id to be used
gpu-id=0
# if enabled maintain the aspect ratio while scaling
maintain-aspect-ratio=1
# if enabled pad symmetrically with maintain-aspect-ratio enabled
symmetric-padding=1
# processing width/height at which the image is scaled
processing-width=960
processing-height=544
# max buffer in scaling buffer pool
scaling-buf-pool-size=6
# max buffer in tensor buffer pool
tensor-buf-pool-size=6
# tensor shape based on network-input-order
network-input-shape=8;3;544;960
# 0=RGB, 1=BGR, 2=GRAY
network-color-format=0
# 0=FP32, 1=UINT8, 2=INT8, 3=UINT32, 4=INT32, 5=FP16
tensor-data-type=0
# tensor name same as input layer name
tensor-name=input_1
# 0=NVBUF_MEM_DEFAULT 1=NVBUF_MEM_CUDA_PINNED 2=NVBUF_MEM_CUDA_DEVICE 3=NVBUF_MEM_CUDA_UNIFIED
scaling-pool-memory-type=0
# 0=NvBufSurfTransformCompute_Default 1=NvBufSurfTransformCompute_GPU 2=NvBufSurfTransformCompute_VIC
scaling-pool-compute-hw=0
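# note (assumption, untested): with scaling-pool-compute-hw=0 the transform
# runs on the GPU on dGPU but on VIC on Jetson, so the scaled buffers may
# differ between System 1 and System 2; forcing GPU compute on the Jetson
# would be one way to test this:
#scaling-pool-compute-hw=1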
# Scaling Interpolation method
# 0=NvBufSurfTransformInter_Nearest 1=NvBufSurfTransformInter_Bilinear 2=NvBufSurfTransformInter_Algo1
# 3=NvBufSurfTransformInter_Algo2 4=NvBufSurfTransformInter_Algo3 5=NvBufSurfTransformInter_Algo4
# 6=NvBufSurfTransformInter_Default
scaling-filter=0
# custom library .so path having custom functionality
custom-lib-path=/opt/nvidia/deepstream/deepstream/lib/gst-plugins/libcustom2d_preprocess.so
# custom tensor preparation function name having predefined input/outputs
# check the default custom library nvdspreprocess_lib for more info
custom-tensor-preparation-function=CustomTensorPreparation
[user-configs]
# Below parameters get used when using default custom library nvdspreprocess_lib
# network scaling factor
pixel-normalization-factor=0.003921568
# mean file path in ppm format
#mean-file=
# array of offsets for each channel
#offsets=
[group-0]
src-ids=0
custom-input-transformation-function=CustomAsyncTransformation
process-on-roi=1
roi-params-src-0=200;200;640;640
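For completeness, tensor-name=input_1 has to match the model's input layer. A quick way to double-check it against the engine that nvinfer builds (a sketch; the engine file name below is a placeholder, substitute the path nvinfer prints at startup):

import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
# placeholder engine path; use the one reported by nvinfer at startup
with open("model_b8_gpu0_fp32.engine", "rb") as f, trt.Runtime(logger) as runtime:
    engine = runtime.deserialize_cuda_engine(f.read())
for i in range(engine.num_io_tensors):
    name = engine.get_tensor_name(i)
    print(name, engine.get_tensor_mode(name))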