Hi,
So, I think the pipeline works, I can see the video being shown from the sink.
Now my problem is, I cannot access the tensors using the probes.
Neither nvinfer (src and sink pads) nor nvdspreprocess (src pad) seems to populate l_user at any point. (It’s always “None”)
Could you help me out?
Here is the current testing pipeline, graph, and code:
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GObject
import sys
sys.path.append('../')
from common.bus_call import bus_call
from common.is_aarch_64 import is_aarch64
import os
import shutil
import pyds
from utils import cb_newpad, decodebin_child_added, create_source_bin, create_and_add_element
os.environ["GST_DEBUG_DUMP_DOT_DIR"] = "debugging/"
# Callback function for the probe on nvdspreprocess src pad
def nvdspreprocess_src_pad_probe(pad, info, user_data):
    """Buffer probe on the nvdspreprocess src pad.

    Scans the batch-level user meta list (where nvdspreprocess attaches its
    prepared tensor as NVDS_PREPROCESS_BATCH_META) and, additionally, each
    frame's user meta list.

    NOTE(review): if batch_user_meta_list is always None, nvdspreprocess
    prepared no tensor at all — check that the preprocess config file has an
    *enabled* [group-N] section with src-ids; with every group commented out
    no tensor meta is ever attached. TODO confirm against the config in use.
    """
    print("Buffer from nvdspreprocess")
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        # A probe callback can fire without a buffer; nothing to inspect.
        print("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    if not batch_meta:
        print("No batch meta found")
        return Gst.PadProbeReturn.OK

    # The preprocess tensor hangs off the *batch*-level user meta list, so
    # scan it exactly once (the original re-scanned the same batch list on
    # every iteration of the frame loop).
    l_user = batch_meta.batch_user_meta_list
    while l_user is not None:
        user_meta = pyds.NvDsUserMeta.cast(l_user.data)
        print(f"Found user meta of type: {user_meta.base_meta.meta_type}")
        try:
            l_user = l_user.next
        except StopIteration:
            break

    # Also walk the per-frame user meta lists, where ROI/tensor meta can be
    # attached when processing per frame.
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        print("Checking for user meta")
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        l_frame_user = frame_meta.frame_user_meta_list
        while l_frame_user is not None:
            frame_user_meta = pyds.NvDsUserMeta.cast(l_frame_user.data)
            print(f"Found frame user meta of type: {frame_user_meta.base_meta.meta_type}")
            try:
                l_frame_user = l_frame_user.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
# Callback function for the probe on nvinfer src pad
def nvinfer_src_pad_probe(pad, info, user_data):
    """Buffer probe on the nvinfer src pad; tensor extraction still pending."""
    # @TODO: pull NvDsInferTensorMeta out of the batch/frame user meta here.
    return Gst.PadProbeReturn.OK
def create_graph(pipeline):
    """Dump the running pipeline as a Graphviz .dot file.

    The file lands under $GST_DEBUG_DUMP_DOT_DIR (set at the top of this
    script) with the basename "test_preprocess_dot".
    """
    print("Graph done")
    Gst.debug_bin_to_dot_file(pipeline, Gst.DebugGraphDetails.ALL, "test_preprocess_dot")
# Initialize GStreamer
Gst.init(None)

# Render via a real video sink (True) or discard buffers with fakesink (False).
# FIX: DISPLAY was referenced below but never defined, which raises NameError.
DISPLAY = True

# Create the GStreamer pipeline
pipeline = Gst.Pipeline()

# Define the elements of the pipeline
source = create_source_bin(0, "rtsp://user:pw@<IP>")
# source = create_source_bin(0, "file:///opt/nvidia/deepstream/deepstream/sources/deepstream_python_apps/apps/XX/video_output_cam0.mp4")
streammux = create_and_add_element("nvstreammux", "streammux", pipeline)
streammux.set_property('batch-size', 1)
streammux.set_property('width', 1280)
streammux.set_property('height', 720)
streammux.set_property('live-source', 0)
streammux.set_property('batched-push-timeout', 4000000)

nvconv = Gst.ElementFactory.make("nvvideoconvert", "nvconv")
nvdspreprocess = Gst.ElementFactory.make("nvdspreprocess", "nvdspreprocess")
nvinfer = Gst.ElementFactory.make("nvinfer", "nvinfer")
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

# Finally render the osd output
if DISPLAY:
    if is_aarch64():
        print("Creating nv3dsink \n")
        sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink")
        if not sink:
            sys.stderr.write(" Unable to create nv3dsink \n")
    else:
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")
else:
    sink = Gst.ElementFactory.make("fakesink", "in_fakesink")

if not pipeline or not source or not nvdspreprocess or not nvinfer or not sink:
    # FIX: the original only printed and fell through; abort for real.
    print("One element could not be created. Exiting.")
    sys.exit(1)

# Set properties for the elements
# nvinfer configuration
nvinfer.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream-6.3/sources/deepstream_python_apps/apps/XX/c_nvinfer_1.txt")
nvinfer.set_property("input-tensor-meta", 1)  # Use preprocessed input from nvdspreprocess
nvdspreprocess.set_property('config-file', '/opt/nvidia/deepstream/deepstream/sources/deepstream_python_apps/apps/XX/configs/nvdspreprocess_conf.txt')

# Add elements to the pipeline
pipeline.add(source)
pipeline.add(nvconv)
pipeline.add(nvdspreprocess)
pipeline.add(nvinfer)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(sink)

# Create capsfilter forcing RGBA in NVMM memory between the converters
capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
capsfilter.set_property("caps", Gst.caps_from_string("video/x-raw(memory:NVMM),format=RGBA"))
pipeline.add(capsfilter)

# NOTE(review): pad "sink_1" with batch-size=1 — the first source
# conventionally goes to "sink_0"; verify this matches the [group-N]
# src-ids in the preprocess config.
sinkpad = streammux.get_request_pad("sink_1")
if not sinkpad:
    sys.stderr.write("Unable to create sink pad bin \n")
srcpad = source.get_static_pad("src")
if not srcpad:
    sys.stderr.write("Unable to create src pad bin \n")
srcpad.link(sinkpad)

# Link the elements together
streammux.link(nvconv)
nvconv.link(capsfilter)
capsfilter.link(nvdspreprocess)
# Insert into the pipeline
nvdspreprocess.link(nvinfer)
nvinfer.link(nvvidconv)
nvvidconv.link(nvosd)
nvosd.link(sink)

# Add probes
nvdspreprocess_src_pad = nvdspreprocess.get_static_pad("src")
if nvdspreprocess_src_pad:
    nvdspreprocess_src_pad.add_probe(Gst.PadProbeType.BUFFER, nvdspreprocess_src_pad_probe, 0)
nvinfer_src_pad = nvinfer.get_static_pad("src")
if nvinfer_src_pad:
    nvinfer_src_pad.add_probe(Gst.PadProbeType.BUFFER, nvinfer_src_pad_probe, 0)
# nvinfer_snk_pad = nvinfer.get_static_pad("sink")
# if nvinfer_snk_pad:
#     nvinfer_snk_pad.add_probe(Gst.PadProbeType.BUFFER, nvinfer_snk_pad_probe, 0)

# Start the pipeline
pipeline.set_state(Gst.State.PLAYING)
mainloop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()

def on_message(bus, message):
    """Log ERROR and WARNING messages from the pipeline bus."""
    if message.type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        # FIX: bus messages come from the whole pipeline, not the capsfilter.
        print(f"Error from pipeline: {err}, {debug}")
    elif message.type == Gst.MessageType.WARNING:
        warn, debug = message.parse_warning()
        print(f"Warning from pipeline: {warn}, {debug}")
    # ... handle other messages as needed

bus.connect("message", on_message)
Gst.debug_bin_to_dot_file(pipeline, Gst.DebugGraphDetails.ALL, "test_preprocess_dotnorun")

# FIX: schedule the graph dump BEFORE entering the main loop; mainloop.run()
# blocks, so the original timeout_add_seconds call after it never executed.
GObject.timeout_add_seconds(3, create_graph, pipeline)
try:
    mainloop.run()
except KeyboardInterrupt:
    print("Exiting on user request.")
    mainloop.quit()
    pipeline.set_state(Gst.State.NULL)

# Clean up
pipeline.set_state(Gst.State.NULL)
Here’s the nvdspreprocess config:
[property]
enable=1
# list of component gie-id for which tensor is prepared
target-unique-ids=1
# 0=NCHW, 1=NHWC, 2=CUSTOM
network-input-order=0
# 0=process on objects 1=process on frames
process-on-frame=1
#uniquely identify the metadata generated by this element
unique-id=0
# gpu-id to be used
gpu-id=0
# if enabled maintain the aspect ratio while scaling
maintain-aspect-ratio=1
# if enabled pad symmetrically with maintain-aspect-ratio enabled
symmetric-padding=1
# processing width/height at which image scaled
processing-width=120
processing-height=60
# max buffer in scaling buffer pool
scaling-buf-pool-size=6
# max buffer in tensor buffer pool
tensor-buf-pool-size=6
# tensor shape based on network-input-order
network-input-shape= 1;1;60;120
# 0=RGB, 1=BGR, 2=GRAY
network-color-format=2
# 0=FP32, 1=UINT8, 2=INT8, 3=UINT32, 4=INT32, 5=FP16
tensor-data-type=0
# tensor name same as input layer name
tensor-name=input_1
# 0=NVBUF_MEM_DEFAULT 1=NVBUF_MEM_CUDA_PINNED 2=NVBUF_MEM_CUDA_DEVICE 3=NVBUF_MEM_CUDA_UNIFIED
scaling-pool-memory-type=0
# 0=NvBufSurfTransformCompute_Default 1=NvBufSurfTransformCompute_GPU 2=NvBufSurfTransformCompute_VIC
scaling-pool-compute-hw=0
# Scaling Interpolation method
# 0=NvBufSurfTransformInter_Nearest 1=NvBufSurfTransformInter_Bilinear 2=NvBufSurfTransformInter_Algo1
# 3=NvBufSurfTransformInter_Algo2 4=NvBufSurfTransformInter_Algo3 5=NvBufSurfTransformInter_Algo4
# 6=NvBufSurfTransformInter_Default
scaling-filter=0
# custom library .so path having custom functionality
custom-lib-path=/opt/nvidia/deepstream/deepstream/lib/gst-plugins/libcustom2d_preprocess.so
# custom tensor preparation function name having predefined input/outputs
# check the default custom library nvdspreprocess_lib for more info
custom-tensor-preparation-function=CustomTensorPreparation

[user-configs]
# Below parameters get used when using default custom library nvdspreprocess_lib
# network scaling factor
pixel-normalization-factor=1.0
# mean file path in ppm format
#mean-file=
# array of offsets for each channel
#offsets=

# FIX: nvdspreprocess only prepares tensors for sources listed in an enabled
# [group-N] section; with every group commented out, no tensor is prepared
# and no user meta (NVDS_PREPROCESS_BATCH_META) is ever attached — which
# is why probe callbacks see an empty batch_user_meta_list.
[group-0]
src-ids=0
custom-input-transformation-function=CustomAsyncTransformation
process-on-roi=0
# roi-params are ignored while process-on-roi=0; kept for reference
#roi-params-src-0=0;540;900;500;960;0;900;500;0;0;540;900;
#roi-params-src-1=0;540;900;500;960;0;900;500;0;0;540;900;
#roi-params-src-2=0;540;900;500;960;0;900;500;0;0;540;900;
#roi-params-src-3=0;540;900;500;960;0;900;500;0;0;540;900;
Here is the nvinfer config:
[property]
gpu-id=0
#net-scale-factor=0.0039215697906911373
net-scale-factor=0.0039215686274509803921568627451
#onnx-file=/home/YYY/Desktop/MODELS/modelrgb2.onnx
#model-engine-file=/home/YYY/Desktop/MODELS/modelrgb2.onnx_b1_gpu0_fp16.engine
model-engine-file=/opt/nvidia/deepstream/deepstream-6.3/sources/deepstream_python_apps/apps/XX/Models/model_db.onnx_b1_gpu0_fp16.engine
onnx-file=/opt/nvidia/deepstream/deepstream-6.3/sources/deepstream_python_apps/apps/XX/Models/model_db.onnx
batch-size=1
network-mode=1
network-type=100
output-tensor-meta=1
#num-detected-classes=4
interval=0
gie-unique-id=1
output-blob-names=conv2d_2
# FIX: inline " # ..." comments after a value are not supported by the
# key-file config parser — the comment text becomes part of the value and
# breaks parsing. Comments moved to their own lines.
# 0: RGB, 1: BGR, 2: GRAY, 3: RGBA, 4: BGRx
model-color-format=2
# FIX: uff-input-order applies to UFF models only; this config loads an
# ONNX model, whose input order comes from the model itself.
#uff-input-order=1
#infer-dims=60;120;1
# NOTE(review): the element property "input-tensor-meta" is already set in
# the Python script; verify "input-tensor-from-meta" is a recognized
# config-file key for gst-nvinfer in your DeepStream version.
input-tensor-from-meta=1