Hello Kesong,
In the deepstream-test2 there is no output about the state of the tracklets to know if they are ACTIVE, TENTATIVE, SHADOW,…
This is how it is tested:
Getting an error
Traceback (most recent call last):
File "/workspace/scripts/detector2.py", line 40, in appsink_callback
review_tracking_data(batch_meta)
File "/workspace/scripts/detector2.py", line 279, in review_tracking_data
past_frame_obj_list = pyds.NvDsPastFrameObjList.cast(user_meta.user_meta_data)
^^^^^^^^^^^^^^^^^^^^^^^^^
AttributeError: module 'pyds' has no attribute 'NvDsPastFrameObjList'
The script works when the code indicated in the traceback (shown below) is removed:
past_frame_obj_list = pyds.NvDsPastFrameObjList.cast(user_meta.user_meta_data)
past_frame_obj_list.print_list()
if past_frame_obj_list:
print("past_frame_obj_list")
for past_frame_obj in past_frame_obj_list:
tracklet_state = past_frame_obj.tracklet_state
if tracklet_state == pyds.NvDsTrackletState.ACTIVE:
print("Tracklet State: ACTIVE")
elif tracklet_state == pyds.NvDsTrackletState.TENTATIVE:
print("Tracklet State: TENTATIVE")
elif tracklet_state == pyds.NvDsTrackletState.SHADOW:
print("Tracklet State: SHADOW")
#!/usr/bin/env python3
"""
DeepStream detector with bbox matching probe - Python GStreamer version of detector.py
This replicates detector.py but uses Python GStreamer to allow attaching a probe
for bbox matching and soft label extraction.
"""
import argparse
import sys
import signal
import ctypes
import gi
from colorama import Fore, Style
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib
import pyds
import numpy as np
def appsink_callback(appsink):
    """
    Appsink "new-sample" callback.

    Pulls the latest sample, prints the raw (pre-tracker) detector outputs
    and the tracker outputs, then matches each tracked object back to the
    raw detection it originated from by exact bbox-coordinate comparison so
    that the per-class soft labels of that detection can be extracted.

    Args:
        appsink: the appsink element that emitted the "new-sample" signal.

    Returns:
        Gst.FlowReturn.OK in every case; problems just skip processing.
    """
    sample = appsink.emit("pull-sample")
    if not sample:
        return Gst.FlowReturn.OK
    gst_buffer = sample.get_buffer()
    if not gst_buffer:
        return Gst.FlowReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    if not batch_meta:
        # Fix: gst_buffer_get_nvds_batch_meta returns None when the buffer
        # carries no DeepStream batch meta; previously this raised
        # AttributeError on batch_meta.frame_meta_list below.
        return Gst.FlowReturn.OK
    # Model output geometry: 300 detection queries with 4 bbox coordinates
    # and 12 class scores each (matches the hard-coded reshapes below).
    num_queries = 300
    num_classes = 12
    l_frame = batch_meta.frame_meta_list
    print("\n--- Review Data ---")
    review_tracking_data(batch_meta)
    print("\n--- NVDS Frame Meta ---")
    review_nvdsframemeta(batch_meta)
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        # Locate the raw tensor metadata attached by nvinfer.
        l_user = frame_meta.frame_user_meta_list
        tensor_meta = None
        while l_user is not None:
            try:
                user_meta = pyds.NvDsUserMeta.cast(l_user.data)
                if user_meta.base_meta.meta_type == pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META:
                    tensor_meta = pyds.NvDsInferTensorMeta.cast(user_meta.user_meta_data)
                    break
            except StopIteration:
                break
            try:
                l_user = l_user.next
            except StopIteration:
                break
        if tensor_meta is None:
            try:
                l_frame = l_frame.next
            except StopIteration:
                break
            continue
        # Extract raw tensors from the model output layers, by layer name.
        raw_boxes = None
        raw_scores = None
        raw_labels = None
        for i in range(tensor_meta.num_output_layers):
            layer = pyds.get_nvds_LayerInfo(tensor_meta, i)
            layer_name = layer.layerName
            if layer_name == "boxes":
                # Shape: [1, num_queries, 4]
                ptr = pyds.get_ptr(layer.buffer)
                raw_boxes = np.ctypeslib.as_array(
                    (ctypes.c_float * (num_queries * 4)).from_address(ptr)
                ).reshape(1, num_queries, 4)
            elif layer_name == "scores":
                # Shape: [1, num_queries]
                ptr = pyds.get_ptr(layer.buffer)
                raw_scores = np.ctypeslib.as_array(
                    (ctypes.c_float * num_queries).from_address(ptr)
                ).reshape(1, num_queries)
            elif layer_name == "labels":
                # Shape: [1, num_queries, num_classes]
                ptr = pyds.get_ptr(layer.buffer)
                raw_labels = np.ctypeslib.as_array(
                    (ctypes.c_float * (num_queries * num_classes)).from_address(ptr)
                ).reshape(1, num_queries, num_classes)
        if raw_boxes is None or raw_labels is None:
            try:
                l_frame = l_frame.next
            except StopIteration:
                break
            continue
        # Print raw detector outputs (pre-tracker)
        print(f"{Fore.RED} frame {frame_meta.frame_num} {Style.RESET_ALL}:")
        for idx in range(num_queries):
            score = raw_scores[0, idx] if raw_scores is not None else 0.0
            if score > 0.15:  # Only print detections above threshold
                box = raw_boxes[0, idx]
                label = int(np.argmax(raw_labels[0, idx])) if raw_labels is not None else -1
                print(f"{Fore.GREEN} Detector: idx={idx}, Class={label}, Score={score:.2f}, BBox: x1={box[0]:.0f}, y1={box[1]:.0f}, x2={box[2]:.0f}, y2={box[3]:.0f} {Style.RESET_ALL}")
        # Match tracked objects with raw boxes using exact coordinate matching
        l_obj = frame_meta.obj_meta_list
        matched_detections = []
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            print(f"{Fore.BLUE} Tracker: Track ID={obj_meta.object_id}, Class ID={obj_meta.class_id}, Confidence={obj_meta.confidence:.2f}, BBox: left={obj_meta.rect_params.left:.0f}, top={obj_meta.rect_params.top:.0f}, width={obj_meta.rect_params.width:.0f}, height={obj_meta.rect_params.height:.0f}{Style.RESET_ALL}")
            # Object bbox converted to x1y1x2y2 format
            obj_x1 = obj_meta.rect_params.left
            obj_y1 = obj_meta.rect_params.top
            obj_x2 = obj_meta.rect_params.left + obj_meta.rect_params.width
            obj_y2 = obj_meta.rect_params.top + obj_meta.rect_params.height
            print("## bbbboxx", (obj_x1, obj_y1, obj_x2, obj_y2))
            # Find the raw box with (near-)identical coordinates; the small
            # tolerance absorbs float rounding.
            matched_idx = -1
            for idx in range(num_queries):
                raw_box = raw_boxes[0, idx]
                if (abs(raw_box[0] - obj_x1) < 0.01 and
                        abs(raw_box[1] - obj_y1) < 0.01 and
                        abs(raw_box[2] - obj_x2) < 0.01 and
                        abs(raw_box[3] - obj_y2) < 0.01):
                    matched_idx = idx
                    break
            print(f" ### Matched raw idx: {matched_idx}")
            # Only record the object when an exact match was found
            if matched_idx != -1:
                soft_labels = raw_labels[0, matched_idx]
                matched_detections.append({
                    'track_id': obj_meta.object_id,
                    'class_id': obj_meta.class_id,
                    'confidence': obj_meta.confidence,
                    'bbox': [obj_x1, obj_y1, obj_x2, obj_y2],
                    'soft_labels': soft_labels.copy(),
                    'raw_idx': matched_idx
                })
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # For now, just print summary every 30 frames
        if frame_meta.frame_num % 30 == 0 and len(matched_detections) > 0:
            print(f"\n=== Frame {frame_meta.frame_num} ===")
            print(f"Matched {len(matched_detections)} detections")
            for i, det in enumerate(matched_detections[:3]):  # Print first 3
                print(f" Detection {i}:")
                print(f" Track ID: {det['track_id']}")
                print(f" Class ID: {det['class_id']}")
                print(f" Confidence: {det['confidence']:.3f}")
                print(f" Raw idx: {det['raw_idx']}")
                print(f" Soft labels (top 3): {np.sort(det['soft_labels'])[-3:]}")
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.FlowReturn.OK
def review_nvdsframemeta(batch_meta: pyds.NvDsBatchMeta) -> None:
    """
    Print NvDsFrameMeta / NvDsObjectMeta contents of a batch for debugging.

    For every frame in the batch this dumps the frame-level fields, then for
    every object its class/track ids, confidences, and the three bbox
    variants (final rect_params, raw detector bbox, tracker bbox).

    Args:
        batch_meta: the NvDsBatchMeta extracted from a Gst buffer.
    """
    # Per-class terminal color for object lines. Fix: this dict was
    # previously rebuilt inside the per-object inner loop on every iteration;
    # it is loop-invariant, so build it once.
    color_map = {
        0: Fore.LIGHTBLUE_EX,
        1: Fore.GREEN,
        2: Fore.LIGHTCYAN_EX,
        3: Fore.MAGENTA,
        4: Fore.CYAN,
    }
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        print(f"Frame: pad_index={frame_meta.pad_index} batch_id={frame_meta.batch_id} "
              f"frame_num={frame_meta.frame_num} buf_pts={frame_meta.buf_pts} "
              f"ntp_timestamp={frame_meta.ntp_timestamp} source_id={frame_meta.source_id} "
              f"num_surfaces_per_frame={frame_meta.num_surfaces_per_frame}")
        print(f" source_frame_width={frame_meta.source_frame_width} "
              f"source_frame_height={frame_meta.source_frame_height} "
              f"surface_type={frame_meta.surface_type} surface_index={frame_meta.surface_index}")
        print(f" num_obj_meta={frame_meta.num_obj_meta} bInferDone={frame_meta.bInferDone}")
        # Iterate through object meta list
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            color = color_map.get(obj_meta.class_id, Fore.YELLOW)
            print(f"{color} Object: class_id={obj_meta.class_id} object_id={obj_meta.object_id} "
                  f"confidence={obj_meta.confidence:.2f} tracker_confidence={obj_meta.tracker_confidence:.2f}{Style.RESET_ALL}")
            print(f" rect: left={obj_meta.rect_params.left:.1f} top={obj_meta.rect_params.top:.1f} "
                  f"width={obj_meta.rect_params.width:.1f} height={obj_meta.rect_params.height:.1f}")
            print(f" detector: left={obj_meta.detector_bbox_info.org_bbox_coords.left:.1f} "
                  f"top={obj_meta.detector_bbox_info.org_bbox_coords.top:.1f} "
                  f"width={obj_meta.detector_bbox_info.org_bbox_coords.width:.1f} "
                  f"height={obj_meta.detector_bbox_info.org_bbox_coords.height:.1f}")
            print(f" tracker: left={obj_meta.tracker_bbox_info.org_bbox_coords.left:.1f} "
                  f"top={obj_meta.tracker_bbox_info.org_bbox_coords.top:.1f} "
                  f"width={obj_meta.tracker_bbox_info.org_bbox_coords.width:.1f} "
                  f"height={obj_meta.tracker_bbox_info.org_bbox_coords.height:.1f}")
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
def review_tracking_data(batch_meta: pyds.NvDsBatchMeta) -> None:
    """
    Print the tracker's past-frame ("shadow") data attached to the batch.

    nvtracker attaches NVDS_TRACKER_PAST_FRAME_META as batch-level user meta;
    its payload is an NvDsTargetMiscDataBatch, which nests per-stream data
    (NvDsTargetMiscDataStream) -> per-target data (NvDsTargetMiscDataObject)
    -> per-frame bbox entries (NvDsTargetMiscDataFrame).

    Fix: the trailing code that cast user_meta.user_meta_data to
    pyds.NvDsPastFrameObjList has been removed. That type no longer exists in
    current pyds bindings (the past-frame types were renamed to the
    NvDsTargetMiscData* family), so it raised
    AttributeError: module 'pyds' has no attribute 'NvDsPastFrameObjList'.
    The equivalent information is already printed by the loop below.

    Args:
        batch_meta: the NvDsBatchMeta extracted from a Gst buffer.
    """
    # Retrieve glist of NvDsUserMeta objects from the given NvDsBatchMeta.
    l_user = batch_meta.batch_user_meta_list
    while l_user is not None:
        try:
            user_meta = pyds.NvDsUserMeta.cast(l_user.data)
        except StopIteration:
            break
        # Make sure the metatype is the tracker's past-frame meta.
        if user_meta and user_meta.base_meta.meta_type == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META:
            try:
                pPastFrameObjBatch = pyds.NvDsTargetMiscDataBatch.cast(user_meta.user_meta_data)  # See NvDsTargetMiscDataBatch for details
            except StopIteration:
                break
            for trackobj in pyds.NvDsTargetMiscDataBatch.list(pPastFrameObjBatch):  # NvDsTargetMiscDataStream objects
                # NvDsTargetMiscDataStream attributes
                print(f"streamId={trackobj.streamID} surfaceStreamID={trackobj.surfaceStreamID} numAllocated={trackobj.numAllocated} numFilled={trackobj.numFilled}")
                for pastframeobj in pyds.NvDsTargetMiscDataStream.list(trackobj):  # per-target entries
                    # NvDsTargetMiscDataObject attributes
                    print(f"numobj={pastframeobj.numObj} uniqueId= {pastframeobj.uniqueId} classId={pastframeobj.classId} objLabel={pastframeobj.objLabel}")
                    for objlist in pyds.NvDsTargetMiscDataObject.list(pastframeobj):  # per-frame bbox entries
                        # NvDsTargetMiscDataFrame attributes
                        print(f"frameNum: {objlist.frameNum} confidence:{objlist.confidence:.2f} age:{objlist.age} [left:{objlist.tBbox.left:.1f} width:{objlist.tBbox.width:.1f} top:{objlist.tBbox.top:.1f} height:{objlist.tBbox.height:.1f}]")
        try:
            l_user = l_user.next
        except StopIteration:
            break
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """
    Buffer probe on the nvosd sink pad that restyles the OSD of every object.

    NOTE(review): the original docstring claimed shadow tracks are colored
    differently from active tracks, but this probe currently applies the
    SAME style to every object regardless of track state: a cyan border
    (RGBA 0.0, 1.0, 1.0, 1.0) of width 1 and orange label text
    (RGBA 1.0, 0.6, 0.0, 1.0). No per-track state is inspected here.

    Args:
        pad: the probed pad (unused).
        info: probe info carrying the Gst buffer.
        u_data: user data passed at add_probe time (unused).

    Returns:
        Gst.PadProbeReturn.OK so the buffer continues downstream.
    """
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    if not batch_meta:
        return Gst.PadProbeReturn.OK
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            # Cyan border for every bbox (RGBA).
            obj_meta.rect_params.border_color.set(0.0, 1.0, 1.0, 1.0)
            obj_meta.rect_params.border_width = 1
            # Orange label text (RGBA).
            obj_meta.text_params.font_params.font_color.set(1.0, 0.6, 0.0, 1.0)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
class DeepStreamDetector:
    """
    DeepStream detector pipeline wrapper.

    Topology: nvunixfdsrc -> capsfilter -> nvvideoconvert -> capsfilter ->
    nvstreammux -> nvinfer -> queue -> nvtracker -> tee, with the tee feeding
    (1) a display branch (queue -> nvdsosd -> nvvideoconvert -> capsfilter ->
    queue -> nveglglessink) and (2) an appsink branch (queue -> appsink)
    whose "new-sample" signal is handled by appsink_callback.
    """

    def __init__(self, socket_path: str):
        # Unix socket path the nvunixfdsrc element reads video from.
        self.socket_path = socket_path
        self.pipeline = None
        self.loop = None
        Gst.init(None)

    @staticmethod
    def _make_element(factory: str, name: str):
        """Create a GStreamer element or raise RuntimeError on failure.

        Fix: Gst.ElementFactory.make returns None when the plugin is not
        available; the previous code called set_property on the result
        before its None check ran, so a missing plugin crashed with an
        AttributeError instead of a clear error message.
        """
        elem = Gst.ElementFactory.make(factory, name)
        if elem is None:
            raise RuntimeError(
                f"Failed to create element '{name}' (factory '{factory}')"
            )
        return elem

    def create_pipeline(self):
        """Build and fully link the pipeline; return True on success."""
        try:
            return self._build_pipeline()
        except RuntimeError as err:
            print(f"ERROR: {err}")
            return False

    def _build_pipeline(self):
        """Create, configure, add and link all elements.

        Raises RuntimeError when an element cannot be created; returns
        False on a link failure and True on success.
        """
        self.pipeline = Gst.Pipeline()
        make = self._make_element
        # Source
        source = make("nvunixfdsrc", "source")
        source.set_property("socket-path", self.socket_path)
        # Caps filter for source
        caps_source = make("capsfilter", "caps_source")
        caps_source.set_property(
            "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM),format=NV12"))
        # Video convert
        nvvidconv = make("nvvideoconvert", "nvvidconv")
        # Caps filter after convert
        caps_convert = make("capsfilter", "caps_convert")
        caps_convert.set_property(
            "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
        # Stream mux
        streammux = make("nvstreammux", "mux")
        streammux.set_property("batch-size", 1)
        streammux.set_property("width", 1280)
        streammux.set_property("height", 1280)
        streammux.set_property("nvbuf-memory-type", 0)
        # Inference
        nvinfer = make("nvinfer", "nvinfer")
        nvinfer.set_property("config-file-path", "/workspace/configs/rtdetr_config.txt")
        # Queue after inference
        queue_infer = make("queue", "queue_infer")
        queue_infer.set_property("max-size-buffers", 30)
        queue_infer.set_property("max-size-time", 0)
        queue_infer.set_property("max-size-bytes", 0)
        queue_infer.set_property("leaky", 2)  # downstream
        # Tracker
        nvtracker = make("nvtracker", "nvtracker")
        nvtracker.set_property("tracker-width", 320)
        nvtracker.set_property("tracker-height", 320)
        nvtracker.set_property(
            "ll-lib-file",
            "/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so")
        nvtracker.set_property(
            "ll-config-file", "/workspace/configs/config_tracker_NvDCF_accuracy.yml")
        # Tee after tracker for display + appsink branches
        tee = make("tee", "tee")
        # Branch 1: Display path
        queue_display_branch = make("queue", "queue_display_branch")
        queue_display_branch.set_property("max-size-buffers", 10)
        queue_display_branch.set_property("max-size-time", 0)
        queue_display_branch.set_property("max-size-bytes", 0)
        # OSD
        nvosd = make("nvdsosd", "nvosd")
        # Branch 2: Appsink path
        queue_appsink = make("queue", "queue_appsink")
        queue_appsink.set_property("max-size-buffers", 10)
        queue_appsink.set_property("max-size-time", 0)
        queue_appsink.set_property("max-size-bytes", 0)
        appsink = make("appsink", "appsink")
        appsink.set_property("emit-signals", True)
        appsink.set_property("sync", False)
        appsink.connect("new-sample", appsink_callback)
        # Video convert for display
        nvvidconv2 = make("nvvideoconvert", "nvvidconv2")
        # Caps for display
        caps_display = make("capsfilter", "caps_display")
        caps_display.set_property(
            "caps",
            Gst.Caps.from_string("video/x-raw(memory:NVMM),width=1920,height=1080,pixel-aspect-ratio=1/1"))
        # Queue for display
        queue_display = make("queue", "queue_display")
        queue_display.set_property("max-size-buffers", 5)
        queue_display.set_property("max-size-time", 0)
        queue_display.set_property("max-size-bytes", 0)
        queue_display.set_property("leaky", 2)
        # Sink
        sink = make("nveglglessink", "sink")
        sink.set_property("sync", False)
        # Add all elements to the pipeline (creation already validated).
        elements = [
            source, caps_source, nvvidconv, caps_convert,
            streammux, nvinfer, queue_infer, nvtracker, tee,
            # Branch 1: Display
            queue_display_branch, nvosd, nvvidconv2, caps_display, queue_display, sink,
            # Branch 2: Appsink
            queue_appsink, appsink
        ]
        for elem in elements:
            self.pipeline.add(elem)
        # Link elements
        if not source.link(caps_source):
            print("ERROR: Failed to link source -> caps_source")
            return False
        if not caps_source.link(nvvidconv):
            print("ERROR: Failed to link caps_source -> nvvidconv")
            return False
        if not nvvidconv.link(caps_convert):
            print("ERROR: Failed to link nvvidconv -> caps_convert")
            return False
        # Get streammux sink pad and link
        sinkpad = streammux.request_pad_simple("sink_0")
        srcpad = caps_convert.get_static_pad("src")
        if srcpad.link(sinkpad) != Gst.PadLinkReturn.OK:
            print("ERROR: Failed to link to streammux")
            return False
        # Link rest of pipeline up to tracker
        if not streammux.link(nvinfer):
            print("ERROR: Failed to link streammux -> nvinfer")
            return False
        if not nvinfer.link(queue_infer):
            print("ERROR: Failed to link nvinfer -> queue_infer")
            return False
        if not queue_infer.link(nvtracker):
            print("ERROR: Failed to link queue_infer -> nvtracker")
            return False
        if not nvtracker.link(tee):
            print("ERROR: Failed to link nvtracker -> tee")
            return False
        # Branch 1: Display path
        tee_display_pad = tee.request_pad_simple("src_%u")
        queue_display_sink = queue_display_branch.get_static_pad("sink")
        if tee_display_pad.link(queue_display_sink) != Gst.PadLinkReturn.OK:
            print("ERROR: Failed to link tee -> queue_display_branch")
            return False
        if not queue_display_branch.link(nvosd):
            print("ERROR: Failed to link queue_display_branch -> nvosd")
            return False
        # Add probe to recolor OSD output (see osd_sink_pad_buffer_probe).
        osd_sink_pad = nvosd.get_static_pad("sink")
        if osd_sink_pad:
            osd_sink_pad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
            print("Added OSD probe for shadow track coloring")
        if not nvosd.link(nvvidconv2):
            print("ERROR: Failed to link nvosd -> nvvidconv2")
            return False
        if not nvvidconv2.link(caps_display):
            print("ERROR: Failed to link nvvidconv2 -> caps_display")
            return False
        if not caps_display.link(queue_display):
            print("ERROR: Failed to link caps_display -> queue_display")
            return False
        if not queue_display.link(sink):
            print("ERROR: Failed to link queue_display -> sink")
            return False
        # Branch 2: Appsink path for ROS2
        tee_appsink_pad = tee.request_pad_simple("src_%u")
        queue_appsink_sink = queue_appsink.get_static_pad("sink")
        if tee_appsink_pad.link(queue_appsink_sink) != Gst.PadLinkReturn.OK:
            print("ERROR: Failed to link tee -> queue_appsink")
            return False
        if not queue_appsink.link(appsink):
            print("ERROR: Failed to link queue_appsink -> appsink")
            return False
        print("Pipeline created with tee: display branch + appsink branch for ROS2")
        return True

    def bus_call(self, bus, message, loop):
        """Handle bus messages: quit the loop on EOS or ERROR, log WARNINGs."""
        t = message.type
        if t == Gst.MessageType.EOS:
            print("End-of-stream")
            loop.quit()
        elif t == Gst.MessageType.ERROR:
            err, debug = message.parse_error()
            print(f"ERROR: {err.message}")
            print(f"Debug info: {debug}")
            loop.quit()
        elif t == Gst.MessageType.WARNING:
            warn, debug = message.parse_warning()
            print(f"WARNING: {warn.message}")
        return True

    def run(self):
        """Build the pipeline, run the main loop, tear down on exit.

        Returns True when the pipeline ran (until EOS/error/Ctrl+C),
        False when building or starting the pipeline failed.
        """
        if not self.create_pipeline():
            print("Failed to create pipeline")
            return False
        self.loop = GLib.MainLoop()
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.bus_call, self.loop)
        print("Starting detector...")
        ret = self.pipeline.set_state(Gst.State.PLAYING)
        if ret == Gst.StateChangeReturn.FAILURE:
            print("ERROR: Unable to set pipeline to PLAYING state")
            return False
        print("Pipeline running...")
        print("Press Ctrl+C to stop")
        try:
            self.loop.run()
        except KeyboardInterrupt:
            print("\nStopping pipeline...")
        # Always release pipeline resources, whether the loop ended
        # normally (EOS/error) or via Ctrl+C.
        self.pipeline.set_state(Gst.State.NULL)
        return True
def main():
    """CLI entry point: parse the socket-path argument and run the detector.

    Exits the process with status 0 on a clean run, 1 on failure.
    """
    arg_parser = argparse.ArgumentParser(
        description="Run DeepStream Detection with Bbox Matching Probe"
    )
    arg_parser.add_argument(
        "--socket-path",
        default="/tmp/deepstream-video.sock",
        help="Unix socket path for video input",
    )
    cli_args = arg_parser.parse_args()
    app = DeepStreamDetector(socket_path=cli_args.socket_path)
    sys.exit(0 if app.run() else 1)


if __name__ == "__main__":
    main()
%YAML:1.0
---
####################################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
####################################################################################################
BaseConfig:
minDetectorConfidence: 0.10 # If the confidence of a detector bbox is lower than this, then it won't be considered for tracking
TargetManagement:
enableBboxUnClipping: 1 # In case the bbox is likely to be clipped by image border, unclip bbox
preserveStreamUpdateOrder: 0 # When assigning new target ids, preserve input streams' order to keep target ids in a deterministic order over multiple runs
maxTargetsPerStream: 10 # Max number of targets to track per stream. Recommended to set >10. Note: this value should account for the targets being tracked in shadow mode as well. Max value depends on the GPU memory capacity
# [Creation & Termination Policy]
minIouDiff4NewTarget: 0.3 # If the IOU between the newly detected object and any of the existing targets is higher than this threshold, this newly detected object will be discarded.
minTrackerConfidence: 0.15 # If the confidence of an object tracker is lower than this on the fly, then it will be tracked in shadow mode. Valid Range: [0.0, 1.0]
probationAge: 5 # If the target's age exceeds this, the target will be considered to be valid.
maxShadowTrackingAge: 60 # Max length of shadow tracking. If the shadowTrackingAge exceeds this limit, the tracker will be terminated.
earlyTerminationAge: 100 # If the shadowTrackingAge reaches this threshold while in TENTATIVE period, the target will be terminated prematurely.
outputShadowTracks: 1 # Output shadow tracks to downstream elements (required to display them)
TrajectoryManagement:
useUniqueID: 0 # Use 64-bit long Unique ID when assigning tracker ID. Default is [true]
enableReAssoc: 1 # Enable Re-Assoc
# [Re-Assoc Metric: Thresholds for valid candidates]
minMatchingScore4Overall: 0.65 # min matching score for overall
minTrackletMatchingScore: 0.30 # min tracklet similarity score for re-assoc
minMatchingScore4ReidSimilarity: 0.05 # min reid similarity score for re-assoc
# [Re-Assoc Metric: Weights]
matchingScoreWeight4TrackletSimilarity: 0.80 # weight for tracklet similarity score
matchingScoreWeight4ReidSimilarity: 0.40 # weight for reid similarity score
# [Re-Assoc: Motion-based]
minTrajectoryLength4Projection: 34 # min trajectory length required to make projected trajectory
prepLength4TrajectoryProjection: 58 # the length of the trajectory during which the state estimator is updated to make projections
trajectoryProjectionLength: 33 # the length of the projected trajectory
maxAngle4TrackletMatching: 67 # max angle difference for tracklet matching [degree]
minSpeedSimilarity4TrackletMatching: 0.0574 # min speed similarity for tracklet matching
minBboxSizeSimilarity4TrackletMatching: 0.1013 # min bbox size similarity for tracklet matching
maxTrackletMatchingTimeSearchRange: 27 # the search space in time for max tracklet similarity
trajectoryProjectionProcessNoiseScale: 0.0100 # trajectory projector's process noise scale w.r.t. state estimator
trajectoryProjectionMeasurementNoiseScale: 100 # trajectory projector's measurement noise scale w.r.t. state estimator
trackletSpacialSearchRegionScale: 0.0100 # the search region scale for peer tracklet
# [Re-Assoc: Reid based. Reid model params are set in ReID section]
reidExtractionInterval: 8 # frame interval to extract reid features per target
DataAssociator:
dataAssociatorType: 0 # the type of data associator among { DEFAULT= 0 }
associationMatcherType: 1 # the type of matching algorithm among { GREEDY=0, CASCADED=1 }
checkClassMatch: 1 # If checked, only the same-class objects are associated with each other. Default: true
# [Association Metric: Thresholds for valid candidates]
minMatchingScore4Overall: 0.0222 # Min total score
minMatchingScore4SizeSimilarity: 0.3552 # Min bbox size similarity score
minMatchingScore4Iou: 0.0548 # Min IOU score
minMatchingScore4VisualSimilarity: 0.5043 # Min visual similarity score
# [Association Metric: Weights]
matchingScoreWeight4VisualSimilarity: 0.3951 # Weight for the visual similarity (in terms of correlation response ratio)
matchingScoreWeight4SizeSimilarity: 0.6003 # Weight for the Size-similarity score
matchingScoreWeight4Iou: 0.4033 # Weight for the IOU score
# [Association Metric: Tentative detections] only uses iou similarity for tentative detections
tentativeDetectorConfidence: 0.1024 # If a detection's confidence is lower than this but higher than minDetectorConfidence, then it's considered as a tentative detection
minMatchingScore4TentativeIou: 0.2852 # Min iou threshold to match targets and tentative detection
StateEstimator:
stateEstimatorType: 1 # the type of state estimator among { DUMMY=0, SIMPLE=1, REGULAR=2 }
# [Dynamics Modeling]
processNoiseVar4Loc: 6810.8668 # Process noise variance for bbox center
processNoiseVar4Size: 1541.8647 # Process noise variance for bbox size
processNoiseVar4Vel: 1348.4874 # Process noise variance for velocity
measurementNoiseVar4Detector: 100.0000 # Measurement noise variance for detector's detection
measurementNoiseVar4Tracker: 293.3238 # Measurement noise variance for tracker's localization
VisualTracker:
visualTrackerType: 2 # the type of visual tracker among { DUMMY=0, NvDCF_legacy=1, NvDCF_VPI=2 }
# [NvDCF: Feature Extraction]
useColorNames: 1 # Use ColorNames feature
useHog: 1 # Use Histogram-of-Oriented-Gradient (HOG) feature
featureImgSizeLevel: 3 # Size of a feature image. Valid range: {1, 2, 3, 4, 5}, from the smallest to the largest
featureFocusOffsetFactor_y: -0.1054 # The offset for the center of hanning window relative to the feature height. The center of hanning window would move by (featureFocusOffsetFactor_y*featureMatSize.height) in vertical direction
# [NvDCF: Correlation Filter]
filterLr: 0.0767 # learning rate for DCF filter in exponential moving average. Valid Range: [0.0, 1.0]
filterChannelWeightsLr: 0.0339 # learning rate for the channel weights among feature channels. Valid Range: [0.0, 1.0]
gaussianSigma: 0.5687 # Standard deviation for Gaussian for desired response when creating DCF filter [pixels]
ReID:
reidType: 2 # The type of reid among { DUMMY=0, NvDEEPSORT=1, Reid based reassoc=2, both NvDEEPSORT and reid based reassoc=3}
# [Reid Network Info]
batchSize: 100 # Batch size of reid network
workspaceSize: 1000 # Workspace size to be used by reid engine, in MB
reidFeatureSize: 256 # Size of reid feature
reidHistorySize: 100 # Max number of reid features kept for one object
inferDims: [3, 256, 128] # Reid network input dimension CHW or HWC based on inputOrder
networkMode: 1 # Reid network inference precision mode among {fp32=0, fp16=1, int8=2 }
# [Input Preprocessing]
inputOrder: 0 # Reid network input order among { NCHW=0, NHWC=1 }. Batch will be converted to the specified order before reid input.
colorFormat: 0 # Reid network input color format among {RGB=0, BGR=1 }. Batch will be converted to the specified color before reid input.
offsets: [123.6750, 116.2800, 103.5300] # Array of values to be subtracted from each input channel, with length equal to number of channels
netScaleFactor: 0.01735207 # Scaling factor for reid network input after subtracting offsets
keepAspc: 1 # Whether to keep aspect ratio when resizing input objects for reid
useVPICropScaler: 1 # Use VPI for image cropping and rescaling
# [Output Postprocessing]
addFeatureNormalization: 1 # If reid feature is not normalized in network, adding normalization on output so each reid feature has l2 norm equal to 1
minVisibility4GalleryUpdate: 0.6 # Add ReID embedding to the gallery only if the visibility is not lower than this
# [Paths and Names]
# tltEncodedModel: "/opt/nvidia/deepstream/deepstream/samples/models/Tracker/resnet50_market1501.etlt" # NVIDIA TAO model path
# tltModelKey: "nvidia_tao" # NVIDIA TAO model key
# modelEngineFile: "/opt/nvidia/deepstream/deepstream/samples/models/Tracker/resnet50_market1501.etlt_b100_gpu0_fp16.engine" # Engine file path
modelEngineFile: "../model/new_model.onnx_b1_gpu0_fp16.engine"
useBufferedOutput: 1 # Enable for smoothing
Thanks,