• Hardware Platform: GPU
• DeepStream Version: 7.0
• TensorRT Version: 10.6.0
• NVIDIA GPU Driver Version: 560.35.03
I'm running 3 streams in a single pipeline, and each stream needs a different tracker configuration. How should I approach this when all 3 streams have to run in the same pipeline?
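For context, here is a minimal sketch (Python/GStreamer) of roughly how the tracker is attached today; the element names, dimensions, and file paths are placeholders, not my actual ones:

# Minimal sketch; 3 sources are batched by a single nvstreammux (batch-size=3),
# followed by nvinfer, then one nvtracker for the whole batch.
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

tracker = Gst.ElementFactory.make("nvtracker", "tracker")
tracker.set_property("tracker-width", 640)
tracker.set_property("tracker-height", 384)
tracker.set_property(
    "ll-lib-file",
    "/opt/nvidia/deepstream/deepstream-7.0/lib/libnvds_nvmultiobjecttracker.so",  # placeholder path
)
# Currently a single NvDCF config file is applied to all 3 streams in the batch:
tracker.set_property("ll-config-file", "config_tracker_NvDCF_stream1.yml")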
Stream 1 tracker config:
%YAML:1.0
BaseConfig:
  minDetectorConfidence: 0.3    # If the confidence of a detector bbox is lower than this, then it won't be considered for tracking
TargetManagement:
  enableBboxUnClipping: 1       # In case the bbox is likely to be clipped by image border, unclip bbox
  maxTargetsPerStream: 1500     # Max number of targets to track per stream. Recommended to set >10. Note: this value should account for the targets being tracked in shadow mode as well. Max value depends on the GPU memory capacity
  # [Creation & Termination Policy]
  minIouDiff4NewTarget: 0.5     # If the IOU between the newly detected object and any of the existing targets is higher than this threshold, this newly detected object will be discarded.
  minTrackerConfidence: 0.2     # If the confidence of an object tracker is lower than this on the fly, then it will be tracked in shadow mode. Valid Range: [0.0, 1.0]
  probationAge: 5               # If the target's age exceeds this, the target will be considered to be valid.
  maxShadowTrackingAge: 50      # Max length of shadow tracking. If the shadowTrackingAge exceeds this limit, the tracker will be terminated.
  earlyTerminationAge: 3        # If the shadowTrackingAge reaches this threshold while in TENTATIVE period, the target will be terminated prematurely.
TrajectoryManagement:
  useUniqueID: 0                # Use 64-bit long Unique ID when assigning tracker ID. Default is [true]
DataAssociator:
  dataAssociatorType: 0         # the type of data associator among { DEFAULT=0 }
  associationMatcherType: 0     # the type of matching algorithm among { GREEDY=0, GLOBAL=1 }
  checkClassMatch: 1            # If checked, only the same-class objects are associated with each other. Default: true
  # [Association Metric: Thresholds for valid candidates]
  minMatchingScore4Overall: 0.2           # Min total score
  minMatchingScore4SizeSimilarity: 0.6    # Min bbox size similarity score
  minMatchingScore4Iou: 0.1               # Min IOU score
  minMatchingScore4VisualSimilarity: 0.6  # Min visual similarity score
  # [Association Metric: Weights]
  matchingScoreWeight4VisualSimilarity: 0.7  # Weight for the visual similarity (in terms of correlation response ratio)
  matchingScoreWeight4SizeSimilarity: 0.0    # Weight for the Size-similarity score
  matchingScoreWeight4Iou: 0.3               # Weight for the IOU score
StateEstimator:
  stateEstimatorType: 1         # the type of state estimator among { DUMMY=0, SIMPLE=1, REGULAR=2 }
  # [Dynamics Modeling]
  processNoiseVar4Loc: 2.0      # Process noise variance for bbox center
  processNoiseVar4Size: 1.0     # Process noise variance for bbox size
  processNoiseVar4Vel: 0.1      # Process noise variance for velocity
  measurementNoiseVar4Detector: 4.0  # Measurement noise variance for detector's detection
  measurementNoiseVar4Tracker: 16.0  # Measurement noise variance for tracker's localization
VisualTracker:
  visualTrackerType: 1          # the type of visual tracker among { DUMMY=0, NvDCF=1 }
  # [NvDCF: Feature Extraction]
  useColorNames: 1              # Use ColorNames feature
  useHog: 1                     # Use Histogram-of-Oriented-Gradient (HOG) feature
  featureImgSizeLevel: 3        # Size of a feature image. Valid range: {1, 2, 3, 4, 5}, from the smallest to the largest
  featureFocusOffsetFactor_y: -0.2  # The offset for the center of hanning window relative to the feature height. The center of hanning window would move by (featureFocusOffsetFactor_y*featureMatSize.height) in vertical direction
  # [NvDCF: Correlation Filter]
  filterLr: 0.05                # learning rate for DCF filter in exponential moving average. Valid Range: [0.0, 1.0]
  filterChannelWeightsLr: 0.1   # learning rate for the channel weights among feature channels. Valid Range: [0.0, 1.0]
  gaussianSigma: 0.75           # Standard deviation for Gaussian for desired response when creating DCF filter [pixels]
Streams 2 and 3 tracker config:
%YAML:1.0
BaseConfig:
  minDetectorConfidence: 0.1    # If the confidence of a detector bbox is lower than this, then it won't be considered for tracking
TargetManagement:
  enableBboxUnClipping: 1       # In case the bbox is likely to be clipped by image border, unclip bbox
  maxTargetsPerStream: 1500     # Max number of targets to track per stream. Recommended to set >10. Note: this value should account for the targets being tracked in shadow mode as well. Max value depends on the GPU memory capacity
  # [Creation & Termination Policy]
  minIouDiff4NewTarget: 0.5     # If the IOU between the newly detected object and any of the existing targets is higher than this threshold, this newly detected object will be discarded.
  minTrackerConfidence: 0.2     # If the confidence of an object tracker is lower than this on the fly, then it will be tracked in shadow mode. Valid Range: [0.0, 1.0]
  probationAge: 10              # If the target's age exceeds this, the target will be considered to be valid.
  maxShadowTrackingAge: 50      # Max length of shadow tracking. If the shadowTrackingAge exceeds this limit, the tracker will be terminated.
  earlyTerminationAge: 10       # If the shadowTrackingAge reaches this threshold while in TENTATIVE period, the target will be terminated prematurely.
TrajectoryManagement:
  useUniqueID: 0                # Use 64-bit long Unique ID when assigning tracker ID. Default is [true]
DataAssociator:
  dataAssociatorType: 0         # the type of data associator among { DEFAULT=0 }
  associationMatcherType: 0     # the type of matching algorithm among { GREEDY=0, GLOBAL=1 }
  checkClassMatch: 1            # If checked, only the same-class objects are associated with each other. Default: true
  # [Association Metric: Thresholds for valid candidates]
  minMatchingScore4Overall: 0.2           # Min total score
  minMatchingScore4SizeSimilarity: 0.6    # Min bbox size similarity score
  minMatchingScore4Iou: 0.1               # Min IOU score
  minMatchingScore4VisualSimilarity: 0.6  # Min visual similarity score
  # [Association Metric: Weights]
  matchingScoreWeight4VisualSimilarity: 0.7  # Weight for the visual similarity (in terms of correlation response ratio)
  matchingScoreWeight4SizeSimilarity: 0.0    # Weight for the Size-similarity score
  matchingScoreWeight4Iou: 0.3               # Weight for the IOU score
StateEstimator:
  stateEstimatorType: 1         # the type of state estimator among { DUMMY=0, SIMPLE=1, REGULAR=2 }
  # [Dynamics Modeling]
  processNoiseVar4Loc: 2.0      # Process noise variance for bbox center
  processNoiseVar4Size: 1.0     # Process noise variance for bbox size
  processNoiseVar4Vel: 0.1      # Process noise variance for velocity
  measurementNoiseVar4Detector: 4.0  # Measurement noise variance for detector's detection
  measurementNoiseVar4Tracker: 16.0  # Measurement noise variance for tracker's localization
VisualTracker:
  visualTrackerType: 1          # the type of visual tracker among { DUMMY=0, NvDCF=1 }
  # [NvDCF: Feature Extraction]
  useColorNames: 1              # Use ColorNames feature
  useHog: 1                     # Use Histogram-of-Oriented-Gradient (HOG) feature
  featureImgSizeLevel: 3        # Size of a feature image. Valid range: {1, 2, 3, 4, 5}, from the smallest to the largest
  featureFocusOffsetFactor_y: -0.2  # The offset for the center of hanning window relative to the feature height. The center of hanning window would move by (featureFocusOffsetFactor_y*featureMatSize.height) in vertical direction
  # [NvDCF: Correlation Filter]
  filterLr: 0.05                # learning rate for DCF filter in exponential moving average. Valid Range: [0.0, 1.0]
  filterChannelWeightsLr: 0.1   # learning rate for the channel weights among feature channels. Valid Range: [0.0, 1.0]
  gaussianSigma: 0.75           # Standard deviation for Gaussian for desired response when creating DCF filter [pixels]
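From the Gst-nvtracker documentation I understand there is a sub-batching feature (alpha) that splits the batch into sub-batches, with one low-level config file per sub-batch. Is something like the sketch below the intended way to get per-stream tracker configs, or is a different approach recommended (for example, one nvtracker per stream)? The property values and file names here are my assumptions, not something I have verified:

# Assumed usage of the sub-batching feature; please correct me if this is wrong.
# sub-batch 0 = source 0 (stream 1); sub-batch 1 = sources 1 and 2 (streams 2 and 3)
tracker.set_property("sub-batches", "0;1,2")
# One low-level config file per sub-batch, separated by semicolons (assumed syntax):
tracker.set_property(
    "ll-config-file",
    "config_tracker_NvDCF_stream1.yml;config_tracker_NvDCF_stream2_3.yml",
)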