In the first use-case I removed the detection of the person from objects_at_2 (because of that removal, frame.objectsIn.numFilled = 1), yes.
I’ve attached the full code.
#include <iostream>
#include <nvdstracker.h>
#include <cuda_runtime_api.h>
#include <chrono>
#include <vector>
#include <array>
#include <opencv2/core/mat.hpp>
#include <opencv2/imgcodecs.hpp>
#include <unordered_map>
namespace TMetadataV2ObjectHeader {
// Object classes produced by the mock detector. The enumerator value is
// also used directly as the tracker classId (car = 0, person = 1), so the
// order must not change.
enum class TObjectType {
car,
person
};
}
// Axis-aligned bounding box of a mock detection, in pixel coordinates.
// Member order matters: aggregate-initialized as { x, y, width, height }.
struct Object {
int x;      // left edge
int y;      // top edge
int width;  // box width
int height; // box height
};
// Mock detections valid for frames 0 and 1: one car and one person.
// Each Object is ( x, y, width, height ).
static const std::unordered_map< TMetadataV2ObjectHeader::TObjectType, std::vector< Object > > objects_at_0{
    { TMetadataV2ObjectHeader::TObjectType::car,    std::vector< Object >{ Object{ 261, 90, 108, 77 } } },
    { TMetadataV2ObjectHeader::TObjectType::person, std::vector< Object >{ Object{ 63, 93, 110, 249 } } }
};
// Mock detections valid for frames 2 and 3. The person detection was
// deliberately removed here to exercise tracking without a detection.
// Each Object is ( x, y, width, height ).
static const std::unordered_map< TMetadataV2ObjectHeader::TObjectType, std::vector< Object > > objects_at_2{
    { TMetadataV2ObjectHeader::TObjectType::car,    std::vector< Object >{ Object{ 313, 90, 108, 77 } } },
    // person: intentionally empty - the removed box was Object{ 114, 93, 110, 249 }
    { TMetadataV2ObjectHeader::TObjectType::person, std::vector< Object >{} }
};
// Mock detections valid for frames 4 and 5: one car and one person.
// Each Object is ( x, y, width, height ).
static const std::unordered_map< TMetadataV2ObjectHeader::TObjectType, std::vector< Object > > objects_at_4{
    { TMetadataV2ObjectHeader::TObjectType::car,    std::vector< Object >{ Object{ 364, 90, 108, 77 } } },
    { TMetadataV2ObjectHeader::TObjectType::person, std::vector< Object >{ Object{ 166, 93, 110, 249 } } }
};
/// Feeds one grayscale frame plus its mock detections through the NvMOT
/// low-level tracker API and prints the resulting tracked objects.
///
/// @param currentFrame   GRAY8 image; its pixel buffer is handed to the
///                       tracker directly (no copy - the tracker runs on the
///                       CPU, see NVMOTCOMP_CPU below).
/// @param frameNum       Sequential frame number within the stream. Detections
///                       are only fed on even frame numbers; odd frames test
///                       the tracker carrying targets on its own.
/// @param frameTimestamp Capture timestamp forwarded to the tracker.
/// @param objects        Per-type detection lists (x, y, width, height).
void processFrame( cv::Mat currentFrame, uint32_t frameNum, int64_t frameTimestamp, const std::unordered_map< TMetadataV2ObjectHeader::TObjectType, std::vector< Object > >& objects )
{
    // Fixed iteration order over the two known object classes.
    static constexpr std::array< TMetadataV2ObjectHeader::TObjectType, 2 > objectTypes{ {
        TMetadataV2ObjectHeader::TObjectType::car,
        TMetadataV2ObjectHeader::TObjectType::person
    } };
    auto startPoint = std::chrono::high_resolution_clock::now();
    char path[] = "/opt/nvidia/deepstream/deepstream-4.0/samples/configs/deepstream-app/tracker_config.yml";
    // Query the tracker's capabilities. The result is currently unused; the
    // call mainly verifies that the library and config file are reachable.
    NvMOTQuery query{};
    {
        const auto status = NvMOT_Query( sizeof( path ), path, &query );
        if( status != NvMOTStatus_OK ) {
            std::cout << "Error: NvMOT_Query failed" << std::endl;
        }
    }
    /* INIT */
    const uint32_t width = static_cast< uint32_t >( currentFrame.cols );
    const uint32_t height = static_cast< uint32_t >( currentFrame.rows );
    const uint32_t pitch = static_cast< uint32_t >( currentFrame.step1() );
    // The cv::Mat's host buffer is passed directly instead of CUDA memory.
    // NOTE(review): bufferType below is declared NVBUF_MEM_CUDA_UNIFIED even
    // though this is plain host memory - works for CPU compute, but confirm
    // before switching to a GPU compute target.
    void* yDevMem = currentFrame.data;
    const uint32_t fullSize = pitch * height;
    // Context is created once on the first call and reused afterwards.
    static bool initCompleted{ false };
    static NvMOTContextHandle pContextHandle{};
    if( !initCompleted ) {
        // IN params: a single transform matching the frame geometry.
        NvMOTPerTransformBatchConfig batchConfig[ 1 ]{};
        batchConfig->bufferType = NVBUF_MEM_CUDA_UNIFIED;
        batchConfig->colorFormat = NVBUF_COLOR_FORMAT_GRAY8;
        batchConfig->maxHeight = height;
        batchConfig->maxPitch = pitch;
        batchConfig->maxSize = fullSize;
        batchConfig->maxWidth = width;
        NvMOTConfig pConfigIn{};
        pConfigIn.computeConfig = NVMOTCOMP_CPU;             /**< Compute target, see NvMOTCompute. */
        pConfigIn.maxStreams = 1;                            /**< Maximum number of streams in a batch. */
        pConfigIn.numTransforms = 1;                         /**< Number of entries in perTransformBatchConfig. */
        pConfigIn.perTransformBatchConfig = batchConfig;     /**< One batch config per transform. */
        pConfigIn.miscConfig.gpuId = 0;                      /**< GPU to be used. */
        pConfigIn.miscConfig.maxObjPerBatch = 0;             /**< Max objects to track per batch. 0 = no limit. */
        pConfigIn.miscConfig.maxObjPerStream = 0;            /**< Max objects to track per stream. 0 = no limit. */
        pConfigIn.customConfigFilePathSize = sizeof( path ); /**< Char length of customConfigFilePath. */
        pConfigIn.customConfigFilePath = path;               /**< Null-terminated path to custom config. */
        // OUT params
        NvMOTConfigResponse pConfigResponse{};
        const auto status = NvMOT_Init( &pConfigIn, &pContextHandle, &pConfigResponse );
        if( status != NvMOTStatus_OK ) {
            // Without a valid context, NvMOT_Process would be handed a null
            // handle below - bail out; init is retried on the next frame.
            std::cout << "Error: NvMOT_Init failed" << std::endl;
            return;
        }
        initCompleted = true;
    }
    /* PROCESS */
    // IN params: describe the single pitch-linear GRAY8 plane.
    NvBufSurfaceParams bufferParam[ 1 ]{};
    bufferParam->width = width;
    bufferParam->height = height;
    bufferParam->pitch = pitch;
    bufferParam->colorFormat = NVBUF_COLOR_FORMAT_GRAY8;
    bufferParam->layout = NVBUF_LAYOUT_PITCH;            /** BL or PL for Jetson, only PL on dGPU. */
    bufferParam->dataSize = fullSize;
    bufferParam->dataPtr = yDevMem;
    bufferParam->planeParams.num_planes = 1;
    bufferParam->planeParams.width[ 0 ] = width;
    bufferParam->planeParams.height[ 0 ] = height;
    bufferParam->planeParams.pitch[ 0 ] = pitch;         /** Pitch in bytes. */
    bufferParam->planeParams.offset[ 0 ] = 0;
    bufferParam->planeParams.psize[ 0 ] = pitch * height;
    bufferParam->planeParams.bytesPerPix[ 0 ] = 1;       /** GRAY8: one byte per pixel. */
    bufferParam->mappedAddr.addr[ 0 ] = yDevMem;
    bufferParam->mappedAddr.eglImage = nullptr;
    NvBufSurfaceParams* bufferParamPtr{ bufferParam };
    // Static so the buffers (and the tracker's associatedObjectIn pointers)
    // stay valid across calls; they only ever grow, never shrink.
    static std::vector< NvMOTObjToTrack > inObjectVec{};
    static std::vector< NvMOTTrackedObj > outObjectVec{};
    size_t currObjectCount{ 0 };
    for( const TMetadataV2ObjectHeader::TObjectType currType: objectTypes ) {
        currObjectCount += objects.at( currType ).size();
    }
    if( inObjectVec.size() < currObjectCount ) {
        inObjectVec.resize( currObjectCount );
    }
    if( outObjectVec.size() < currObjectCount ) {
        outObjectVec.resize( currObjectCount );
    }
    // Flatten the per-type detection lists into the tracker's input array.
    // This section can be mocked with generated objects.
    size_t currInObjectIndex{ 0 };
    for( const TMetadataV2ObjectHeader::TObjectType currObjType: objectTypes ) {
        for( const auto& currObj: objects.at( currObjType ) ) {
            NvMOTObjToTrack& currInObject = inObjectVec[ currInObjectIndex++ ];
            currInObject.classId = static_cast< uint16_t >( currObjType ); /**< Class of the object to be tracked. */
            currInObject.bbox.x = currObj.x;                               /**< Bounding box. */
            currInObject.bbox.y = currObj.y;
            currInObject.bbox.width = currObj.width;
            currInObject.bbox.height = currObj.height;
            currInObject.confidence = 1.f;                                 /**< Detection confidence. */
            currInObject.doTracking = true;                                /**< True: initiate tracking on this object. */
        }
    }
    NvMOTFrame frame{};
    frame.streamID = 0;                  /**< Stream source for this frame. */
    frame.frameNum = frameNum;           /**< Sequential frame number within the stream. */
    frame.timeStamp = frameTimestamp;    /**< Capture timestamp. */
    frame.timeStampValid = true;
    frame.doTracking = true;             /**< Track objects in this frame. */
    frame.reset = false;                 /**< Do not reset tracking for the stream. */
    frame.numBuffers = 1;                /**< Number of entries in bufferList. */
    frame.bufferList = &bufferParamPtr;
    // Detections are only fed on every second frame; in between, numFilled is
    // 0 and the tracker must carry its targets on its own.
    frame.objectsIn.detectionDone = ( frameNum % 2 == 0 );
    frame.objectsIn.numAllocated = inObjectVec.size();
    frame.objectsIn.numFilled = ( frameNum % 2 == 0 ? currObjectCount : 0 );
    frame.objectsIn.list = inObjectVec.data();
    NvMOTProcessParams processParams{};
    processParams.numFrames = 1;
    processParams.frameList = &frame;
    // OUT params: a single per-stream list backed by outObjectVec.
    NvMOTTrackedObjBatch outTrackedBatch{};
    NvMOTTrackedObjList outBatchObjects{};
    outBatchObjects.list = outObjectVec.data();
    outBatchObjects.streamID = 0;
    outBatchObjects.frameNum = frameNum;
    outBatchObjects.valid = true;                        /**< This entry in the batch is valid. */
    outBatchObjects.numAllocated = outObjectVec.size();  /**< Blocks allocated for the list. */
    outBatchObjects.numFilled = 0;                       /**< Filled in by the tracker. */
    outTrackedBatch.numAllocated = 1;
    outTrackedBatch.numFilled = 1;
    outTrackedBatch.list = &outBatchObjects;
    {
        const auto status = NvMOT_Process( pContextHandle, &processParams, &outTrackedBatch );
        if( status != NvMOTStatus_OK ) {
            std::cout << "Error: NvMOT_Process failed" << std::endl;
        }
    }
    // Print every tracked object together with the detection it was matched
    // to; associatedObjectIn is null for coasted/unmatched tracks.
    for( size_t outIndex = 0; outIndex < outBatchObjects.numFilled; ++outIndex ) {
        const auto& currOutObj{ outObjectVec[ outIndex ] };
        const auto currOutAssociated{ currOutObj.associatedObjectIn };
        if( currOutAssociated != nullptr ) {
            std::cout << "Ref [x: " << currOutAssociated->bbox.x << " y: " << currOutAssociated->bbox.y
                      << " w: " << currOutAssociated->bbox.width << " h: " << currOutAssociated->bbox.height << "] ";
        } else {
            std::cout << "No reference, ";
        }
        std::cout << " Tracked [x: " << currOutObj.bbox.x << " y: " << currOutObj.bbox.y
                  << " w: " << currOutObj.bbox.width << " h: " << currOutObj.bbox.height << "]"
                  << " Confidence: " << currOutObj.confidence << " ID: " << currOutObj.trackingId << " Age: " << currOutObj.age << std::endl;
    }
    auto endPoint = std::chrono::high_resolution_clock::now();
    std::chrono::duration<float, std::milli> fdur = endPoint - startPoint;
    // Guard the per-object average against division by zero when no object
    // has ever been queued for this stream.
    const float avgRuntime = inObjectVec.empty() ? 0.f : fdur.count() / inObjectVec.size();
    std::cout << "Count in: " << frame.objectsIn.numFilled << " Count out: " << outBatchObjects.numFilled
              << " Runtime: " << fdur.count() << " Avg runtime: " << avgRuntime << std::endl;
}
/// Loads six consecutive grayscale test frames and runs each through the
/// tracker. Detection tables are paired per frame: detections are provided
/// for frames 0, 2 and 4 (see processFrame's even-frame gating), while the
/// frames in between rely on tracking alone.
int main()
{
    std::array< cv::Mat, 6 > images{ {
        cv::imread( "/home/balazad/Development/Cpp/DSTest/img_1.jpg", cv::IMREAD_GRAYSCALE ),
        cv::imread( "/home/balazad/Development/Cpp/DSTest/img_2.jpg", cv::IMREAD_GRAYSCALE ),
        cv::imread( "/home/balazad/Development/Cpp/DSTest/img_3.jpg", cv::IMREAD_GRAYSCALE ),
        cv::imread( "/home/balazad/Development/Cpp/DSTest/img_4.jpg", cv::IMREAD_GRAYSCALE ),
        cv::imread( "/home/balazad/Development/Cpp/DSTest/img_5.jpg", cv::IMREAD_GRAYSCALE ),
        cv::imread( "/home/balazad/Development/Cpp/DSTest/img_6.jpg", cv::IMREAD_GRAYSCALE ),
    } };
    // Detection table for each frame above; each table serves two frames.
    std::array< const std::unordered_map< TMetadataV2ObjectHeader::TObjectType, std::vector< Object > >*, 6 > objs{ {
        &objects_at_0,
        &objects_at_0,
        &objects_at_2,
        &objects_at_2,
        &objects_at_4,
        &objects_at_4
    } };
    for( size_t imgIndex = 0; imgIndex < images.size(); ++imgIndex ) {
        // cv::imread returns an empty Mat on failure; feeding that to the
        // tracker would describe a zero-sized buffer, so skip with an error.
        if( images[ imgIndex ].empty() ) {
            std::cout << "Error: could not load image " << imgIndex + 1 << std::endl;
            continue;
        }
        std::cout << "---Frame " << imgIndex + 1 << "---" << std::endl;
        processFrame( images[ imgIndex ], imgIndex, static_cast< int64_t >( imgIndex * 100 ), *objs[ imgIndex ] );
    }
    return 0;
}