I am interfacing an Intel RealSense D435 with a Jetson Nano. I capture the color image and pass it as input to the detectNet C++ Detect() function, but detectNet is not detecting objects correctly: it only detects one small object and says the detected object is a human. I have pasted my code below. Can you please tell me what is wrong in it?
// include the librealsense C++ header file
#include <librealsense2/rs.hpp>
// include OpenCV header file
#include "opencv2/opencv.hpp"
#include <jetson-inference/detectNet.h>
#include <jetson-utils/videoOutput.h>
#include <jetson-utils/commandLine.h> // commandLine (also pulled in via detectNet.h)
#include <jetson-utils/logging.h>     // LogVerbose() / LogError()
#include <cuda_runtime.h>             // cudaMalloc() / cudaMemcpy2D()
using namespace std;
using namespace cv;
#ifdef HEADLESS
    #define IS_HEADLESS() "headless" // run without display
#else
    #define IS_HEADLESS() (const char*)NULL
#endif
int main(int argc, char ** argv)
{
    commandLine cmdLine(argc, argv, IS_HEADLESS());

    int imgWidth  = 1280;
    int imgHeight = 720;
    int fps       = 30;

    rs2::config rs2Cfg;

    uchar3* imgPtr = NULL;
    cudaMalloc((void**)&imgPtr, imgWidth * imgHeight * sizeof(uchar3));
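    // note: imgPtr is a device-only buffer sized for one RGB8 (uchar3) frame,
    // which is the layout the uchar3 overload of detectNet::Detect() expects in CUDA memory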
    /*
     * create output stream
     */
    videoOutput* output = videoOutput::Create(cmdLine, ARG_POSITION(0));

    rs2Cfg.enable_stream(RS2_STREAM_COLOR, imgWidth, imgHeight, RS2_FORMAT_BGR8, fps);
    // start pipeline
    rs2::pipeline rs2Pipe;
    rs2::pipeline_profile pipeline_profile = rs2Pipe.start(rs2Cfg);
    // create the detection network
    detectNet* net = detectNet::Create();
    if( !net )
    {
        LogError("failed to load detectNet model\n");
        return EXIT_FAILURE;
    }
    // parse overlay flags
    const uint32_t overlayFlags = detectNet::OverlayFlagsFromStr(cmdLine.GetString("overlay", "box,labels,conf"));
    Mat colorFrame(imgHeight, imgWidth, CV_8UC3, cv::Scalar(0, 0, 0));
    while (1) // Application still alive?
    {
        // wait for frames and get frameset
        rs2::frameset rs2Frame = rs2Pipe.wait_for_frames();

        // get each frame
        rs2::frame color_frame = rs2Frame.get_color_frame();

        // creating OpenCV Matrix from a color image
        Mat colorFrameTemp(Size(imgWidth, imgHeight), CV_8UC3, (void*)color_frame.get_data(), Mat::AUTO_STEP);
        cvtColor(colorFrameTemp, colorFrame, COLOR_RGB2BGR);

        cudaMemcpy2D((void*)imgPtr, imgWidth * sizeof(uchar3), (void*)colorFrame.data, colorFrame.step, imgWidth * sizeof(uchar3), imgHeight, cudaMemcpyHostToDevice);
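        // at this point imgPtr holds the frame in GPU memory as RGB8: the camera delivers BGR8,
        // the cvtColor() channel swap above reorders it to RGB, and cudaMemcpy2D() copies it row
        // by row (using colorFrame.step as the source pitch) into the tightly packed uchar3 buffer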
        // this variable will store the confidence of the classification (between 0 and 1)
        //float confidence = 0.0;

        // detect objects in the frame
        detectNet::Detection* detections = NULL;
        const int numDetections = net->Detect(imgPtr, imgWidth, imgHeight, &detections, overlayFlags);
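        // Detect() returns the number of objects found and points 'detections' at a results
        // array owned by the network; because overlayFlags is passed, the boxes/labels are
        // also drawn directly into imgPtr on the GPU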
        if( numDetections > 0 )
        {
            LogVerbose("%i objects detected\n", numDetections);

            for( int n = 0; n < numDetections; n++ )
            {
                LogVerbose("detected obj %i class #%u (%s) confidence=%f\n", n, detections[n].ClassID, net->GetClassDesc(detections[n].ClassID), detections[n].Confidence);
                LogVerbose("bounding box %i (%f, %f) (%f, %f) w=%f h=%f\n", n, detections[n].Left, detections[n].Top, detections[n].Right, detections[n].Bottom, detections[n].Width(), detections[n].Height());
            }
        }
//cout<< "Distance is " << depthImage.at<ushort>(680,640) << endl;
// render outputs
if( output != NULL )
{
output->Render(imgPtr, imgWidth, imgHeight);
// update the status bar
char str[256];
sprintf(str, "TensorRT %i.%i.%i | %s | Network %.0f FPS", NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, precisionTypeToStr(net->GetPrecision()), net->GetNetworkFPS());
output->SetStatus(str);
}
// print out timing info
//net->PrintProfilerTimes();
}
    // free the CUDA frame buffer and release the network / output stream
    cudaFree(imgPtr);
    SAFE_DELETE(output);
    SAFE_DELETE(net);

    return EXIT_SUCCESS;
}
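
For reference, here is a trimmed-down sketch of the capture-to-Detect() path as I understand it, separate from my program above. It is only meant to illustrate the data flow I am trying to achieve and I have not verified it on the Nano: it assumes the color stream can be requested as RS2_FORMAT_RGB8 (so the librealsense buffer already matches the uchar3 RGB layout detectNet expects and no OpenCV cvtColor() is needed), and the buffer name gpuImg and the fixed 100-frame loop are just placeholders for this sketch.

// sketch only -- minimal RealSense -> detectNet path, not verified
#include <librealsense2/rs.hpp>
#include <jetson-inference/detectNet.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

int main()
{
    const int W = 1280, H = 720, FPS = 30;

    // ask librealsense for RGB8 directly, so no channel swap is needed later
    rs2::config cfg;
    cfg.enable_stream(RS2_STREAM_COLOR, W, H, RS2_FORMAT_RGB8, FPS);

    rs2::pipeline pipe;
    pipe.start(cfg);

    // default model (SSD-Mobilenet-v2)
    detectNet* net = detectNet::Create();
    if( !net )
        return EXIT_FAILURE;

    // device buffer for one RGB8 (uchar3) frame
    uchar3* gpuImg = NULL;
    cudaMalloc((void**)&gpuImg, W * H * sizeof(uchar3));

    for( int i = 0; i < 100; i++ ) // grab a fixed number of frames for the test
    {
        rs2::frameset frames = pipe.wait_for_frames();
        rs2::video_frame color = frames.get_color_frame();

        // copy the already-RGB8 frame host -> device, honouring the camera's row stride
        cudaMemcpy2D(gpuImg, W * sizeof(uchar3),
                     color.get_data(), color.get_stride_in_bytes(),
                     W * sizeof(uchar3), H, cudaMemcpyHostToDevice);

        // run detection (the default overlay is drawn into gpuImg)
        detectNet::Detection* dets = NULL;
        const int n = net->Detect(gpuImg, W, H, &dets);

        printf("frame %i: %i objects detected\n", i, n);
    }

    cudaFree(gpuImg);
    delete net;
    return EXIT_SUCCESS;
}

Compared with my full program, the only intended difference is that the channel order comes straight from librealsense, so the intermediate OpenCV Mat and the cvtColor() call are not needed.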