Hi,
Thanks for your testing image.
We can generate the correct bounding box with TensorRT C++ API now.
Please check the following for the detailed steps:
1. Generate uff model.
Please use this config.py.txt (2.5 KB) with the following command:
sudo python3 /usr/lib/python3.6/dist-packages/uff/bin/convert_to_uff.py frozen_inference_graph.pb -o sample_ssd_relu6.uff -O NMS -p config.py
2. Prepare data
Please store the sample_ssd_relu6.uff file in /usr/src/tensorrt/data/ssd (if the sample's uffFileName differs — e.g. sample_ssd_mobilenet_v2.uff — rename the file or the parameter so they match).
Please also store your testing image as test.jpeg in /usr/src/tensorrt/data/ssd.
3. Apply the following patch to our sampleUffSSD sample:
diff --git a/Makefile.config b/Makefile.config
index a2a5ca8..dfd6ed9 100644
--- a/Makefile.config
+++ b/Makefile.config
@@ -141,7 +141,7 @@ ifneq ($(shell uname -m), $(TARGET))
LIBPATHS += -L"../lib/stubs" -L"../../lib/stubs" -L"/usr/lib/$(DLSW_TRIPLE)/stubs" -L"/usr/lib/$(DLSW_TRIPLE)" -L"/usr/lib/$(CUBLAS_TRIPLE)/stubs" -L"/usr/lib/$(CUBLAS_TRIPLE)"
LIBPATHS += -L"$(CUDA_INSTALL_DIR)/targets/$(CUDA_TRIPLE)/$(CUDA_LIBDIR)/stubs" -L"$(CUDA_INSTALL_DIR)/targets/$(CUDA_TRIPLE)/$(CUDA_LIBDIR)"
endif
-INCPATHS += -I"../common" -I"$(CUDA_INSTALL_DIR)/include" -I"$(CUDNN_INSTALL_DIR)/include" -I"../include" -I"../../include" -I"../../parsers/onnxOpenSource"
+INCPATHS += -I"../common" -I"$(CUDA_INSTALL_DIR)/include" -I"$(CUDNN_INSTALL_DIR)/include" -I"../include" -I"../../include" -I"../../parsers/onnxOpenSource" -I"/usr/include/opencv4"
LIBPATHS += -L"$(CUDA_INSTALL_DIR)/$(CUDA_LIBDIR)" -Wl,-rpath-link="$(CUDA_INSTALL_DIR)/$(CUDA_LIBDIR)"
LIBPATHS += -L"$(CUDNN_INSTALL_DIR)/$(CUDNN_LIBDIR)" -Wl,-rpath-link="$(CUDNN_INSTALL_DIR)/$(CUDNN_LIBDIR)"
LIBPATHS += -L"../lib" -L"../../lib" -L"$(TRT_LIB_DIR)" -Wl,-rpath-link="$(TRT_LIB_DIR)" $(STUBS_DIR)
@@ -223,12 +223,12 @@ ifeq ($(TARGET), qnx)
COMMON_FLAGS += -D_POSIX_C_SOURCE=200112L -D_QNX_SOURCE -D_FILE_OFFSET_BITS=64 -fpermissive
endif
-COMMON_LD_FLAGS += $(LIBPATHS) -L$(OUTDIR)
+COMMON_LD_FLAGS += $(LIBPATHS) -L$(OUTDIR) `pkg-config --libs opencv4`
OBJDIR = $(call concat,$(OUTDIR),/chobj)
DOBJDIR = $(call concat,$(OUTDIR),/dchobj)
-COMMON_LIBS += $(CUDART_LIB)
+COMMON_LIBS += $(CUDART_LIB) `pkg-config --cflags opencv4`
ifneq ($(SAFE_PDK),1)
COMMON_LIBS += $(CUBLAS_LIB) $(CUDNN_LIB)
endif
diff --git a/sampleUffSSD/sampleUffSSD.cpp b/sampleUffSSD/sampleUffSSD.cpp
index 97dcead..8078849 100644
--- a/sampleUffSSD/sampleUffSSD.cpp
+++ b/sampleUffSSD/sampleUffSSD.cpp
@@ -29,6 +29,9 @@
#include "common.h"
#include "logger.h"
+#include "opencv2/highgui.hpp"
+#include "opencv2/imgproc.hpp"
+
#include "NvInfer.h"
#include "NvUffParser.h"
#include <cuda_runtime_api.h>
@@ -39,6 +42,16 @@
#include <sstream>
const std::string gSampleName = "TensorRT.sample_uff_ssd";
+void readImage(const std::string& filename, cv::Mat &image)
+{
+ image = cv::imread(filename);
+ if( image.empty() )
+ {
+ std::cout << "Cannot open image " << filename << std::endl;
+ exit(0);
+ }
+ cv::resize(image, image, cv::Size(300,300));
+}
//!
//! \brief The SampleUffSSDParams structure groups the additional parameters required by
@@ -95,6 +108,7 @@ private:
std::shared_ptr<nvinfer1::ICudaEngine> mEngine; //!< The TensorRT engine used to run the network
+ cv::Mat image;
//!
//! \brief Parses an UFF model for SSD and creates a TensorRT network
//!
@@ -290,25 +304,26 @@ bool SampleUffSSD::processInput(const samplesCommon::BufferManager& buffers)
const int batchSize = mParams.batchSize;
// Available images
- std::vector<std::string> imageList = {"dog.ppm", "bus.ppm"};
+ std::vector<std::string> imageList = {"test.jpeg"};
mPPMs.resize(batchSize);
assert(mPPMs.size() <= imageList.size());
for (int i = 0; i < batchSize; ++i)
{
- readPPMFile(locateFile(imageList[i], mParams.dataDirs), mPPMs[i]);
+ readImage(locateFile(imageList[i], mParams.dataDirs), image);
}
float* hostDataBuffer = static_cast<float*>(buffers.getHostBuffer(mParams.inputTensorNames[0]));
// Host memory for input buffer
- for (int i = 0, volImg = inputC * inputH * inputW; i < mParams.batchSize; ++i)
+ for (int i = 0, volImg = inputH * inputW; i < mParams.batchSize; ++i)
{
- for (int c = 0; c < inputC; ++c)
+ for (unsigned j = 0, volChl = inputH * inputW; j < inputH; ++j)
{
- // The color image to input should be in BGR order
- for (unsigned j = 0, volChl = inputH * inputW; j < volChl; ++j)
- {
- hostDataBuffer[i * volImg + c * volChl + j]
- = (2.0 / 255.0) * float(mPPMs[i].buffer[j * inputC + c]) - 1.0;
+ for( unsigned k = 0; k < inputW; ++ k)
+ {
+ cv::Vec3b bgr = image.at<cv::Vec3b>(j,k);
+ hostDataBuffer[i * volImg + 0 * volChl + j * inputW + k] = (2.0 / 255.0) * float(bgr[2]) - 1.0;
+ hostDataBuffer[i * volImg + 1 * volChl + j * inputW + k] = (2.0 / 255.0) * float(bgr[1]) - 1.0;
+ hostDataBuffer[i * volImg + 2 * volChl + j * inputW + k] = (2.0 / 255.0) * float(bgr[0]) - 1.0;
}
}
}
@@ -350,7 +365,7 @@ bool SampleUffSSD::verifyOutput(const samplesCommon::BufferManager& buffers)
{
int numDetections = 0;
// at least one correct detection
- bool correctDetection = false;
+ bool correctDetection = true;
for (int i = 0; i < keepCount[p]; ++i)
{
@@ -360,29 +375,27 @@ bool SampleUffSSD::verifyOutput(const samplesCommon::BufferManager& buffers)
continue;
}
+ std::cout << det[2] << std::endl;
// Output format for each detection is stored in the below order
// [image_id, label, confidence, xmin, ymin, xmax, ymax]
int detection = det[1];
assert(detection < outputClsSize);
- std::string storeName = classes[detection] + "-" + std::to_string(det[2]) + ".ppm";
+ std::string storeName = "class" + std::to_string(detection) + "-" + std::to_string(det[2]) + ".jpg";
numDetections++;
- if ((p == 0 && classes[detection] == "dog")
- || (p == 1 && (classes[detection] == "truck" || classes[detection] == "car")))
- {
- correctDetection = true;
- }
- sample::gLogInfo << "Detected " << classes[detection].c_str() << " in the image " << int(det[0]) << " ("
- << mPPMs[p].fileName.c_str() << ")"
+ sample::gLogInfo << "Detected class" << std::to_string(detection) << " in the image " << int(det[0])
<< " with confidence " << det[2] * 100.f << " and coordinates (" << det[3] * inputW << ","
<< det[4] * inputH << ")"
<< ",(" << det[5] * inputW << "," << det[6] * inputH << ")." << std::endl;
sample::gLogInfo << "Result stored in " << storeName.c_str() << "." << std::endl;
- samplesCommon::writePPMFileWithBBox(
- storeName, mPPMs[p], {det[3] * inputW, det[4] * inputH, det[5] * inputW, det[6] * inputH});
+ cv::Mat out;
+ image.copyTo(out);
+ cv::rectangle(out, cv::Rect(det[3]*inputW, det[4]*inputH, det[5]*inputW-det[3]*inputW, det[6]*inputH-det[4]*inputH),
+ cv::Scalar(rand() % 256, rand() % 256, rand() % 256), 2);
+ cv::imwrite(storeName, out);
}
pass &= correctDetection;
pass &= numDetections >= 1;
@@ -413,7 +426,7 @@ SampleUffSSDParams initializeSampleParams(const samplesCommon::Args& args)
params.uffFileName = "sample_ssd_mobilenet_v2.uff";
params.labelsFileName = "ssd_coco_labels.txt";
params.inputTensorNames.push_back("Input");
- params.batchSize = 2;
+ params.batchSize = 1;
params.outputTensorNames.push_back("NMS");
params.outputTensorNames.push_back("NMS_1");
params.dlaCore = args.useDLACore;
4. Testing
$ cd /usr/src/tensorrt/samples/
$ make
$ cd /usr/src/tensorrt/bin/
$ ./sample_uff_ssd
We can get a result similar to the one generated by the TRT_object_detection Python sample.
0.999016
[08/17/2020-17:10:06] [I] Detected class18 in the image 0 with confidence 99.9016 and coordinates (54.1487,95.957),(83.7936,205.902).
[08/17/2020-17:10:06] [I] Result stored in class18-0.999016.jpg.
0.996855
[08/17/2020-17:10:06] [I] Detected class3 in the image 0 with confidence 99.6855 and coordinates (155.734,87.6828),(187.611,189.075).
[08/17/2020-17:10:06] [I] Result stored in class3-0.996855.jpg.
0.969117
[08/17/2020-17:10:06] [I] Detected class10 in the image 0 with confidence 96.9117 and coordinates (204.955,85.7581),(228.593,189.347).
[08/17/2020-17:10:06] [I] Result stored in class10-0.969117.jpg.
0.968491
[08/17/2020-17:10:06] [I] Detected class22 in the image 0 with confidence 96.8491 and coordinates (124.306,89.2235),(158.748,194.497).
[08/17/2020-17:10:06] [I] Result stored in class22-0.968491.jpg.
0.960414
[08/17/2020-17:10:06] [I] Detected class37 in the image 0 with confidence 96.0414 and coordinates (81.1726,96.9146),(105.868,207.71).
[08/17/2020-17:10:06] [I] Result stored in class37-0.960414.jpg.
0.901776
[08/17/2020-17:10:06] [I] Detected class2 in the image 0 with confidence 90.1776 and coordinates (184.289,84.7503),(209.169,188.614).
[08/17/2020-17:10:06] [I] Result stored in class2-0.901776.jpg.
0.901547
[08/17/2020-17:10:06] [I] Detected class15 in the image 0 with confidence 90.1547 and coordinates (105.564,95.422),(133.682,208.951).
[08/17/2020-17:10:06] [I] Result stored in class15-0.901547.jpg.
0.895801
[08/17/2020-17:10:06] [I] Detected class38 in the image 0 with confidence 89.5801 and coordinates (23.4149,77.7281),(56.8211,231.343).
[08/17/2020-17:10:06] [I] Result stored in class38-0.895801.jpg.
0.715208
[08/17/2020-17:10:06] [I] Detected class7 in the image 0 with confidence 71.5208 and coordinates (227.963,78.5744),(252.259,184.704).
[08/17/2020-17:10:06] [I] Result stored in class7-0.715208.jpg.
&&&& PASSED TensorRT.sample_uff_ssd # ./sample_uff_ssd
Here are some output results for your reference:
Thanks.