@gaylord @hymanzhu1983 @y14uc339 @jiejing_ma
The current Yolo implementation in DeepStream (a CUDA kernel) targets the older Yolo models (v2, v3), so it may not suit newer models such as YoloV4. Location: /opt/nvidia/deepstream/deepstream-5.0/sources/objectDetector_Yolo/nvdsinfer_custom_impl_Yolo/kernels.cu
For YoloV4 we embed the Yolo layer into the TensorRT engine before deploying to DeepStream, so the Yolo CUDA kernel in DeepStream is no longer used.
We have not officially released a YoloV4 solution for DeepStream yet, but you can try the following steps:
- Go to https://github.com/Tianxiaomo/pytorch-YOLOv4 and generate a TensorRT engine following this workflow: DarkNet or PyTorch → ONNX → TensorRT.
- Add the C++ functions below to objectDetector_Yolo/nvdsinfer_custom_impl_Yolo/nvdsparsebbox_Yolo.cpp and rebuild libnvdsinfer_custom_impl_Yolo.so.
- Here are configuration files for reference (you will have to adjust them slightly for your environment); a sketch of the entries that usually need changing follows the list:
config_infer_primary_yoloV4.txt (3.4 KB)
deepstream_app_config_yoloV4.txt (3.8 KB)
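For orientation, here is a minimal sketch of the nvinfer settings in config_infer_primary_yoloV4.txt that wire the custom parser in; the attached files are authoritative, and the engine/label file names below are placeholders you must replace with your own:

[property]
gpu-id=0
# 1/255, standard input normalization for Yolo models
net-scale-factor=0.0039215697906911373
# Placeholders - point these at your generated engine and label file
model-engine-file=yolov4.engine
labelfile-path=labels.txt
batch-size=1
# 0=FP32, 1=INT8, 2=FP16
network-mode=2
num-detected-classes=80
gie-unique-id=1
# Custom parser added to nvdsparsebbox_Yolo.cpp (see code below)
parse-bbox-func-name=NvDsInferParseCustomYoloV4
custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

[class-attrs-all]
nms-iou-threshold=0.6
pre-cluster-threshold=0.4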
static NvDsInferParseObjectInfo convertBBoxYoloV4(const float& bx1, const float& by1, const float& bx2,
                                                  const float& by2, const uint& netW, const uint& netH)
{
    NvDsInferParseObjectInfo b;
    // Restore coordinates to network input resolution
    float x1 = bx1 * netW;
    float y1 = by1 * netH;
    float x2 = bx2 * netW;
    float y2 = by2 * netH;

    x1 = clamp(x1, 0, netW);
    y1 = clamp(y1, 0, netH);
    x2 = clamp(x2, 0, netW);
    y2 = clamp(y2, 0, netH);

    b.left = x1;
    b.width = clamp(x2 - x1, 0, netW);
    b.top = y1;
    b.height = clamp(y2 - y1, 0, netH);

    return b;
}
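convertBBoxYoloV4 uses the clamp helper that already exists in the stock nvdsparsebbox_Yolo.cpp. If your copy of the file does not have it, a minimal equivalent (my sketch, not part of the original sample) would be:

// Clamp val into the closed range [minVal, maxVal]; needs <algorithm> and <cassert>
static float clamp(const float val, const float minVal, const float maxVal)
{
    assert(minVal <= maxVal);
    return std::min(maxVal, std::max(minVal, val));
}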
static void addBBoxProposalYoloV4(const float bx1, const float by1, const float bx2, const float by2,
                                  const uint& netW, const uint& netH, const int maxIndex,
                                  const float maxProb, std::vector<NvDsInferParseObjectInfo>& binfo)
{
    // The inputs are normalized corner coordinates (x1, y1, x2, y2), not center/width/height
    NvDsInferParseObjectInfo bbi = convertBBoxYoloV4(bx1, by1, bx2, by2, netW, netH);
    if (bbi.width < 1 || bbi.height < 1) return;

    bbi.detectionConfidence = maxProb;
    bbi.classId = maxIndex;
    binfo.push_back(bbi);
}
static std::vector<NvDsInferParseObjectInfo>
decodeYoloV4Tensor(
    const float* boxes, const float* scores,
    const uint num_bboxes, NvDsInferParseDetectionParams const& detectionParams,
    const uint& netW, const uint& netH)
{
    std::vector<NvDsInferParseObjectInfo> binfo;

    uint bbox_location = 0;
    uint score_location = 0;
    for (uint b = 0; b < num_bboxes; ++b)
    {
        // Normalized corner coordinates [x1, y1, x2, y2] of the b-th box
        float bx1 = boxes[bbox_location];
        float by1 = boxes[bbox_location + 1];
        float bx2 = boxes[bbox_location + 2];
        float by2 = boxes[bbox_location + 3];

        // Pick the class with the highest score for this box
        float maxProb = 0.0f;
        int maxIndex = -1;

        for (uint c = 0; c < detectionParams.numClassesConfigured; ++c)
        {
            float prob = scores[score_location + c];
            if (prob > maxProb)
            {
                maxProb = prob;
                maxIndex = c;
            }
        }

        if (maxIndex >= 0 && maxProb > detectionParams.perClassPreclusterThreshold[maxIndex])
        {
            addBBoxProposalYoloV4(bx1, by1, bx2, by2, netW, netH, maxIndex, maxProb, binfo);
        }

        bbox_location += 4;
        score_location += detectionParams.numClassesConfigured;
    }

    return binfo;
}
extern "C" bool NvDsInferParseCustomYoloV4(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList)
{
if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured)
{
std::cerr << "WARNING: Num classes mismatch. Configured:"
<< detectionParams.numClassesConfigured
<< ", detected by network: " << NUM_CLASSES_YOLO << std::endl;
}
std::vector<NvDsInferParseObjectInfo> objects;
const NvDsInferLayerInfo &boxes = outputLayersInfo[0]; // num_boxes x 4
const NvDsInferLayerInfo &scores = outputLayersInfo[1]; // num_boxes x num_classes
// 3 dimensional: [num_boxes, 1, 4]
assert(boxes.inferDims.numDims == 3);
// 2 dimensional: [num_boxes, num_classes]
assert(scores.inferDims.numDims == 2);
// The second dimension should be num_classes
assert(detectionParams.numClassesConfigured == scores.inferDims.d[1]);
uint num_bboxes = boxes.inferDims.d[0];
// std::cout << "Network Info: " << networkInfo.height << " " << networkInfo.width << std::endl;
std::vector<NvDsInferParseObjectInfo> outObjs =
decodeYoloV4Tensor(
(const float*)(boxes.buffer), (const float*)(scores.buffer), num_bboxes, detectionParams,
networkInfo.width, networkInfo.height);
objects.insert(objects.end(), outObjs.begin(), outObjs.end());
objectList = objects;
return true;
}
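DeepStream binds this parser through the parse-bbox-func-name=NvDsInferParseCustomYoloV4 and custom-lib-path entries in config_infer_primary_yoloV4.txt. Optionally, mirroring what the existing YoloV2/V3 parsers in the same file do, you can add a compile-time prototype check at the end of nvdsparsebbox_Yolo.cpp (this assumes the CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE macro from nvdsinfer_custom_impl.h, which the stock file already uses for the other parsers):

/* Check that the custom parse function has been defined correctly */
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV4);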