Some questions about plugins in DeepStream

I tested this and found that when writing plugins in DeepStream, there is no need to write yoloPlugins.cpp for registration or yoloForward_nc.cu. I can directly compile nvdsparsebbox_yolo.cpp or nvdsparsebbox_yolo.cu and it runs correctly, yielding the same results. Why is this the case?

This is the nvdsparsebbox_yolo.cu code:

#include <algorithm>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>

#include "nvdsinfer_custom_impl.h"

extern "C" bool
NvDsInferParseYolo(std::vector<NvDsInferLayerInfo> const& outputLayersInfo, NvDsInferNetworkInfo const& networkInfo,
    NvDsInferParseDetectionParams const& detectionParams, std::vector<NvDsInferParseObjectInfo>& objectList);

extern "C" bool
NvDsInferParseYoloE(std::vector<NvDsInferLayerInfo> const& outputLayersInfo, NvDsInferNetworkInfo const& networkInfo,
    NvDsInferParseDetectionParams const& detectionParams, std::vector<NvDsInferParseObjectInfo>& objectList);

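// One thread per candidate detection: converts a center-format box (cx, cy, w, h) to a
// top-left/width/height rectangle clamped to the network input size, and stores it in
// binfo together with the detection score and class id.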
__global__ void decodeTensorYolo(NvDsInferParseObjectInfo *binfo, float* boxes, float* scores, float* classes,
    int outputSize, int netW, int netH, float minPreclusterThreshold)
{
  int x_id = blockIdx.x * blockDim.x + threadIdx.x;

  if (x_id >= outputSize) {
    return;
  }

  float maxProb = scores[x_id];
  int maxIndex = (int) classes[x_id];

  if (maxProb < minPreclusterThreshold) {
    binfo[x_id].detectionConfidence = 0.0;
    return;
  }

  float bxc = boxes[x_id * 4 + 0];
  float byc = boxes[x_id * 4 + 1];
  float bw = boxes[x_id * 4 + 2];
  float bh = boxes[x_id * 4 + 3];

  float x0 = bxc - bw / 2;
  float y0 = byc - bh / 2;
  float x1 = x0 + bw;
  float y1 = y0 + bh;

  x0 = fminf(float(netW), fmaxf(float(0.0), x0));
  y0 = fminf(float(netH), fmaxf(float(0.0), y0));
  x1 = fminf(float(netW), fmaxf(float(0.0), x1));
  y1 = fminf(float(netH), fmaxf(float(0.0), y1));

  binfo[x_id].left = x0;
  binfo[x_id].top = y0;
  binfo[x_id].width = fminf(float(netW), fmaxf(float(0.0), x1 - x0));
  binfo[x_id].height = fminf(float(netH), fmaxf(float(0.0), y1 - y0));
  binfo[x_id].detectionConfidence = maxProb;
  binfo[x_id].classId = maxIndex;
}

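// Same as decodeTensorYolo, except the boxes are already in corner format (x0, y0, x1, y1),
// so no center-to-corner conversion is needed.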
__global__ void decodeTensorYoloE(NvDsInferParseObjectInfo *binfo, float* boxes, float* scores, float* classes,
    int outputSize, int netW, int netH, float minPreclusterThreshold)
{
  int x_id = blockIdx.x * blockDim.x + threadIdx.x;

  if (x_id >= outputSize) {
    return;
  }

  float maxProb = scores[x_id];
  int maxIndex = (int) classes[x_id];

  if (maxProb < minPreclusterThreshold) {
    binfo[x_id].detectionConfidence = 0.0;
    return;
  }

  float x0 = boxes[x_id * 4 + 0];
  float y0 = boxes[x_id * 4 + 1];
  float x1 = boxes[x_id * 4 + 2];
  float y1 = boxes[x_id * 4 + 3];

  x0 = fminf(float(netW), fmaxf(float(0.0), x0));
  y0 = fminf(float(netH), fmaxf(float(0.0), y0));
  x1 = fminf(float(netW), fmaxf(float(0.0), x1));
  y1 = fminf(float(netH), fmaxf(float(0.0), y1));

  binfo[x_id].left = x0;
  binfo[x_id].top = y0;
  binfo[x_id].width = fminf(float(netW), fmaxf(float(0.0), x1 - x0));
  binfo[x_id].height = fminf(float(netH), fmaxf(float(0.0), y1 - y0));
  binfo[x_id].detectionConfidence = maxProb;
  binfo[x_id].classId = maxIndex;
}

static bool NvDsInferParseCustomYolo(std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
    NvDsInferNetworkInfo const& networkInfo, NvDsInferParseDetectionParams const& detectionParams,
    std::vector<NvDsInferParseObjectInfo>& objectList)
{
  if (outputLayersInfo.size() != 3) {
    std::cerr << "ERROR: Expected 3 output layers (boxes, scores, classes) in bbox parsing" << std::endl;
    return false;
  }

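  // The output layer order is assumed to be [0] boxes, [1] scores, [2] classes, matching the exported model.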
  const NvDsInferLayerInfo& boxes = outputLayersInfo[0];
  const NvDsInferLayerInfo& scores = outputLayersInfo[1];
  const NvDsInferLayerInfo& classes = outputLayersInfo[2];

  const int outputSize = boxes.inferDims.d[0];

  thrust::device_vector<NvDsInferParseObjectInfo> objects(outputSize);

  float minPreclusterThreshold = *(std::min_element(detectionParams.perClassPreclusterThreshold.begin(),
        detectionParams.perClassPreclusterThreshold.end()));

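  // Launch enough blocks for one thread per candidate detection.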
  int threads_per_block = 1024;
  int number_of_blocks = ((outputSize - 1) / threads_per_block) + 1;

  decodeTensorYolo<<<number_of_blocks, threads_per_block>>>(
      thrust::raw_pointer_cast(objects.data()), (float*) (boxes.buffer), (float*) (scores.buffer),
      (float*) (classes.buffer), outputSize, networkInfo.width, networkInfo.height, minPreclusterThreshold);

  objectList.resize(outputSize);
  thrust::copy(objects.begin(), objects.end(), objectList.begin());

  return true;
}

static bool NvDsInferParseCustomYoloE(std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
    NvDsInferNetworkInfo const& networkInfo, NvDsInferParseDetectionParams const& detectionParams,
    std::vector<NvDsInferParseObjectInfo>& objectList)
{
  if (outputLayersInfo.size() != 3) {
    std::cerr << "ERROR: Expected 3 output layers (boxes, scores, classes) in bbox parsing" << std::endl;
    return false;
  }

  const NvDsInferLayerInfo& boxes = outputLayersInfo[0];
  const NvDsInferLayerInfo& scores = outputLayersInfo[1];
  const NvDsInferLayerInfo& classes = outputLayersInfo[2];

  const int outputSize = boxes.inferDims.d[0];

  thrust::device_vector<NvDsInferParseObjectInfo> objects(outputSize);

  float minPreclusterThreshold = *(std::min_element(detectionParams.perClassPreclusterThreshold.begin(),
        detectionParams.perClassPreclusterThreshold.end()));

  int threads_per_block = 1024;
  int number_of_blocks = ((outputSize - 1) / threads_per_block) + 1;

  decodeTensorYoloE<<<number_of_blocks, threads_per_block>>>(
      thrust::raw_pointer_cast(objects.data()), (float*) (boxes.buffer), (float*) (scores.buffer),
      (float*) (classes.buffer), outputSize, networkInfo.width, networkInfo.height, minPreclusterThreshold);

  objectList.resize(outputSize);
  thrust::copy(objects.begin(), objects.end(), objectList.begin());

  return true;
}

extern "C" bool
NvDsInferParseYolo(std::vector<NvDsInferLayerInfo> const& outputLayersInfo, NvDsInferNetworkInfo const& networkInfo,
    NvDsInferParseDetectionParams const& detectionParams, std::vector<NvDsInferParseObjectInfo>& objectList)
{
  return NvDsInferParseCustomYolo(outputLayersInfo, networkInfo, detectionParams, objectList);
}

extern "C" bool
NvDsInferParseYoloE(std::vector<NvDsInferLayerInfo> const& outputLayersInfo, NvDsInferNetworkInfo const& networkInfo,
    NvDsInferParseDetectionParams const& detectionParams, std::vector<NvDsInferParseObjectInfo>& objectList)
{
  return NvDsInferParseCustomYoloE(outputLayersInfo, networkInfo, detectionParams, objectList);
}

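// These macros only check at compile time that the parser functions match the
// NvDsInferParseCustomFunc prototype. No runtime registration is needed: nvinfer
// dlopen()s the library given by custom-lib-path and looks the functions up by the
// name set in parse-bbox-func-name in the pgie config.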
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseYolo);
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseYoloE);

Which sample are you testing or referring to? Is it because the code in yoloPlugins.cpp or yoloForward_nc.cu is not used?

I used this plugin in DeepStream sample test1.
This is the pgie config:

If you are using a weights/cfg model, you need to set engine-create-func-name, and then yoloPlugins.cpp will be called. Please refer to this cfg.
If you are using an ONNX model, you don't need to set engine-create-func-name because nvinfer can parse the ONNX model itself, so the yoloPlugins.cpp code is not needed. nvdsparsebbox_yolo.cpp is needed for postprocessing.
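For example, the relevant pgie config entries look roughly like this (the model file names and paths are placeholders; the engine creation function name is the one exported by the DeepStream-Yolo sample lib):

# weights/cfg model: the custom lib also builds the TRT engine
custom-network-config=yolo.cfg
model-file=yolo.weights
engine-create-func-name=NvDsInferYoloCudaEngineGet
parse-bbox-func-name=NvDsInferParseYolo
custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

# ONNX model: nvinfer builds the engine by itself, the custom lib is only used for bbox parsing
onnx-file=yolo.onnx
parse-bbox-func-name=NvDsInferParseYolo
custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so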


I deleted the onnx-file path and the program can still run. Also, I cannot find the cfg.

The first time, the app creates a TRT engine from the ONNX model. After that, the app loads the existing engine instead of creating a new one. So even if you did not set the onnx-file path, the app can still run by loading the engine.
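For example, once the engine has been generated, a config like this loads it directly and no longer needs the ONNX file (the engine file name below is just the typical default, yours may differ):

model-engine-file=model_b1_gpu0_fp32.engine
parse-bbox-func-name=NvDsInferParseYolo
custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so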

I understand what you are saying, but I still do not understand why there is no need to write yoloPlugins.cpp for registration or yoloForward_nc.cu for feature map calculation, even when I only use the engine model.

Why do you think yoloPlugins.cpp and yoloForward_nc.cu are needed when model-engine-file is set? These two files are only used by the custom engine creation function. If there is already an engine file, why would they still be needed?

As you said, nvdsparsebbox_yolo.cpp is needed for postprocessing.
yoloForward_nc.cu is needed for YOLO feature map calculation. If the program does not have yoloForward_nc.cu, how is the feature map handled? Does DeepStream already provide this?

yoloForward_nc.cu is used to generate the TRT engine, not for inference. You can add a log to check. DeepStream nvinfer/nvinferserver will do the inference.
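For example, a debug print at the top of the kernel in yoloForward_nc.cu makes it easy to see when that code actually executes. The kernel name and parameter list below are placeholders, not the real ones; the only change to make is the added printf:

// Hypothetical sketch: the real kernel in yoloForward_nc.cu has a different name
// and parameter list; only the printf line is the suggested change.
#include <cstdio>

__global__ void yoloForwardKernel(/* original parameters unchanged */)
{
  if (blockIdx.x == 0 && threadIdx.x == 0)
    printf("====== feature map process======\n");  // printed once per kernel launch
  /* original feature map decoding unchanged */
}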

I tested this.


As you can see, when generating the TRT engine, there is no printf("====== feature map process======") output at all.

When the DeepStream pipeline is running, you can see the printf("====== feature map process======") output and the FPS below.

In the log, the app was trying to recreate the engine because loading the engine failed. network-mode needs to be compatible with the engine name. Please set network-mode=0, which means FP32.
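For reference, nvinfer's network-mode mapping is shown below; the value has to match the precision suffix in the engine file name:

# network-mode: 0 = FP32, 1 = INT8, 2 = FP16
# e.g. an engine named ..._fp32.engine needs network-mode=0
network-mode=0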

There has been no update from you for a while, so we are assuming this is no longer an issue and are closing this topic. If you need further support, please open a new one. Thanks.