→ Hardware Platform (Jetson / GPU): DeepStream 6.3 Docker container on an NVIDIA GeForce MX230
→ DeepStream Version: 6.3
→ JetPack Version (valid for Jetson only): Nil
→ TensorRT Version: 8.5.3.1
→ NVIDIA GPU Driver Version (valid for GPU only): 535.183.01
→ Issue Type (questions, new requirements, bugs): Question
I have added a new input called SOURCE_ID to the model's input layers through the extraInputProcess function in the custom nvinferserver C++ implementation file nvdsinferserver_custom_process_ensemble.cpp.
Below is the extraInputProcess implementation, with the source ID as one of the extra inputs:
NvDsInferStatus extraInputProcess(
    const std::vector<dsis::IBatchBuffer*>&
        primaryInputs,  // primary tensor (image) has been processed
    std::vector<dsis::IBatchBuffer*>& extraInputs, const dsis::IOptions* options) override
{
    INFER_ASSERT(primaryInputs.size() > 0);
    INFER_ASSERT(extraInputs.size() >= 2);  // STREAM_ID and SOURCE_ID
    // primary input tensor: INPUT [batch, channel, height, width]
    const dsis::InferBufferDescription& input0Desc = primaryInputs[0]->getBufDesc();
    // extra input tensors: STREAM_ID [batch, 1] and SOURCE_ID [batch, 1]
    const dsis::InferBufferDescription& streamIdDesc = extraInputs[0]->getBufDesc();
    const dsis::InferBufferDescription& sourceIdDesc = extraInputs[1]->getBufDesc();
    INFER_ASSERT(streamIdDesc.dataType == dsis::InferDataType::kInt32);
    INFER_ASSERT(sourceIdDesc.dataType == dsis::InferDataType::kInt32);
    INFER_ASSERT(streamIdDesc.elementSize == sizeof(int32_t));
    INFER_ASSERT(sourceIdDesc.elementSize == sizeof(int32_t));
    INFER_ASSERT(!strOfBufDesc(input0Desc).empty());
    LOG_DEBUG("extraInputProcess: primary input %s", strOfBufDesc(input0Desc).c_str());
    LOG_DEBUG("extraInputProcess: extra input stream ID %s", strOfBufDesc(streamIdDesc).c_str());
    LOG_DEBUG("extraInputProcess: extra input source ID %s", strOfBufDesc(sourceIdDesc).c_str());
    // the batch size must be taken from the primary input tensor;
    // both extra input tensors carry the same batch dimension
    int batchSize = input0Desc.dims.d[0];
    INFER_ASSERT(streamIdDesc.dims.numDims == 2 && streamIdDesc.dims.d[0] == batchSize);
    INFER_ASSERT(sourceIdDesc.dims.numDims == 2 && sourceIdDesc.dims.d[0] == batchSize);
    INFER_ASSERT(batchSize >= 1);
    if (!options) {
        LOG_ERROR("custom process does not receive IOptions");
        return NVDSINFER_CUSTOM_LIB_FAILED;
    }
    NvDsBatchMeta* batchMeta = nullptr;
    std::vector<NvDsFrameMeta*> frameMetaList;
    NvBufSurface* bufSurf = nullptr;
    std::vector<NvBufSurfaceParams*> surfParamsList;
    std::vector<uint64_t> streamIds;
    int64_t unique_id = 0;
    INFER_ASSERT(options->getValueArray(OPTION_NVDS_SREAM_IDS, streamIds) == NVDSINFER_SUCCESS);
    INFER_ASSERT(streamIds.size() == (uint32_t)batchSize);
    // get NvBufSurface
    if (options->hasValue(OPTION_NVDS_BUF_SURFACE)) {
        INFER_ASSERT(options->getObj(OPTION_NVDS_BUF_SURFACE, bufSurf) == NVDSINFER_SUCCESS);
    }
    INFER_ASSERT(bufSurf);
    // get NvDsBatchMeta
    if (options->hasValue(OPTION_NVDS_BATCH_META)) {
        INFER_ASSERT(options->getObj(OPTION_NVDS_BATCH_META, batchMeta) == NVDSINFER_SUCCESS);
    }
    INFER_ASSERT(batchMeta);
    // get all frame meta into vector<NvDsFrameMeta*>
    if (options->hasValue(OPTION_NVDS_FRAME_META_LIST)) {
        INFER_ASSERT(
            options->getValueArray(OPTION_NVDS_FRAME_META_LIST, frameMetaList) ==
            NVDSINFER_SUCCESS);
    }
    // get unique_id
    if (options->hasValue(OPTION_NVDS_UNIQUE_ID)) {
        INFER_ASSERT(options->getInt(OPTION_NVDS_UNIQUE_ID, unique_id) == NVDSINFER_SUCCESS);
    }
    // get all surface params into vector<NvBufSurfaceParams*>
    if (options->hasValue(OPTION_NVDS_BUF_SURFACE_PARAMS_LIST)) {
        INFER_ASSERT(
            options->getValueArray(OPTION_NVDS_BUF_SURFACE_PARAMS_LIST, surfParamsList) ==
            NVDSINFER_SUCCESS);
    }
    // fill extra input tensor "STREAM_ID" [-1, 1]
    int32_t* streamIdTensor = (int32_t*)extraInputs[0]->getBufPtr(0);
    for (int iBatch = 0; iBatch < batchSize; ++iBatch) {
        streamIdTensor[iBatch] = streamIds[iBatch];
    }
    // note: nothing is written into the second extra input "SOURCE_ID" (extraInputs[1]) here
    return NVDSINFER_SUCCESS;
}
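I assume the missing piece is an analogous fill for extraInputs[1]. A minimal sketch of what I would expect to add, assuming frameMetaList was populated via OPTION_NVDS_FRAME_META_LIST above and that NvDsFrameMeta::source_id carries the per-frame value:
    // hypothetical sketch: fill extra input tensor "SOURCE_ID" [-1, 1]
    // from the per-frame metadata gathered above
    int32_t* sourceIdTensor = (int32_t*)extraInputs[1]->getBufPtr(0);
    for (int iBatch = 0; iBatch < batchSize; ++iBatch) {
        // fall back to 0 when no frame meta exists for this batch slot
        sourceIdTensor[iBatch] = (iBatch < (int)frameMetaList.size())
            ? (int32_t)frameMetaList[iBatch]->source_id
            : 0;
    }
Even with such a fill, a single videotestsrc feeding sink_0 yields source_id = 0 for every frame, which is why I also want to know how to supply custom values.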
To check what the model actually receives, I print the SOURCE_ID tensor in the Triton Python backend file model.py with these statements:
source_id_tensor = pb_utils.get_input_tensor_by_name(request, "SOURCE_ID")
print(source_id_tensor.as_numpy())
This prints the source ID as [[0]] for the following single-source pipeline:
gst-launch-1.0 videotestsrc ! nvvideoconvert ! "video/x-raw(memory:NVMM), width=1920, height=1080" ! m.sink_0 nvstreammux name=m width=1920 height=1080 batch_size=3 ! nvinferserver config-file-path=dstest1_pgie_inferserver_config.txt ! nvstreamdemux name=d d.src_0 ! nvvideoconvert ! autovideosink sync=0
I am referring to the sample ‘deepstream-rtsp-in-rtsp-out’.
Below are the contents of dstest1_pgie_inferserver_config.txt:
infer_config {
  unique_id: 1
  gpu_ids: 0
  max_batch_size: 1
  backend {
    inputs [
      {
        name: "INPUT0"
        dims: [3, 1080, 1920]
      },
      {
        name: "SOURCE_ID"
        dims: [1]
      }
    ]
    triton {
      model_name: "centerface"
      version: -1
      model_repo {
        root: "./centerface"
        log_level: 1
        tf_gpu_memory_fraction: 0.2
        tf_disable_soft_placement: 0
      }
    }
  }
  preprocess {
    network_format: IMAGE_FORMAT_RGB
    tensor_order: TENSOR_ORDER_LINEAR
    maintain_aspect_ratio: 0
    normalize {
      scale_factor: 1.0
      channel_offsets: [0, 0, 0]
    }
  }
  postprocess {
    labelfile_path: "./centerface/centerface/centerface_labels.txt"
    other {}
  }
  custom_lib {
    path: "./centerface/libnvdstriton_custom_impl_ensemble.so"
  }
  extra {
    custom_process_funcion: "CreateInferServerCustomProcess"
  }
}
input_control {
  process_mode: PROCESS_MODE_FULL_FRAME
  interval: 0
}
output_control {
  output_tensor_meta: true
}
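For reference, the backend.inputs section above mirrors the input declarations on the Triton side. A rough sketch of the corresponding entries in the model's config.pbtxt (illustrative only; the data types are my assumption, and the output declarations are omitted):
name: "centerface"
backend: "python"
max_batch_size: 1
input [
  {
    name: "INPUT0"
    data_type: TYPE_FP32   # assumed image input type
    dims: [3, 1080, 1920]
  },
  {
    name: "SOURCE_ID"
    data_type: TYPE_INT32  # matches the kInt32 assert in extraInputProcess
    dims: [1]
  }
]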
How can I make the new SOURCE_ID input hold custom per-frame values instead of just the value 0?