Please provide complete information as applicable to your setup.
Hardware Platform (jetson xavier nx )
DeepStream Version 6.0.1
JetPack Version (valid for Jetson only) 4.6.2
TensorRT Version 8.2
NVIDIA GPU Driver Version (valid for GPU only)
The pipeline (flow sequence) I use is shown in the attachment.
I use Gst-nvinfer to run my model (a super-resolution model) and get the inference result (NvDsInferLayerInfo).
I use Gst-nvdsvideotemplate to receive the data (NvDsInferLayerInfo).
I want to use the NvDsInferLayerInfo data to update the contents of the GstBuffer and push it to the downstream plugins.
How do I write the NvDsInferLayerInfo data into the output GstBuffer (outBuffer)?
The code is as follows:
/* Output Processing Thread */
/*
 * Dequeues PacketInfo entries from m_processQ, reads the tensor metadata
 * (NVDSINFER_TENSOR_OUTPUT_META) that Gst-nvinfer attached upstream, and
 * pushes the input buffer downstream unchanged. Loops until m_stop is set
 * while the queue is empty.
 *
 * NOTE(review): the function's closing brace is not part of this snippet
 * (line 103 closes the while-loop); the paste appears truncated.
 */
void EnhancerAlgorithm::OutputThread(void)
{
GstFlowReturn flow_ret;
GstBuffer *outBuffer = NULL;
NvBufSurface *outSurf = NULL;
int num_in_meta = 0;           // NOTE(review): only used by a commented-out printf below
int video_out_width = 0;       // model output width, taken from inferDims.d[2]
int video_out_height = 0;      // model output height, taken from inferDims.d[1]
NvDsBatchMeta *batch_meta = NULL;
NvDsInferLayerInfo *outInfo = NULL;   // last tensor output layer seen in the batch
// NOTE(review): 'err' is never used in this snippet — dead variable.
NvBufSurfTransform_Error err = NvBufSurfTransformError_Success;
// NOTE(review): the forum markup ate the template brackets here — this line
// should read: std::unique_lock<std::mutex> lk(m_processLock);
std::unique_lockstd::mutex lk(m_processLock);
/* Run till signalled to stop. */
while (1) {
/* Wait if processing queue is empty. */
if (m_processQ.empty()) {
if (m_stop == TRUE) {
break;
}
// Sleep until ProcessBuffer() enqueues work (or stop is signalled).
m_processCV.wait(lk);
continue;
}
// Copy-out then pop: packetInfo stays valid after the queue entry is gone.
PacketInfo packetInfo = m_processQ.front();
m_processQ.pop();
m_processCV.notify_all();
// Drop the lock while doing per-buffer work; re-acquired at the bottom.
lk.unlock();
// Add custom algorithm logic here
// Once buffer processing is done, push the buffer to the downstream by
// using gst_pad_push function
NvBufSurface *in_surf = getNvBufSurface (packetInfo.inbuf);
batch_meta = gst_buffer_get_nvds_batch_meta (packetInfo.inbuf);
if (!batch_meta) {
GST_ELEMENT_ERROR (m_element, STREAM, FAILED,
("%s:No batch meta available", __func__), (NULL));
// NOTE(review): returning here kills the output thread permanently and
// neither pushes nor unrefs packetInfo.inbuf (buffer leak). Prefer
// continuing (after re-locking) or a clean shutdown path.
return;
}
num_in_meta = batch_meta->num_frames_in_batch;
// printf("num_in_meta: %d \n",num_in_meta);
//First getting the bbox of faces and eyes
NvDsMetaList * l_frame = NULL;
// Walk every frame in the batch looking for nvinfer's raw tensor output
// (requires output-tensor-meta=1 on the nvinfer element upstream).
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
/* Iterate object metadata in frame */
for (NvDsMetaList * l_user = frame_meta->frame_user_meta_list; l_user != NULL;
l_user = l_user->next){
NvDsUserMeta *user_meta = (NvDsUserMeta *)l_user->data;
if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
continue;
NvDsInferTensorMeta *meta = (NvDsInferTensorMeta *) user_meta->user_meta_data;
//for (unsigned int i = 0; i < meta->num_output_layers; i++) {
NvDsInferLayerInfo *info = &meta->output_layers_info[0];
// NOTE(review): 'i' was the index of the now-commented-out loop above —
// this line cannot compile as written. Since only layer 0 is used, it
// should be: info->buffer = meta->out_buf_ptrs_host[0];
info->buffer = meta->out_buf_ptrs_host[i];
outInfo = info;
// Assumes a CHW-shaped output tensor: d[1]=height, d[2]=width — confirm
// against the model's actual output layout.
video_out_height = outInfo->inferDims.d[1];
video_out_width = outInfo->inferDims.d[2];
// printf("in_surf colorformat =%d\n", in_surf->surfaceList[frame_meta->batch_id].colorFormat);
//}
}
}
if(!outInfo || video_out_height <= 0 || video_out_width <= 0){
printf("The model inference result is error . \n");
// NOTE(review): same problem as above — returning here terminates the
// thread and leaks packetInfo.inbuf instead of skipping this buffer.
return;
}
std::cout<<"Shape "<<outInfo->inferDims.numElements<<std::endl;
printf("layer name: %s \n",outInfo->layerName);
printf("frame_width: %d \n",video_out_width);
printf("frame_height: %d \n",video_out_height);
printf("******************************* \n");
// Transform IP case
// NOTE(review): this is the crux of the question — the input buffer is
// pushed downstream as-is. The super-resolved pixels in outInfo->buffer are
// never copied into outSurf, so downstream still sees the original frame.
// The tensor data must be converted/copied into the NvBufSurface (e.g. via
// NvBufSurface mapping or NvBufSurfTransform) before the push below.
outSurf = in_surf;
outBuffer = packetInfo.inbuf;
// gint size = video_out_width * video_out_height * 3 / 2;
// outBuffer = gst_buffer_new_allocate(NULL, size, NULL);
// Output buffer parameters checking
if (outSurf->numFilled != 0)
{
// NOTE(review): these asserts compare the negotiated output caps against
// the INPUT surface — they will fire if the src caps were set to the
// super-resolved (larger) dimensions.
g_assert ((guint)m_outVideoInfo.width == outSurf->surfaceList->width);
g_assert ((guint)m_outVideoInfo.height == outSurf->surfaceList->height);
}
// Hand the buffer to the downstream element; ownership transfers on push.
flow_ret = gst_pad_push (GST_BASE_TRANSFORM_SRC_PAD (m_element), outBuffer);
printf("CustomLib: %s in_surf=%p, Pushing Frame %d to downstream..."
" flow_ret = %d TS=%" GST_TIME_FORMAT " \n", __func__, in_surf,
packetInfo.frame_num, flow_ret, GST_TIME_ARGS(GST_BUFFER_PTS(outBuffer)));
GST_DEBUG ("CustomLib: %s in_surf=%p, Pushing Frame %d to downstream..."
" flow_ret = %d TS=%" GST_TIME_FORMAT " \n", __func__, in_surf,
packetInfo.frame_num, flow_ret, GST_TIME_ARGS(GST_BUFFER_PTS(outBuffer)));
// Re-acquire the lock before the next queue inspection at the loop top.
lk.lock();
continue;
}