I’m trying to overlay some information on GstBuffer, my pipeline is like following:
gst-launch-1.0 uridecodebin uri=rtsp://admin:123456@192.168.0.1:554 ! nvstreammux ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=(string)NV12' ! nvv4l2h264dec ! h264parse ! flvmux ! queue ! rtmpsink location=rtmp://my_rtmp_server_uri
I added a probe on nvv4l2h264dec’s sink pad and extract its buffer, then reference to the implementation of dsexample, I extracted the NvBufSurface through NvBufSurfaceMap()
but it failed and printed following log:
nvbufsurface: mapping of memory type (0) not supported
My develop environments and more details are showed:
• Hardware Platform (Jetson / GPU): Tesla 4 GPU(Ubuntu18.04 LTS)
• DeepStream Version: DeepStream-6.0
• TensorRT Version: TensorRT 8.0.1
• NVIDIA GPU Driver Version (valid for GPU only): 470.74
• Issue Type( questions, new requirements, bugs): bugs
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
The key codes to reproduce the issue:
NvBufSurface *surface = NULL;
NvDsMetaList *l_frame = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buffer);
// VideoPipeline* vp = (VideoPipeline*) user_data;
GstMapInfo info;
if (!gst_buffer_map(buffer, &info,
(GstMapFlags) (GST_MAP_READ | GST_MAP_WRITE))) {
TS_WARN_MSG_V ("WHY? WHAT PROBLEM ABOUT SYNC?");
gst_buffer_unmap(buffer, &info);
return;
}
surface = (NvBufSurface *) info.data;
TS_INFO_MSG_V ("surface type: %d", surface->memType);
uint32_t frame_width, frame_height;
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
frame_width = frame_meta->source_frame_width;
frame_height = frame_meta->source_frame_height;
TS_INFO_MSG_V ("input frame width: %d, height: %d", frame_width, frame_height);
if (NvBufSurfaceMap (surface, 0, 0, NVBUF_MAP_READ_WRITE)) {
TS_ERR_MSG_V ("NVMM map failed.");
return ;
}
/* only valid for Jetson platform according to the DeepStream SDK API reference
if(surface->memType == NVBUF_MEM_CUDA_UNIFIED || surface->memType == NVBUF_MEM_CUDA_DEVICE ) {
NvBufSurfaceSyncForCpu (surface, 0, 0);
}
*/
YUVImgInfo m_YUVImgInfo;
m_YUVImgInfo.imgdata = reinterpret_cast<uint8_t*>
(surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0]);
// m_YUVImgInfo.imgdata = info.data;
m_YUVImgInfo.width = frame_width;
m_YUVImgInfo.height = frame_height;
m_YUVImgInfo.yuvType = TYPE_YUV420SP_NV12;
std::vector<TsOsdObject> oos = jobject->GetOsdObject();
for (size_t i = 0; i < oos.size(); i++) {
if (oos[i].x_>=0 && oos[i].w_>0 && (oos[i].x_+oos[i].w_)<frame_width &&
oos[i].y_>=0 && oos[i].h_>0 && (oos[i].y_+oos[i].h_)<frame_height) {
unsigned char R = oos[i].r_, G = oos[i].g_, B = oos[i].b_;
unsigned char Y = 0.257*R + 0.504*G + 0.098*B + 16;
unsigned char U =-0.148*R - 0.291*G + 0.439*B + 128;
unsigned char V = 0.439*R - 0.368*G - 0.071*B + 128;
YUVPixColor m_Color = {Y, U, V};
YUVRectangle m_Rect;
m_Rect.x = oos[i].x_;
m_Rect.y = oos[i].y_;
m_Rect.width = oos[i].w_;
m_Rect.height = oos[i].h_;
drawRectangle(&m_YUVImgInfo, m_Rect, m_Color, 12);
}
}
}
NvBufSurfaceUnMap (surface, 0, 0);
gst_buffer_unmap(buffer, &info);