Sure! I’m happy to help.
My current pipeline is streammux → pgie → nvtracker → nvvideoconvert → capsfilter → nvdslogger → nvmultistreamtiler → nvosd → fakesink
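For reference, a rough gst-launch-1.0 equivalent of that pipeline looks like this (the file source, config file, tracker library path, and resolutions below are placeholders for illustration, so adjust them to your own setup):

gst-launch-1.0 nvstreammux name=m batch-size=1 width=1920 height=1080 ! \
  nvinfer config-file-path=pgie_config.txt ! \
  nvtracker ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ! \
  nvvideoconvert nvbuf-memory-type=3 ! 'video/x-raw(memory:NVMM),format=RGBA' ! \
  nvdslogger ! nvmultistreamtiler rows=1 columns=1 ! nvdsosd ! fakesink \
  uridecodebin uri=file:///path/to/input.mp4 ! m.sink_0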
The key point is that the surface object usually provides output images in NV12 format. By using nvvideoconvert together with a capsfilter, you can convert the images from NV12 to RGBA.
Code snippet showing how to convert NV12 to RGBA:
/* Use nvvideoconvert to convert from NV12 to RGBA, as required by nvosd and the probe below */
nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
/* 3 = NVBUF_MEM_CUDA_UNIFIED (dGPU), so the surface can later be mapped for CPU access */
g_object_set (G_OBJECT (nvvidconv), "nvbuf-memory-type", 3, NULL);
/* capsfilter that forces the converter output from NV12 to RGBA */
nvvidconv_cap = gst_element_factory_make ("capsfilter", "nvvidconv_cap");
/* Create the caps for the filter */
caps = gst_caps_from_string ("video/x-raw(memory:NVMM),format=RGBA");
g_object_set (G_OBJECT (nvvidconv_cap), "caps", caps, NULL);
gst_caps_unref (caps);
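And this is roughly how I add the two elements to the bin and attach the probe on the capsfilter's src pad so the probe sees RGBA buffers (a sketch: pipeline, nvtracker, and nvdslogger here are element variables from my own code, so treat them as placeholders):

gst_bin_add_many (GST_BIN (pipeline), nvvidconv, nvvidconv_cap, NULL);
/* Link in the same order as the pipeline: ... nvtracker -> nvvideoconvert -> capsfilter -> nvdslogger ... */
if (!gst_element_link_many (nvtracker, nvvidconv, nvvidconv_cap, nvdslogger, NULL)) {
  g_printerr ("Elements could not be linked.\n");
}
/* Attach the buffer probe (defined below) on the capsfilter src pad */
GstPad *probe_pad = gst_element_get_static_pad (nvvidconv_cap, "src");
gst_pad_add_probe (probe_pad, GST_PAD_PROBE_TYPE_BUFFER,
    capsfilter_src_pad_buffer_probe, NULL, NULL);
gst_object_unref (probe_pad);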
Example Probe
static GstPadProbeReturn
capsfilter_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer ctx)
{
  cv::Mat in_mat;
  cv::Mat mat_BGR;
  cv::Rect crop_rect;
  std::vector<uchar> buffer;
  NvOSD_RectParams crop_rect_params;
  GstBuffer *buf = (GstBuffer *) info->data;
  GstMapInfo inmap = GST_MAP_INFO_INIT;

  if (!gst_buffer_map (buf, &inmap, GST_MAP_READ)) {
    GST_ERROR ("input buffer mapinfo failed");
    return GST_PAD_PROBE_DROP;
  }
  /* Get the surface object from the input buffer */
  NvBufSurface *ip_surf = (NvBufSurface *) inmap.data;
  gst_buffer_unmap (buf, &inmap);

  cudaError_t cuda_err;
  cuda_err = cudaSetDevice (ip_surf->gpuId);
  cudaStream_t cuda_stream;
  cuda_err = cudaStreamCreate (&cuda_stream);
  if (cuda_err != cudaSuccess) {
    GST_ERROR ("failed to create CUDA stream");
    return GST_PAD_PROBE_OK;
  }

  /* Map the surface and sync it so the CPU (OpenCV) can read the RGBA pixels */
  if (NvBufSurfaceMap (ip_surf, -1, -1, NVBUF_MAP_READ) != 0) {
    GST_ERROR ("NvBufSurfaceMap failed");
    cudaStreamDestroy (cuda_stream);
    return GST_PAD_PROBE_OK;
  }
  NvBufSurfaceSyncForCpu (ip_surf, -1, -1);

  NvDsObjectMeta *obj_meta = NULL;
  guint vehicle_count = 0;
  guint person_count = 0;
  NvDsMetaList *l_frame = NULL;
  NvDsMetaList *l_obj = NULL;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    /* Get frame_meta information */
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
    guint num_rects = 0;
    for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
      obj_meta = (NvDsObjectMeta *) (l_obj->data);
      /* In my pipeline, images are only saved when a person is detected */
      if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
        static int cnt = 0;
        person_count++;
        crop_rect_params = obj_meta->rect_params;
        /* Rectangle for the cropped object */
        crop_rect = cv::Rect (crop_rect_params.left, crop_rect_params.top,
            crop_rect_params.width, crop_rect_params.height);
        /* Wrap this frame's mapped RGBA plane in a cv::Mat (no copy) */
        in_mat =
            cv::Mat (ip_surf->surfaceList[frame_meta->batch_id].planeParams.height[0],
            ip_surf->surfaceList[frame_meta->batch_id].planeParams.width[0], CV_8UC4,
            ip_surf->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
            ip_surf->surfaceList[frame_meta->batch_id].planeParams.pitch[0]);
        /* Convert from RGBA to BGR for OpenCV usage */
        cv::cvtColor (in_mat, mat_BGR, cv::COLOR_RGBA2BGR);
        /* Crop the object out of the frame */
        cv::Mat crop = mat_BGR (crop_rect).clone ();
        /* Encode the cropped image into a buffer */
        cv::imencode (".jpg", crop, buffer);
        /* Or encode the entire frame instead:
           cv::imencode (".jpg", mat_BGR, buffer); */
        /* Variables for gRPC */
        guint cam_id = frame_meta->source_id;
        gfloat conf = obj_meta->confidence;
        guint64 tracker_id = obj_meta->object_id;
        guint32 img_rows = crop.rows;
        guint32 img_cols = crop.cols;
        /* Save only the cropped object:
           cv::imwrite ("cam_" + std::to_string (frame_meta->source_id) + "_" +
               "idv_id_" + std::to_string (obj_meta->object_id) + "_" +
               "out_" + std::to_string (cnt) + ".jpeg", crop); */
        /* Save the full frame */
        cv::imwrite ("cam_" + std::to_string (frame_meta->source_id) + "_" +
            "out_" + std::to_string (cnt) + ".jpeg", mat_BGR);
        cnt++;
      }
    }
  }

  /* Release the surface mapping and the CUDA stream before leaving the probe */
  NvBufSurfaceUnMap (ip_surf, -1, -1);
  cudaStreamDestroy (cuda_stream);
  return GST_PAD_PROBE_OK;
}
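For completeness, the probe above roughly needs these headers (plus linking against GStreamer, the CUDA runtime, OpenCV, and the DeepStream libraries; exact include paths depend on your DeepStream and OpenCV versions). PGIE_CLASS_ID_PERSON is a #define in my application that matches the person class id of my detector, so set it to whatever your model uses.

#include <gst/gst.h>
#include <cuda_runtime_api.h>
#include <opencv2/opencv.hpp>
#include "gstnvdsmeta.h"
#include "nvbufsurface.h"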