Below is the complete code of my program; `save_frame_with_bbox` is the new code I added.
Also, is it appropriate to perform the full-frame image saving inside the nvdsanalytics probe, or should that work be done elsewhere?
/**
 * @brief Buffer probe installed on the nvdsanalytics src pad.
 *
 * Runs once per batched buffer: parses the analytics metadata and then
 * saves annotated frames. Always returns GST_PAD_PROBE_OK so the buffer
 * continues downstream regardless of what the handlers do.
 *
 * @param pad    The probed pad (unused).
 * @param info   Probe info; info->data carries the GstBuffer.
 * @param u_data User data registered with gst_pad_add_probe (unused).
 * @return GST_PAD_PROBE_OK — never drops the buffer.
 */
static GstPadProbeReturn
nvdsanalytics_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
gpointer u_data)
{
GstBuffer *buf = (GstBuffer *) info->data;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
if (batch_meta == NULL)
{
  // No DeepStream batch metadata on this buffer — nothing to parse or
  // save; passing NULL on would crash the handlers below.
  g_print ("Warning: no NvDsBatchMeta attached to buffer\n");
  return GST_PAD_PROBE_OK;
}
parse_nvdsanalytics_meta_data (batch_meta);
save_frame_with_bbox (buf);
return GST_PAD_PROBE_OK;
}
/**
 * @brief Copies each frame of a batched buffer to the host, draws person
 *        bounding boxes, and publishes detection flags plus a JPEG snapshot
 *        to Redis when the boxes changed significantly since the last save.
 *
 * Fixes vs. the previous revision:
 *  - the GstBuffer now stays mapped until every access through the
 *    NvBufSurface pointer is finished (it was unmapped right after mapping,
 *    leaving `surface` pointing into an unmapped buffer);
 *  - the Tegra path syncs the surface for CPU access BEFORE reading it
 *    (it previously called NvBufSurfaceSyncForDevice after the read);
 *  - on Tegra the Mat step is the packed row size, matching the packed
 *    plane copy, instead of the device pitch;
 *  - NV12_709_ER is converted with the semi-planar NV12 conversion code
 *    instead of the planar I420 one;
 *  - the per-frame host buffer is a std::vector (RAII) instead of
 *    malloc/free, so no manual cleanup on any exit path.
 *
 * @param buf Batched GStreamer buffer whose mapped data is an NvBufSurface.
 */
extern "C" void save_frame_with_bbox(GstBuffer *buf)
{
GstMapInfo in_map_info = GST_MAP_INFO_INIT;
if (!gst_buffer_map(buf, &in_map_info, GST_MAP_READ))
{
    g_print ("Error: Failed to map gst buffer\n");
    return;
}
// `surface` points into the mapped buffer; the buffer must remain mapped
// while it is dereferenced. It is unmapped at the end of this function.
NvBufSurface *surface = (NvBufSurface *)in_map_info.data;

std::vector<unsigned char> frame_data;   // reusable host copy of one frame
std::vector<cv::Rect> objsRect;          // person bboxes of the current frame
// Last saved bboxes per source id, used to suppress near-duplicate saves.
// NOTE(review): function-local static with no lock — assumes this runs on
// a single streaming thread; confirm if several pipelines share the probe.
static std::map<uint32_t, std::vector<cv::Rect>> lastObjsRect;

NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL;
     l_frame = l_frame->next)
{
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
    guint object_num = 0, person_num = 0, train_num = 0;
    objsRect.clear();

    // Count persons/trains and collect the person rectangles.
    for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj != NULL;
         l_obj = l_obj->next)
    {
        NvDsObjectMeta *obj_meta = (NvDsObjectMeta *)(l_obj->data);
        if (obj_meta->class_id == PGIE_CLASS_ID_PERSON)
        {
            objsRect.emplace_back(obj_meta->rect_params.left,
                                  obj_meta->rect_params.top,
                                  obj_meta->rect_params.width,
                                  obj_meta->rect_params.height);
            person_num++;
            object_num++;
        }
        else if (obj_meta->class_id == PGIE_CLASS_ID_TRAIN)
        {
            train_num++;
            object_num++;
        }
    }

    if (object_num)  // object_num >= person_num, so one check suffices
    {
        // guint fields are unsigned — print with %u, not %d.
        g_print("test: person_num=%u, object_num=%u, train_num=%u, batchId=%u, srcId=%u\n",
                person_num, object_num, train_num,
                frame_meta->batch_id, frame_meta->source_id);
    }

    // Publish the person-presence flag. When no person is present, skip
    // the rest of the frame; the train flag is then deliberately left
    // untouched (same control flow as the original).
    {
        std::string key = get_detect_result_redis_key(detect_person/*, frame_meta->source_id*/);
        RedisHelper::instance()->setCommand(key.c_str(), person_num ? "1" : "0");
    }
    if (person_num == 0)
        continue;
    {
        std::string key = get_detect_result_redis_key(detect_train/*, frame_meta->source_id*/);
        RedisHelper::instance()->setCommand(key.c_str(), train_num ? "1" : "0");
    }

    NvBufSurfaceParams *surf = &surface->surfaceList[frame_meta->batch_id];
    // Grow-only host buffer, reused across frames and calls within this call.
    if (frame_data.size() < surf->dataSize)
        frame_data.resize(surf->dataSize);
    unsigned char *src_data = frame_data.data();

    gint frame_width  = (gint)surf->width;
    gint frame_height = (gint)surf->height;
    gint color_format = surf->colorFormat;
    gint frame_step;
#ifdef PLATFORM_TEGRA
    NvBufSurfaceMap(surface, -1, -1, NVBUF_MAP_READ);
    // We only READ the surface on the CPU, so sync for CPU before copying.
    // (The previous code called NvBufSurfaceSyncForDevice after the read,
    // which is the wrong direction for a CPU read.)
    NvBufSurfaceSyncForCpu(surface, -1, -1);
    NvBufSurfacePlaneParams *pParams = &surf->planeParams;
    unsigned int offset = 0;
    for (unsigned int plane = 0; plane < pParams->num_planes; plane++) {
        if (plane > 0)
            offset += pParams->height[plane - 1] *
                      pParams->bytesPerPix[plane - 1] * pParams->width[plane - 1];
        for (unsigned int h = 0; h < pParams->height[plane]; h++) {
            memcpy(src_data + offset +
                       h * pParams->bytesPerPix[plane] * pParams->width[plane],
                   (char *)surf->mappedAddr.addr[plane] + h * pParams->pitch[plane],
                   pParams->bytesPerPix[plane] * pParams->width[plane]);
        }
    }
    NvBufSurfaceUnMap(surface, -1, -1);
    // Rows were copied packed (pitch padding stripped), so the Mat step is
    // the packed row size — using the device pitch here would skew rows
    // whenever pitch != width.
    frame_step = (gint)(pParams->bytesPerPix[0] * pParams->width[0]);
#else
    // dGPU: copy the whole pitched frame as-is; the Mat step is the pitch.
    cudaMemcpy((void *)src_data, (void *)surf->dataPtr, surf->dataSize,
               cudaMemcpyDeviceToHost);
    frame_step = (gint)surf->pitch;
#endif

    cv::Mat frame;
    cv::Mat out_mat(cv::Size(frame_width, frame_height), CV_8UC3);
    switch (color_format)
    {
    case NVBUF_COLOR_FORMAT_NV12:
    case NVBUF_COLOR_FORMAT_NV12_709_ER:
        // Both formats share the semi-planar NV12 memory layout (709_ER
        // differs only in colorimetry). The previous code converted 709_ER
        // with the planar I420 code, which scrambles the chroma planes.
        frame = cv::Mat(frame_height * 3 / 2, frame_width, CV_8UC1,
                        src_data, frame_step);
        cv::cvtColor(frame, out_mat, CV_YUV2BGR_NV12);
        break;
    case NVBUF_COLOR_FORMAT_RGBA:
        frame = cv::Mat(frame_height, frame_width, CV_8UC4,
                        src_data, frame_step);
        cv::cvtColor(frame, out_mat, CV_RGBA2BGR);
        break;
    default:
        std::cout << "warning: color_format = " << color_format << std::endl;
        break;
    }

    for (const cv::Rect &r : objsRect)
        cv::rectangle(out_mat, r, cv::Scalar(0, 255, 0), 1);

    // Boxes must be in a canonical order before comparing against the
    // previously saved set for this source.
    std::sort(objsRect.begin(), objsRect.end(), bbox_ascending);
    if (objs_bbox_great_diffrence(objsRect, lastObjsRect[frame_meta->source_id]))
    {
        auto t1 = std::chrono::system_clock::now();
        static int ii = 0;
        char filePath[FILE_NAME_SIZE] = {0};
        snprintf(filePath, FILE_NAME_SIZE, "./temp_%u_%u_%d.jpg",
                 frame_meta->source_id, frame_meta->batch_id, ii++);
        cv::imwrite(filePath, out_mat);
        auto t2 = std::chrono::system_clock::now();
        std::vector<unsigned char> img_encode;
        cv::imencode(".jpg", out_mat, img_encode);
        auto t3 = std::chrono::system_clock::now();
        // Store the JPEG bytes under a per-source key, then point the
        // result-image key at that key.
        std::string filename = create_image_name_redis_key(frame_meta->source_id);
        RedisHelper::instance()->setBinStream(filename.c_str(),
                (const char *)img_encode.data(), img_encode.size());
        std::string imageKey = get_result_image_redis_key(detect_person/*, frame_meta->source_id*/);
        RedisHelper::instance()->setCommand(imageKey.c_str(), filename.c_str());
        auto t4 = std::chrono::system_clock::now();
        int span1 = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
        int span2 = std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count();
        int span3 = std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count();
        std::cout << "test debug: span1=" << span1 << " span2=" << span2
                  << " span3=" << span3 << std::endl;
        lastObjsRect[frame_meta->source_id].swap(objsRect);
    }
}
// All accesses through `surface` are finished; now it is safe to unmap.
gst_buffer_unmap(buf, &in_map_info);
}
int
main (int argc, char *argv[])
{
...
// Build one pipeline per configured application instance.
for (i = 0; i < num_instances; i++) {
if (!create_pipeline (appCtx[i], NULL,
all_bbox_generated, perf_cb, overlay_graphics)) {
NVGSTDS_ERR_MSG_V ("Failed to create pipeline");
return_value = -1;
goto done;
}
// When nvdsanalytics is enabled in this instance's config, attach a buffer
// probe to its src pad so every batched buffer is parsed and, when needed,
// saved (see nvdsanalytics_src_pad_buffer_probe).
if (appCtx[i]->config.dsanalytics_config.enable){
GstPad *src_pad = NULL;
GstElement *nvdsanalytics = appCtx[i]->pipeline.common_elements.dsanalytics_bin.elem_dsanalytics;
src_pad = gst_element_get_static_pad (nvdsanalytics, "src");
if (!src_pad)
g_print ("Unable to get nvdsanalytics src pad\n");
else
{
gst_pad_add_probe (src_pad, GST_PAD_PROBE_TYPE_BUFFER,
nvdsanalytics_src_pad_buffer_probe, NULL, NULL);
// Release the ref obtained from gst_element_get_static_pad.
gst_object_unref (src_pad);
}
}
}
main_loop = g_main_loop_new (NULL, FALSE);
...
}