• Hardware Platform (Jetson / GPU) GTX 1650
• DeepStream Version 6.0
• TensorRT Version 8.0.1-1+cuda11.3
• NVIDIA GPU Driver Version (valid for GPU only) 470.103.01
• Issue Type( questions, new requirements, bugs) Unknown bug
• How to reproduce the issue ?
I run DeepStream in a Docker container; the image is deepstream:6.0-devel.
This is my pipeline:
This is my preprocess config file:
config_preprocess_3d_custom.txt (2.9 KB)
I added some lines of code in gstnvdspreprocess.cpp to get the NvDsObjectMeta created by the primary nvinfer.
In the function gst_nvdspreprocess_on_frame:
static GstFlowReturn
gst_nvdspreprocess_on_frame(GstNvDsPreProcess *nvdspreprocess, GstBuffer *inbuf,
NvBufSurface *in_surf)
{
GstFlowReturn flow_ret = GST_FLOW_ERROR;
std::string nvtx_str;
std::unique_ptr<NvDsPreProcessBatch> batch = nullptr;
GstNvDsPreProcessMemory *memory = nullptr;
GstBuffer *conv_gst_buf = nullptr;
NvDsBatchMeta *batch_meta = NULL;
guint num_groups = 0;
gdouble scale_ratio_x, scale_ratio_y;
gdouble offset_left, offset_top;
gint idx = 0;
if (((in_surf->memType == NVBUF_MEM_DEFAULT || in_surf->memType == NVBUF_MEM_CUDA_DEVICE) &&
((int)in_surf->gpuId != (int)nvdspreprocess->gpu_id)) ||
(((int)in_surf->gpuId == (int)nvdspreprocess->gpu_id) && (in_surf->memType == NVBUF_MEM_SYSTEM)))
{
GST_ELEMENT_ERROR(nvdspreprocess, RESOURCE, FAILED,
("Memory Compatibility Error:Input surface gpu-id doesnt match with configured gpu-id for element,"
" please allocate input using unified memory, or use same gpu-ids OR,"
" if same gpu-ids are used ensure appropriate Cuda memories are used"),
("surface-gpu-id=%d,%s-gpu-id=%d", in_surf->gpuId, GST_ELEMENT_NAME(nvdspreprocess),
nvdspreprocess->gpu_id));
return GST_FLOW_ERROR;
}
batch_meta = gst_buffer_get_nvds_batch_meta(inbuf);
if (batch_meta == nullptr)
{
GST_ELEMENT_ERROR(nvdspreprocess, STREAM, FAILED,
("NvDsBatchMeta not found for input buffer."), (NULL));
return GST_FLOW_ERROR;
}
if (batch == nullptr)
{
batch.reset(new NvDsPreProcessBatch);
batch->push_buffer = FALSE;
batch->inbuf = inbuf;
batch->inbuf_batch_num = nvdspreprocess->current_batch_num;
batch->batch_meta = batch_meta;
batch->scaling_pool_format = nvdspreprocess->scaling_pool_format;
flow_ret =
gst_buffer_pool_acquire_buffer(nvdspreprocess->scaling_pool, &conv_gst_buf,
nullptr);
if (flow_ret != GST_FLOW_OK)
{
return flow_ret;
}
memory = gst_nvdspreprocess_buffer_get_memory(conv_gst_buf);
if (!memory)
{
return GST_FLOW_ERROR;
}
batch->converted_buf = conv_gst_buf;
batch->pitch = memory->surf->surfaceList[0].planeParams.pitch[0];
}
if (nvdspreprocess->nvdspreprocess_groups[0]->src_ids[0] == -1 &&
batch_meta->num_frames_in_batch * nvdspreprocess->nvdspreprocess_groups[0]->framemeta_map[0].roi_vector.size() > nvdspreprocess->max_batch_size)
{
GST_ELEMENT_ERROR(nvdspreprocess, STREAM, FAILED,
("tensor shape batch-size should be atleast sum total rois\n"), (NULL));
return GST_FLOW_ERROR;
}
num_groups = nvdspreprocess->nvdspreprocess_groups.size();
GST_DEBUG_OBJECT(nvdspreprocess, "Num Groups = %d\n", num_groups);
std::vector<bool> group_present(num_groups, 0);
for (guint gcnt = 0; gcnt < num_groups; gcnt++)
{
GstNvDsPreProcessGroup *&preprocess_group = nvdspreprocess->nvdspreprocess_groups[gcnt];
GST_DEBUG_OBJECT(nvdspreprocess, "num filled in batch meta = %d\n", batch_meta->num_frames_in_batch);
NvDsMetaList *l_frame = NULL;
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next)
{
NvDsFrameMeta *frame_meta = NULL;
frame_meta = (NvDsFrameMeta *)(l_frame->data);
gint source_id = frame_meta->source_id; /* source id of incoming buffer */
gint batch_index = frame_meta->batch_id; /* batch id of incoming buffer */
GstNvDsPreProcessFrame preprocess_frame;
std::vector<NvDsRoiMeta> roi_vector;
NvDsRoiMeta roi_meta;
NvOSD_RectParams rect_params;
std::vector<gint> src_ids = preprocess_group->src_ids;
if (src_ids[0] == -1)
{
num_groups = 1;
}
if ((std::find(src_ids.begin(), src_ids.end(), source_id) == src_ids.end()) && src_ids[0] != -1)
{
GST_DEBUG_OBJECT(nvdspreprocess, "Group %d : No Source %d => skipping\n", gcnt, source_id);
continue;
}
GST_DEBUG_OBJECT(nvdspreprocess, "Group %d : Processsing Source ID = %d \n", gcnt, source_id);
auto get_preprocess_frame_meta = preprocess_group->framemeta_map.find(source_id);
if (get_preprocess_frame_meta == preprocess_group->framemeta_map.end() && src_ids[0] != -1)
{
g_print("Group %d : Configuration for Source ID = %d not found\n", gcnt, source_id);
flow_ret = GST_FLOW_ERROR;
return flow_ret;
}
else
{
preprocess_frame = get_preprocess_frame_meta->second;
// Start of added code: collect an ROI from the objects detected by the primary nvinfer
std::vector<NvDsRoiMeta> detected_roi;
for (NvDsObjectMetaList *l_user = frame_meta->obj_meta_list; l_user != NULL; l_user = l_user->next)
{
NvDsObjectMeta *obj_meta = (NvDsObjectMeta *)l_user->data;
// Skip objects that already have a tracker ID; keep only untracked detections
if (obj_meta->object_id != UNTRACKED_OBJECT_ID)
{
continue;
}
NvDsRoiMeta temp_roi_meta;
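// Clamp left/top to be non-negative and force a minimum ROI size of 224x224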
temp_roi_meta.roi.left = MAX(obj_meta->rect_params.left, 0);
temp_roi_meta.roi.top = MAX(obj_meta->rect_params.top, 0);
temp_roi_meta.roi.width = MAX(obj_meta->rect_params.width, 224);
temp_roi_meta.roi.height = MAX(obj_meta->rect_params.height, 224);
detected_roi.push_back(temp_roi_meta);
// g_print("\nNew roi: %f %f %f %f", temp_roi_meta.roi.left,temp_roi_meta.roi.top, temp_roi_meta.roi.width, temp_roi_meta.roi.height);
// only accept 1 ROI
break;
}
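// Merge the detected ROI into the configured roi_vector (check_and_fix_roi_vector is a helper I added; a simplified sketch follows the code below)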
check_and_fix_roi_vector(detected_roi, preprocess_frame.roi_vector);
roi_vector = preprocess_frame.roi_vector;
// End of added code
GST_DEBUG_OBJECT(nvdspreprocess, "Group %d : Source ID %d : Got roi-vecsize = %ld\n",
gcnt, source_id, roi_vector.size());
for (guint n = 0; n < roi_vector.size(); n++)
{
roi_meta = roi_vector[n];
if (preprocess_group->process_on_roi)
{
GST_DEBUG_OBJECT(nvdspreprocess, "Group %d : Source ID %d : Processing on ROIS\n", gcnt, source_id);
/** Process on ROIs provided from Object Meta */
rect_params = roi_meta.roi;
GST_DEBUG_OBJECT(nvdspreprocess, "filling ROI left=%f top=%f width=%f height=%f\n",
rect_params.left, rect_params.top, rect_params.width, rect_params.height);
}
else
{
GST_DEBUG_OBJECT(nvdspreprocess, "Group %d : Source ID %d : Processing on Full Frames\n", gcnt, source_id);
/** Process on Full Frames */
rect_params.left = 0;
rect_params.top = 0;
rect_params.width = in_surf->surfaceList[batch_index].width;
rect_params.height = in_surf->surfaceList[batch_index].height;
roi_meta.roi = rect_params;
GST_DEBUG_OBJECT(nvdspreprocess, "filling FULL FRAME left=%f top=%f width=%f height=%f\n",
rect_params.left, rect_params.top, rect_params.width, rect_params.height);
}
#ifdef DRAW_ROIS
NvDsDisplayMeta *display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
display_meta->num_rects = 1;
display_meta->rect_params[0].left = rect_params.left;
display_meta->rect_params[0].top = rect_params.top;
display_meta->rect_params[0].width = rect_params.width;
display_meta->rect_params[0].height = rect_params.height;
display_meta->rect_params[0].border_width = 2;
display_meta->rect_params[0].border_color = {0, 1, 0, 1};
nvds_add_display_meta_to_frame(frame_meta, display_meta);
GST_DEBUG_OBJECT(nvdspreprocess, "Draw ROI!");
#endif
idx = batch->units.size();
if (idx == nvdspreprocess->tensor_params.network_input_shape[0])
{
GST_ELEMENT_WARNING(nvdspreprocess, STREAM, FAILED,
("Warning: exceeding preprocess batch-size (=network-input-shape[0]) => skipping sources\n"), (NULL));
continue;
}
/** Scale the roi part to the network resolution maintaining aspect ratio */
if (scale_and_fill_data(nvdspreprocess, in_surf->surfaceList + batch_index,
&rect_params, scale_ratio_x, scale_ratio_y, offset_left, offset_top,
memory->surf, memory->surf->surfaceList + idx,
memory->frame_memory_ptrs[idx]) != GST_FLOW_OK)
{
flow_ret = GST_FLOW_ERROR;
return flow_ret;
}
roi_meta.converted_buffer = (NvBufSurfaceParams *)memory->surf->surfaceList + idx;
roi_meta.scale_ratio_x = scale_ratio_x;
roi_meta.scale_ratio_y = scale_ratio_y;
roi_meta.offset_left = offset_left;
roi_meta.offset_top = offset_top;
roi_meta.frame_meta = frame_meta;
/* Adding a Unit (ROI/Crop/Full Frame) to the current batch. Set the frames members. */
NvDsPreProcessUnit unit;
unit.converted_frame_ptr = memory->frame_memory_ptrs[idx];
unit.obj_meta = nullptr;
unit.frame_meta = frame_meta;
unit.frame_num = unit.frame_meta->frame_num;
unit.batch_index = batch_index;
unit.input_surf_params = in_surf->surfaceList + batch_index;
unit.roi_meta = roi_meta;
unit.roi_meta.classifier_meta_list = NULL;
unit.roi_meta.roi_user_meta_list = NULL;
batch->units.push_back(unit);
if (preprocess_group->process_on_roi)
{
GST_DEBUG_OBJECT(nvdspreprocess,
"Group %d : Source ID %d : ROI : max-batch-size = %d batch-units-size = %ld batch_index = %d idx = %d\n",
gcnt, source_id, nvdspreprocess->max_batch_size, batch->units.size(), batch_index, idx);
}
else
{
GST_DEBUG_OBJECT(nvdspreprocess,
"Group %d : Source ID %d : FULL FRAME : max-batch-size = %d batch-units-size = %ld batch_index = %d idx = %d\n",
gcnt, source_id, nvdspreprocess->max_batch_size, batch->units.size(), batch_index, idx);
}
}
}
}
/** transform the group according to num filled from batch_meta */
if (nvdspreprocess->batch_insurf.numFilled < preprocess_group->num_units)
{
GST_DEBUG_OBJECT(nvdspreprocess, "Some Sources not received for Group %d\n", gcnt);
}
if (nvdspreprocess->batch_insurf.numFilled > 0)
{
if (!group_transformation(nvdspreprocess, preprocess_group))
{
GST_ELEMENT_ERROR(nvdspreprocess, STREAM, FAILED,
("Group %d : group transformation failed\n", gcnt), (NULL));
return GST_FLOW_ERROR;
}
group_present[gcnt] = 1;
}
else
{
GST_DEBUG_OBJECT(nvdspreprocess, "No Sources received for Group %d\n", gcnt);
}
nvdspreprocess->batch_insurf.numFilled = 0;
nvdspreprocess->batch_outsurf.numFilled = 0;
}
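For reference, check_and_fix_roi_vector is a small helper I added (not shown above). A simplified version of what it is meant to do looks roughly like this: when a detection was collected, the detected ROI replaces the configured one; otherwise the ROIs from the config file are kept.

// Simplified sketch of the helper called above. The detected ROI (at most one
// per frame, see the break above) overwrites the ROI list taken from the config
// file; when there is no detection the configured ROIs are left unchanged.
static void
check_and_fix_roi_vector (const std::vector<NvDsRoiMeta> &detected_roi,
    std::vector<NvDsRoiMeta> &roi_vector)
{
  if (detected_roi.empty ())
    return;
  roi_vector = detected_roi;
}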
While running, the pipeline got stuck after about 10 ROIs had been collected.
This is my debug file:
debug.txt (13.4 MB)
It shows that there is an error when the gst_poll_wait function is called.
The error only occurs when I set the new ROIs from the detected bounding boxes. If I set the new ROI to a constant value in the code, the pipeline runs successfully.
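By a constant value I mean replacing the ROI assignments above with hard-coded coordinates, for example:

temp_roi_meta.roi.left = 0;
temp_roi_meta.roi.top = 0;
temp_roi_meta.roi.width = 224;
temp_roi_meta.roi.height = 224;

With fixed values like these the pipeline keeps running; as soon as the values come from obj_meta->rect_params it gets stuck.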
Please help me to fix this error. Thanks.