Please provide complete information as applicable to your setup.
**• Hardware Platform (Jetson / GPU)** RTX 4060 Ti
**• DeepStream Version** 7.0
**• Issue Type (questions, new requirements, bugs)** question
I am currently using nvstreammux and nvmultistreamtiler, and I want to analyze multiple sources simultaneously and display each source's analysis result in its top-left corner.
I looked at the deepstream_3d_action_recognition example, which uses ROIs and displays the analysis result in the top-left corner of each ROI:
```cpp
static GstPadProbeReturn
pgie_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *)info->data;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
  NvDsMetaList *l_user_meta = NULL;
  NvDsUserMeta *user_meta = NULL;
  for (l_user_meta = batch_meta->batch_user_meta_list; l_user_meta != NULL;
       l_user_meta = l_user_meta->next)
  {
    user_meta = (NvDsUserMeta *)(l_user_meta->data);
    if (user_meta->base_meta.meta_type == NVDS_PREPROCESS_BATCH_META)
    {
      GstNvDsPreProcessBatchMeta *preprocess_batchmeta =
          (GstNvDsPreProcessBatchMeta *)(user_meta->user_meta_data);
      std::string model_dims = "";
      if (preprocess_batchmeta->tensor_meta) {
        if (preprocess_batchmeta->tensor_meta->tensor_shape.size() == MODEL_3D_SHAPES) {
          model_dims = "3D: AR - ";
        } else {
          model_dims = "2D: AR - ";
        }
      }
      for (auto &roi_meta : preprocess_batchmeta->roi_vector)
      {
        NvDsMetaList *l_user = NULL;
        for (l_user = roi_meta.roi_user_meta_list; l_user != NULL;
             l_user = l_user->next)
        {
          NvDsUserMeta *user_meta = (NvDsUserMeta *)(l_user->data);
          if (user_meta->base_meta.meta_type == NVDSINFER_TENSOR_OUTPUT_META)
          {
            NvDsInferTensorMeta *tensor_meta = (NvDsInferTensorMeta *)(user_meta->user_meta_data);
            gfloat max_prob = 0;
            gint class_id = 0;
            gfloat *buffer = (gfloat *)tensor_meta->out_buf_ptrs_host[0];
            for (size_t i = 0; i < tensor_meta->output_layers_info[0].inferDims.d[0]; i++)
            {
              if (buffer[i] > max_prob)
              {
                max_prob = buffer[i];
                class_id = i;
              }
            }
            const gchar *label = "";
            if (class_id < MAX_CLASS_LEN)
              label = kActioClasseLabels[class_id];
            LOG_DEBUG("output tensor result: cls_id: %d, score:%.3f, label: %s", class_id, max_prob, label);
          }
        }

        NvDsMetaList *l_classifier = NULL;
        for (l_classifier = roi_meta.classifier_meta_list; l_classifier != NULL;
             l_classifier = l_classifier->next)
        {
          NvDsClassifierMeta *classifier_meta = (NvDsClassifierMeta *)(l_classifier->data);
          NvDsLabelInfoList *l_label;
          for (l_label = classifier_meta->label_info_list; l_label != NULL;
               l_label = l_label->next)
          {
            NvDsLabelInfo *label_info = (NvDsLabelInfo *)l_label->data;

            NvDsDisplayMeta *display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
            display_meta->num_labels = 1;

            NvOSD_TextParams *txt_params = &display_meta->text_params[0];
            txt_params->display_text = (char *)g_malloc0(MAX_STR_LEN);
            snprintf(txt_params->display_text, MAX_STR_LEN - 1,
                     "%s: %s", model_dims.c_str(), label_info->result_label);
            LOG_DEBUG("classification result: cls_id: %d, label: %s", label_info->result_class_id, label_info->result_label);

            /* Now set the offsets where the string should appear */
            txt_params->x_offset = roi_meta.roi.left;
            txt_params->y_offset = (uint32_t)std::max<int32_t>(roi_meta.roi.top - 10, 0);

            /* Font, font-color and font-size */
            txt_params->font_params.font_name = (char *)"Serif";
            txt_params->font_params.font_size = 12;
            txt_params->font_params.font_color.red = 1.0;
            txt_params->font_params.font_color.green = 1.0;
            txt_params->font_params.font_color.blue = 1.0;
            txt_params->font_params.font_color.alpha = 1.0;

            /* Text background color */
            txt_params->set_bg_clr = 1;
            txt_params->text_bg_clr.red = 0.0;
            txt_params->text_bg_clr.green = 0.0;
            txt_params->text_bg_clr.blue = 0.0;
            txt_params->text_bg_clr.alpha = 1.0;

            nvds_add_display_meta_to_frame(roi_meta.frame_meta, display_meta);
          }
        }
      }
    }
  }
  return GST_PAD_PROBE_OK;
}
```
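The overlay part of this probe maps cleanly onto the standard pyds display-meta calls, so a rough Python sketch of just that part looks like this (attach_label and its arguments are my own placeholder names; the x/y coordinates would have to come from the ROI meta that I cannot reach):

```python
import pyds

def attach_label(batch_meta, frame_meta, text, x, y):
    """Overlay `text` near (x, y) on the frame, mirroring the C++ snippet above."""
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    display_meta.num_labels = 1

    txt_params = display_meta.text_params[0]
    txt_params.display_text = text
    txt_params.x_offset = x
    txt_params.y_offset = max(y - 10, 0)  # sit just above the ROI, like the C++ code

    # Font, font color and font size
    txt_params.font_params.font_name = "Serif"
    txt_params.font_params.font_size = 12
    txt_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)  # white text

    # Text background color
    txt_params.set_bg_clr = 1
    txt_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)  # black background

    pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
```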
However, I am unable to find a method to get ROI data in Python.
My current pipeline is:
streammux -> preprocess -> pgie1 -> tracker -> sgie -> nvtiler -> nvvidconv -> nvosd -> sink
I have added a probe on nvosd to process the data, but the batch user meta it sees only contains:

- `user_meta.base_meta.meta_type == NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META`
- `user_meta.base_meta.meta_type == NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META`

There is no NVDS_PREPROCESS_BATCH_META like in the C++ example (a simplified sketch of my probe is below).
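Simplified sketch of that probe; the element variable and pad name are placeholders for my actual app, and the real handling is reduced to printing the meta type:

```python
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import pyds

def osd_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    # Walk the batch-level user meta, like the C++ example does.
    l_user = batch_meta.batch_user_meta_list
    while l_user is not None:
        try:
            user_meta = pyds.NvDsUserMeta.cast(l_user.data)
        except StopIteration:
            break
        # Only NVDS_TRACKER_PAST_FRAME_META and NVDSINFER_TENSOR_OUTPUT_META
        # ever show up here; NVDS_PREPROCESS_BATCH_META never appears.
        print(user_meta.base_meta.meta_type)
        try:
            l_user = l_user.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK

# Attached like this ("nvosd" is my nvdsosd element):
# nvosd.get_static_pad("sink").add_probe(Gst.PadProbeType.BUFFER, osd_pad_buffer_probe, 0)
```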
How can I access the ROI data in Python, or how else can I analyze multiple videos separately and write each result in its top-left corner, similar to the C++ example?