Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) GPU
• DeepStream Version 6.3
• JetPack Version (valid for Jetson only): 5.1.2
• TensorRT Version: 8.5.3
I have customized the text displayed on a frame.
The text is of string type.
My implementation is as follows.
The bounding box is drawn correctly.
On top of the box, I would like to display the text from request_.name_, which is a std::string.
However, the text is not displayed on the frame.
if (dsexample->process_full_frame) {
std::chrono::time_point<std::chrono::system_clock> before = std::chrono::system_clock::now();
/* Iterate over every frame in the batch. */
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next)
{
frame_meta = (NvDsFrameMeta *) (l_frame->data);
/* Rect describing the full frame; used only to drive the scale/convert step. */
NvOSD_RectParams rect_params_;
rect_params_.left = 0;
rect_params_.top = 0;
rect_params_.width = dsexample->video_info.width;
rect_params_.height = dsexample->video_info.height;
/* Scale and convert the frame to the processing resolution. */
if (get_converted_mat (dsexample, surface, i, &rect_params_,
scale_ratio, dsexample->video_info.width,
dsexample->video_info.height) != GST_FLOW_OK) {
goto error;
}
/* Hand the converted RGBA frame to the face-recognition pipeline. */
queue.push(data_((unsigned char *)dsexample->inter_buf->surfaceList[0].mappedAddr.addr[0], imageW, imageH, 4), (int)frame_meta->source_id);
i++;
/* Drain every face box produced for this source. */
for(int cnt=0; cnt < facequeue.getSize((int)frame_meta->source_id); cnt++){
FaceBoxSafeQ<facebox_>::QueueResult result;
facebox_ request_;
if ((result = facequeue.pop(request_, (int)frame_meta->source_id)) != FaceBoxSafeQ<facebox_>::NOTOK)
{
/* FIX: acquire a fresh NvDsObjectMeta per detection. The original code
 * acquired a single meta once per frame and re-added the same pointer for
 * every popped face box, so each iteration overwrote the previous
 * rect/text params and the meta list ended up with duplicate entries —
 * the reason only a box but no text appeared on the frame. */
NvDsObjectMeta *object_meta =
    nvds_acquire_obj_meta_from_pool (frame_meta->base_meta.batch_meta);
NvOSD_RectParams & rect_params = object_meta->rect_params;
NvOSD_TextParams & text_params = object_meta->text_params;
/* Bounding box coordinates as reported by the recognizer (processing
 * resolution; rescaled to stream resolution below). */
rect_params.left = request_.left_;
rect_params.top = request_.top_;
rect_params.width = request_.width_;
rect_params.height = request_.height_;
/* Box background disabled; color kept for easy re-enabling. */
rect_params.has_bg_color = 0;
rect_params.bg_color = (NvOSD_ColorParams) {1, 1, 0, 0.4};
/* Green border of width 3. */
rect_params.border_width = 3;
rect_params.border_color = (NvOSD_ColorParams) {0, 1, 0, 1};
/* Scale the bounding boxes proportionally based on how the frame
 * was scaled during input conversion. */
rect_params.left /= scale_ratio;
rect_params.top /= scale_ratio;
rect_params.width /= scale_ratio;
rect_params.height /= scale_ratio;
object_meta->object_id = UNTRACKED_OBJECT_ID;
/* display_text requires heap-allocated memory owned by the meta
 * (released when the object meta is returned to the pool). */
text_params.display_text = g_strdup (request_.name_.c_str());
/* Display text above the top-left corner of the object.
 * FIX: x/y offsets are unsigned — clamp instead of letting
 * (top - 10) underflow to a huge value that renders the text
 * off-frame for boxes near the top edge. */
text_params.x_offset = (unsigned int) rect_params.left;
text_params.y_offset =
    rect_params.top >= 10 ? (unsigned int) (rect_params.top - 10) : 0;
/* Black background behind the text for readability. */
text_params.set_bg_clr = 1;
text_params.text_bg_clr = (NvOSD_ColorParams) {0, 0, 0, 1};
/* Font face, size and color (white, 12pt). */
text_params.font_params.font_name = font_name;
text_params.font_params.font_size = 12;
text_params.font_params.font_color = (NvOSD_ColorParams) {1, 1, 1, 1};
nvds_add_obj_meta_to_frame(frame_meta, object_meta, NULL);
frame_meta->bInferDone = TRUE;
}
}
}
std::chrono::time_point<std::chrono::system_clock> after = std::chrono::system_clock::now();
auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(after - before).count();
g_print("interval %d \n", (int)millis);
}