How can I get the total number of bounding boxes detected in each RTSP stream in DeepStream 6.4 (TensorRT 8.6)?

I just edited the C file of my DeepStream app.

This is the GitHub project I used.
I connected 3 RTSP streams to it.
How can I get the total number of bounding boxes detected from each camera?

/* static guint total_objects_detected = 0; */
static guint count = 0;
static guint sum = 0;
static float avg = 0.0f;
//guint num_people_by_camera[MAX_SOURCE_BINS];
static void
bbox_generated_probe_after_analytics (AppCtx * appCtx, GstBuffer * buf,
NvDsBatchMeta * batch_meta, guint index)
{

guint total_objects_detected = 0;
NvDsObjectMeta *obj_meta = NULL;
GstClockTime buffer_pts = 0;
guint32 stream_id = 0;
NvDsMetaList * l_obj = NULL;
NvDsDisplayMeta *display_meta = NULL;

//code by steeve
// guint num_objects_detected[MAX_NUM_CAMERAS] = {0};
//closed by steeve

if (!appCtx->config.dsanalytics_config.enable){
g_print ("Unable to get nvdsanalytics src pad\n");
return;	
}

for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
    l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = l_frame->data;
stream_id = frame_meta->source_id;
GstClockTime buf_ntp_time = 0;
if (playback_utc == FALSE) {
    /** Calculate the buffer-NTP-time
     * derived from this stream's RTCP Sender Report here:
     */
    StreamSourceInfo *src_stream = &testAppCtx->streams[stream_id];
    buf_ntp_time = frame_meta->ntp_timestamp;

    if (buf_ntp_time < src_stream->last_ntp_time) {
	NVGSTDS_WARN_MSG_V ("Source %d: NTP timestamps are backward in time."
		" Current: %lu previous: %lu", stream_id, buf_ntp_time,
		src_stream->last_ntp_time);
    }
    src_stream->last_ntp_time = buf_ntp_time;
}

GList *l;
NvDsMetaList *l_analyticsuser;
l_analyticsuser = frame_meta->frame_user_meta_list;

AnalyticsUserMeta *user_data = 
    (AnalyticsUserMeta *) g_malloc0(sizeof(AnalyticsUserMeta));
if (l_analyticsuser != NULL) {
    analytics_custom_parse_nvdsanalytics_meta_data(l_analyticsuser, user_data);
}
user_data->source_id = stream_id;

//		l_analyticsuser = l_analyticsuser->next;

/* Code from test5 application */
for (l = frame_meta->obj_meta_list; l != NULL; l = l->next) {
    /* Now using above information we need to form a text that should
     * be displayed on top of the bounding box, so lets form it here. */
    obj_meta = (NvDsObjectMeta *) (l->data);
    //
    //num_objects_detected[obj_meta->source_id] += 1;
    //
    {
	/**
	 * Enable only if this callback is after tiler
	 * NOTE: Scaling back code-commented
	 * now that bbox_generated_probe_after_analytics() is post analytics
	 * (say pgie, tracker or sgie)
	 * and before tiler, no plugin shall scale metadata and will be
	 * corresponding to the nvstreammux resolution
	 */
	float scaleW = 0;
	float scaleH = 0;
	/* Frequency of messages to be sent depends on the use case.
	 * Here an event message is generated for every detected object.
	 */
	buffer_pts = frame_meta->buf_pts;
	if (!appCtx->config.streammux_config.pipeline_width
		|| !appCtx->config.streammux_config.pipeline_height) {
	    g_print ("invalid pipeline params\n");
	    return;
	}
	LOGD ("stream %d==%d [%d X %d]\n", frame_meta->source_id,
		frame_meta->pad_index, frame_meta->source_frame_width,
		frame_meta->source_frame_height);
	scaleW =
	    (float) frame_meta->source_frame_width /
	    appCtx->config.streammux_config.pipeline_width;
	scaleH =
	    (float) frame_meta->source_frame_height /
	    appCtx->config.streammux_config.pipeline_height;

	if (playback_utc == FALSE) {
	    /** Use the buffer-NTP-time derived from this stream's RTCP Sender
	     * Report here:
	     */
	    buffer_pts = buf_ntp_time;
	}
	
	/** Generate NvDsEventMsgMeta for every object */
	NvDsEventMsgMeta *msg_meta =
	    (NvDsEventMsgMeta *) g_malloc0 (sizeof (NvDsEventMsgMeta));
	generate_event_msg_meta (msg_meta, PERSON_ID, TRUE,
		/**< useTs NOTE: Pass FALSE for files without base-timestamp in URI */
		buffer_pts,
		appCtx->config.multi_source_config[stream_id].uri, stream_id,
		appCtx->config.multi_source_config[stream_id].camera_id,
		user_data, scaleW, scaleH, frame_meta);
	testAppCtx->streams[stream_id].meta_number++;
	NvDsUserMeta *user_event_meta =
	    nvds_acquire_user_meta_from_pool (batch_meta);
	    //
	//num_people_by_camera[stream_id] += 1;
	//
	if (user_event_meta) {
	    /*
	     * Since generated event metadata has custom objects for
	     * Vehicle / Person which are allocated dynamically, we are
	     * setting copy and free function to handle those fields when
	     * metadata copy happens between two components.
	     */
	    user_event_meta->user_meta_data = (void *) msg_meta;
	    user_event_meta->base_meta.batch_meta = batch_meta;
	    user_event_meta->base_meta.meta_type = NVDS_EVENT_MSG_META;
	    user_event_meta->base_meta.copy_func =
		(NvDsMetaCopyFunc) meta_copy_func;
	    user_event_meta->base_meta.release_func =
		(NvDsMetaReleaseFunc) meta_free_func;
	    nvds_add_user_meta_to_frame (frame_meta, user_event_meta);
	} else {
	    g_print ("Error in attaching event meta to buffer\n");
	}
    }
}
testAppCtx->streams[stream_id].frameCount++;
//g_print ("strem id = %f \n",frame_meta->obj_meta_list);

//g_print("Camera %d: Number of people detected = %d\n", i, num_people_by_camera[i]);
int offset = 0;
total_objects_detected += g_list_length(frame_meta->obj_meta_list);

/*for (int i = 0; i < MAX_NUM_CAMERAS; i++) {

    g_print("Camera %d: Number of objects detected = %d\n", i, num_objects_detected[i]);

}*/

count++;
sum += total_objects_detected;
if (count == 10) {
	/* sum and count are unsigned integers, so cast before dividing;
	 * otherwise the average is truncated to an integer.
	 * Note two further problems: total_objects_detected mixes all
	 * connected sources together, and because this block sits inside
	 * the frame loop while total_objects_detected keeps accumulating,
	 * earlier frames in the batch are counted again on every later
	 * iteration. */
	avg = (float) sum / count;
	g_print ("Average number of people in the room: %f\n", avg);
	count = 0;
	sum = 0;
}

display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
    NvOSD_TextParams *txt_params  = &display_meta->text_params[0];
    display_meta->num_labels = 1;
    txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
    offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Approximate number of people in the room = %f ", avg);
    //offset = snprintf(txt_params->display_text + offset , MAX_DISPLAY_LEN, "Vehicle = %d ", vehicle_count);

    /* Now set the offsets where the string should appear */
    txt_params->x_offset = 10;
    txt_params->y_offset = 12;

    /* Font , font-color and font-size */
    txt_params->font_params.font_name = "Serif";
    txt_params->font_params.font_size = 10;
    txt_params->font_params.font_color.red = 1.0;
    txt_params->font_params.font_color.green = 1.0;
    txt_params->font_params.font_color.blue = 1.0;
    txt_params->font_params.font_color.alpha = 1.0;

    /* Text background color */
    txt_params->set_bg_clr = 1;
    txt_params->text_bg_clr.red = 0.0;
    txt_params->text_bg_clr.green = 0.0;
    txt_params->text_bg_clr.blue = 0.0;
    txt_params->text_bg_clr.alpha = 1.0;

    nvds_add_display_meta_to_frame(frame_meta, display_meta);

//g_print ("Number of objects = %d \n",total_objects_detected);

g_free(user_data);
}

}

I just edited this function in the C file deepstream_test5_app_main.c.
I am getting the approximate number of people in the room, but I think it is only getting the data of one source.

Is this the correct way to do it?

How can I get the total number of bounding boxes detected in each camera separately?

Is there any way to get those details from the config file?


You can keep your own per-stream statistics using the batch_id in the NvDsFrameMeta.
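As an illustration, here is a minimal sketch of that idea (not an official implementation): it keeps one counter per source, indexed by frame_meta->source_id, inside the same bbox_generated_probe_after_analytics callback shown above. MAX_SOURCE_BINS is assumed to be the same source limit the app already uses for arrays like testAppCtx->streams.

/* Minimal sketch: per-camera bounding-box counters.
 * Assumes MAX_SOURCE_BINS is the source limit the app already uses;
 * file-scope statics are zero-initialized in C. */
static guint bbox_count_per_source[MAX_SOURCE_BINS];

static void
bbox_generated_probe_after_analytics (AppCtx * appCtx, GstBuffer * buf,
    NvDsBatchMeta * batch_meta, guint index)
{
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    guint stream_id = frame_meta->source_id; /* which camera this frame came from */

    /* Each entry in obj_meta_list is one detected bounding box. */
    guint boxes_in_frame = g_list_length (frame_meta->obj_meta_list);
    bbox_count_per_source[stream_id] += boxes_in_frame;

    g_print ("Camera %u: %u boxes in this frame, %u detected in total\n",
        stream_id, boxes_in_frame, bbox_count_per_source[stream_id]);
  }
}

The same indexing works for the people-counting average: keep sum and count as per-source arrays instead of single globals, and each camera gets its own running average.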
