I ran into this problem and did not know how to resolve it. My computer's GPU is a V100. Could you help me? Thank you so much.
Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU)
• DeepStream Version
• JetPack Version (valid for Jetson only)
• TensorRT Version
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type( questions, new requirements, bugs)
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)
• The pipeline being used
Hardware Platform :GPU V100;
DeepStream Version:6.1;
TensorRT Version:8.4.1
NVIDIA GPU Driver Version:535.54.03
cuda version:12.2
Issue Type: Feature/question — the buffers are in system memory, not NVMM
Could you attach your pipeline to us and check the decoder with gst-inspect-1.0 nvv4l2decoder
?
this is my code. thank you for your attention
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <gst/gst.h>
#include <glib.h>
#include <gst/rtsp-server/rtsp-server.h>
#include <cuda_runtime_api.h>
#include "nvds_yml_parser.h"
#include "gstnvdsmeta.h"
#include "nvds_analytics_meta.h"
#include "task/border_cross.h"
#include "task/gather.h"
/* ------------------------------------------------------------------------
 * Compile-time configuration.
 * (Fix: the forum paste stripped the leading '#' from every preprocessor
 * line and replaced the quotes with typographic quotes.)
 * ---------------------------------------------------------------------- */
// Primary GIE (inference engine) configuration file
#define PGIE_CONFIG_FILE "config/pgie_config.txt"
#define MAX_DISPLAY_LEN 64
// Tracker configuration file
#define TRACKER_CONFIG_FILE "config/tracker_config.txt"
#define MAX_TRACKING_ID_LEN 16
// Class ids produced by the primary detector
#define PGIE_CLASS_ID_VEHICLE 2
#define PGIE_CLASS_ID_PERSON 0
/* The muxer output resolution must be set if the input streams will be of
 * different resolution. The muxer will scale all the input frames to this
 * resolution. */
#define MUXER_OUTPUT_WIDTH 1920
#define MUXER_OUTPUT_HEIGHT 1080
#define TILED_OUTPUT_WIDTH 1280
#define TILED_OUTPUT_HEIGHT 720
#define OSD_PROCESS_MODE 0
#define OSD_DISPLAY_TEXT 1
// Upper bound on simultaneously managed sources
#define MAX_NUM_SOURCES 255
/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
 * based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 40000
/* Returns true when compiled for a 64-bit ARM target (e.g. Jetson), false on
 * x86 dGPU hosts.  Used below to gate Jetson-only encoder properties.
 * Fix: the predefined compiler macro is `__aarch64__` (the paste had a bare
 * `aarch64`, which is never defined), and `#else`/`#endif` lost their '#'. */
bool is_aarch64()
{
#if defined(__aarch64__)
    return true;
#else
    return false;
#endif
}
// ---- Global runtime state shared by the source add/remove callbacks ----
gint g_num_sources = 0;                     // number of currently active sources
gint g_source_id_list[MAX_NUM_SOURCES];     // per-slot id handed to pad callbacks
gboolean g_eos_list[MAX_NUM_SOURCES];       // TRUE once a source reached EOS
gboolean g_source_enabled[MAX_NUM_SOURCES]; // TRUE while the slot is in use
GstElement **g_source_bin_list = NULL;      // heap array of per-source uridecodebin bins
GMutex eos_lock;                            // guards the EOS/enabled bookkeeping above
gboolean g_run_forever = FALSE;             // when TRUE, removed sources get re-added
gint frame_number = 0;                      // frames seen by the metadata probe
// 从文件中读取多边形的顶点坐标
void readPoints(std::string filename, Polygon &g_ploygon, int width, int height)
{
std::ifstream file(filename);
std::string str;
while (std::getline(file, str))
{
std::stringstream ss(str);
std::string x, y;
std::getline(ss, x, ‘,’);
std::getline(ss, y, ‘,’);
// recover to original size
x = std::to_string(std::stof(x) * width);
y = std::to_string(std::stof(y) * height);
g_ploygon.push_back({std::stoi(x), std::stoi(y)});
}
}
// 定义每个source摄像头的信息
typedef struct
{
std::vector g_person_ids;
// g_person_ids.reserve(10000);
std::vector g_vehicle_ids;
Polygon g_ploygon;
} SOURCE_INFO;
// 初始化为两个source
SOURCE_INFO g_source_info[2];
// Rolling FPS measurement state, consumed by perf_print_callback().
typedef struct
{
    guint64 n_frames;             // frames counted since the last report
    guint64 last_fps_update_time; // g_get_monotonic_time() at the last report (microseconds)
    gdouble fps;                  // last computed frames-per-second value
} PERF_DATA;
PERF_DATA g_perf_data = {0, 0, 0.0};
// 打印FPS
gboolean perf_print_callback(gpointer user_data)
{
PERF_DATA *perf_data = (PERF_DATA *)user_data;
guint64 current_time = g_get_monotonic_time();
guint64 time_elapsed = current_time - perf_data->last_fps_update_time;
if (time_elapsed > 0)
{
perf_data->fps = 1000000.0 * perf_data->n_frames / time_elapsed;
g_print(“FPS: %0.2f\n”, perf_data->fps);
perf_data->n_frames = 0;
perf_data->last_fps_update_time = current_time;
}
return G_SOURCE_CONTINUE;
}
/* Count one processed frame towards the next FPS report. */
void update_frame_counter()
{
    ++g_perf_data.n_frames;
}
/* This is the buffer probe function that we have registered on the sink pad
 * of the OSD element.  All the infer elements in the pipeline attach their
 * metadata to the GstBuffer; here we iterate and process that metadata —
 * for example mapping class ids to strings and counting objects per class. */
/* Buffer probe: walks the batch metadata attached by the inference/tracker
 * elements.  Per frame it (1) lazily loads the per-source ROI polygon,
 * (2) counts unique vehicle tracker ids, (3) colors detection boxes red or
 * green depending on whether their center lies inside the ROI, and
 * (4) draws the polygon plus a count overlay.
 * Fixes vs. the paste: the collapsed one-line body re-expanded; size_t counts
 * cast to match the %d specifiers; num_lines clamped to the display-meta
 * capacity; consistent polygon indexing; snprintf offset guarded. */
static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info,
                                                   gpointer u_data)
{
    GstBuffer *buf = (GstBuffer *)info->data;
    guint num_rects = 0;
    NvDsObjectMeta *obj_meta = NULL;
    NvDsMetaList *l_frame = NULL;
    NvDsMetaList *l_obj = NULL;
    NvDsDisplayMeta *display_meta = NULL;
    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
    /* Iterate over every frame in the batch. */
    for (l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next)
    {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
        int offset = 0;
        /* source_id distinguishes the individual cameras. */
        guint source_id = frame_meta->source_id;
        /* Lazily load the ROI polygon for this source.  pipeline_width/height
         * are used (not the source resolution) because the muxer rescales
         * every stream to the pipeline resolution. */
        if (g_source_info[source_id].g_ploygon.empty())
        {
            guint width = frame_meta->pipeline_width;
            guint height = frame_meta->pipeline_height;
            readPoints("config/polygon_" + std::to_string(source_id) + ".txt",
                       g_source_info[source_id].g_ploygon, width, height);
            g_print("read polygon_%d.txt success!, frame height = %d, width = %d \r \n",
                    source_id, height, width);
        }
        /* Iterate over every detected object in this frame. */
        for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next)
        {
            obj_meta = (NvDsObjectMeta *)(l_obj->data);
            if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE)
            {
                /* Count each tracker id only once per source. */
                if (std::find(g_source_info[source_id].g_vehicle_ids.begin(),
                              g_source_info[source_id].g_vehicle_ids.end(),
                              obj_meta->object_id) ==
                    g_source_info[source_id].g_vehicle_ids.end())
                {
                    g_source_info[source_id].g_vehicle_ids.push_back(obj_meta->object_id);
                }
            }
            // Person counting kept disabled, as in the original:
            // if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) { ... }
            /* Center point of the detection box. */
            Point p = {
                obj_meta->rect_params.left + obj_meta->rect_params.width / 2,
                obj_meta->rect_params.top + obj_meta->rect_params.height / 2};
            if (isInside(g_source_info[source_id].g_ploygon, p))
            {
                /* Inside the ROI: red border, translucent red background. */
                obj_meta->rect_params.border_color.red = 1.0;
                obj_meta->rect_params.border_color.green = 0.0;
                obj_meta->rect_params.border_color.blue = 0.0;
                obj_meta->rect_params.has_bg_color = 1;
                obj_meta->rect_params.bg_color.red = 1.0;
                obj_meta->rect_params.bg_color.green = 0.0;
                obj_meta->rect_params.bg_color.blue = 0.0;
                obj_meta->rect_params.bg_color.alpha = 0.2;
            }
            else
            {
                /* Outside the ROI: green border. */
                obj_meta->rect_params.border_color.red = 0.0;
                obj_meta->rect_params.border_color.green = 1.0;
                obj_meta->rect_params.border_color.blue = 0.0;
            }
        }
        /* Acquire display meta to draw the ROI polygon and the counters. */
        display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
        guint line_num = g_source_info[source_id].g_ploygon.size();
        /* A single NvDsDisplayMeta holds at most 16 line params
         * (MAX_ELEMENTS_IN_DISPLAY_META) -- clamp to stay in bounds. */
        if (line_num > 16)
            line_num = 16;
        display_meta->num_lines = line_num;
        for (guint i = 0; i < line_num; i++)
        {
            NvOSD_LineParams *line_params = &display_meta->line_params[i];
            line_params->x1 = g_source_info[source_id].g_ploygon[i].x;
            line_params->y1 = g_source_info[source_id].g_ploygon[i].y;
            /* Wrap around so the last vertex connects back to the first. */
            line_params->x2 = g_source_info[source_id].g_ploygon[(i + 1) % line_num].x;
            line_params->y2 = g_source_info[source_id].g_ploygon[(i + 1) % line_num].y;
            line_params->line_width = 2;
            line_params->line_color.red = 1.0;
            line_params->line_color.green = 0.0;
            line_params->line_color.blue = 1.0;
            line_params->line_color.alpha = 1.0;
        }
        /* Text overlay with the per-source counts. */
        NvOSD_TextParams *txt_params = &display_meta->text_params[0];
        display_meta->num_labels = 1;
        txt_params->display_text = (char *)g_malloc0(MAX_DISPLAY_LEN);
        /* vector::size() is size_t: cast to guint to match %d. */
        offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN,
                          "Person Count = %d ",
                          (guint)g_source_info[source_id].g_person_ids.size());
        if (offset >= 0 && offset < MAX_DISPLAY_LEN)
            snprintf(txt_params->display_text + offset, MAX_DISPLAY_LEN - offset,
                     "Vehicle Count = %d ",
                     (guint)g_source_info[source_id].g_vehicle_ids.size());
        /* Text position and style. */
        txt_params->x_offset = 10;
        txt_params->y_offset = 12;
        txt_params->font_params.font_name = "Serif";
        txt_params->font_params.font_size = 20;
        txt_params->font_params.font_color.red = 1.0;
        txt_params->font_params.font_color.green = 1.0;
        txt_params->font_params.font_color.blue = 1.0;
        txt_params->font_params.font_color.alpha = 1.0;
        /* Solid black text background. */
        txt_params->set_bg_clr = 1;
        txt_params->text_bg_clr.red = 0.0;
        txt_params->text_bg_clr.green = 0.0;
        txt_params->text_bg_clr.blue = 0.0;
        txt_params->text_bg_clr.alpha = 1.0;
        nvds_add_display_meta_to_frame(frame_meta, display_meta);
    }
#if 0
    g_print ("Frame Number = %d Number of objects = %d "
        "Vehicle Count = %d Person Count = %d\n",
        frame_number, num_rects, vehicle_count, person_count);
#endif
    frame_number++;
    update_frame_counter();
    return GST_PAD_PROBE_OK;
}
/*
 * Callback that handles messages posted on the GStreamer bus.  It is invoked
 * whenever a message is published; it receives the bus, the message, and the
 * user data.  It checks the message type and handles EOS (end of stream) and
 * ERROR.  On EOS it prints "End of stream" and quits the main loop; on ERROR
 * it extracts and prints the error details, then quits the main loop.  It
 * returns TRUE to keep listening for bus messages.
 */
/* GStreamer bus watch: quits the main loop on EOS or ERROR, ignores
 * everything else.  Returns TRUE to keep the watch installed.
 * Fix: gst_message_parse_error() fills in a GError pointer -- the paste
 * declared a GError by value, which does not compile. */
static gboolean bus_call(GstBus *bus, GstMessage *msg, gpointer data)
{
    GMainLoop *loop = (GMainLoop *)data;
    switch (GST_MESSAGE_TYPE(msg))
    {
    case GST_MESSAGE_EOS:
        g_print("End of stream\n");
        g_main_loop_quit(loop);
        break;
    case GST_MESSAGE_ERROR:
    {
        gchar *debug;
        GError *error = NULL;
        gst_message_parse_error(msg, &error, &debug);
        g_printerr("ERROR from element %s: %s\n",
                   GST_OBJECT_NAME(msg->src), error->message);
        if (debug)
            g_printerr("Error details: %s\n", debug);
        g_free(debug);
        g_error_free(error);
        g_main_loop_quit(loop);
        break;
    }
    default:
        break;
    }
    return TRUE;
}
/* Tracker config parsing */
/* Jump to the `done:` cleanup label when a GKeyFile call reported an error.
 * Fix: restored the '#' and the backslash line continuations the forum
 * rendering removed. */
#define CHECK_ERROR(error)                                                   \
    if (error)                                                               \
    {                                                                        \
        g_printerr("Error while parsing config file: %s\n", error->message); \
        goto done;                                                           \
    }
/* Keys recognized in the [tracker] group of the tracker config file.
 * Fix: restored '#' prefixes and ASCII quotes. */
#define CONFIG_GROUP_TRACKER "tracker"
#define CONFIG_GROUP_TRACKER_WIDTH "tracker-width"
#define CONFIG_GROUP_TRACKER_HEIGHT "tracker-height"
#define CONFIG_GROUP_TRACKER_LL_CONFIG_FILE "ll-config-file"
#define CONFIG_GROUP_TRACKER_LL_LIB_FILE "ll-lib-file"
#define CONFIG_GROUP_TRACKER_ENABLE_BATCH_PROCESS "enable-batch-process"
#define CONFIG_GPU_ID "gpu-id"
/* Resolve `file_path` relative to the directory containing `cfg_file_path`.
 * Returns `file_path` itself when it is already absolute (caller keeps
 * ownership); otherwise takes ownership of `file_path` and returns a newly
 * allocated absolute path, or NULL when the config path cannot be resolved.
 * Fix: restored the ASCII '/' character literals the forum curled. */
static gchar *
get_absolute_file_path(gchar *cfg_file_path, gchar *file_path)
{
    gchar abs_cfg_path[PATH_MAX + 1];
    gchar *abs_file_path;
    gchar *delim;
    /* Already absolute: hand it straight back. */
    if (file_path && file_path[0] == '/')
    {
        return file_path;
    }
    if (!realpath(cfg_file_path, abs_cfg_path))
    {
        g_free(file_path);
        return NULL;
    }
    // Return absolute path of config file if file_path is NULL.
    if (!file_path)
    {
        abs_file_path = g_strdup(abs_cfg_path);
        return abs_file_path;
    }
    /* Truncate after the last '/' to keep only the config file's directory. */
    delim = g_strrstr(abs_cfg_path, "/");
    *(delim + 1) = '\0';
    abs_file_path = g_strconcat(abs_cfg_path, file_path, NULL);
    g_free(file_path);
    return abs_file_path;
}
// 从配置文件中读取配置信息, 设置tracker的属性
static gboolean set_tracker_properties(GstElement *nvtracker)
{
gboolean ret = FALSE;
GError *error = NULL;
gchar **keys = NULL;
gchar **key = NULL;
GKeyFile *key_file = g_key_file_new();
if (!g_key_file_load_from_file(key_file, TRACKER_CONFIG_FILE, G_KEY_FILE_NONE,
&error))
{
g_printerr("Failed to load config file: %s\n", error->message);
return FALSE;
}
keys = g_key_file_get_keys(key_file, CONFIG_GROUP_TRACKER, NULL, &error);
CHECK_ERROR(error);
for (key = keys; *key; key++)
{
if (!g_strcmp0(*key, CONFIG_GROUP_TRACKER_WIDTH))
{
gint width =
g_key_file_get_integer(key_file, CONFIG_GROUP_TRACKER,
CONFIG_GROUP_TRACKER_WIDTH, &error);
CHECK_ERROR(error);
g_object_set(G_OBJECT(nvtracker), "tracker-width", width, NULL);
}
else if (!g_strcmp0(*key, CONFIG_GROUP_TRACKER_HEIGHT))
{
gint height =
g_key_file_get_integer(key_file, CONFIG_GROUP_TRACKER,
CONFIG_GROUP_TRACKER_HEIGHT, &error);
CHECK_ERROR(error);
g_object_set(G_OBJECT(nvtracker), "tracker-height", height, NULL);
}
else if (!g_strcmp0(*key, CONFIG_GPU_ID))
{
guint gpu_id =
g_key_file_get_integer(key_file, CONFIG_GROUP_TRACKER,
CONFIG_GPU_ID, &error);
CHECK_ERROR(error);
g_object_set(G_OBJECT(nvtracker), "gpu_id", gpu_id, NULL);
}
else if (!g_strcmp0(*key, CONFIG_GROUP_TRACKER_LL_CONFIG_FILE))
{
char *ll_config_file = get_absolute_file_path(TRACKER_CONFIG_FILE,
g_key_file_get_string(key_file,
CONFIG_GROUP_TRACKER,
CONFIG_GROUP_TRACKER_LL_CONFIG_FILE, &error));
CHECK_ERROR(error);
g_object_set(G_OBJECT(nvtracker), "ll-config-file", ll_config_file, NULL);
}
else if (!g_strcmp0(*key, CONFIG_GROUP_TRACKER_LL_LIB_FILE))
{
char *ll_lib_file = get_absolute_file_path(TRACKER_CONFIG_FILE,
g_key_file_get_string(key_file,
CONFIG_GROUP_TRACKER,
CONFIG_GROUP_TRACKER_LL_LIB_FILE, &error));
CHECK_ERROR(error);
g_object_set(G_OBJECT(nvtracker), "ll-lib-file", ll_lib_file, NULL);
}
else if (!g_strcmp0(*key, CONFIG_GROUP_TRACKER_ENABLE_BATCH_PROCESS))
{
gboolean enable_batch_process =
g_key_file_get_integer(key_file, CONFIG_GROUP_TRACKER,
CONFIG_GROUP_TRACKER_ENABLE_BATCH_PROCESS, &error);
CHECK_ERROR(error);
g_object_set(G_OBJECT(nvtracker), "enable_batch_process",
enable_batch_process, NULL);
}
else
{
g_printerr("Unknown key '%s' for group [%s]", *key,
CONFIG_GROUP_TRACKER);
}
}
ret = TRUE;
done:
if (error)
{
g_error_free(error);
}
if (keys)
{
g_strfreev(keys);
}
if (!ret)
{
g_printerr(“%s failed”, func);
}
return ret;
}
/* "child-added" handler: recursively re-attaches itself to nested decodebins
 * created inside uridecodebin, so the hook sees the actual decoder elements.
 * Fix: restored the ASCII quotes around the format string. */
static void decodebin_child_added(GstChildProxy *child_proxy, GObject *object, gchar *name, gpointer user_data)
{
    g_print("Decodebin child added: %s\n", name);
    if (strstr(name, "decodebin") != NULL)
    {
        g_signal_connect(object, "child-added", G_CALLBACK(decodebin_child_added), user_data);
    }
}
/* Forward declaration: delete_sources() re-schedules add_sources(). */
static gboolean add_sources (gpointer data);
// In cb_newpad() below, the GstPad *pad argument is the decoder's src pad.
/* "pad-added" handler for each uridecodebin: when the newly exposed pad is a
 * video pad, request the matching "sink_%u" pad on the stream muxer and link.
 * Fixes: `const gchar *name` (the '*' was eaten), `*(gint *) data` (the
 * dereference was eaten), the queried caps are now unreffed, and the smart
 * quotes are restored.
 * NOTE(review): `streammux` must be visible at file scope -- the paste
 * declares it inside main(); confirm the real file has a global. */
static void
cb_newpad (GstElement * decodebin, GstPad * pad, gpointer data)
{
  /* Query the pad's capabilities to learn its media type. */
  GstCaps *caps = gst_pad_query_caps (pad, NULL);
  const GstStructure *str = gst_caps_get_structure (caps, 0);
  /* Structure name is the media type, e.g. "video/x-raw". */
  const gchar *name = gst_structure_get_name (str);
  g_print ("decodebin new pad %s\n", name);
  if (!strncmp (name, "video", 5)) {
    /* `data` points at the slot in g_source_id_list for this source. */
    gint source_id = *(gint *) data;
    gchar pad_name[16] = { 0 };
    GstPad *sinkpad = NULL;
    g_snprintf (pad_name, 15, "sink_%u", source_id);
    /* Request the muxer sink pad matching this source id. */
    sinkpad = gst_element_get_request_pad (streammux, pad_name);
    if (gst_pad_link (pad, sinkpad) != GST_PAD_LINK_OK) {
      g_print ("Failed to link decodebin to pipeline\n");
    } else {
      g_print ("Decodebin linked to pipeline\n");
    }
    gst_object_unref (sinkpad);
  }
  /* gst_pad_query_caps() returns a reference we own: release it. */
  gst_caps_unref (caps);
}
//create new source_bin and linked src-sink by cb_newpad
static GstElement *
create_uridecode_bin (guint index, gchar * filename)
{
GstElement *bin = NULL;
gchar bin_name[16] = { };
g_print (“creating uridecodebin for [%s]\n”, filename);
g_source_id_list[index] = index;
g_snprintf (bin_name, 15, “source-bin-%02d”, index);
// 创建 uridecodebin 元素 bin
bin = gst_element_factory_make (“uridecodebin”, bin_name);
// 设置 bin 的 URI 属性为 filename
g_object_set (G_OBJECT (bin), “uri”, filename, NULL);
// 连接回调函数到 pad-added 信号g_signal_connect(gpointer object, const gchar *detailed_signal, GCallback c_handler, gpointer data);
g_signal_connect (G_OBJECT (bin), “pad-added”,
G_CALLBACK (cb_newpad), &g_source_id_list[index]);
// 连接回调函数到 child-added 信号
g_signal_connect (G_OBJECT (bin), “child-added”,
G_CALLBACK (decodebin_child_added), &g_source_id_list[index]);
g_source_enabled[index] = TRUE;
return bin;
}
static void
stop_release_source (gint source_id)
{
GstStateChangeReturn state_return;
gchar pad_name[16];
GstPad *sinkpad = NULL;
state_return =
gst_element_set_state (g_source_bin_list[source_id], GST_STATE_NULL);
switch (state_return) {
case GST_STATE_CHANGE_SUCCESS:
g_print (“STATE CHANGE SUCCESS\n\n”);
g_snprintf (pad_name, 15, “sink_%u”, source_id);
sinkpad = gst_element_get_static_pad (streammux, pad_name);
gst_pad_send_event (sinkpad, gst_event_new_eos ());
gst_pad_send_event (sinkpad, gst_event_new_flush_stop (FALSE));
gst_element_release_request_pad (streammux, sinkpad);
g_print (“STATE CHANGE SUCCESS %p\n\n”, sinkpad);
gst_object_unref (sinkpad);
gst_bin_remove (GST_BIN (pipeline), g_source_bin_list[source_id]);
source_id–;
g_num_sources–;
break;
case GST_STATE_CHANGE_FAILURE:
g_print (“STATE CHANGE FAILURE\n\n”);
break;
case GST_STATE_CHANGE_ASYNC:
g_print (“STATE CHANGE ASYNC\n\n”);
g_snprintf (pad_name, 15, “sink_%u”, source_id);
sinkpad = gst_element_get_static_pad (streammux, pad_name);
gst_pad_send_event (sinkpad, gst_event_new_eos ());
gst_pad_send_event (sinkpad, gst_event_new_flush_stop (FALSE));
gst_element_release_request_pad (streammux, sinkpad);
g_print (“STATE CHANGE ASYNC %p\n\n”, sinkpad);
gst_object_unref (sinkpad);
gst_bin_remove (GST_BIN (pipeline), g_source_bin_list[source_id]);
source_id–;
g_num_sources–;
break;
case GST_STATE_CHANGE_NO_PREROLL:
g_print (“STATE CHANGE NO PREROLL\n\n”);
break;
default:
break;
}
}
/* Timeout callback: first releases every source that already reached EOS,
 * then stops one random still-enabled source.  When no sources remain it
 * either quits the main loop or (g_run_forever) schedules add_sources().
 * Returns FALSE to stop rescheduling once all sources are gone.
 * Fix: restored the ASCII quotes the forum curled.
 * NOTE(review): `loop` must be a file-scope global for this to compile; the
 * paste only shows a local in main(). */
static gboolean
delete_sources (gpointer data)
{
  gint source_id;
  /* Pass 1: release sources that signalled EOS (guarded by eos_lock). */
  g_mutex_lock (&eos_lock);
  for (source_id = 0; source_id < MAX_NUM_SOURCES; source_id++) {
    if (g_eos_list[source_id] == TRUE && g_source_enabled[source_id] == TRUE) {
      g_source_enabled[source_id] = FALSE;
      stop_release_source (source_id);
    }
  }
  g_mutex_unlock (&eos_lock);
  if (g_num_sources == 0) {
    if (g_run_forever == FALSE) {
      g_main_loop_quit (loop);
      g_print ("All sources Stopped quitting\n");
    } else {
      /* Keep the pipeline alive: re-add sources after a delay. */
      g_timeout_add_seconds (15, add_sources, (gpointer) g_source_bin_list);
    }
    return FALSE;
  }
  /* Pass 2: pick a random still-enabled source and stop it. */
  do {
    source_id = rand () % MAX_NUM_SOURCES;
  } while (!g_source_enabled[source_id]);
  g_source_enabled[source_id] = FALSE;
  g_print ("Calling Stop %d \n", source_id);
  stop_release_source (source_id);
  if (g_num_sources == 0) {
    if (g_run_forever == FALSE) {
      g_main_loop_quit (loop);
      g_print ("All sources Stopped quitting\n");
    } else {
      g_timeout_add_seconds (15, add_sources, (gpointer) g_source_bin_list);
    }
    return FALSE;
  }
  return TRUE;
}
/*
 * Timeout callback: enable a random disabled source slot, create a new
 * linked source bin for it (create_uridecode_bin) and set it PLAYING.
 * Returns TRUE to be called again until MAX_NUM_SOURCES are active, at
 * which point delete_sources() is scheduled instead.
 * Fixes: ASCII quotes restored; `return -1` replaced by `return FALSE` --
 * this is a GSourceFunc, and -1 is truthy, so the failed callback would
 * have kept being rescheduled.
 * NOTE(review): `uri` and `pipeline` must be file-scope globals; the paste
 * lost those declarations.
 */
static gboolean
add_sources (gpointer data)
{
  gint source_id = g_num_sources;
  GstElement *source_bin;
  GstStateChangeReturn state_return;
  do {
    /* Generating random source id between 0 - MAX_NUM_SOURCES - 1,
     * which has not been enabled
     */
    source_id = rand () % MAX_NUM_SOURCES;
  } while (g_source_enabled[source_id]);
  g_source_enabled[source_id] = TRUE;
  g_print ("Calling Start %d \n", source_id);
  source_bin = create_uridecode_bin (source_id, uri);
  if (!source_bin) {
    g_printerr ("Failed to create source bin. Exiting.\n");
    return FALSE;
  }
  g_source_bin_list[source_id] = source_bin;
  gst_bin_add (GST_BIN (pipeline), source_bin);
  /* Bring the new source bin to PLAYING. */
  state_return =
      gst_element_set_state (g_source_bin_list[source_id], GST_STATE_PLAYING);
  switch (state_return) {
    case GST_STATE_CHANGE_SUCCESS:
      g_print ("STATE CHANGE SUCCESS\n\n");
      source_id++;
      break;
    case GST_STATE_CHANGE_FAILURE:
      g_print ("STATE CHANGE FAILURE\n\n");
      break;
    case GST_STATE_CHANGE_ASYNC:
      g_print ("STATE CHANGE ASYNC\n\n");
      /* Block until the asynchronous state change completes. */
      state_return =
          gst_element_get_state (g_source_bin_list[source_id], NULL, NULL,
          GST_CLOCK_TIME_NONE);
      source_id++;
      break;
    case GST_STATE_CHANGE_NO_PREROLL:
      g_print ("STATE CHANGE NO PREROLL\n\n");
      break;
    default:
      break;
  }
  g_num_sources++;
  if (g_num_sources == MAX_NUM_SOURCES) {
    /* We have reached MAX_NUM_SOURCES to be added, now stop calling this
     * function and enable calling delete_sources instead.
     */
    g_timeout_add_seconds (5, delete_sources, (gpointer) g_source_bin_list);
    return FALSE;
  }
  return TRUE;
}
// main函数
int main(int argc, char *argv)
{
GMainLoop *loop = NULL;
// 创建各种元素
guint num_sources=argc-1;
MAX_NUM_SOURCES=num_sources;
// GstElement *source_bin[num_sources];
GstElement *pipeline = NULL, *streammux = NULL, *pgie = NULL, *nvvidconv = NULL,
*nvosd = NULL, *nvtracker = NULL, *nvvidconv_postosd = NULL, *caps = NULL, *encoder = NULL, *rtppay = NULL, *sink = NULL;
// GstElement *display_sink = gst_element_factory_make("fpsdisplaysink","display_sink");
GstElement *queue1,*queue2,*queue3,*queue4,*queue5,*queue6,*queue7,*queue8,*queue9,*queue10,*queue11;
g_print("With tracker\n");
GstBus *bus = NULL;
guint bus_watch_id = 0;
GstPad *osd_sink_pad = NULL;
GstCaps *caps_filter = NULL;
guint bitrate = 5000000; // 比特率
gchar *codec = "H264"; // 设置编码格式
guint updsink_port_num = 8080; // 设置端口号
// guint updsink_port_num = 5400; // 设置端口号
guint rtsp_port_num = 8554; // 设置RTSP端口号
gchar *rtsp_path = "/ds-test"; // 设置RTSP路径
int current_device = -1;
cudaGetDevice(¤t_device);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, current_device);
/* Check input arguments */
if (argc < 3)
{
g_printerr("OR: %s <H264 filename>\n", argv[0]);
return -1;
}
/* Standard GStreamer initialization */
// 初始化GStreamer
gst_init(&argc, &argv);
loop = g_main_loop_new(NULL, FALSE);
// ==================== 创建元素 ====================
/* Create Pipeline element that will form a connection of other elements */
pipeline = gst_pipeline_new(“ds-tracker-pipeline”);
/* Create nvstreammux instance to form batches from one or more sources. */
// 创建流复用器, 用于将多个流合并为一个流 , 以及将多帧画面打包batch
streammux = gst_element_factory_make (“nvstreammux”, “stream-muxer”);
if (!pipeline || !streammux) {
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}
gst_bin_add (GST_BIN (pipeline), streammux);
g_source_bin_list = g_malloc0 (sizeof (GstElement *) * MAX_NUM_SOURCES);
uri = g_strdup (argv[1]);
for (i = 0; i < num_sources; i++) {
GstElement *source_bin = create_uridecode_bin (i, argv[i + 1]);
if (!source_bin) {
g_printerr ("Failed to create source bin. Exiting.\n");
return -1;
}
g_source_bin_list[i] = source_bin;
gst_bin_add (GST_BIN (pipeline), source_bin);
}
g_num_sources = num_sources;
pgie = gst_element_factory_make("nvinfer", "primary-nvinference-engine"); // 创建PGIE元素, 用于执行推理,TensorRT推理引擎(可用来检测和分类、分割)
nvtracker = gst_element_factory_make("nvtracker", "tracker"); // 创建tracker元素, 用于跟踪识别到的物体
nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter"); // 创建nvvidconv元素, 用于将NV12转换为RGBA
nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay"); // 创建nvosd元素, 用于在转换后的RGBA缓冲区上绘制
nvvidconv_postosd = gst_element_factory_make("nvvideoconvert", "convertor_postosd"); // 创建nvvidconv_postosd元素, 用于将NV12转换为RGBA
caps = gst_element_factory_make("capsfilter", "filter"); // 创建caps元素, 用于设置视频格式
GstElement *tiler = gst_element_factory_make("nvmultistreamtiler", "tiler"); // 创建tiler元素, 用于将多个视频流拼接为一个视频流
/* we set the tiler properties here */
guint tiler_rows, tiler_columns;
tiler_rows = (guint) sqrt (num_sources);
tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
g_object_set (G_OBJECT (tiler), “rows”, tiler_rows, “columns”, tiler_columns,
“width”, TILED_OUTPUT_WIDTH, “height”, TILED_OUTPUT_HEIGHT, NULL);
/* Add queue elements between every two elements */
queue1 = gst_element_factory_make ("queue", "queue1");
queue2 = gst_element_factory_make ("queue", "queue2");
queue3 = gst_element_factory_make ("queue", "queue3");
queue4 = gst_element_factory_make ("queue", "queue4");
queue5 = gst_element_factory_make ("queue", "queue5");
queue6 = gst_element_factory_make ("queue", "queue6");
queue7 = gst_element_factory_make ("queue", "queue7");
queue8 = gst_element_factory_make ("queue", "queue8");
queue9 = gst_element_factory_make ("queue", "queue9");
queue10 = gst_element_factory_make ("queue", "queue10");
queue11 = gst_element_factory_make ("queue", "queue11");
// 创建编码器
if (g_strcmp0(codec, "H264") == 0)
{
// 创建H264编码器
encoder = gst_element_factory_make("nvv4l2h264enc", "encoder");
printf("Creating H264 Encoder\n");
}
else if (g_strcmp0(codec, "H265") == 0)
{
// 创建H265编码器
encoder = gst_element_factory_make("nvv4l2h265enc", "encoder");
printf("Creating H265 Encoder\n");
}
// 创建rtppay元素, 用于将编码后的数据打包为RTP包
if (g_strcmp0(codec, "H264") == 0)
{
rtppay = gst_element_factory_make("rtph264pay", "rtppay");
printf("Creating H264 rtppay\n");
}
else if (g_strcmp0(codec, "H265") == 0)
{
rtppay = gst_element_factory_make("rtph265pay", "rtppay");
printf("Creating H265 rtppay\n");
}
// 创建udpsink元素, 用于将RTP包发送到网络
sink = gst_element_factory_make("udpsink", "udpsink");
if ( !pgie || !nvtracker || !nvvidconv || !nvosd || !nvvidconv_postosd ||
!caps || !encoder || !rtppay || !sink)
{
g_printerr("One element could not be created. Exiting.\n");
return -1;
}
// ==================== 设置元素参数 ====================
// 1.设置streammux元素的参数
g_object_set(G_OBJECT(streammux), "batch-size", num_sources, NULL);
g_object_set(G_OBJECT(streammux), "width", MUXER_OUTPUT_WIDTH, "height",
MUXER_OUTPUT_HEIGHT,
"batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);
// 2.设置PGIE元素的参数
guint pgie_batch_size;
g_object_set(G_OBJECT(pgie), "config-file-path", PGIE_CONFIG_FILE, NULL);
/* Override the batch-size set in the config file with the number of sources. */
g_object_get (G_OBJECT (pgie), "batch-size", &pgie_batch_size, NULL);
if (pgie_batch_size != num_sources) {
g_printerr
("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
pgie_batch_size, num_sources);
g_object_set (G_OBJECT (pgie), "batch-size", num_sources, NULL);
}
// 3.设置tracker元素的参数
set_tracker_properties(nvtracker);
// 4.设置caps元素的视频格式
caps_filter = gst_caps_from_string("video/x-raw(memory:NVMM), format=I420");
g_object_set(G_OBJECT(caps), "caps", caps_filter, NULL);
// 释放caps_filter
gst_caps_unref(caps_filter);
// 5.设置编码器的比特率
g_object_set(G_OBJECT(encoder), "bitrate", bitrate, NULL);
// 设置编码器的preset-level
if (is_aarch64())
{
g_object_set(G_OBJECT(encoder), "preset-level", 1, NULL);
g_object_set(G_OBJECT(encoder), "insert-sps-pps", 1, NULL);
}
// 6.设置udpsink元素的参数
g_object_set(G_OBJECT(sink), "host", "224.224.255.255", NULL);
g_object_set(G_OBJECT(sink), "port", updsink_port_num, NULL);
g_object_set(G_OBJECT(sink), "async", FALSE, NULL);
g_object_set(G_OBJECT(sink), "sync", 1, NULL);
// 添加消息处理器
bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
gst_object_unref(bus);
// ==================== 将元素添加到管道中 ====================
gst_bin_add_many(GST_BIN(pipeline),queue1, pgie, queue2,nvtracker, queue3,tiler,queue4,
nvvidconv, queue5, nvosd, queue6,nvvidconv_postosd, queue7,caps, queue8,encoder, queue9,rtppay, queue10,sink, NULL);
/* we link the elements together
- nvstreammux → nvinfer → nvtiler → nvvidconv → nvosd → video-renderer */
if(!gst_element_link_many(streammux,queue1, pgie, queue2,nvtracker, queue3,tiler,queue4,
nvvidconv, queue5, nvosd, queue6,nvvidconv_postosd, queue7,caps, queue8,encoder, queue9,rtppay, queue10,sink, NULL)){
g_printerr (“Elements could not be linked. Exiting.\n”);
return -1;
}
/* Lets add probe to get informed of the meta data generated, we add probe to
- the sink pad of the osd element, since by that time, the buffer would have
- had got all the metadata. */
// 添加探针,用于获取元数据
osd_sink_pad = gst_element_get_static_pad(nvtracker, "src"); // 获取nvosd元素的sink pad
if (!osd_sink_pad)
g_print("Unable to get sink pad\n");
else
// 参数:pad, 探针类型, 探针回调函数, 回调函数的参数, 回调函数的参数释放函数
gst_pad_add_probe(osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, osd_sink_pad_buffer_probe, NULL, NULL); // 添加探针
g_timeout_add(5000, perf_print_callback, &g_perf_data); // 添加定时器,用于打印性能数据
gst_object_unref(osd_sink_pad);
// ==================== 创建rtsp服务器, 用于将视频流发布到网络 ====================
GstRTSPServer *server;
GstRTSPMountPoints *mounts;
GstRTSPMediaFactory *factory;
server = gst_rtsp_server_new();
g_object_set(G_OBJECT(server), "service", g_strdup_printf("%d", rtsp_port_num), NULL);
gst_rtsp_server_attach(server, NULL);
mounts = gst_rtsp_server_get_mount_points(server);
factory = gst_rtsp_media_factory_new();
gst_rtsp_media_factory_set_launch(factory, g_strdup_printf("( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )", updsink_port_num, codec));
gst_rtsp_media_factory_set_shared(factory, TRUE);
gst_rtsp_mount_points_add_factory(mounts, rtsp_path, factory);
g_object_unref(mounts);
printf("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d%s ***\n\n", rtsp_port_num, rtsp_path);
// ==================== 启动管道 ====================
/* Set the pipeline to "playing" state */
g_print("Using file: %s\n", argv[1]);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
/* Iterate */
g_print("Running...\n");
g_timeout_add_seconds (5, add_sources, (gpointer) g_source_bin_list);
g_main_loop_run(loop);
/* Out of the main loop, clean up nicely */
g_print("Returned, stopping playback\n");
gst_element_set_state(pipeline, GST_STATE_NULL); // 设置管道状态为NULL
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(pipeline));
g_source_remove(bus_watch_id);
g_main_loop_unref(loop);
g_free (g_source_bin_list);
return 0;
}
Please use the command gst-inspect-1.0 nvv4l2decoder
to check the decoding environment first.
There is no update from you for a period, assuming this is not an issue anymore.
Hence we are closing this topic. If need further support, please open a new one.
Thanks
It seems that your platform supports hardware decoding, and it may be due to a problem with the source video that caused the decoding problem. Can you attach your video to us?
This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.