Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) Jetson NX
• DeepStream Version 6.0
• JetPack Version (valid for Jetson only) 4.6
• TensorRT Version 8.0
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type (questions, new requirements, bugs) questions
• How to reproduce the issue? (This is for bugs. Include which sample app is used, the configuration file contents, the command line used, and other details for reproducing.)
My questions:
1. Referring to the deepstream-image-meta-test.c sample, I prepared a C++ version:
main.cpp:
#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <cuda_runtime_api.h>
#include "gstnvdsmeta.h"
#ifndef PLATFORM_TEGRA
#include "gst-nvmessage.h"
#endif
#include "nvbufsurface.h"
#include "nvds_obj_encode.h"
// #include <opencv2/core/core_c.h>
// #include <opencv2/highgui/highgui_c.h>
// #include <opencv2/imgproc/imgproc_c.h>
// #include "utils.hpp"
// #include<opencv2/opencv.hpp>
#define MAX_DISPLAY_LEN 64
#define PGIE_CLASS_ID_VEHICLE 1
#define PGIE_CLASS_ID_PERSON 0
#define TRACKER_CONFIG_FILE "dstest2_tracker_config.txt"
#define MAX_TRACKING_ID_LEN 16
#define FILE_NAME_SIZE 1024 /* max length of the output JPEG file name (as in deepstream-image-meta-test) */
/* By default, OSD process-mode is set to CPU_MODE. To change mode, set as:
* 1: GPU mode (for Tesla only)
* 2: HW mode (For Jetson only)
*/
#define OSD_PROCESS_MODE 0
/* By default, OSD will not display text. To display text, change this to 1 */
#define OSD_DISPLAY_TEXT 1
/* The muxer output resolution must be set if the input streams will be of
* different resolution. The muxer will scale all the input frames to this
* resolution. */
#define MUXER_OUTPUT_WIDTH 1920
#define MUXER_OUTPUT_HEIGHT 1080
/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
* based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 40000
#define TILED_OUTPUT_WIDTH 1280
#define TILED_OUTPUT_HEIGHT 720
/* NVIDIA Decoder source pad memory feature. This feature signifies that source
* pads having this capability will push GstBuffers containing cuda buffers. */
#define GST_CAPS_FEATURES_NVMM "memory:NVMM"
// using namespace cv;
gchar pgie_classes_str[5][32] = { "Person", "Car", "Plt",
"Face", "Mask"
};
#define FPS_PRINT_INTERVAL 300
//static struct timeval start_time = { };
//static guint probe_counter = 0;
/* pgie_src_pad_buffer_probe extracts metadata on the pgie src pad and encodes
 * the selected detected objects to JPEG via the object-encoder context. */
static GstPadProbeReturn pgie_src_pad_buffer_probe(GstPad* pad, GstPadProbeInfo* info, gpointer ctx)
{
GstBuffer *buf = (GstBuffer *) info->data;
GstMapInfo inmap = GST_MAP_INFO_INIT;
if (!gst_buffer_map (buf, &inmap, GST_MAP_READ))
{
GST_ERROR ("input buffer mapinfo failed");
return GST_PAD_PROBE_OK; /* GST_FLOW_ERROR is a GstFlowReturn and does not compile in C++ here */
}
NvBufSurface *ip_surf = (NvBufSurface *) inmap.data;
gst_buffer_unmap (buf, &inmap);
NvDsObjectMeta *obj_meta = NULL;
guint vehicle_count = 0;
guint person_count = 0;
NvDsMetaList *l_frame = NULL;
NvDsMetaList *l_obj = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;l_frame = l_frame->next)
{
NvDsFrameMeta* frame_meta = (NvDsFrameMeta*)(l_frame->data);
//int offset = 0;
guint num_rects = 0;
for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;l_obj = l_obj->next)
{
obj_meta = (NvDsObjectMeta*)(l_obj->data);
NvOSD_RectParams rect = (NvOSD_RectParams)(obj_meta->rect_params);
g_print("class_id = %d [%f, %f, %f, %f, %f]\n", obj_meta->class_id, rect.top, rect.left, rect.width, rect.height, obj_meta->confidence);
if (obj_meta->class_id == 3 || obj_meta->class_id == 4)
{
num_rects += 1;
NvDsObjEncUsrArgs userData = { 0 };
/* To be set by user */
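/* saveImg = 1 makes nvds_obj_enc_process write the encoded JPEG to disk itself;
 * attachUsrMeta = 1 additionally attaches the JPEG to obj_meta as NVDS_CROP_IMAGE_META,
 * which osd_sink_pad_buffer_probe below also writes out to a file. */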
userData.saveImg = 1;
userData.attachUsrMeta = 1;
/* Set if Image scaling Required */
userData.scaleImg = FALSE;
userData.scaledWidth = 0;
userData.scaledHeight = 0;
/* Preset */
userData.objNum = num_rects;
/* Quality */
userData.quality = 80;
/*Main Function Call */
// nvds_obj_enc_process (ctx, &userData, ip_surf, obj_meta, frame_meta); // c++ error
nvds_obj_enc_process ((NvDsObjEncCtxHandle)ctx, &userData, ip_surf, obj_meta, frame_meta);
}
}
}
// nvds_obj_enc_finish (ctx);
nvds_obj_enc_finish ((NvDsObjEncCtxHandle) ctx);
return GST_PAD_PROBE_OK;
}
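/* osd_sink_pad_buffer_probe looks for the encoded JPEG attached as NVDS_CROP_IMAGE_META
 * user meta on each object and writes it to an "OSD"-prefixed file. */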
static GstPadProbeReturn osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
{
GstBuffer *buf = (GstBuffer *) info->data;
guint num_rects = 0;
NvDsObjectMeta *obj_meta = NULL;
guint vehicle_count = 0;
guint person_count = 0;
NvDsMetaList *l_frame = NULL;
NvDsMetaList *l_obj = NULL;
NvDsDisplayMeta *display_meta = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
int current_device = -1;
cudaGetDevice(&current_device);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, current_device);
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next)
{
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
int offset = 0;
for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next)
{
obj_meta = (NvDsObjectMeta *) (l_obj->data);
char fileNameString[FILE_NAME_SIZE];
const char *osd_string = "OSD";
int obj_res_width = (int) obj_meta->rect_params.width;
int obj_res_height = (int) obj_meta->rect_params.height;
if(prop.integrated)
{
obj_res_width = GST_ROUND_DOWN_2(obj_res_width);
obj_res_height = GST_ROUND_DOWN_2(obj_res_height);
}
snprintf (fileNameString, FILE_NAME_SIZE, "%s_%d_%d_%d_%s_%dx%d.jpg",
osd_string, 0, frame_meta->source_id, num_rects,
obj_meta->obj_label, obj_res_width, obj_res_height);
if (obj_meta->class_id == 3)
{
NvDsUserMetaList *usrMetaList = obj_meta->obj_user_meta_list;
FILE *file;
while (usrMetaList != NULL)
{
NvDsUserMeta *usrMetaData = (NvDsUserMeta *) usrMetaList->data;
if (usrMetaData->base_meta.meta_type == NVDS_CROP_IMAGE_META)
{
NvDsObjEncOutParams *enc_jpeg_image = (NvDsObjEncOutParams *) usrMetaData->user_meta_data;
/* Write to File */
file = fopen (fileNameString, "wb");
if (file) {
fwrite (enc_jpeg_image->outBuffer, sizeof (uint8_t), enc_jpeg_image->outLen, file);
fclose (file);
}
usrMetaList = NULL;
}
else
{
usrMetaList = usrMetaList->next;
}
}
}
}
}
return GST_PAD_PROBE_OK;
}
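/* Standard GStreamer bus handler: quit the main loop on EOS or error, log warnings. */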
static gboolean bus_call(GstBus* bus, GstMessage* msg, gpointer data)
{
GMainLoop* loop = (GMainLoop*)data;
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_EOS:
g_print("End of stream\n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_WARNING:
{
gchar* debug;
GError* error;
gst_message_parse_warning(msg, &error, &debug);
g_printerr("WARNING from element %s: %s\n",
GST_OBJECT_NAME(msg->src), error->message);
g_free(debug);
g_printerr("Warning: %s\n", error->message);
g_error_free(error);
break;
}
case GST_MESSAGE_ERROR:
{
gchar* debug;
GError* error;
gst_message_parse_error(msg, &error, &debug);
g_printerr("ERROR from element %s: %s\n",
GST_OBJECT_NAME(msg->src), error->message);
if (debug)
g_printerr("Error details: %s\n", debug);
g_free(debug);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
#ifndef PLATFORM_TEGRA
case GST_MESSAGE_ELEMENT:
{
if (gst_nvmessage_is_stream_eos(msg)) {
guint stream_id;
if (gst_nvmessage_parse_stream_eos(msg, &stream_id)) {
g_print("Got EOS from stream %d\n", stream_id);
}
}
break;
}
#endif
default:
break;
}
return TRUE;
}
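/* "pad-added" callback from uridecodebin: when a video pad backed by NVMM memory appears,
 * make it the target of the source bin's ghost "src" pad. */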
static void cb_newpad(GstElement* decodebin, GstPad* decoder_src_pad, gpointer data)
{
g_print("In cb_newpad\n");
GstCaps* caps = gst_pad_get_current_caps(decoder_src_pad);
const GstStructure* str = gst_caps_get_structure(caps, 0);
const gchar* name = gst_structure_get_name(str);
GstElement* source_bin = (GstElement*)data;
GstCapsFeatures* features = gst_caps_get_features(caps, 0);
/* Need to check if the pad created by the decodebin is for video and not
* audio. */
if (!strncmp(name, "video", 5)) {
/* Link the decodebin pad only if decodebin has picked nvidia
* decoder plugin nvdec_*. We do this by checking if the pad caps contain
* NVMM memory features. */
if (gst_caps_features_contains(features, GST_CAPS_FEATURES_NVMM)) {
/* Get the source bin ghost pad */
GstPad* bin_ghost_pad = gst_element_get_static_pad(source_bin, "src");
if (!gst_ghost_pad_set_target(GST_GHOST_PAD(bin_ghost_pad),
decoder_src_pad)) {
g_printerr("Failed to link decoder src pad to source bin ghost pad\n");
}
gst_object_unref(bin_ghost_pad);
}
else {
g_printerr("Error: Decodebin did not pick nvidia decoder plugin.\n");
}
}
}
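/* "child-added" callback: recurse into nested decodebins so their children are also tracked. */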
static void decodebin_child_added(GstChildProxy* child_proxy, GObject* object, gchar* name, gpointer user_data)
{
g_print("Decodebin child added: %s\n", name);
if (g_strrstr(name, "decodebin") == name) {
g_signal_connect(G_OBJECT(object), "child-added",
G_CALLBACK(decodebin_child_added), user_data);
}
}
static GstElement* create_source_bin(guint index, gchar* uri)
{
GstElement* bin = NULL, * uri_decode_bin = NULL;
gchar bin_name[16] = { };
g_snprintf(bin_name, 15, "source-bin-%02d", index);
/* Create a source GstBin to abstract this bin's content from the rest of the
* pipeline */
bin = gst_bin_new(bin_name);
/* Source element for reading from the uri.
* We will use decodebin and let it figure out the container format of the
* stream and the codec and plug the appropriate demux and decode plugins. */
uri_decode_bin = gst_element_factory_make("uridecodebin", "uri-decode-bin");
if (!bin || !uri_decode_bin) {
g_printerr("One element in source bin could not be created.\n");
return NULL;
}
/* We set the input uri to the source element */
g_object_set(G_OBJECT(uri_decode_bin), "uri", uri, NULL);
/* Connect to the "pad-added" signal of the decodebin which generates a
* callback once a new pad for raw data has beed created by the decodebin */
g_signal_connect(G_OBJECT(uri_decode_bin), "pad-added",
G_CALLBACK(cb_newpad), bin);
g_signal_connect(G_OBJECT(uri_decode_bin), "child-added",
G_CALLBACK(decodebin_child_added), bin);
gst_bin_add(GST_BIN(bin), uri_decode_bin);
/* We need to create a ghost pad for the source bin which will act as a proxy
* for the video decoder src pad. The ghost pad will not have a target right
* now. Once the decode bin creates the video decoder and generates the
* cb_newpad callback, we will set the ghost pad target to the video decoder
* src pad. */
if (!gst_element_add_pad(bin, gst_ghost_pad_new_no_target("src",
GST_PAD_SRC))) {
g_printerr("Failed to add ghost pad in source bin\n");
return NULL;
}
return bin;
}
#define CHECK_ERROR(error) \
if (error) { \
g_printerr ("Error while parsing config file: %s\n", error->message); \
goto done; \
}
#define CONFIG_GROUP_TRACKER "tracker"
#define CONFIG_GROUP_TRACKER_WIDTH "tracker-width"
#define CONFIG_GROUP_TRACKER_HEIGHT "tracker-height"
#define CONFIG_GROUP_TRACKER_LL_CONFIG_FILE "ll-config-file"
#define CONFIG_GROUP_TRACKER_LL_LIB_FILE "ll-lib-file"
#define CONFIG_GROUP_TRACKER_ENABLE_BATCH_PROCESS "enable-batch-process"
#define CONFIG_GPU_ID "gpu-id"
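/* Return file_path as an absolute path, resolving relative paths against the directory
 * containing cfg_file_path. Frees the passed-in file_path unless it is already absolute. */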
static gchar * get_absolute_file_path (gchar *cfg_file_path, gchar *file_path)
{
gchar abs_cfg_path[PATH_MAX + 1];
gchar *abs_file_path;
gchar *delim;
if (file_path && file_path[0] == '/') {
return file_path;
}
if (!realpath (cfg_file_path, abs_cfg_path)) {
g_free (file_path);
return NULL;
}
// Return absolute path of config file if file_path is NULL.
if (!file_path) {
abs_file_path = g_strdup (abs_cfg_path);
return abs_file_path;
}
delim = g_strrstr (abs_cfg_path, "/");
*(delim + 1) = '\0';
abs_file_path = g_strconcat (abs_cfg_path, file_path, NULL);
g_free (file_path);
return abs_file_path;
}
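/* Read the [tracker] group from TRACKER_CONFIG_FILE and apply each supported key
 * as a property on the nvtracker element. */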
static gboolean
set_tracker_properties (GstElement *nvtracker)
{
gboolean ret = FALSE;
GError *error = NULL;
gchar **keys = NULL;
gchar **key = NULL;
GKeyFile *key_file = g_key_file_new ();
if (!g_key_file_load_from_file (key_file, TRACKER_CONFIG_FILE, G_KEY_FILE_NONE,
&error)) {
g_printerr ("Failed to load config file: %s\n", error->message);
return FALSE;
}
keys = g_key_file_get_keys (key_file, CONFIG_GROUP_TRACKER, NULL, &error);
CHECK_ERROR (error);
for (key = keys; *key; key++) {
if (!g_strcmp0 (*key, CONFIG_GROUP_TRACKER_WIDTH)) {
gint width =
g_key_file_get_integer (key_file, CONFIG_GROUP_TRACKER,
CONFIG_GROUP_TRACKER_WIDTH, &error);
CHECK_ERROR (error);
g_object_set (G_OBJECT (nvtracker), "tracker-width", width, NULL);
} else if (!g_strcmp0 (*key, CONFIG_GROUP_TRACKER_HEIGHT)) {
gint height =
g_key_file_get_integer (key_file, CONFIG_GROUP_TRACKER,
CONFIG_GROUP_TRACKER_HEIGHT, &error);
CHECK_ERROR (error);
g_object_set (G_OBJECT (nvtracker), "tracker-height", height, NULL);
} else if (!g_strcmp0 (*key, CONFIG_GPU_ID)) {
guint gpu_id =
g_key_file_get_integer (key_file, CONFIG_GROUP_TRACKER,
CONFIG_GPU_ID, &error);
CHECK_ERROR (error);
g_object_set (G_OBJECT (nvtracker), "gpu_id", gpu_id, NULL);
} else if (!g_strcmp0 (*key, CONFIG_GROUP_TRACKER_LL_CONFIG_FILE)) {
char* ll_config_file = get_absolute_file_path (TRACKER_CONFIG_FILE,
g_key_file_get_string (key_file,
CONFIG_GROUP_TRACKER,
CONFIG_GROUP_TRACKER_LL_CONFIG_FILE, &error));
CHECK_ERROR (error);
g_object_set (G_OBJECT (nvtracker), "ll-config-file", ll_config_file, NULL);
} else if (!g_strcmp0 (*key, CONFIG_GROUP_TRACKER_LL_LIB_FILE)) {
char* ll_lib_file = get_absolute_file_path (TRACKER_CONFIG_FILE,
g_key_file_get_string (key_file,
CONFIG_GROUP_TRACKER,
CONFIG_GROUP_TRACKER_LL_LIB_FILE, &error));
CHECK_ERROR (error);
g_object_set (G_OBJECT (nvtracker), "ll-lib-file", ll_lib_file, NULL);
} else if (!g_strcmp0 (*key, CONFIG_GROUP_TRACKER_ENABLE_BATCH_PROCESS)) {
gboolean enable_batch_process =
g_key_file_get_integer (key_file, CONFIG_GROUP_TRACKER,
CONFIG_GROUP_TRACKER_ENABLE_BATCH_PROCESS, &error);
CHECK_ERROR (error);
g_object_set (G_OBJECT (nvtracker), "enable_batch_process",
enable_batch_process, NULL);
} else {
g_printerr ("Unknown key '%s' for group [%s]", *key,
CONFIG_GROUP_TRACKER);
}
}
ret = TRUE;
done:
if (error) {
g_error_free (error);
}
if (keys) {
g_strfreev (keys);
}
if (!ret) {
g_printerr ("%s failed", __func__);
}
return ret;
}
int main(int argc, char* argv[])
{
GMainLoop* loop = NULL;
GstElement* pipeline = NULL, * streammux = NULL, * sink = NULL, * pgie = NULL, * nvtracker = NULL,
* queue1, * queue2, * queue3, * queue4, * queue5, * nvvidconv = NULL,
* nvosd = NULL, * tiler = NULL;
GstElement* transform = NULL;
GstBus* bus = NULL;
guint bus_watch_id;
GstPad* tiler_src_pad = NULL;
GstPad* pgie_src_pad = NULL;
GstPad* osd_sink_pad = NULL;
guint i, num_sources;
guint tiler_rows, tiler_columns;
guint pgie_batch_size;
int current_device = -1;
cudaGetDevice(&current_device);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, current_device);
/* Check input arguments */
if (argc < 2) {
g_printerr("Usage: %s <uri1> [uri2] ... [uriN] \n", argv[0]);
return -1;
}
num_sources = argc - 1;
/* Standard GStreamer initialization */
gst_init(&argc, &argv);
loop = g_main_loop_new(NULL, FALSE);
/* Create gstreamer elements */
/* Create Pipeline element that will form a connection of other elements */
pipeline = gst_pipeline_new("dstest3-pipeline");
/* Create nvstreammux instance to form batches from one or more sources. */
streammux = gst_element_factory_make("nvstreammux", "stream-muxer");
if (!pipeline || !streammux) {
g_printerr("One element could not be created. Exiting.\n");
return -1;
}
gst_bin_add(GST_BIN(pipeline), streammux);
for (i = 0; i < num_sources; i++) {
GstPad* sinkpad, * srcpad;
gchar pad_name[16] = { };
GstElement* source_bin = create_source_bin(i, argv[i + 1]);
if (!source_bin) {
g_printerr("Failed to create source bin. Exiting.\n");
return -1;
}
gst_bin_add(GST_BIN(pipeline), source_bin);
g_snprintf(pad_name, 15, "sink_%u", i);
sinkpad = gst_element_get_request_pad(streammux, pad_name);
if (!sinkpad) {
g_printerr("Streammux request sink pad failed. Exiting.\n");
return -1;
}
srcpad = gst_element_get_static_pad(source_bin, "src");
if (!srcpad) {
g_printerr("Failed to get src pad of source bin. Exiting.\n");
return -1;
}
if (gst_pad_link(srcpad, sinkpad) != GST_PAD_LINK_OK) {
g_printerr("Failed to link source bin to stream muxer. Exiting.\n");
return -1;
}
gst_object_unref(srcpad);
gst_object_unref(sinkpad);
}
/* Use nvinfer to infer on batched frame. */
pgie = gst_element_factory_make("nvinfer", "primary-nvinference-engine");
/* We need to have a tracker to track the identified objects */
nvtracker = gst_element_factory_make("nvtracker", "tracker");
/* Set necessary properties of the tracker element. */
if (!set_tracker_properties(nvtracker)) {
g_printerr("Failed to set tracker properties. Exiting.\n");
return -1;
}
/* Add queue elements between every two elements */
queue1 = gst_element_factory_make("queue", "queue1");
queue2 = gst_element_factory_make("queue", "queue2");
queue3 = gst_element_factory_make("queue", "queue3");
queue4 = gst_element_factory_make("queue", "queue4");
queue5 = gst_element_factory_make("queue", "queue5");
/* Use nvtiler to composite the batched frames into a 2D tiled array based
* on the source of the frames. */
tiler = gst_element_factory_make("nvmultistreamtiler", "nvtiler");
/* Use convertor to convert from NV12 to RGBA as required by nvosd */
nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter");
/* Create OSD to draw on the converted RGBA buffer */
nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay");
/* Finally render the osd output */
if (prop.integrated) {
transform = gst_element_factory_make("nvegltransform", "nvegl-transform");
}
sink = gst_element_factory_make("nveglglessink", "nvvideo-renderer");
if (!pgie || !tiler || !nvvidconv || !nvosd || !sink) {
g_printerr("One element could not be created. Exiting.\n");
return -1;
}
if (!transform && prop.integrated) {
g_printerr("One tegra element could not be created. Exiting.\n");
return -1;
}
g_object_set(G_OBJECT(streammux), "batch-size", num_sources, NULL);
g_object_set(G_OBJECT(streammux), "live-source", 1, NULL);
g_object_set(G_OBJECT(streammux), "width", MUXER_OUTPUT_WIDTH, "height",
MUXER_OUTPUT_HEIGHT,
"batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);
/* Configure the nvinfer element using the nvinfer config file. */
g_object_set(G_OBJECT(pgie), "config-file-path", "cfgs/primary-inference-config.txt", NULL);
/* Override the batch-size set in the config file with the number of sources. */
g_object_get(G_OBJECT(pgie), "batch-size", &pgie_batch_size, NULL);
if (pgie_batch_size != num_sources) {
g_printerr
("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
pgie_batch_size, num_sources);
//g_object_set (G_OBJECT (pgie), "batch-size", num_sources, NULL);
g_object_set(G_OBJECT(pgie), "batch-size", 1, NULL);
}
tiler_rows = (guint)sqrt(num_sources);
tiler_columns = (guint)ceil(1.0 * num_sources / tiler_rows);
/* we set the tiler properties here */
g_object_set(G_OBJECT(tiler), "rows", tiler_rows, "columns", tiler_columns,
"width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);
g_object_set(G_OBJECT(nvosd), "process-mode", OSD_PROCESS_MODE,
"display-text", OSD_DISPLAY_TEXT, NULL);
g_object_set(G_OBJECT(sink), "qos", 0, NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
gst_object_unref(bus);
/* Set up the pipeline */
/* we add all elements into the pipeline */
if (prop.integrated) {
gst_bin_add_many(GST_BIN(pipeline), queue1, pgie, nvtracker, queue2, tiler, queue3,
nvvidconv, queue4, nvosd, queue5, transform, sink, NULL);
/* we link the elements together
* nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> video-renderer */
if (!gst_element_link_many(streammux, queue1, pgie, nvtracker, queue2, tiler, queue3,
nvvidconv, queue4, nvosd, queue5, transform, sink, NULL)) {
g_printerr("Elements could not be linked. Exiting.\n");
return -1;
}
}
else {
gst_bin_add_many(GST_BIN(pipeline), queue1, pgie, nvtracker, queue2, tiler, queue3,
nvvidconv, queue4, nvosd, queue5, sink, NULL);
/* we link the elements together
* nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> video-renderer */
if (!gst_element_link_many(streammux, queue1, pgie, nvtracker, queue2, tiler, queue3,
nvvidconv, queue4, nvosd, queue5, sink, NULL)) {
g_printerr("Elements could not be linked. Exiting.\n");
return -1;
}
}
/* Add probes: one on the pgie src pad to encode object crops, and one on the
 * sink pad of the osd element, since by that time the buffer carries all the metadata. */
pgie_src_pad = gst_element_get_static_pad(pgie, "src");
/* Create context for object encoding */
NvDsObjEncCtxHandle obj_ctx_handle = nvds_obj_enc_create_context ();
if (!obj_ctx_handle)
{
g_print ("Unable to create context\n");
return -1;
}
if (!pgie_src_pad)
g_print("Unable to get src pad\n");
else
gst_pad_add_probe(pgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
pgie_src_pad_buffer_probe, (gpointer)obj_ctx_handle, NULL);
gst_object_unref(pgie_src_pad);
osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
if (!osd_sink_pad)
g_print ("Unable to get sink pad\n");
else
gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
osd_sink_pad_buffer_probe, (gpointer)obj_ctx_handle, NULL);
gst_object_unref (osd_sink_pad);
/* Set the pipeline to "playing" state */
g_print("Now playing:");
for (i = 0; i < num_sources; i++) {
g_print(" %s,", argv[i + 1]);
}
g_print("\n");
gst_element_set_state(pipeline, GST_STATE_PLAYING);
/* Wait till pipeline encounters an error or EOS */
g_print("Running...\n");
g_main_loop_run(loop);
/* Out of the main loop, clean up nicely */
g_print("Returned, stopping playback\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(pipeline));
g_source_remove(bus_watch_id);
g_main_loop_unref(loop);
return 0;
}
2. nvds_obj_enc_process (ctx, &userData, ip_surf, obj_meta, frame_meta) and nvds_obj_enc_finish (ctx) gave compilation errors:
g++ -c -o main.o -DPLATFORM_TEGRA -I/opt/nvidia/deepstream/deepstream/sources/includes -I /usr/local/cuda-10.2/include -pthread -I/usr/include/gstreamer-1.0 -I/usr/include/glib-2.0 -I/usr/lib/aarch64-linux-gnu/glib-2.0/include -I/usr/local/include/opencv4 -g -w -O -Wno-deprecated main.cpp
main.cpp: In function ‘GstPadProbeReturn pgie_src_pad_buffer_probe(GstPad*, GstPadProbeInfo*, gpointer)’:
main.cpp:84:16: error: cannot convert ‘GstFlowReturn’ to ‘GstPadProbeReturn’ in return
return GST_FLOW_ERROR;
^~~~~~~~~~~~~~
Makefile:57: recipe for target 'main.o' failed
make: *** [main.o] Error 1
nvidia@nx:~/workspace/xjc$ make
g++ -c -o main.o -DPLATFORM_TEGRA -I/opt/nvidia/deepstream/deepstream/sources/includes -I /usr/local/cuda-10.2/include -pthread -I/usr/include/gstreamer-1.0 -I/usr/include/glib-2.0 -I/usr/lib/aarch64-linux-gnu/glib-2.0/include -I/usr/local/include/opencv4 -g -w -O -Wno-deprecated main.cpp
main.cpp: In function ‘GstPadProbeReturn pgie_src_pad_buffer_probe(GstPad*, GstPadProbeInfo*, gpointer)’:
main.cpp:84:16: error: cannot convert ‘GstFlowReturn’ to ‘GstPadProbeReturn’ in return
return GST_FLOW_ERROR;
^~~~~~~~~~~~~~
main.cpp:128:84: error: invalid conversion from ‘gpointer {aka void*}’ to ‘NvDsObjEncCtxHandle {aka _NvDsObjEncCtx*}’ [-fpermissive]
nvds_obj_enc_process (ctx, &userData, ip_surf, obj_meta, frame_meta); // c++ error
^
In file included from main.cpp:16:0:
/opt/nvidia/deepstream/deepstream/sources/includes/nvds_obj_encode.h:88:6: note: initializing argument 1 of ‘bool nvds_obj_enc_process(NvDsObjEncCtxHandle, NvDsObjEncUsrArgs*, NvBufSurface*, NvDsObjectMeta*, NvDsFrameMeta*)’
bool nvds_obj_enc_process (NvDsObjEncCtxHandle, NvDsObjEncUsrArgs *,
^~~~~~~~~~~~~~~~~~~~
main.cpp:133:29: error: invalid conversion from ‘gpointer {aka void*}’ to ‘NvDsObjEncCtxHandle {aka _NvDsObjEncCtx*}’ [-fpermissive]
nvds_obj_enc_finish (ctx);
^
In file included from main.cpp:16:0:
/opt/nvidia/deepstream/deepstream/sources/includes/nvds_obj_encode.h:92:6: note: initializing argument 1 of ‘void nvds_obj_enc_finish(NvDsObjEncCtxHandle)’
void nvds_obj_enc_finish (NvDsObjEncCtxHandle);
^~~~~~~~~~~~~~~~~~~
Makefile:57: recipe for target 'main.o' failed
make: *** [main.o] Error 1
I then changed the calls to nvds_obj_enc_process ((NvDsObjEncCtxHandle) ctx, &userData, ip_surf, obj_meta, frame_meta) and nvds_obj_enc_finish ((NvDsObjEncCtxHandle) ctx), and the code compiled successfully.
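That is expected: the DeepStream sample is C code, and C silently converts void * (the probe's gpointer user data) to the typed NvDsObjEncCtxHandle, while C++ requires an explicit cast. A minimal standalone illustration of the difference (hypothetical names, nothing DeepStream-specific; it compiles as C, but fails under g++ without the cast):

#include <stddef.h>

typedef struct _Ctx Ctx;            /* opaque handle, similar to NvDsObjEncCtxHandle */

static void use_ctx (Ctx *c) { (void) c; }

int main (void)
{
  void *user_data = NULL;           /* what GStreamer passes to a pad probe as gpointer */
  /* use_ctx (user_data);              accepted by a C compiler, rejected by g++ */
  use_ctx ((Ctx *) user_data);      /* the explicit cast compiles in both C and C++ */
  return 0;
}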
3. Final question: each object image file is saved twice.
Please help me confirm what is wrong. Why are the image files saved twice?