• Hardware Platform (Jetson / GPU)
jetson
• DeepStream Version
7.0
• JetPack Version (valid for Jetson only)
6.0
• TensorRT Version
8.6.2.3
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type( questions, new requirements, bugs)
How can I lower the FPS to reduce GPU usage in deepstream analytics test
Are you referring to deepstream-nvdsanalytics-test? Here are some solutions to lower the fps.
- You can set nvv4l2decoder's skip-frames property to decode_key; the decoder will then only output I-frames.
- you can add videorate plugin to control fps. please refer to the following cmd.
gst-launch-1.0 -v uridecodebin3 uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! nvvideoconvert ! videorate ! 'video/x-raw,framerate=20/1' ! fpsdisplaysink text-overlay=0 video-sink=fakesink
Thank you for the response. If I run it with deepstream_app -c deepstream_app_config.txt
, how can I implement the video rate? I’m using an RTSP source.
deepstream-app is open source. You can modify the code to customize it.
- If you want to control the video rate with nvv4l2decoder, you can iterate over the decodebin (which contains nvv4l2decoder) using gst_bin_iterate_elements, then set its skip-frames property. The decodebin is created in create_rtsp_src_bin of /opt/nvidia/deepstream/deepstream/sources/apps/apps-common/src/deepstream_source_bin.c.
- If you want to use videorate, you can add "nvvideoconvert → videorate → capsfilter" after the decodebin. Please refer to my last comment.
Thank you for responding. Can I implement video/x-raw, framerate=8/1
in the DeepStream app? Can you show me which part I need to change?
Sorry for the late reply! please refer to this pipeline.
gst-launch-1.0 -v filesrc location=/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 ! qtdemux ! h264parse ! nvv4l2decoder ! videorate ! capsfilter caps="video/x-raw(memory:NVMM),framerate=1/1, format=NV12" ! fpsdisplaysink text-overlay=0 video-sink=fakesink sync=1
Please refer to my last comment. You can modify create_rtsp_src_bin of /opt/nvidia/deepstream/deepstream/sources/apps/apps-common/src/deepstream_source_bin.c to add the framerate.
Thank you for responding. May I know how to adjust NvDsSourceConfig
? Where is the path to that file?
NvDsSourceConfig corresponds to the source configurations, for example [source0] in /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/source4_1080p_dec_infer-resnet_tracker_sgie_tiled_display_int8.txt
here are the changes i made
/**
 * Create an RTSP source bin:
 *
 *   rtspsrc -> tee (pre-decode) -> queue -> decodebin -> queue ->
 *   videorate -> capsfilter -> tee (post-decode) ->
 *   [dewarper bin | nvvideoconvert -> capsfilter]
 *
 * The pre-decode tee additionally feeds the smart-record bin when enabled.
 * The videorate/capsfilter pair throttles the decoded stream to
 * config->framerate fps.  NOTE(review): this relies on a 'framerate' key
 * being parsed into NvDsSourceConfig; without parser support the key is
 * reported as unknown and config->framerate stays 0 (passthrough).
 *
 * @param config  Source configuration for this [sourceN] group.
 * @param bin     Source bin to populate (bin->bin must already exist).
 * @return TRUE on success, FALSE on failure (an error message is logged).
 */
static gboolean
create_rtsp_src_bin (NvDsSourceConfig * config, NvDsSrcBin * bin)
{
  NvDsSRContext *ctx = NULL;
  gboolean ret = FALSE;
  gchar elem_name[50];
  GstCaps *caps = NULL;
  GstCapsFeatures *feature = NULL;

  bin->config = config;
  bin->latency = config->latency;
  bin->udp_buffer_size = config->udp_buffer_size;
  bin->rtsp_reconnect_interval_sec = config->rtsp_reconnect_interval_sec;
  bin->rtsp_reconnect_attempts = config->rtsp_reconnect_attempts;
  bin->num_rtsp_reconnects = 0;

  g_snprintf (elem_name, sizeof (elem_name), "src_elem%d", bin->bin_id);
  bin->src_elem = gst_element_factory_make ("rtspsrc", elem_name);
  if (!bin->src_elem) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }
  g_signal_connect (G_OBJECT (bin->src_elem), "select-stream",
      G_CALLBACK (cb_rtspsrc_select_stream), bin);

  if (config->udp_buffer_size) {
    g_object_set (G_OBJECT (bin->src_elem), "udp-buffer-size",
        config->udp_buffer_size, NULL);
  }

  g_object_set (G_OBJECT (bin->src_elem), "location", config->uri, NULL);
  g_object_set (G_OBJECT (bin->src_elem), "latency", config->latency, NULL);
  g_object_set (G_OBJECT (bin->src_elem), "drop-on-latency", TRUE, NULL);
  configure_source_for_ntp_sync (bin->src_elem);

  // 0x4 for TCP and 0x7 for All (UDP/UDP-MCAST/TCP)
  if ((config->select_rtp_protocol == GST_RTSP_LOWER_TRANS_TCP)
      || (config->select_rtp_protocol == (GST_RTSP_LOWER_TRANS_UDP |
              GST_RTSP_LOWER_TRANS_UDP_MCAST | GST_RTSP_LOWER_TRANS_TCP))) {
    g_object_set (G_OBJECT (bin->src_elem), "protocols",
        config->select_rtp_protocol, NULL);
    GST_DEBUG_OBJECT (bin->src_elem,
        "RTP Protocol=0x%x (0x4=TCP and 0x7=UDP,TCP,UDPMCAST)----\n",
        config->select_rtp_protocol);
  }
  g_signal_connect (G_OBJECT (bin->src_elem), "pad-added",
      G_CALLBACK (cb_newpad3), bin);

  g_snprintf (elem_name, sizeof (elem_name), "tee_rtsp_elem%d", bin->bin_id);
  bin->tee_rtsp_pre_decode = gst_element_factory_make ("tee", elem_name);
  if (!bin->tee_rtsp_pre_decode) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }

  g_snprintf (elem_name, sizeof (elem_name), "tee_rtsp_post_decode_elem%d",
      bin->bin_id);
  bin->tee_rtsp_post_decode = gst_element_factory_make ("tee", elem_name);
  if (!bin->tee_rtsp_post_decode) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }

  // videorate element that throttles the decoded stream's framerate.
  g_snprintf (elem_name, sizeof (elem_name), "videorate_elem%d", bin->bin_id);
  bin->videorate = gst_element_factory_make ("videorate", elem_name);
  if (!bin->videorate) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }

  // capsfilter that tells videorate which output framerate to produce.
  g_snprintf (elem_name, sizeof (elem_name), "videorate_cap_filter%d",
      bin->bin_id);
  bin->videorate_cap_filter = gst_element_factory_make ("capsfilter",
      elem_name);
  if (!bin->videorate_cap_filter) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }

  // BUGFIX: use the configured framerate instead of a hard-coded 8/1, and
  // request NVMM memory caps so negotiation succeeds with the NVMM buffers
  // coming out of the hardware decoder (cf. the reference pipeline caps
  // "video/x-raw(memory:NVMM),framerate=...").  When framerate is unset
  // (<= 0) the capsfilter keeps ANY caps and is a passthrough.
  if (config->framerate > 0) {
    GstCaps *framerate_caps = gst_caps_new_simple ("video/x-raw",
        "framerate", GST_TYPE_FRACTION, config->framerate, 1,
        NULL);
    gst_caps_set_features (framerate_caps, 0,
        gst_caps_features_new ("memory:NVMM", NULL));
    g_object_set (G_OBJECT (bin->videorate_cap_filter), "caps",
        framerate_caps, NULL);
    gst_caps_unref (framerate_caps);
  }

  if (config->smart_record) {
    NvDsSRInitParams params = { 0 };
    params.containerType = (NvDsSRContainerType) config->smart_rec_container;
    if (config->file_prefix)
      params.fileNamePrefix =
          g_strdup_printf ("%s_%d", config->file_prefix, config->camera_id);
    params.dirpath = config->dir_path;
    params.cacheSize = config->smart_rec_cache_size;
    params.defaultDuration = config->smart_rec_def_duration;
    params.callback = smart_record_callback;
    if (NvDsSRCreate (&ctx, &params) != NVDSSR_STATUS_OK) {
      NVGSTDS_ERR_MSG_V ("Failed to create smart record bin");
      g_free (params.fileNamePrefix);
      goto done;
    }
    g_free (params.fileNamePrefix);
    gst_bin_add (GST_BIN (bin->bin), ctx->recordbin);
    bin->recordCtx = (gpointer) ctx;
  }

  g_snprintf (elem_name, sizeof (elem_name), "dec_que%d", bin->bin_id);
  bin->dec_que = gst_element_factory_make ("queue", elem_name);
  if (!bin->dec_que) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }

  // The reconnect watchdog monitors buffers (or EOS when reconnect is off)
  // flowing into the decode queue.
  if (bin->rtsp_reconnect_interval_sec > 0) {
    NVGSTDS_ELEM_ADD_PROBE (bin->rtspsrc_monitor_probe, bin->dec_que,
        "sink", rtspsrc_monitor_probe_func, GST_PAD_PROBE_TYPE_BUFFER, bin);
    install_mux_eosmonitor_probe = TRUE;
  } else {
    NVGSTDS_ELEM_ADD_PROBE (bin->rtspsrc_monitor_probe, bin->dec_que,
        "sink", rtspsrc_monitor_probe_func,
        GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM, bin);
  }

  g_snprintf (elem_name, sizeof (elem_name), "decodebin_elem%d", bin->bin_id);
  bin->decodebin = gst_element_factory_make ("decodebin", elem_name);
  if (!bin->decodebin) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }
  g_signal_connect (G_OBJECT (bin->decodebin), "pad-added",
      G_CALLBACK (cb_newpad2), bin);
  g_signal_connect (G_OBJECT (bin->decodebin), "child-added",
      G_CALLBACK (decodebin_child_added), bin);

  // NOTE: despite its name, cap_filter is a queue fed by decodebin's
  // dynamically-added pad (linked in cb_newpad2).
  g_snprintf (elem_name, sizeof (elem_name), "src_que%d", bin->bin_id);
  bin->cap_filter = gst_element_factory_make (NVDS_ELEM_QUEUE, elem_name);
  if (!bin->cap_filter) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }

  g_mutex_init (&bin->bin_lock);
  if (config->dewarper_config.enable) {
    if (!create_dewarper_bin (&config->dewarper_config, &bin->dewarper_bin)) {
      g_print ("Failed to create dewarper bin \n");
      goto done;
    }
    gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem,
        bin->tee_rtsp_pre_decode,
        bin->dec_que,
        bin->decodebin,
        bin->videorate,
        bin->videorate_cap_filter,
        bin->cap_filter,
        bin->tee_rtsp_post_decode, bin->dewarper_bin.bin, NULL);
  } else {
    g_snprintf (elem_name, sizeof (elem_name), "nvvidconv_elem%d",
        bin->bin_id);
    bin->nvvidconv = gst_element_factory_make (NVDS_ELEM_VIDEO_CONV,
        elem_name);
    if (!bin->nvvidconv) {
      NVGSTDS_ERR_MSG_V ("Could not create element 'nvvidconv_elem'");
      goto done;
    }
    g_object_set (G_OBJECT (bin->nvvidconv), "gpu-id", config->gpu_id,
        "nvbuf-memory-type", config->nvbuf_memory_type, NULL);

    // BUGFIX: create the capsfilter before building the caps so `caps`
    // cannot leak on the failure path, and report the real element name
    // (the old message said 'queue').
    bin->cap_filter1 =
        gst_element_factory_make (NVDS_ELEM_CAPS_FILTER,
        "src_cap_filter_nvvidconv");
    if (!bin->cap_filter1) {
      NVGSTDS_ERR_MSG_V ("Could not create 'src_cap_filter_nvvidconv'");
      goto done;
    }
    if (config->video_format) {
      caps = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING,
          config->video_format, NULL);
    } else {
      caps = gst_caps_new_empty_simple ("video/x-raw");
    }
    feature = gst_caps_features_new ("memory:NVMM", NULL);
    gst_caps_set_features (caps, 0, feature);
    g_object_set (G_OBJECT (bin->cap_filter1), "caps", caps, NULL);
    gst_caps_unref (caps);

    gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem,
        bin->tee_rtsp_pre_decode,
        bin->dec_que,
        bin->decodebin,
        bin->videorate,
        bin->videorate_cap_filter,
        bin->cap_filter,
        bin->tee_rtsp_post_decode, bin->nvvidconv, bin->cap_filter1, NULL);
  }

  link_element_to_tee_src_pad (bin->tee_rtsp_pre_decode, bin->dec_que);
  NVGSTDS_LINK_ELEMENT (bin->dec_que, bin->decodebin);

  if (ctx)
    link_element_to_tee_src_pad (bin->tee_rtsp_pre_decode, ctx->recordbin);

  // Insert videorate between the post-decode queue and the post-decode tee.
  NVGSTDS_LINK_ELEMENT (bin->cap_filter, bin->videorate);
  NVGSTDS_LINK_ELEMENT (bin->videorate, bin->videorate_cap_filter);
  NVGSTDS_LINK_ELEMENT (bin->videorate_cap_filter, bin->tee_rtsp_post_decode);

  if (config->dewarper_config.enable) {
    link_element_to_tee_src_pad (bin->tee_rtsp_post_decode,
        bin->dewarper_bin.bin);
    NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->dewarper_bin.bin, "src");
  } else {
    link_element_to_tee_src_pad (bin->tee_rtsp_post_decode, bin->nvvidconv);
    NVGSTDS_LINK_ELEMENT (bin->nvvidconv, bin->cap_filter1);
    NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->cap_filter1, "src");
  }

  ret = TRUE;

  g_timeout_add (1000, watch_source_status, bin);

  // Enable local start / stop events in addition to the one
  // received from the server.
  if (config->smart_record == 2) {
    if (bin->config->smart_rec_interval)
      g_timeout_add (bin->config->smart_rec_interval * 1000,
          smart_record_event_generator, bin);
    else
      g_timeout_add (10000, smart_record_event_generator, bin);
  }

  GST_CAT_DEBUG (NVDS_APP,
      "Decode bin created. Waiting for a new pad from decodebin to link");

done:
  if (!ret) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ret;
}
/**
 * Create an audio source bin for WAV files or an ALSA capture device:
 *
 *   WAV : multifilesrc -> wavparse -> audioconvert -> audioresample -> capsfilter
 *   ALSA: alsasrc                  -> audioconvert -> audioresample -> capsfilter
 *
 * The trailing capsfilter forces the stream to config->input_audio_rate
 * (the rate copied from the PGIE configuration, per the header comment on
 * NvDsSourceConfig::input_audio_rate).
 *
 * @param config  Source configuration; type selects WAV vs. ALSA.
 * @param bin     Source bin to populate (bin->bin must already exist).
 * @return TRUE on success, FALSE on failure (an error message is logged).
 */
static gboolean
create_audiodecode_src_bin (NvDsSourceConfig * config, NvDsSrcBin * bin)
{
  gboolean ret = FALSE;
  guint const MAX_CAPS_LEN = 256;
  gchar caps_audio_resampler[MAX_CAPS_LEN];
  GstCaps *caps = NULL;
  bin->config = config;
  // These inputs are handled as non-live sources.
  config->live_source = FALSE;
  if (config->type == NV_DS_SOURCE_AUDIO_WAV) {
    bin->src_elem =
        gst_element_factory_make (NVDS_ELEM_SRC_MULTIFILE, "src_elem");
    if (!bin->src_elem) {
      NVGSTDS_ERR_MSG_V ("Could not create element 'src_elem'");
      goto done;
    }
    g_object_set (G_OBJECT (bin->src_elem), "location", config->uri, NULL);
    g_object_set (G_OBJECT (bin->src_elem), "loop", config->loop, NULL);
    // "decodebin" here is actually a wavparse element; ignore-length lets
    // playback continue past the declared WAV length when looping.
    bin->decodebin =
        gst_element_factory_make (NVDS_ELEM_WAVPARSE, "decodebin_elem");
    if (!bin->decodebin) {
      NVGSTDS_ERR_MSG_V ("Could not create element 'decodebin_elem'");
      goto done;
    }
    g_object_set (G_OBJECT (bin->decodebin), "ignore-length", config->loop,
        NULL);
  } else if (config->type == NV_DS_SOURCE_ALSA_SRC) {
    bin->src_elem = gst_element_factory_make (NVDS_ELEM_SRC_ALSA, "src_elem");
    if (!bin->src_elem) {
      NVGSTDS_ERR_MSG_V ("Could not create element 'src_elem'");
      goto done;
    }
    // Use the configured ALSA device if given; otherwise the element default.
    if (config->alsa_device) {
      g_object_set (G_OBJECT (bin->src_elem), "device", config->alsa_device,
          NULL);
    }
  } else {
    NVGSTDS_ERR_MSG_V ("Source Type (%d) not supported\n", config->type);
    goto done;
  }
  bin->audio_converter =
      gst_element_factory_make ("audioconvert", "audio-convert");
  if (!bin->audio_converter) {
    NVGSTDS_ERR_MSG_V ("Could not create 'audioconvert'");
    goto done;
  }
  bin->audio_resample =
      gst_element_factory_make ("audioresample", "audio-resample");
  if (!bin->audio_resample) {
    NVGSTDS_ERR_MSG_V ("Could not create 'audioresample'");
    goto done;
  }
  bin->cap_filter =
      gst_element_factory_make (NVDS_ELEM_CAPS_FILTER,
      "src_cap_filter_audioresample");
  if (!bin->cap_filter) {
    NVGSTDS_ERR_MSG_V ("Could not create src_cap_filter_audioresample");
    goto done;
  }
  // Build "audio/x-raw, rate=<N>" caps to pin the resampler output rate.
  if (snprintf (caps_audio_resampler, MAX_CAPS_LEN, "audio/x-raw, rate=%d",
          config->input_audio_rate)
      <= 0) {
    NVGSTDS_ERR_MSG_V ("Could not create caps to force rate=%d",
        config->input_audio_rate);
    goto done;
  }
  caps = gst_caps_from_string (caps_audio_resampler);
  g_object_set (G_OBJECT (bin->cap_filter), "caps", caps, NULL);
  gst_caps_unref (caps);
  // Static pads throughout, so elements can be linked immediately.
  if (config->type == NV_DS_SOURCE_AUDIO_WAV) {
    gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem, bin->decodebin,
        bin->audio_converter, bin->audio_resample, bin->cap_filter, NULL);
    gst_element_link_many (bin->src_elem, bin->decodebin, bin->audio_converter,
        bin->audio_resample, bin->cap_filter, NULL);
  } else if (config->type == NV_DS_SOURCE_ALSA_SRC) {
    gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem,
        bin->audio_converter, bin->audio_resample, bin->cap_filter, NULL);
    gst_element_link_many (bin->src_elem, bin->audio_converter,
        bin->audio_resample, bin->cap_filter, NULL);
  }
  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->cap_filter, "src");
  ret = TRUE;
  GST_CAT_DEBUG (NVDS_APP,
      "Decode bin created. Waiting for a new pad from decodebin to link");
done:
  if (!ret) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ret;
}
/**
 * Create an audio source bin driven by uridecodebin:
 *
 *   uridecodebin -> audioconvert -> audioresample -> capsfilter
 *
 * The decodebin's audio pad is attached at runtime by cb_newpad_audio; the
 * trailing capsfilter forces the stream to config->input_audio_rate.
 *
 * @param config  Source configuration ([sourceN] group).
 * @param bin     Source bin to populate (bin->bin must already exist).
 * @return TRUE on success, FALSE on failure (an error message is logged).
 */
static gboolean
create_uridecode_src_bin_audio (NvDsSourceConfig * config, NvDsSrcBin * bin)
{
  gboolean ok = FALSE;
  guint const RATE_CAPS_MAX = 256;
  gchar rate_caps_str[RATE_CAPS_MAX];
  GstCaps *rate_caps = NULL;

  bin->config = config;

  bin->src_elem = gst_element_factory_make (NVDS_ELEM_SRC_URI, "src_elem");
  if (!bin->src_elem) {
    NVGSTDS_ERR_MSG_V ("Could not create element 'src_elem'");
    goto done;
  }

  bin->latency = config->latency;
  bin->udp_buffer_size = config->udp_buffer_size;

  /* Local files are never live sources. */
  if (g_strrstr (config->uri, "file:/"))
    config->live_source = FALSE;

  /* Only a URI that *starts* with rtsp:// gets NTP-sync configuration. */
  if (g_strrstr (config->uri, "rtsp://") == config->uri)
    configure_source_for_ntp_sync (bin->src_elem);

  g_object_set (G_OBJECT (bin->src_elem), "uri", config->uri, NULL);
  g_signal_connect (G_OBJECT (bin->src_elem), "pad-added",
      G_CALLBACK (cb_newpad_audio), bin);

  bin->audio_converter =
      gst_element_factory_make (NVDS_ELEM_AUDIO_CONV, "audioconv_elem");
  if (!bin->audio_converter) {
    NVGSTDS_ERR_MSG_V ("Could not create element audio_converter");
    goto done;
  }

  bin->audio_resample =
      gst_element_factory_make (NVDS_ELEM_AUDIO_RESAMPLER,
      "audioresampler_elem");
  if (!bin->audio_resample) {
    NVGSTDS_ERR_MSG_V ("Could not create element audio_resample");
    goto done;
  }

  bin->cap_filter =
      gst_element_factory_make (NVDS_ELEM_CAPS_FILTER,
      "src_cap_filter_audioresample");
  if (!bin->cap_filter) {
    NVGSTDS_ERR_MSG_V ("Could not create src_cap_filter_audioresample");
    goto done;
  }

  /* Pin the resampler output to the rate expected by nvinferaudio. */
  if (snprintf (rate_caps_str, RATE_CAPS_MAX, "audio/x-raw, rate=%d",
          config->input_audio_rate) <= 0) {
    NVGSTDS_ERR_MSG_V ("Could not create caps to force rate=%d",
        config->input_audio_rate);
    goto done;
  }
  rate_caps = gst_caps_from_string (rate_caps_str);
  g_object_set (G_OBJECT (bin->cap_filter), "caps", rate_caps, NULL);
  gst_caps_unref (rate_caps);

  gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem,
      bin->audio_converter, bin->audio_resample, bin->cap_filter, NULL);

  /* src_elem links to audio_converter later, via the pad-added callback. */
  NVGSTDS_LINK_ELEMENT (bin->audio_converter, bin->audio_resample);
  NVGSTDS_LINK_ELEMENT (bin->audio_resample, bin->cap_filter);

  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->cap_filter, "src");

  ok = TRUE;
  GST_CAT_DEBUG (NVDS_APP,
      "Decode bin created. Waiting for a new pad from decodebin to link");

done:
  if (!ok) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ok;
}
/**
 * Create a generic video source bin driven by uridecodebin:
 *
 *   uridecodebin -> tee -> [dewarper bin] -> queue -> nvvideoconvert ->
 *   capsfilter (NVMM raw video)
 *
 * A second tee branch goes queue -> fakesink (sync/async off) so the
 * pipeline keeps flowing regardless of the main branch.  The decodebin
 * pad is attached at runtime by cb_newpad.
 *
 * @param config  Source configuration ([sourceN] group).
 * @param bin     Source bin to populate (bin->bin must already exist).
 * @return TRUE on success, FALSE on failure (an error message is logged).
 */
static gboolean
create_uridecode_src_bin (NvDsSourceConfig * config, NvDsSrcBin * bin)
{
  gboolean ret = FALSE;
  GstCaps *caps = NULL;
  GstCapsFeatures *feature = NULL;

  bin->config = config;

  bin->src_elem = gst_element_factory_make (NVDS_ELEM_SRC_URI, "src_elem");
  if (!bin->src_elem) {
    NVGSTDS_ERR_MSG_V ("Could not create element 'src_elem'");
    goto done;
  }

  if (config->dewarper_config.enable) {
    if (!create_dewarper_bin (&config->dewarper_config, &bin->dewarper_bin)) {
      g_print ("Creating Dewarper bin failed \n");
      goto done;
    }
  }

  bin->latency = config->latency;
  bin->udp_buffer_size = config->udp_buffer_size;

  // Local files are never live sources.
  if (g_strrstr (config->uri, "file:/")) {
    config->live_source = FALSE;
  }
  // Only URIs that start with rtsp:// get NTP-sync configuration.
  if (g_strrstr (config->uri, "rtsp://") == config->uri) {
    configure_source_for_ntp_sync (bin->src_elem);
  }

  g_object_set (G_OBJECT (bin->src_elem), "uri", config->uri, NULL);
  g_signal_connect (G_OBJECT (bin->src_elem), "pad-added",
      G_CALLBACK (cb_newpad), bin);
  g_signal_connect (G_OBJECT (bin->src_elem), "child-added",
      G_CALLBACK (decodebin_child_added), bin);
  g_signal_connect (G_OBJECT (bin->src_elem), "source-setup",
      G_CALLBACK (cb_sourcesetup), bin);

  // NOTE: despite its name, cap_filter is a queue.
  bin->cap_filter = gst_element_factory_make (NVDS_ELEM_QUEUE, "queue");
  if (!bin->cap_filter) {
    NVGSTDS_ERR_MSG_V ("Could not create 'queue'");
    goto done;
  }

  bin->nvvidconv =
      gst_element_factory_make (NVDS_ELEM_VIDEO_CONV, "nvvidconv_elem");
  if (!bin->nvvidconv) {
    NVGSTDS_ERR_MSG_V ("Could not create element 'nvvidconv_elem'");
    goto done;
  }
  g_object_set (G_OBJECT (bin->nvvidconv), "gpu-id", config->gpu_id,
      "nvbuf-memory-type", config->nvbuf_memory_type, NULL);

  // BUGFIX: create the capsfilter before building the caps so `caps`
  // cannot leak on the failure path, and report the real element name
  // (the old message said 'queue').
  bin->cap_filter1 =
      gst_element_factory_make (NVDS_ELEM_CAPS_FILTER,
      "src_cap_filter_nvvidconv");
  if (!bin->cap_filter1) {
    NVGSTDS_ERR_MSG_V ("Could not create 'src_cap_filter_nvvidconv'");
    goto done;
  }

  // NVMM raw-video caps, optionally pinning the configured output format.
  if (config->video_format) {
    caps = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING,
        config->video_format, NULL);
  } else {
    caps = gst_caps_new_empty_simple ("video/x-raw");
  }
  feature = gst_caps_features_new ("memory:NVMM", NULL);
  gst_caps_set_features (caps, 0, feature);
  g_object_set (G_OBJECT (bin->cap_filter1), "caps", caps, NULL);
  gst_caps_unref (caps);

  g_object_set_data (G_OBJECT (bin->cap_filter), SRC_CONFIG_KEY, config);

  gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem, bin->cap_filter,
      bin->nvvidconv, bin->cap_filter1, NULL);

  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->cap_filter1, "src");

  bin->fakesink = gst_element_factory_make ("fakesink", "src_fakesink");
  if (!bin->fakesink) {
    NVGSTDS_ERR_MSG_V ("Could not create 'src_fakesink'");
    goto done;
  }

  bin->fakesink_queue = gst_element_factory_make ("queue", "fakequeue");
  if (!bin->fakesink_queue) {
    NVGSTDS_ERR_MSG_V ("Could not create 'fakequeue'");
    goto done;
  }

  bin->tee = gst_element_factory_make ("tee", NULL);
  if (!bin->tee) {
    NVGSTDS_ERR_MSG_V ("Could not create 'tee'");
    goto done;
  }
  gst_bin_add_many (GST_BIN (bin->bin), bin->fakesink, bin->tee,
      bin->fakesink_queue, NULL);
  NVGSTDS_LINK_ELEMENT (bin->fakesink_queue, bin->fakesink);

  if (config->dewarper_config.enable) {
    gst_bin_add_many (GST_BIN (bin->bin), bin->dewarper_bin.bin, NULL);
    NVGSTDS_LINK_ELEMENT (bin->tee, bin->dewarper_bin.bin);
    NVGSTDS_LINK_ELEMENT (bin->dewarper_bin.bin, bin->cap_filter);
  } else {
    link_element_to_tee_src_pad (bin->tee, bin->cap_filter);
  }

  NVGSTDS_LINK_ELEMENT (bin->cap_filter, bin->nvvidconv);
  NVGSTDS_LINK_ELEMENT (bin->nvvidconv, bin->cap_filter1);
  link_element_to_tee_src_pad (bin->tee, bin->fakesink_queue);

  // Keep the fakesink branch from throttling or retaining buffers.
  g_object_set (G_OBJECT (bin->fakesink), "sync", FALSE, "async", FALSE, NULL);
  g_object_set (G_OBJECT (bin->fakesink), "enable-last-sample", FALSE, NULL);

  ret = TRUE;
  GST_CAT_DEBUG (NVDS_APP,
      "Decode bin created. Waiting for a new pad from decodebin to link");

done:
  if (!ret) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ret;
}
This is in deepstream_sources.h:
/**
 * Per-source configuration, parsed from one [sourceN] group of the
 * deepstream-app config file and consumed by the create_*_src_bin()
 * functions.
 */
typedef struct
{
  NvDsSourceType type;            /**< Source kind (URI / RTSP / camera / WAV / ALSA ...). */
  gboolean enable;                /**< Source enabled in the config file. */
  gboolean loop;                  /**< Loop file playback (also drives wavparse ignore-length). */
  gboolean live_source;           /**< Live input; forced FALSE for file:/ URIs. */
  gboolean Intra_decode;
  gboolean low_latency_mode;
  guint smart_record;             /**< Smart record mode; 2 also generates local start/stop events. */
  gint source_width;
  gint source_height;
  gint source_fps_n;              /**< Source framerate numerator. */
  gint source_fps_d;              /**< Source framerate denominator. */
  gint camera_csi_sensor_id;
  gint camera_v4l2_dev_node;
  gchar *uri;                     /**< Source URI, e.g. file:///... or rtsp://... */
  gchar *dir_path;                /**< Smart-record output directory. */
  gchar *file_prefix;             /**< Smart-record file name prefix (combined with camera_id). */
  gint latency;                   /**< Passed to rtspsrc 'latency' property. */
  guint smart_rec_cache_size;
  guint smart_rec_container;
  guint smart_rec_def_duration;
  guint smart_rec_duration;
  guint smart_rec_start_time;
  guint smart_rec_interval;       /**< Seconds between generated smart-record events. */
  guint num_sources;
  guint gpu_id;                   /**< GPU used by nvvideoconvert etc. */
  guint camera_id;
  guint source_id;
  guint select_rtp_protocol;      /**< 0x4 = TCP only, 0x7 = UDP/UDP-MCAST/TCP. */
  guint num_decode_surfaces;
  guint num_extra_surfaces;
  guint nvbuf_memory_type;
  guint cuda_memory_type;
  NvDsDewarperConfig dewarper_config; /**< Optional dewarper sub-bin configuration. */
  guint drop_frame_interval;
  gboolean extract_sei_type5_data;
  gint rtsp_reconnect_interval_sec;   /**< >0 enables the RTSP reconnect watchdog. */
  guint rtsp_reconnect_attempts;
  /** Target decoded framerate (fps) enforced by videorate + capsfilter in
   *  create_rtsp_src_bin(). NOTE(review): a matching 'framerate' key must be
   *  added to the config-file parser; otherwise the app warns
   *  "Unknown key 'framerate'" and this field stays 0 (passthrough). */
  gint framerate;
  guint udp_buffer_size;          /**< Passed to rtspsrc 'udp-buffer-size'. */
  /** Desired input audio rate to nvinferaudio from PGIE config;
   * This config shall be copied over from NvDsGieConfig
   * at create_multi_source_bin()*/
  guint input_audio_rate;
  /** ALSA device, as defined in an asound configuration file */
  gchar* alsa_device;
  /** Video format to be applied at nvvideoconvert source pad. */
  gchar* video_format;
} NvDsSourceConfig;
But why, when I make the file, it doesn’t show an error, but when I run the DeepStream app, it shows this?
/opt/nvidia/deepstream/deepstream-7.0/sources/apps/sample_apps/deepstream-nvdsanalytics-test/deepstream-app -c /opt/nvidia/deepstream/deepstream-7.0/sources/apps/sample_apps/deepstream-nvdsanalytics-test/deepstream_app_config.txt
** WARN: <parse_source:661>: Unknown key 'framerate' for group [source0]
** WARN: <parse_source:661>: Unknown key 'framerate' for group [source1]
** WARN: <parse_source:661>: Unknown key 'framerate' for group [source2]
** WARN: <parse_source:661>: Unknown key 'framerate' for group [source3]
Segmentation fault (core dumped)
The code can't recognize the 'framerate' setting. You need to add parsing code for the new key so it is read into NvDsSourceConfig.
could you use gdb to get the crash stack? can you simplify the code to narrow down this issue?
There is no update from you for a period, assuming this is not an issue anymore. Hence we are closing this topic. If need further support, please open a new one. Thanks
This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.