DeepStream C++: increasing latency when using 2 cameras

Please provide complete information as applicable to your setup.

• Hardware Platform (amd64)
• DeepStream Version 6.1
• TensorRT Version 8.6.1
• NVIDIA GPU Driver Version (valid for GPU only): 530.30.02
• Issue Type (questions, new requirements, bugs): bug

I'm trying to create a DeepStream pipeline in C++ for YOLO.
It seems to work fine with one camera at 30 fps,
but when I connect a camera at 60 fps, or two cameras at 15 fps each, the latency increases at a very fast rate.
Also, the GPU seems to be at only 50% utilization (it's not as if the GPU can't work harder).
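
For reference, here is a sketch of how the growing latency could be quantified with DeepStream's built-in latency measurement (run with NVDS_ENABLE_LATENCY_MEASUREMENT=1 exported; the probe placement and the array bound of 16 are illustrative assumptions, not part of my app):

/* Hypothetical latency probe — assumes NVDS_ENABLE_LATENCY_MEASUREMENT=1 is
 * set in the environment and batch-size never exceeds 16. */
#include <gst/gst.h>
#include "nvds_latency_meta.h"

static GstPadProbeReturn
latency_probe (GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
{
    GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER (info);
    if (nvds_enable_latency_measurement) {
        NvDsFrameLatencyInfo latency_info[16];  /* one entry per frame in the batch */
        guint n = nvds_measure_buffer_latency (buf, latency_info);
        for (guint i = 0; i < n; i++) {
            g_print ("source %u frame %u latency %.2f ms\n",
                     latency_info[i].source_id,
                     latency_info[i].frame_num,
                     latency_info[i].latency);
        }
    }
    return GST_PAD_PROBE_OK;
}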

I tried the following (a stand-alone sketch of these tweaks follows below):

  1. Adding is-live=true to my source
  2. Adding sync=false to my sink
  3. Adding leaky queues before the bottleneck elements: queue leaky=2 max-size-buffers=1

(from Deepstream-app latency increasing - #5 by Hnil)

It didn't help.
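
Here are those three tweaks in isolation, as a minimal stand-alone test sketch built with gst_parse_launch() (the videotestsrc/autovideosink elements stand in for my real camera source and sink and are assumptions for illustration):

#include <gst/gst.h>

int main (int argc, char *argv[]) {
    gst_init (&argc, &argv);
    GError *err = NULL;
    /* Hypothetical test pipeline demonstrating the three tweaks. */
    GstElement *pipeline = gst_parse_launch (
        "videotestsrc is-live=true "                    /* (1) is-live on the source */
        "! queue leaky=downstream max-size-buffers=1 "  /* (3) leaky queue (leaky=2) */
        "! autovideosink sync=false",                   /* (2) sync=false on the sink */
        &err);
    if (!pipeline) {
        g_printerr ("Parse error: %s\n", err ? err->message : "unknown");
        return -1;
    }
    gst_element_set_state (pipeline, GST_STATE_PLAYING);
    g_main_loop_run (g_main_loop_new (NULL, FALSE));
    return 0;
}
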
This is my C++ pipeline:
#include <nvdsmeta.h>
#include <cuda_runtime_api.h>
#include <gst/app/gstappsrc.h>   /* needed for GST_APP_STREAM_TYPE_STREAM below */
#include <gst/app/gstappsink.h>

#include <chrono>

#include "gstnvdsmeta.h"
#include "nvds_yml_parser.h"

#include "pipeline_creator.h"
#include "osd_probe.h"
#include "set_tracker.h"

#define IMG_HEIGHT 480
#define IMG_WIDTH 640

GstElement* create_pipeline(AppSrcData* data, int argc, char* argv[]) {
    
    gst_init(&argc, &argv);

    /* Initialize the caller-provided custom data structure */
    memset (data, 0, sizeof (*data));   /* sizeof(data) would only zero a pointer's worth */
    data->frame_size = IMG_WIDTH * IMG_HEIGHT * 3;  /* BGR: 3 bytes per pixel */
    data->fps = FPS;
    data->sourceid = 0;

    // Create the pipeline
    GstElement* pipeline = gst_pipeline_new("realsense-pipeline");

    // Create elements for the pipeline
    // App source
    data->app_source = gst_element_factory_make("appsrc", "source");
    g_object_set(G_OBJECT(data->app_source), "is-live", TRUE, NULL);

    // video handle
    GstElement *capsfilter_1 = gst_element_factory_make("capsfilter", "caps-filter-1");    
    GstElement *videoconvert = gst_element_factory_make("videoconvert", "video-convert");
    #if (JETSON == 1)
        g_object_set (videoconvert, "compute-hw", 1, NULL);
    #endif
    GstElement *nvvideoconvert_1 = gst_element_factory_make("nvvideoconvert", "nv-video-convert-1");
    #if (JETSON == 1)
        g_object_set (nvvideoconvert_1, "compute-hw", 1, NULL);
    #endif
    GstElement *capsfilter_2 = gst_element_factory_make("capsfilter", "caps-filter-2");  

    // pipeline 
    GstElement *nvstreammux = gst_element_factory_make("nvstreammux", "stream-mux");
    GstElement *nvinfer = gst_element_factory_make("nvinfer", "inference");
    GstElement *nvvideoconvert_2 = gst_element_factory_make("nvvideoconvert", "nv-video-convert-2");
    #if (JETSON == 1)
        g_object_set (nvvideoconvert_2, "compute-hw", 1, NULL);
    #endif
    GstElement *nvdsosd = gst_element_factory_make("nvdsosd", "osd");
    GstElement *nvegltransform = NULL;
    GstElement *nveglglessink = gst_element_factory_make("nveglglessink", "egl-sink");
    g_object_set(G_OBJECT(nveglglessink), "sync", FALSE, NULL);
    // GstElement *fake_sink = gst_element_factory_make ("fakesink", "nvvideo-renderer");
    GstElement *tee = gst_element_factory_make ("tee", "tee");
    GstElement *appsink = gst_element_factory_make ("appsink", "app-sink");

    /* Add queue elements between every two elements */
    GstElement *queue_1 = gst_element_factory_make("queue", "queue-1");
    g_object_set(G_OBJECT(queue_1), "max-size-buffers", 1, "leaky", 2, NULL);

    GstElement *queue_2 = gst_element_factory_make("queue", "queue-2");
    g_object_set(G_OBJECT(queue_2), "max-size-buffers", 1, "leaky", 2, NULL);

    GstElement *queue_3 = gst_element_factory_make("queue", "queue-3");
    g_object_set(G_OBJECT(queue_3), "max-size-buffers", 1, "leaky", 2, NULL);

    GstElement *queue_4 = gst_element_factory_make("queue", "queue-4");
    g_object_set(G_OBJECT(queue_4), "max-size-buffers", 1, "leaky", 2, NULL);

    GstElement *queue_5 = gst_element_factory_make("queue", "queue-5");
    g_object_set(G_OBJECT(queue_5), "max-size-buffers", 1, "leaky", 2, NULL);

    GstElement *queue_6 = gst_element_factory_make("queue", "queue-6");
    g_object_set(G_OBJECT(queue_6), "max-size-buffers", 1, "leaky", 2, NULL);

    /* We need to have a tracker to track the identified objects */
    GstElement *nvtracker = gst_element_factory_make ("nvtracker", "tracker");

    int current_device = -1;
    cudaGetDevice(&current_device);
    struct cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, current_device);
    
    /* Note: this dump happens before any elements are added, so the graph will
     * be empty; the last argument is a file name resolved relative to
     * GST_DEBUG_DUMP_DOT_DIR. */
    GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "/home/daphna/dot_files");

    if (prop.integrated) {
      nvegltransform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
      if (!nvegltransform) {
        throw std::runtime_error("One tegra element could not be created. Exiting.\n");
      }
    }

    // Check that all elements were created successfully
    if (!pipeline || !capsfilter_1 || !videoconvert || !nvvideoconvert_1 || !capsfilter_2) {
        throw std::runtime_error("The pipeline or one of the video-handling elements (capsfilter_1/videoconvert/nvvideoconvert_1/capsfilter_2) could not be created. Exiting.\n");
    }

    if (!nvstreammux || !nvinfer || !nvvideoconvert_2 || !nvdsosd || !nveglglessink || !nvtracker) {
        throw std::runtime_error("One or more DeepStream elements (nvstreammux/nvinfer/nvvideoconvert_2/nvdsosd/nveglglessink/nvtracker) could not be created. Exiting.\n");
    }

    if (!queue_1 || !queue_2 || !queue_3 || !queue_4 || !queue_5 || !queue_6) {
        throw std::runtime_error("One or more queue elements could not be created. Exiting.\n");
    }

    if (!data->app_source || !appsink || !tee) {
        throw std::runtime_error("appsrc, appsink or tee could not be created. Exiting.\n");
    }

    g_object_set(G_OBJECT(data->app_source), "stream-type", GST_APP_STREAM_TYPE_STREAM, "format", GST_FORMAT_TIME, NULL);

    char caps_string[256];  // Adjust the buffer size as needed
    snprintf(caps_string, sizeof(caps_string), "video/x-raw, framerate=%d/1, format=BGR, width=%d, height=%d", FPS, IMG_WIDTH, IMG_HEIGHT);  /* without this, caps_string is read uninitialized below */
    GstCaps *caps = gst_caps_from_string(caps_string);
    g_object_set(G_OBJECT(capsfilter_1), "caps", caps, NULL);
    gst_caps_unref(caps);  /* the capsfilter holds its own ref */

    AppSource* appSourceInstance = new AppSource(); // Create an instance of AppSource
    data->app_source_object = appSourceInstance;
    g_signal_connect(data->app_source, "need-data", G_CALLBACK(AppSource::start_feed), data);
    g_signal_connect(data->app_source, "enough-data", G_CALLBACK(AppSource::stop_feed), data);

 
    caps = gst_caps_from_string("video/x-raw(memory:NVMM)");
    g_object_set(G_OBJECT(capsfilter_2), "caps", caps, NULL);
    
    gst_caps_unref(caps);
    #if (JETSON == 1)
        g_object_set (G_OBJECT (nvvideoconvert_1), "nvbuf-memory-type", 0, NULL);
    #else
        g_object_set (G_OBJECT (nvvideoconvert_1), "nvbuf-memory-type", 3, NULL);
    #endif

    // cout << get_current_dir() << endl;
    
    g_object_set(G_OBJECT(nvstreammux), "width", IMG_WIDTH, "height", IMG_HEIGHT, "batch-size", 1, "batched-push-timeout", 40000, NULL);
   
    g_object_set(G_OBJECT(nvinfer), "config-file-path", "pipeline_realsense/config/config_infer_primary_yoloV8_orange.txt", "batch-size", 1, NULL);

    /* Set necessary properties of the tracker element. */
    if (!set_tracker_properties(nvtracker)) {
        throw std::runtime_error("Failed to set tracker properties. Exiting.\n");
    }

    // Add elements to the pipeline - camera
    if (nvegltransform){
            gst_bin_add_many(GST_BIN(pipeline), data->app_source, queue_5, capsfilter_1, videoconvert, nvvideoconvert_1, queue_6, capsfilter_2,
                        nvstreammux, queue_1, nvinfer, nvtracker, queue_2, nvvideoconvert_2, queue_3, nvdsosd, queue_4, nvegltransform, nveglglessink, tee, appsink, NULL);
        } else {
        gst_bin_add_many(GST_BIN(pipeline), data->app_source,  queue_5, capsfilter_1, videoconvert, nvvideoconvert_1, queue_6, capsfilter_2,
                        nvstreammux, queue_1, nvinfer, nvtracker, queue_2, nvvideoconvert_2, queue_3, nvdsosd, nveglglessink, tee, appsink, NULL);

    }
   
    GstPad *sinkpad, *srcpad;
    gchar pad_name_sink[16] = "sink_0";
    gchar pad_name_src[16] = "src";

    sinkpad = gst_element_get_request_pad (nvstreammux, pad_name_sink);
    if (!sinkpad) {
        throw std::runtime_error("Streammux request sink pad failed. Exiting.\n");
    }
    // camera source
    srcpad = gst_element_get_static_pad (capsfilter_2, pad_name_src);
    if (!srcpad) {
        throw std::runtime_error("capsfilter_2 request src pad failed. Exiting.\n");
    }
   

    if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
        throw std::runtime_error("Failed to link capsfilter_2/decoder to stream muxer. Exiting.\n");
    }

    gst_object_unref (sinkpad);
    gst_object_unref (srcpad);


     // Enable debugging before linking attempts
    GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "pipeline_before_linking");

    if (nvegltransform) {
        /* We link the elements together (queue_5 is added to the bin above, so
         * it is linked into the source chain here as well). */
        if (!gst_element_link_many(data->app_source, queue_5, capsfilter_1, videoconvert, nvvideoconvert_1, queue_6, capsfilter_2, NULL)) {
            throw std::runtime_error("Elements could not be linked with nvegltransform and capsfilter_2. Exiting.\n");
        }
        if (!gst_element_link_many(nvstreammux, queue_1, nvinfer, queue_2, nvtracker, nvvideoconvert_2, tee, NULL)) {
            throw std::runtime_error("Elements could not be linked with nvegltransform and nvstreammux. Exiting.\n");
        }
        if (!gst_element_link_many(tee, queue_3, nvdsosd, queue_4, nvegltransform, nveglglessink, NULL)) {
            throw std::runtime_error("Elements could not be linked with nvegltransform and nveglglessink. Exiting.\n");
        }
        if (!gst_element_link_many(tee, appsink, NULL)) {
            throw std::runtime_error("Elements could not be linked with nvegltransform and appsink. Exiting.\n");
        }

    } else {
        /* appsrc path: link the elements together, without the transform. */
        g_printerr("Link the elements together, without transform.\n");

        if (!gst_element_link_many(data->app_source, queue_5, capsfilter_1, videoconvert, nvvideoconvert_1, queue_6, capsfilter_2, NULL) ||
            !gst_element_link_many(nvstreammux, queue_1, nvinfer, queue_2, nvtracker, nvvideoconvert_2, tee, NULL)) {
            throw std::runtime_error("Elements could not be linked up to capsfilter_2. Exiting.\n");
        }
        if (!gst_element_link_many(tee, nvdsosd, nveglglessink, NULL)) {
            throw std::runtime_error("Elements could not be linked to nveglglessink. Exiting.\n");
        }
        if (!gst_element_link_many(tee, appsink, NULL)) {
            throw std::runtime_error("Elements could not be linked to appsink. Exiting.\n");
        }
    }

    // Enable debugging after linking attempts
    GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "pipeline_after_linking");


    /* Let's add a probe to get informed of the generated metadata. We add the
     * probe to the sink pad of the osd element, since by that time the buffer
     * will have received all of its metadata. */
    /* The tee's src pads were already requested implicitly by the
     * gst_element_link_many() calls above; requesting extra unused pads or
     * linking tee -> appsink a second time would break the data flow. */
    GstPad *nvdsosd_sink_pad = gst_element_get_static_pad(nvdsosd, "sink");

    if (!nvdsosd_sink_pad) {
        g_print ("Unable to get sink pad\n");
    } else {
        gst_pad_add_probe (nvdsosd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, OsdProbe::osd_sink_pad_buffer_probe, NULL, NULL);
    }

    /* Configure appsink to extract data from DeepStream pipeline */
    g_object_set (appsink, "emit-signals", TRUE, "async", FALSE, NULL);

    /* Callback to access buffer and object info. */
    AppSinkNode* appSinkInstance = new AppSinkNode(); // Create an instance of AppSinkNode

    g_signal_connect (appsink, "new-sample", G_CALLBACK (AppSinkNode::new_sample), appSinkInstance);

    return pipeline;
}

There has been no update from you for a while, so we are assuming this is no longer an issue and are closing this topic. If you need further support, please open a new one. Thanks.

For live sources like RTSP, cameras, etc., you need to set the "live-source" property of nvstreammux to 1. There are also several other errors in your code related to live sources and multiple sources. Please refer to deepstream-test3 for how to enable multiple sources and a live source in one pipeline.
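
A minimal sketch of what this suggests, patterned on deepstream-test3 (the NUM_SOURCES constant and the per-camera source_caps[] elements are illustrative assumptions, not part of the original code):

#define NUM_SOURCES 2

/* Live sources: tell nvstreammux the inputs are live, and size its batch
 * to the number of cameras. */
g_object_set (G_OBJECT (nvstreammux),
              "live-source", 1,
              "batch-size", NUM_SOURCES,
              "batched-push-timeout", 40000,
              "width", IMG_WIDTH, "height", IMG_HEIGHT, NULL);

/* Request one sink pad per source: sink_0 ... sink_(N-1). */
for (guint i = 0; i < NUM_SOURCES; i++) {
    gchar pad_name[16];
    g_snprintf (pad_name, sizeof (pad_name), "sink_%u", i);
    GstPad *sinkpad = gst_element_get_request_pad (nvstreammux, pad_name);
    GstPad *srcpad = gst_element_get_static_pad (source_caps[i], "src"); /* last element of each camera branch */
    if (!sinkpad || !srcpad || gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK)
        throw std::runtime_error ("Failed to link a source to the stream muxer.\n");
    gst_object_unref (srcpad);
    gst_object_unref (sinkpad);
}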

Please read the documents before you start to customize your own application.
https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_plugin_gst-nvstreammux.html#
DeepStream SDK FAQ - Intelligent Video Analytics / DeepStream SDK - NVIDIA Developer Forums
https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_FAQ.html#what-is-the-difference-between-batch-size-of-nvstreammux-and-nvinfer-what-are-the-recommended-values-for-nvstreammux-batch-size
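
Per the last FAQ link: the nvstreammux batch-size should equal the number of sources, and nvinfer should be configured for the same batch size. A sketch, with the value 2 as an assumption for a two-camera setup:

/* Two cameras -> a batch of 2 out of the muxer; nvinfer should use the same
 * batch size here and in its config/engine file. */
g_object_set (G_OBJECT (nvstreammux), "batch-size", 2, NULL);
g_object_set (G_OBJECT (nvinfer), "batch-size", 2, NULL);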

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.