DeepStream SDK FAQ

6. [DS 5.0.1_All_Plugin] Tracker FAQ topic Deepstream Tracker FAQ

7. [DS 5.0GA_All_App] Enable Latency measurement for deepstream sample apps

  1. If you are using deepstream-app, to check the component latency directly, you need to set the env

    1. export NVDS_ENABLE_COMPONENT_LATENCY_MEASUREMENT=1
      export NVDS_ENABLE_LATENCY_MEASUREMENT=1
  2. If you are using other deepstream sample apps such as deepstream-test3, you need to apply the following patch and set the env

    1. export NVDS_ENABLE_COMPONENT_LATENCY_MEASUREMENT=1
      export NVDS_ENABLE_LATENCY_MEASUREMENT=1
diff --git a/apps/deepstream/sample_apps/deepstream-test3/deepstream_test3_app.c b/apps/deepstream/sample_apps/deepstream-test3/deepstream_test3_app.c
index 426bd69..c7c2472 100644
--- a/apps/deepstream/sample_apps/deepstream-test3/deepstream_test3_app.c
+++ b/apps/deepstream/sample_apps/deepstream-test3/deepstream_test3_app.c
@@ -26,6 +26,7 @@
 #include <math.h>
 #include <string.h>
 #include <sys/time.h>
+#include <stdlib.h>

 #include "gstnvdsmeta.h"
 //#include "gstnvstreammeta.h"
@@ -73,6 +74,41 @@ gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",

 //static guint probe_counter = 0;

+typedef struct {
+  GMutex *lock;
+  int num_sources;
+}LatencyCtx;
+
+static GstPadProbeReturn
+latency_measurement_buf_prob(GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
+{
+  LatencyCtx *ctx = (LatencyCtx *) u_data;
+  static int batch_num = 0;
+  guint i = 0, num_sources_in_batch = 0;
+  if(nvds_enable_latency_measurement)
+  {
+    GstBuffer *buf = (GstBuffer *) info->data;
+    NvDsFrameLatencyInfo *latency_info = NULL;
+    g_mutex_lock (ctx->lock);
+    latency_info = (NvDsFrameLatencyInfo *)
+      calloc(1, ctx->num_sources * sizeof(NvDsFrameLatencyInfo));;
+    g_print("\n************BATCH-NUM = %d**************\n",batch_num);
+    num_sources_in_batch = nvds_measure_buffer_latency(buf, latency_info);
+
+    for(i = 0; i < num_sources_in_batch; i++)
+    {
+      g_print("Source id = %d Frame_num = %d Frame latency = %lf (ms) \n",
+          latency_info[i].source_id,
+          latency_info[i].frame_num,
+          latency_info[i].latency);
+    }
+    g_mutex_unlock (ctx->lock);
+    batch_num++;
+  }
+
+  return GST_PAD_PROBE_OK;
+}
+
 /* tiler_sink_pad_buffer_probe  will extract metadata received on OSD sink pad
  * and update params for drawing rectangle, object information etc. */

@@ -107,9 +143,9 @@ tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
                 num_rects++;
             }
         }
-          g_print ("Frame Number = %d Number of objects = %d "
-            "Vehicle Count = %d Person Count = %d\n",
-            frame_meta->frame_num, num_rects, vehicle_count, person_count);
+          // g_print ("Frame Number = %d Number of objects = %d "
+          //   "Vehicle Count = %d Person Count = %d\n",
+          //   frame_meta->frame_num, num_rects, vehicle_count, person_count);
 #if 0
         display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
         NvOSD_TextParams *txt_params  = &display_meta->text_params;
@@ -383,7 +419,7 @@ main (int argc, char *argv[])
 #ifdef PLATFORM_TEGRA
   transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
 #endif
-  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
+  sink = gst_element_factory_make ("fakesink", "nvvideo-renderer");

   if (!pgie || !tiler || !nvvidconv || !nvosd || !sink) {
     g_printerr ("One element could not be created. Exiting.\n");
@@ -467,6 +503,18 @@ gst_bin_add_many (GST_BIN (pipeline), queue1, pgie, queue2, tiler, queue3,
         tiler_src_pad_buffer_probe, NULL, NULL);
   gst_object_unref (tiler_src_pad);

+  GstPad *sink_pad =  gst_element_get_static_pad (nvosd, "src");
+  if (!sink_pad)
+    g_print ("Unable to get src pad\n");
+  else {
+    LatencyCtx *ctx = (LatencyCtx *)g_malloc0(sizeof(LatencyCtx));
+    ctx->lock = (GMutex *)g_malloc0(sizeof(GMutex));
+    ctx->num_sources = num_sources;
+    gst_pad_add_probe (sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
+        latency_measurement_buf_prob, ctx, NULL);
+  }
+  gst_object_unref (sink_pad);
+
   /* Set the pipeline to "playing" state */
   g_print ("Now playing:");
   for (i = 0; i < num_sources; i++) {
Delay when I using RTSP camera
Latency measurement (nvds_measure_buffer_latency) gave weird results
Delay, randomness and dropped frames in RTSP output Stream
How to get the latency from deepstream python apps
Deepstream 6 python app performance degradation
Can deepstream handle higher resolutions than 1080p?
The most efficient method to evaluate time each plugin (in DeepStream)cost?
Inference with deepstream yolov5s-3.0 on 2 camera long delay (20-25s)
Unexpected FPS drop with back-to-back detector concept in deepstream-app
Deepstream multiple rtsp output latency
Running deepstream-text1 on tx2 to load yolov5s engine model becomes very delayed
Running deepstream-text1 on tx2 to load yolov5s engine model becomes very delayed
Why my pipeline is stuck and delayed, but deepstream-app is very smooth?
The deepstream-test3 demo using rtsp webcam delayed
DeepStream metrics
How to decrease the latency of pushing streaming to the local
How to accelerate single stream pipeline with batch size grater then 1
Question about tensorRT batch size
Does deepstream pipeline works sequentially?
How to get the latency from deepstream python apps
How to get the latency from deepstream python apps
How to get the latency from deepstream python apps
Print inference time in deepstream 5.1 on TX2NX
Print inference time in deepstream 5.1 on TX2NX
How to decrease the latency of pushing streaming to the local
Deepstream 6 python app performance degradation
Deepstream 6.0: Image capture to muxer large latency

8. [DS 5.0GA_All_App] Enable Perf measurement(FPS) for deepstream sample apps

  1. If you are using deepstream-app, you can add enable-perf-measurement=1 under Application Group in the config file
  2. If you are using other deepstream sample apps such as deepstream-test2, you can apply following patch to enable it
diff --git a/sources/apps/sample_apps/deepstream-test2/deepstream_test2_app.c b/sources/apps/sample_apps/deepstream-test2/deepstream_test2_app.c
index a2231acf535b4826adb766ed28f3aa80294c7f82..e37d7504ed07c9db77e5d3cdac2c4943fd0d1010 100755
--- a/sources/apps/sample_apps/deepstream-test2/deepstream_test2_app.c
+++ b/sources/apps/sample_apps/deepstream-test2/deepstream_test2_app.c
@@ -28,6 +28,7 @@
 #include <string.h>
 
 #include "gstnvdsmeta.h"
+#include "deepstream_perf.h"
 
 #define PGIE_CONFIG_FILE  "dstest2_pgie_config.txt"
 #define SGIE1_CONFIG_FILE "dstest2_sgie1_config.txt"
@@ -51,6 +52,29 @@
  * based on the fastest source's framerate. */
 #define MUXER_BATCH_TIMEOUT_USEC 40000
 
+#define MAX_STREAMS 64
+
+typedef struct
+{
+    /** identifies the stream ID */
+    guint32 stream_index;
+    gdouble fps[MAX_STREAMS];
+    gdouble fps_avg[MAX_STREAMS];
+    guint32 num_instances;
+    guint header_print_cnt;
+    GMutex fps_lock;
+    gpointer context;
+
+    /** Test specific info */
+    guint32 set_batch_size;
+}DemoPerfCtx;
+
+
+typedef struct {
+  GMutex *lock;
+  int num_sources;
+}LatencyCtx;
+
 gint frame_number = 0;
 /* These are the strings of the labels for the respective models */
 gchar sgie1_classes_str[12][32] = { "black", "blue", "brown", "gold", "green",
@@ -80,6 +104,66 @@ guint sgie1_unique_id = 2;
 guint sgie2_unique_id = 3;
 guint sgie3_unique_id = 4;
 
+/**
+ * callback function to print the performance numbers of each stream.
+ */
+static void
+perf_cb (gpointer context, NvDsAppPerfStruct * str)
+{
+  DemoPerfCtx *thCtx = (DemoPerfCtx *) context;
+
+  g_mutex_lock(&thCtx->fps_lock);
+  /** str->num_instances is == num_sources */
+  guint32 numf = str->num_instances;
+  guint32 i;
+
+  for (i = 0; i < numf; i++) {
+    thCtx->fps[i] = str->fps[i];
+    thCtx->fps_avg[i] = str->fps_avg[i];
+  }
+  thCtx->context = thCtx;
+  g_print ("**PERF: ");
+  for (i = 0; i < numf; i++) {
+    g_print ("%.2f (%.2f)\t", thCtx->fps[i], thCtx->fps_avg[i]);
+  }
+  g_print ("\n");
+  g_mutex_unlock(&thCtx->fps_lock);
+}
+
+/**
+ * callback function to print the latency of each component in the pipeline.
+ */
+
+static GstPadProbeReturn
+latency_measurement_buf_prob(GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
+{
+  LatencyCtx *ctx = (LatencyCtx *) u_data;
+  static int batch_num = 0;
+  guint i = 0, num_sources_in_batch = 0;
+  if(nvds_enable_latency_measurement)
+  {
+    GstBuffer *buf = (GstBuffer *) info->data;
+    NvDsFrameLatencyInfo *latency_info = NULL;
+    g_mutex_lock (ctx->lock);
+    latency_info = (NvDsFrameLatencyInfo *)
+      calloc(1, ctx->num_sources * sizeof(NvDsFrameLatencyInfo));;
+    g_print("\n************BATCH-NUM = %d**************\n",batch_num);
+    num_sources_in_batch = nvds_measure_buffer_latency(buf, latency_info);
+
+    for(i = 0; i < num_sources_in_batch; i++)
+    {
+      g_print("Source id = %d Frame_num = %d Frame latency = %lf (ms) \n",
+          latency_info[i].source_id,
+          latency_info[i].frame_num,
+          latency_info[i].latency);
+    }
+    g_mutex_unlock (ctx->lock);
+    batch_num++;
+  }
+
+  return GST_PAD_PROBE_OK;
+}
+
 /* This is the buffer probe function that we have registered on the sink pad
  * of the OSD element. All the infer elements in the pipeline shall attach
  * their metadata to the GstBuffer, here we will iterate & process the metadata
@@ -144,9 +228,9 @@ osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
         nvds_add_display_meta_to_frame(frame_meta, display_meta);
     }
 
-    g_print ("Frame Number = %d Number of objects = %d "
-            "Vehicle Count = %d Person Count = %d\n",
-            frame_number, num_rects, vehicle_count, person_count);
+    // g_print ("Frame Number = %d Number of objects = %d "
+    //         "Vehicle Count = %d Person Count = %d\n",
+    //         frame_number, num_rects, vehicle_count, person_count);
     frame_number++;
     return GST_PAD_PROBE_OK;
 }
@@ -586,6 +670,30 @@ main (int argc, char *argv[])
     gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
         osd_sink_pad_buffer_probe, NULL, NULL);
 
+  GstPad *sink_pad =  gst_element_get_static_pad (nvvidconv1, "src");
+  if (!sink_pad)
+    g_print ("Unable to get sink pad\n");
+  else {
+    LatencyCtx *ctx = (LatencyCtx *)g_malloc0(sizeof(LatencyCtx));
+    ctx->lock = (GMutex *)g_malloc0(sizeof(GMutex));
+    ctx->num_sources = argc - 2;
+    gst_pad_add_probe (sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
+        latency_measurement_buf_prob, ctx, NULL);
+  }
+  gst_object_unref (sink_pad);
+
+  GstPad *tiler_pad =  gst_element_get_static_pad (nvtiler, "sink");
+  if (!tiler_pad)
+    g_print ("Unable to get tiler_pad pad\n");
+  else {
+    NvDsAppPerfStructInt *str =  (NvDsAppPerfStructInt *)g_malloc0(sizeof(NvDsAppPerfStructInt));
+    DemoPerfCtx *perf_ctx = (DemoPerfCtx *)g_malloc0(sizeof(DemoPerfCtx));
+    g_mutex_init(&perf_ctx->fps_lock);
+    str->context = perf_ctx;
+    enable_perf_measurement (str, tiler_pad, argc-2, 1, 0, perf_cb);
+  }
+  gst_object_unref (tiler_pad);
+
   /* Set the pipeline to "playing" state */
   g_print ("Now playing: %s\n", argv[1]);
   gst_element_set_state (pipeline, GST_STATE_PLAYING);

9. [DS 5.0GA_Jetson_App] Capture HW & SW Memory Leak log
nvmemstat.py.txt (4.7 KB)

  1. Download attachment onto Jetson device and rename to nvmemstat.py
  2. Install “lsof” tool
    $ sudo apt-get install lsof
  3. Run your application on Jetson in one terminal or background
  4. Run this script with command :
    $ sudo ./nvmemstat.py -p PROGRAM_NAME // replace PROGRAM_NAME with the application name in step #3
    this script will monitor the hardware memory, SW memory, etc.
  5. Share the log on the topic for further triage

10. [ALL_Jetson_plugin] Jetson GStreamer Plugins Using with DeepStream
For users of DeepStream on Jetson (JetPack), there are some accelerated GStreamer plugins which are hardware accelerated by Jetson but are not listed in the DeepStream plugin list GStreamer Plugin Overview — DeepStream 6.1.1 Release documentation.

Some of these plugins can be used in the DeepStream pipeline to extend the DeepStream functions, while some of them are not compatible with the DeepStream SDK.

The basic document for the Gstreamer accelerated plugins is Multimedia — Jetson Linux
Developer Guide 34.1 documentation (nvidia.com)

DeepStream compatible plugins:

  • nvegltransform: NvEGLTransform

Typical usage:

gst-launch-1.0 uridecodebin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! m.sink_0 nvstreammux name=m batch-size=1 width=1280 height=720 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt ! nvtracker tracker-width=640 tracker-height=480 ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ll-config-file=config_tracker_NvDCF_perf.yml enable-batch-process=1 ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=RGBA' ! nvmultistreamtiler ! nvdsosd ! nvvideoconvert ! nvegltransform ! nveglglessink

  • nvarguscamerasrc: NvArgusCameraSrc

Typical usage:

gst-launch-1.0 nvarguscamerasrc bufapi-version=true sensor-id=0 ! 'video/x-raw(memory:NVMM),width=640,height=480,framerate=30/1,format=NV12' ! m.sink_0 nvstreammux name=m batch-size=1 width=1280 height=720 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt ! nvtracker tracker-width=640 tracker-height=480 ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ll-config-file=config_tracker_NvDCF_perf.yml enable-batch-process=1 ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=RGBA' ! nvmultistreamtiler ! nvdsosd ! nvvideoconvert ! nvegltransform ! nveglglessink

The related topic in forum:

Segfault when nvvideoconvert and nvv4l2h265enc are used together - Intelligent Video Analytics / DeepStream SDK - NVIDIA Developer Forums

  • nvv4l2camerasrc: nvv4l2camerasrc: NvV4l2CameraSrc

Typical usage:

gst-launch-1.0 nvv4l2camerasrc device=/dev/video0 bufapi-version=1 ! 'video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1' ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=NV12' ! mx.sink_0 nvv4l2camerasrc device=/dev/video1 bufapi-version=1 ! 'video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1' ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=NV12' ! mx.sink_1 nvstreammux width=1920 height=1080 batch-size=2 live-source=1 name=mx ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt batch-size=2 ! nvvideoconvert ! nvmultistreamtiler width=1920 height=1080 rows=1 columns=2 ! nvvideoconvert ! nvdsosd ! nvegltransform ! nveglglessink sync=0

The related topic in forum:
Low camera frame rate - Intelligent Video Analytics / DeepStream SDK - NVIDIA Developer Forums

  • nvdrmvideosink: Nvidia Drm Video Sink

Typical pipeline:
gst-launch-1.0 filesrc location=/opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! qtdemux ! h264parse ! nvv4l2decoder ! nvvideoconvert ! m.sink_0 nvstreammux name=m batch-size=1 width=1920 height=1080 ! nvinfer config-file-path= /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt ! nvdrmvideosink conn_id=0 plane_id=1 set_mode=0 -e

The related topic in forum:
Which videosink for Jetson TX2 in EGLFS? - Jetson & Embedded Systems / Jetson TX2 - NVIDIA Developer Forums

  • nv3dsink: Nvidia 3D sink

Typical pipeline:
gst-launch-1.0 filesrc location=/opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! qtdemux ! h264parse ! nvv4l2decoder ! nvvideoconvert ! m.sink_0 nvstreammux name=m batch-size=1 width=1920 height=1080 ! nvinfer config-file-path= /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt ! nv3dsink sync=false

Note: The nv3dsink plugin is a window-based rendering sink, and based on X11.

  • nvoverlaysink: OpenMax Video Sink

Typical pipeline:

gst-launch-1.0 filesrc location=/opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! qtdemux ! h264parse ! nvv4l2decoder bufapi-version=1 ! nvvideoconvert ! m.sink_0 nvstreammux name=m batch-size=1 width=1920 height=1080 ! nvinfer config-file-path= /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt ! nvoverlaysink sync=0

Note: The nvoverlaysink plugin is deprecated in L4T release 32.1. Please use nvdrmvideosink or nv3dsink for rendering gst-v4l2 decoder output.

DeepStream Incompatible Plugins

Typical pipeline:
gst-launch-1.0 nvcompositor name=comp sink_0::xpos=0 sink_0::ypos=0 sink_0::width=960 sink_0::height=540 sink_1::xpos=960 sink_1::ypos=0 sink_1::width=960 sink_1::height=540 sink_2::xpos=0 sink_2::ypos=540 sink_2::width=1920 sink_2::height=540 ! nvegltransform ! nveglglessink \ filesrc location=/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 ! qtdemux ! h264parse ! nvv4l2decoder ! comp. \ filesrc location=/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 ! qtdemux ! h264parse ! nvv4l2decoder ! comp. \ filesrc location=/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.mp4 ! qtdemux ! h264parse ! nvv4l2decoder ! comp. -e

The related topic in forum:
How to Customize layout from Nvmultistream-tiler module from DeepStream - Intelligent Video Analytics / DeepStream SDK - NVIDIA Developer Forums

11. [DS 5.x_All_App] How to implement a python binding

Refer following samples from forum users:
https://github.com/mrtj/pyds_tracker_meta
https://github.com/7633/pyds_analytics_meta

12. [DS 5.0GA_Jetson_App]: Dump NV12 NvBufSurface into a YUV file
Each NV12 NvBufSurface includes two semi-planes which are not continuous in memory.
gstnvinfer_dump_NV12_NvBufSurface.patch (4.9 KB)

This is a sample change to /opt/nvidia/deepstream/deepstream-5.1/sources/gst-plugins/gst-nvinfer/gstnvinfer.cpp to dump the NV12 NvBufSurface before transforming to RGB data.
After getting the YUV file, we can view it in https://rawpixels.net/ as below

13. [DS 5.x_All_App] How to access and modify the NvBufSurface

Refer Deepstream sample code snippet - #3 by bcao

14. [All_Jetson_App] Check memory leakage with valgrind

  1. Install valgrind with below command
    $ sudo apt-get install valgrind valgrind-dbg
  2. Run application with below command
    $ valgrind --tool=memcheck --leak-check=full --num-callers=100 --show-leak-kinds=definite,indirect --track-origins=yes ./app
15. [DSx_All_App] Debug Tips for DeepStream Accuracy Issue
    15.1 Confirm your model has got good accuracy in training and inference outside DeepStream
    15.2 When deploying a ONNX model to DeepStream with nvinfer plugin, confirm if below nvinfer parameters are set correctly
    15.2.1 Input scale & offset
    1). net-scale-factor =
    2). offsets
    The usage of these two parameters are as below (from doc)


    15.2.2 Input Order
    1). network-input-order= // 0:NCHW 1:NHWC
    2). infer-dims= // if network-input-order=1, i.e. NHWC, infer-dims must be specified, otherwise, nvinfer can’t detect input dims automatically
    3). model-color-format= // 0: RGB 1: BGR 2: GRAY
    15.2.3 scale and padding
    1). maintain-aspect-ratio= // whether to maintain aspect ratio while scaling input
    2). symmetric-padding= // whether to pad image symmetrically while scaling input. By default, it’s asymmetrical padding and the image will be scaled to the top left corner.
    15.2.4 inference precision
    1). network-mode= // 0: FP32 1: INT8 2: FP16. If INT8 accuracy is not good, try FP16 or FP32
    15.2.5 threshold
    1). threshold=
    2). pre-cluster-threshold=
    3). Post-cluster-threshold=
    Above are some highlighted parameters for a quick check for accuracy. For more detailed information, please refer to the nvinfer doc - Gst-nvinfer — DeepStream 6.1.1 Release documentation
    15.3 Dump the input or output of the nvinfer
    Below two items in DeepStream SDK FAQ - #9 by mchi
    2. [DS5.0GA_Jetson_dGPU_Plugin] Dump the Inference Input ==> compare the input between DS and your own standalone inference/training app
    3. [DS5_Jetson_dGPU_Plugin] Dump the Inference outputs ==> then apply your own parser offline check this output data

16. [DeepStream 6.0 GA] python binding installation

Download the wheel files directly from Releases · NVIDIA-AI-IOT/deepstream_python_apps · GitHub
Or build it referring to steps below:

16.1 dGPU+x86 platform & Triton docker

[DeepStream 6.0] Unable to install python_gst into nvcr.io/nvidia/deepstream:6.0-triton container - #5 by rpaliwal_nvidia

16.2 dGPU+x86 platform & non-Triton docker

  Please refer to deepstream_python_apps/bindings at master · NVIDIA-AI-IOT/deepstream_python_apps · GitHub and the steps below if you use the DS 6.0 GA docker -
## 1.  Prerequisites
apt install -y git python-dev python3 python3-pip python3.6-dev python3.8-dev cmake g++ build-essential \
    libglib2.0-dev libglib2.0-dev-bin python-gi-dev libtool m4 autoconf automake

# 2. Gst-python
cd /opt/nvidia/deepstream/deepstream/sources/apps/
git clone https://github.com/NVIDIA-AI-IOT/deepstream_python_apps.git
cd deepstream_python_apps/
git submodule update --init
apt-get install --reinstall ca-certificates
cd 3rdparty/gst-python/
./autogen.sh
make && make install

# 3. install pyds
cd deepstream_python_apps/bindings/
mkdir build
cd build
cmake ..
make
pip3 install ./pyds-1.1.0-py3-none-linux_x86_64.whl

# 4. run sample
cd deepstream_python_apps
mv  apps/* ./
cd deepstream-test1/
python3 deepstream_test_1.py ../../../../samples/streams/sample_qHD.h264
![image|690x361](upload://yKIofGABfyeSYJKEdsr1j5OFOI2.png)

16.3 Jetson dockers

Refer to deepstream_python_apps/bindings at master · NVIDIA-AI-IOT/deepstream_python_apps · GitHub and the steps below if you use the DS 6.0 GA docker -

## 1.  Prerequisites
apt-get update
apt install -y git python-dev python3 python3-pip python3.6-dev python3.8-dev cmake g++ build-essential \
    libglib2.0-dev libglib2.0-dev-bin python-gi-dev libtool m4 autoconf automake

# 2. Gst-python
cd /opt/nvidia/deepstream/deepstream/sources/apps/
git clone https://github.com/NVIDIA-AI-IOT/deepstream_python_apps.git
cd deepstream_python_apps/
git submodule update --init
apt-get install --reinstall ca-certificates
cd 3rdparty/gst-python/
./autogen.sh
make && make install

# 3. install pyds
cd deepstream_python_apps/bindings/
mkdir build
cd build
cmake ..  -DPYTHON_MAJOR_VERSION=3 -DPYTHON_MINOR_VERSION=6 -DPIP_PLATFORM=linux_aarch64 -DDS_PATH=/opt/nvidia/deepstream/deepstream
make
pip3 install ./pyds-1.1.0-py3-none-linux_aarch64.whl

# 4. run sample
cd deepstream_python_apps
mv  apps/* ./
cd deepstream-test1/
python3 deepstream_test_1.py ../../../../samples/streams/sample_qHD.h264

17.[DeepStream_dGPU_App] Using OpenCV to run deepstream pipeline

Sometimes the gstreamer pipeline in opencv will fail. Please refer to the following topic to resolve this problem.

How to compile OpenCV with Gstreamer [Ubuntu&Windows] | by Galaktyk 01 | Medium

18. Open model deployment on DeepStream (Thanks for the sharing!)
Yolo2/3/4/5/OR : Improved DeepStream for YOLO models (Thanks @marcoslucianops )
YoloV4 : GitHub - NVIDIA-AI-IOT/yolo_deepstream + deepstream_yolov4.tgz - Google Drive
YoloV4+dspreprocess : deepstream_yolov4_with_nvdspreprocess.tgz - Google Drive
YoloV5 + nvinfer : GitHub - beyondli/Yolo_on_Jetson
Yolov5-small : Custom Yolov5 on Deepstream 6.0 (Thanks @raghavendra.ramya)
YoloV5+Triton : Triton Inference through docker - #7 by mchi
YoloV5_gpu_optimization: GitHub - NVIDIA-AI-IOT/yolov5_gpu_optimization: This repository provides YOLOV5 GPU optimization sample
YoloV7: GitHub - NVIDIA-AI-IOT/yolo_deepstream
YoloV7+Triton: Deepstream / Triton Server - YOLOV7(Thanks @Levi_Pereira )
YoloV7+nvinfer: Tutorial: How to run YOLOv7 on Deepstream(Thanks @vcmike )

19. [DSx_All_App] How to use classification model as pgie?
The input is a blue car picture and we want to get the blue label. Here is the test command:
blueCar.zip (37.6 KB)
dstest_appsrc_config.txt (3.7 KB)

gst-launch-1.0 filesrc location=blueCar.jpg ! jpegdec ! videoconvert ! video/x-raw,format=I420 ! nvvideoconvert ! video/x-raw\(memory:NVMM\),format=NV12 ! mux.sink_0 nvstreammux name=mux batch-size=1 width=1280 height=720 ! nvinfer config-file-path=./dstest_appsrc_config.txt ! nvvideoconvert ! video/x-raw\(memory:NVMM\),format=RGBA ! nvdsosd ! nvvideoconvert ! video/x-raw,format=I420 ! jpegenc ! filesink location=out.jpg

[Access output of Primary Classifier]
[Resnet50 with imagenet dataset image classification using deepstream sdk]

20. How to trouble shoot error cuGraphicsGLRegisterBuffer failed with error(219) gst_eglglessink_cuda_init texture = 1

CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = 219

This indicates an error with OpenGL or DirectX context.

Make sure you use nvidia X driver.
Please follow this to setup nvidia X server. Chapter 6. Configuring X for the NVIDIA Driver
These are some common problems you may meet associated with the driver. Chapter 8. Common Problems (nvidia.com)

https://forums.developer.nvidia.com/t/issue-runnung-deepstream-app-docker-container-5-0-6-0-in-rtx-3080-and-a5000-laptop/213783
cuGraphicsGLRegisterBuffer failed with error(219) gst_eglglessink_cuda_init texture = 1 - Intelligent Video Analytics / DeepStream SDK - NVIDIA Developer Forums

21.[Jetson] TRT version miss match between Deepstream 6.1 docker and device version can be fixed by APT update for Jetpack 5.0.1 DP

1 docker run --rm -it --runtime=nvidia REPOSITORY:TAG
2 remove previous TRT package
  apt-get purge --remove libnvinfer8 libnvinfer-plugin8  libnvinfer-bin python3-libnvinfer
3 apt-get update 
4 install TRT 8.4.0.11 package
  apt-get install libnvinfer8 libnvinfer-plugin8  libnvinfer-bin python3-libnvinfer 
5 Verify TRT version
  nm -D /usr/lib/aarch64-linux-gnu/libnvinfer.so.8.4.0 |grep version

related topic 218888

22. [Jetson] VIC Configuration failed image scale factor exceeds 16
This issue is a limitation of Jetson VIC processing and can be fixed by modifying the configuration, for example:

# model's dimensions: height is 1168, width is 720.
uff-input-dims=3;1168;720;0  
#if scaling-compute-hw = VIC, input-object-min-height needs to be even and greater than or equal to (model height)/16
input-object-min-height=74
#if scaling-compute-hw = VIC, input-object-min-width needs to be even and greater than or equal to (model width)/16
input-object-min-width=46

related topic [VIC Configuration failed image scale factor exceeds 16, use GPU for Transformation - #3 by Amycao]

23. How to change the python sample apps from display to output file or fakesink, for users who do not have a monitor on their device. The patch is based on the test1 sample.

Usage: python3 deepstream_test_1.py <media file or uri> <sink type: 1-filesink; 2-fakesink; 3-display sink>

nvidia@ubuntu:/opt/nvidia/deepstream/deepstream/sources/deepstream_python_apps/apps/deepstream-test1$ diff -Naur deepstream_test_1.py.orig deepstream_test_1.py
--- deepstream_test_1.py.orig	2022-08-15 20:12:39.809775283 +0800
+++ deepstream_test_1.py	2022-08-15 22:06:27.052250778 +0800
@@ -123,8 +123,8 @@
 
 def main(args):
     # Check input arguments
-    if len(args) != 2:
-        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
+    if len(args) != 3:
+        sys.stderr.write("usage: %s <media file or uri> <sink type: 1-filesink; 2-fakesink; 3-display sink>\n" % args[0])
         sys.exit(1)
 
     # Standard GStreamer initialization
@@ -179,14 +179,46 @@
     if not nvosd:
         sys.stderr.write(" Unable to create nvosd \n")
 
-    # Finally render the osd output
-    if is_aarch64():
-        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
-
-    print("Creating EGLSink \n")
-    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
-    if not sink:
-        sys.stderr.write(" Unable to create egl sink \n")
+    if args[2] == '1':
+
+        nvvidconv1 = Gst.ElementFactory.make ("nvvideoconvert", "nvvid-converter1")
+        if not nvvidconv1:
+            sys.stderr.write("Unable to create nvvidconv1")
+        capfilt = Gst.ElementFactory.make ("capsfilter", "nvvideo-caps")
+        if not capfilt:
+            sys.stderr.write("Unable to create capfilt")
+        caps = Gst.caps_from_string ('video/x-raw(memory:NVMM), format=I420')
+#        feature = gst_caps_features_new ("memory:NVMM", NULL)
+#        gst_caps_set_features (caps, 0, feature)
+        capfilt.set_property('caps', caps)
+        print("Creating nvv4l2h264enc \n")
+        nvh264enc = Gst.ElementFactory.make ("nvv4l2h264enc" ,"nvvideo-h264enc")
+        if not nvh264enc:
+            sys.stderr.write("Unable to create nvh264enc")
+        print("Creating filesink \n")    
+        sink = Gst.ElementFactory.make ("filesink", "nvvideo-renderer")
+        sink.set_property('location', './out.h264')
+        if not sink:
+            sys.stderr.write("Unable to create filesink")
+
+    elif args[2] == '2':
+
+        print("Creating fakesink \n")
+        sink = Gst.ElementFactory.make ("fakesink", "fake-renderer")
+        if not sink:
+            sys.stderr.write("Unable to create fakesink")
+
+    elif args[2] == '3':
+
+        print("Creating EGLSink \n")
+        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
+        if not sink:
+            sys.stderr.write(" Unable to create egl sink \n")
+        if is_aarch64():
+            transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
+            if not transform:
+                sys.stderr.write(" Unable to create egl transform \n")
 
     print("Playing file %s " %args[1])
     source.set_property('location', args[1])
@@ -204,9 +236,17 @@
     pipeline.add(pgie)
     pipeline.add(nvvidconv)
     pipeline.add(nvosd)
-    pipeline.add(sink)
-    if is_aarch64():
-        pipeline.add(transform)
+    if args[2] == '1':
+        pipeline.add(nvvidconv1)
+        pipeline.add(capfilt)
+        pipeline.add(nvh264enc)
+        pipeline.add(sink)
+    elif args[2] == '2':
+        pipeline.add(sink)
+    elif args[2] == '3':
+        pipeline.add(sink)
+        if is_aarch64():
+            pipeline.add(transform)
 
     # we link the elements together
     # file-source -> h264-parser -> nvh264-decoder ->
@@ -225,11 +265,19 @@
     streammux.link(pgie)
     pgie.link(nvvidconv)
     nvvidconv.link(nvosd)
-    if is_aarch64():
-        nvosd.link(transform)
-        transform.link(sink)
-    else:
+    if args[2] == '1':
+        nvosd.link(nvvidconv1)
+        nvvidconv1.link(capfilt)
+        capfilt.link(nvh264enc)
+        nvh264enc.link(sink)
+    elif args[2] == '2':
         nvosd.link(sink)
+    elif args[2] == '3':
+        if is_aarch64():
+            nvosd.link(transform)
+            transform.link(sink)
+        else:
+            nvosd.link(sink)
 
     # create an event loop and feed gstreamer bus mesages to it

24. [DeepStream 6.1.1 GA] simple demo for adding dewarper support to deepstream-app

Usage: deepstream-app -c source1_dewarper_test.txt

source1_dewarper_test.txt (3.6 KB)

---
 .../src/deepstream_config_file_parser.c       |  15 ++-
 .../common/src/deepstream_source_bin.c        |   5 -
 .../common/src/deepstream_streammux.c         |   5 +-
 .../deepstream_app_config_parser.c            |   7 +-
 .../deepstream_app_config_parser_yaml.cpp     |   4 +

diff --git a/apps/deepstream/common/src/deepstream_config_file_parser.c b/apps/deepstream/common/src/deepstream_config_file_parser.c
--- a/apps/deepstream/common/src/deepstream_config_file_parser.c
+++ b/apps/deepstream/common/src/deepstream_config_file_parser.c
@@ -76,6 +76,8 @@ GST_DEBUG_CATEGORY (APP_CFG_PARSER_CAT);
 #define CONFIG_GROUP_STREAMMUX_FRAME_NUM_RESET_ON_STREAM_RESET "frame-num-reset-on-stream-reset"
 #define CONFIG_GROUP_STREAMMUX_FRAME_NUM_RESET_ON_EOS "frame-num-reset-on-eos"
 #define CONFIG_GROUP_STREAMMUX_FRAME_DURATION "frame-duration"
+#define CONFIG_GROUP_STREAMMUX_NUM_SURFACES_PER_FRAME "num-surfaces-per-frame"
+
 #define CONFIG_GROUP_STREAMMUX_CONFIG_FILE_PATH "config-file"
 #define CONFIG_GROUP_STREAMMUX_SYNC_INPUTS "sync-inputs"
 #define CONFIG_GROUP_STREAMMUX_MAX_LATENCY "max-latency"
@@ -742,6 +744,11 @@ parse_streammux (NvDsStreammuxConfig *config, GKeyFile *key_file, gchar *cfg_fil
           g_key_file_get_boolean(key_file, CONFIG_GROUP_STREAMMUX,
           CONFIG_GROUP_STREAMMUX_ASYNC_PROCESS, &error);
       CHECK_ERROR(error);
+    } else if (!g_strcmp0(*key, CONFIG_GROUP_STREAMMUX_NUM_SURFACES_PER_FRAME)) {
+        config->num_surface_per_frame =
+            g_key_file_get_integer(key_file, CONFIG_GROUP_STREAMMUX,
+            CONFIG_GROUP_STREAMMUX_NUM_SURFACES_PER_FRAME, &error);
+        CHECK_ERROR(error);
     } else {
       NVGSTDS_WARN_MSG_V ("Unknown key '%s' for group [%s]", *key,
           CONFIG_GROUP_STREAMMUX);
@@ -1070,8 +1077,12 @@ parse_dewarper (NvDsDewarperConfig * config, GKeyFile * key_file, gchar *cfg_fil
         g_key_file_get_integer (key_file, CONFIG_GROUP_DEWARPER,
             CONFIG_GROUP_DEWARPER_NUM_SURFACES_PER_FRAME, &error);
       CHECK_ERROR (error);
-    }
-    else {
+    } else if (!g_strcmp0 (*key, CONFIG_GROUP_DEWARPER_SOURCE_ID)) {
+      config->source_id =
+          g_key_file_get_integer (key_file, CONFIG_GROUP_DEWARPER,
+          CONFIG_GROUP_DEWARPER_SOURCE_ID, &error);
+      CHECK_ERROR (error);
+    } else {
       NVGSTDS_WARN_MSG_V ("Unknown key '%s' for group [%s]", *key,
           CONFIG_GROUP_DEWARPER);
     }
diff --git a/apps/deepstream/common/src/deepstream_source_bin.c b/apps/deepstream/common/src/deepstream_source_bin.c
--- a/apps/deepstream/common/src/deepstream_source_bin.c
+++ b/apps/deepstream/common/src/deepstream_source_bin.c
@@ -1527,11 +1527,6 @@ create_multi_source_bin (guint num_sub_bins, NvDsSourceConfig * configs,
       goto done;
     }
 
-    if(configs->dewarper_config.enable) {
-        g_object_set(G_OBJECT(bin->sub_bins[i].dewarper_bin.nvdewarper), "source-id",
-                configs[i].source_id, NULL);
-    }
-
     bin->num_bins++;
   }
   NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->streammux, "src");
diff --git a/apps/deepstream/common/src/deepstream_streammux.c b/apps/deepstream/common/src/deepstream_streammux.c
--- a/apps/deepstream/common/src/deepstream_streammux.c
+++ b/apps/deepstream/common/src/deepstream_streammux.c
@@ -92,7 +92,10 @@ set_streammux_properties (NvDsStreammuxConfig *config, GstElement *element)
                config->max_latency, NULL);
   g_object_set (G_OBJECT (element), "frame-num-reset-on-eos",
       config->frame_num_reset_on_eos, NULL);
-
+  if (config->num_surface_per_frame > 1) {
+      g_object_set (G_OBJECT (element), "num-surfaces-per-frame",
+          config->num_surface_per_frame, NULL);
+  }
   ret= TRUE;
 
   return ret;
diff --git a/apps/deepstream/sample_apps/deepstream-app/deepstream_app_config_parser.c b/apps/deepstream/sample_apps/deepstream-app/deepstream_app_config_parser.c
--- a/apps/deepstream/sample_apps/deepstream-app/deepstream_app_config_parser.c
+++ b/apps/deepstream/sample_apps/deepstream-app/deepstream_app_config_parser.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -373,6 +373,11 @@ parse_config_file (NvDsConfig *config, gchar *cfg_file_path)
       parse_err = !parse_osd (&config->osd_config, cfg_file);
     }
 
+    if (!g_strcmp0 (*group, CONFIG_GROUP_DEWARPER)) {
+      parse_err = !parse_dewarper (&config->multi_source_config[0].dewarper_config,
+          cfg_file, cfg_file_path);
+    }
+
     if (!g_strcmp0 (*group, CONFIG_GROUP_PREPROCESS)) {
         parse_err =
             !parse_preprocess (&config->preprocess_config, cfg_file,
diff --git a/apps/deepstream/sample_apps/deepstream-app/deepstream_app_config_parser_yaml.cpp b/apps/deepstream/sample_apps/deepstream-app/deepstream_app_config_parser_yaml.cpp
--- a/apps/deepstream/sample_apps/deepstream-app/deepstream_app_config_parser_yaml.cpp
+++ b/apps/deepstream/sample_apps/deepstream-app/deepstream_app_config_parser_yaml.cpp
@@ -129,6 +129,7 @@ parse_config_file_yaml (NvDsConfig *config, gchar *cfg_file_path)
   std::string sink_str = "sink";
   std::string sgie_str = "secondary-gie";
   std::string msgcons_str = "message-consumer";
+  std::string dewarper_str = "dewarper";
 
   config->source_list_enabled = FALSE;
 
@@ -183,6 +184,9 @@ parse_config_file_yaml (NvDsConfig *config, gchar *cfg_file_path)
     else if (paramKey == "osd") {
       parse_err = !parse_osd_yaml(&config->osd_config, cfg_file_path);
     }
+    else if (paramKey.compare(0, dewarper_str.size(), dewarper_str) == 0) {
+      parse_err = !parse_dewarper_yaml (&config->multi_source_config[0].dewarper_config, cfg_file_path);
+    }
     else if (paramKey == "pre-process") {
       parse_err = !parse_preprocess_yaml(&config->preprocess_config, cfg_file_path);
     }

25. [ALL_ALL_nvdsinfer] Add TensorRT Verbose log

To debug nvinfer-related issues inside gst-nvinfer, we can enable nvinfer logging by setting the environment variable "NVDSINFER_LOG_LEVEL".

The value can be set to one of the following numbers for different log levels:

0: NVDSINFER_LOG_ERROR
1: NVDSINFER_LOG_WARNING
2: NVDSINFER_LOG_INFO
3: NVDSINFER_LOG_DEBUG

Example for enabling debug log:
export NVDSINFER_LOG_LEVEL=3

When the NVDSINFER_LOG_LEVEL environment variable is not set, the default log level is error (NVDSINFER_LOG_ERROR).