DeepStream sample code snippet

This topic is mainly for sharing sample code snippets for DeepStream; a sample code snippet is always the best answer.
Please use the following format to share your code snippet:
1. A simple description of your code
2. Which version it is based on
3. Which platform you have verified your code on

  1. Simple description of the code
    a. Create an NV12 OpenCV Mat from the NvMM memory.
    b. Work on the OpenCV NV12 Mat and convert it to an OpenCV RGBA Mat.
    c. Create another scratch RGBA NvBufSurface and use an OpenCV operation to rotate the RGBA data into a Mat that wraps that surface.
    d. Transform the rotated RGBA Mat back to the NV12 memory in the original input surface.
    e. The NvMM NV12 memory of the original buffer is now rotated as well.
  2. Which version it is based on
    a. DS 5.0 DP
  3. Which platform the code has been verified on
    a. T4
--- a/apps/deepstream/sample_apps/deepstream-test1/deepstream_test1_app.c
+++ b/apps/deepstream/sample_apps/deepstream-test1/deepstream_test1_app.c
@@ -24,6 +24,14 @@
 #include <glib.h>
 #include <stdio.h>
 #include "gstnvdsmeta.h"
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include "nvbufsurface.h"
+#include "nvbufsurftransform.h"
+#include <iostream>
+/* Open CV headers */
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/highgui/highgui.hpp"
 
 #define MAX_DISPLAY_LEN 64
 
@@ -40,11 +48,133 @@
  * based on the fastest source's framerate. */
 #define MUXER_BATCH_TIMEOUT_USEC 40000
 
+#define CHECK_CUDA_STATUS(cuda_status,error_str) do { \
+  if ((cuda_status) != cudaSuccess) { \
+    g_print ("Error: %s in %s at line %d (%s)\n", \
+        error_str, __FILE__, __LINE__, cudaGetErrorName(cuda_status)); \
+  } \
+} while (0)
+
+using namespace cv;
+using namespace std;
+
 gint frame_number = 0;
 gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
   "Roadsign"
 };
+/* infer_sink_pad_buffer_probe will modify the raw frame data in place using OpenCV. */
 
+static GstPadProbeReturn
+infer_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
+    gpointer u_data)
+{
+  GstBuffer *buf = (GstBuffer *) info->data;
+  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
+  NvDsMetaList * l_frame = NULL;
+  char file_name[128];
+
+  // Get original raw data
+  GstMapInfo in_map_info;
+  if (!gst_buffer_map (buf, &in_map_info, GST_MAP_READ)) {
+      g_print ("Error: Failed to map gst buffer\n");
+      gst_buffer_unmap (buf, &in_map_info);
+      return GST_PAD_PROBE_OK;
+  }
+  NvBufSurface *surface = (NvBufSurface *)in_map_info.data;
+  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
+    l_frame = l_frame->next) {
+      NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
+      // TODO: for CUDA device memory we need to use cudaMemcpy
+      NvBufSurfaceMap (surface, -1, -1, NVBUF_MAP_READ);
+      /* Cache the mapped data for CPU access */
+      NvBufSurfaceSyncForCpu (surface, 0, 0); //will do nothing for unified memory type on dGPU
+      guint height = surface->surfaceList[frame_meta->batch_id].height;
+      guint width = surface->surfaceList[frame_meta->batch_id].width;
+
+      //Create a Mat that wraps the NvMM memory; refer to the OpenCV API for how to create a Mat
+      Mat nv12_mat = Mat(height*3/2, width, CV_8UC1, surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
+      surface->surfaceList[frame_meta->batch_id].pitch);
+      //Convert NV12 to RGBA so algorithms that expect RGBA can be applied
+      Mat rgba_mat;
+      cv::cvtColor(nv12_mat, rgba_mat, CV_YUV2BGRA_NV12);
+      //only rotate the first 10 frames
+      if(frame_number < 10){
+        NvBufSurface *inter_buf = nullptr;
+        NvBufSurfaceCreateParams create_params;
+        create_params.gpuId  = surface->gpuId;
+        create_params.width  = width;
+        create_params.height = height;
+        create_params.size = 0;
+        create_params.colorFormat = NVBUF_COLOR_FORMAT_RGBA;
+        create_params.layout = NVBUF_LAYOUT_PITCH;
+      #ifdef __aarch64__
+        create_params.memType = NVBUF_MEM_DEFAULT;
+      #else
+        create_params.memType = NVBUF_MEM_CUDA_UNIFIED;
+      #endif
+        //Create another scratch RGBA NvBufSurface
+        if (NvBufSurfaceCreate (&inter_buf, 1,
+          &create_params) != 0) {
+          GST_ERROR ("Error: Could not allocate internal buffer ");
+          return GST_PAD_PROBE_OK;
+        }
+        if(NvBufSurfaceMap (inter_buf, 0, -1, NVBUF_MAP_READ_WRITE) != 0)
+          cout << "map error" << endl;
+        NvBufSurfaceSyncForCpu (inter_buf, 0, 0);
+        Mat rotate_mat = Mat(height, width, CV_8UC4, inter_buf->surfaceList[0].mappedAddr.addr[0],
+          inter_buf->surfaceList[0].pitch);
+        // Apply your algorithm that works with an OpenCV Mat; here we only rotate the Mat for demo
+        rotate(rgba_mat, rotate_mat, ROTATE_180);
+        NvBufSurfaceSyncForDevice(inter_buf, 0, 0);
+        inter_buf->numFilled = 1;
+        NvBufSurfTransformConfigParams transform_config_params;
+        NvBufSurfTransformParams transform_params;
+        NvBufSurfTransformRect src_rect;
+        NvBufSurfTransformRect dst_rect;
+        cudaStream_t cuda_stream;
+        CHECK_CUDA_STATUS (cudaStreamCreate (&cuda_stream),
+          "Could not create cuda stream");
+        transform_config_params.compute_mode = NvBufSurfTransformCompute_Default;
+        transform_config_params.gpu_id = surface->gpuId;
+        transform_config_params.cuda_stream = cuda_stream;
+        /* Set the transform session parameters for the conversions executed in this
+          * thread. */
+        NvBufSurfTransform_Error err = NvBufSurfTransformSetSessionParams (&transform_config_params);
+        if (err != NvBufSurfTransformError_Success) {
+          cout <<"NvBufSurfTransformSetSessionParams failed with error "<< err << endl;
+          return GST_PAD_PROBE_OK;
+        }
+        /* Set the transform ROIs for source and destination, only do the color format conversion*/
+        src_rect = {0, 0, width, height};
+        dst_rect = {0, 0, width, height};
+
+        /* Set the transform parameters */
+        transform_params.src_rect = &src_rect;
+        transform_params.dst_rect = &dst_rect;
+        transform_params.transform_flag =
+          NVBUFSURF_TRANSFORM_FILTER | NVBUFSURF_TRANSFORM_CROP_SRC |
+            NVBUFSURF_TRANSFORM_CROP_DST;
+        transform_params.transform_filter = NvBufSurfTransformInter_Default;
+
+        /* Format conversion transform: copy the rotated RGBA buffer back into the NV12 memory of the original input surface */
+        err = NvBufSurfTransform (inter_buf, surface, &transform_params);
+        if (err != NvBufSurfTransformError_Success) {
+          cout << "NvBufSurfTransform failed with error " << err << " while converting buffer" << endl;
+          return GST_PAD_PROBE_OK;
+        }
+
+        // Re-read the original surface, which now holds the rotated frame
+        cv::cvtColor(nv12_mat, rgba_mat, CV_YUV2BGRA_NV12);
+        //dump the rotated original NvBufSurface to a JPEG for verification
+        sprintf(file_name, "nvinfer_probe_rotate_stream%2d_%03d.jpg", frame_meta->source_id, frame_number);
+        imwrite(file_name, rgba_mat);
+        NvBufSurfaceUnMap(inter_buf, 0, 0);
+      }
+
+      NvBufSurfaceUnMap(surface, 0, 0);
+  }
+  return GST_PAD_PROBE_OK;
+}
 /* osd_sink_pad_buffer_probe  will extract metadata received on OSD sink pad
  * and update params for drawing rectangle, object information etc. */
 
@@ -82,7 +212,7 @@ osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
         display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
         NvOSD_TextParams *txt_params  = &display_meta->text_params[0];
         display_meta->num_labels = 1;
-        txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
+        txt_params->display_text = (char *)g_malloc0 (MAX_DISPLAY_LEN);
         offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ", person_count);
         offset = snprintf(txt_params->display_text + offset , MAX_DISPLAY_LEN, "Vehicle = %d ", vehicle_count);
 
@@ -91,7 +221,7 @@ osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
         txt_params->y_offset = 12;
 
         /* Font , font-color and font-size */
-        txt_params->font_params.font_name = "Serif";
+        txt_params->font_params.font_name = (char *)"Serif";
         txt_params->font_params.font_size = 10;
         txt_params->font_params.font_color.red = 1.0;
         txt_params->font_params.font_color.green = 1.0;
@@ -108,9 +238,9 @@ osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
         nvds_add_display_meta_to_frame(frame_meta, display_meta);
     }
 
-    g_print ("Frame Number = %d Number of objects = %d "
-            "Vehicle Count = %d Person Count = %d\n",
-            frame_number, num_rects, vehicle_count, person_count);
+    // g_print ("Frame Number = %d Number of objects = %d "
+    //         "Vehicle Count = %d Person Count = %d\n",
+    //         frame_number, num_rects, vehicle_count, person_count);
     frame_number++;
     return GST_PAD_PROBE_OK;
 }
@@ -156,6 +286,7 @@ main (int argc, char *argv[])
   GstBus *bus = NULL;
   guint bus_watch_id;
   GstPad *osd_sink_pad = NULL;
+  GstPad *infer_sink_pad = NULL;
 
   /* Check input arguments */
   if (argc != 2) {
@@ -203,7 +334,7 @@ main (int argc, char *argv[])
 #ifdef PLATFORM_TEGRA
   transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
 #endif
-  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
+  sink = gst_element_factory_make ("fakesink", "nvvideo-renderer");
 
   if (!source || !h264parser || !decoder || !pgie
       || !nvvidconv || !nvosd || !sink) {
@@ -295,6 +426,13 @@ main (int argc, char *argv[])
     return -1;
   }
 #endif
+  infer_sink_pad = gst_element_get_static_pad (pgie, "sink");
+  if (!infer_sink_pad)
+    g_print ("Unable to get sink pad\n");
+  else
+    gst_pad_add_probe (infer_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
+        infer_sink_pad_buffer_probe, NULL, NULL);
+  gst_object_unref (infer_sink_pad);
 
   /* Lets add probe to get informed of the meta data generated, we add probe to
    * the sink pad of the osd element, since by that time, the buffer would have
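
Note: since the patch above pulls <iostream>, the OpenCV headers and the NvBufSurface/NvBufSurfTransform APIs into deepstream_test1_app.c, the sample has to be compiled as C++ and linked against OpenCV, the DeepStream buffer libraries and the CUDA runtime. A rough sketch of the extra Makefile pieces, assuming a default DS 5.0 install location and OpenCV discoverable via pkg-config (the variable names and paths here are illustrative, not the stock sample Makefile):

# Hypothetical Makefile additions for building the patched sample as C++
# (package names and paths are assumptions; adjust to your install).
CXX      ?= g++
DS_LIB   ?= /opt/nvidia/deepstream/deepstream-5.0/lib
CUDA_LIB ?= /usr/local/cuda/lib64
OPENCV_CFLAGS := $(shell pkg-config --cflags opencv4 2>/dev/null || pkg-config --cflags opencv)
OPENCV_LIBS   := $(shell pkg-config --libs   opencv4 2>/dev/null || pkg-config --libs   opencv)
CFLAGS += $(OPENCV_CFLAGS)
LIBS   += $(OPENCV_LIBS) -L$(DS_LIB) -lnvbufsurface -lnvbufsurftransform -L$(CUDA_LIB) -lcudart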

@bcao
How can I transform the rotated image (in my case it is not rotation but facial alignment) to GPU memory when the transformed image has a different shape from the original image? For example, 343x532 → 112x112.
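
For anyone with the same question, one direction that follows from the probe above (a sketch, not a verified answer): allocate the scratch RGBA NvBufSurface with the target dimensions instead of the source dimensions, write the aligned output into the Mat that wraps it, and sync it for the device. The 112x112 size is just the example from the question; aligned_buf and aligned_mat are hypothetical names, and surface / rgba_mat refer to the variables in infer_sink_pad_buffer_probe above.

// Sketch: scratch surface sized to the network input (112x112 from the question),
// following the same pattern as infer_sink_pad_buffer_probe.
NvBufSurfaceCreateParams create_params = {};
create_params.gpuId       = surface->gpuId;
create_params.width       = 112;                      // destination width
create_params.height      = 112;                      // destination height
create_params.colorFormat = NVBUF_COLOR_FORMAT_RGBA;
create_params.layout      = NVBUF_LAYOUT_PITCH;
#ifdef __aarch64__
create_params.memType     = NVBUF_MEM_DEFAULT;
#else
create_params.memType     = NVBUF_MEM_CUDA_UNIFIED;
#endif

NvBufSurface *aligned_buf = nullptr;                   // hypothetical name
if (NvBufSurfaceCreate (&aligned_buf, 1, &create_params) != 0)
  return GST_PAD_PROBE_OK;
NvBufSurfaceMap (aligned_buf, 0, -1, NVBUF_MAP_READ_WRITE);
NvBufSurfaceSyncForCpu (aligned_buf, 0, 0);

// Mat backed by the 112x112 surface; write the facial-alignment result here
// (e.g. with cv::warpAffine from rgba_mat into aligned_mat).
Mat aligned_mat (112, 112, CV_8UC4,
                 aligned_buf->surfaceList[0].mappedAddr.addr[0],
                 aligned_buf->surfaceList[0].pitch);
// ... alignment from rgba_mat into aligned_mat ...
NvBufSurfaceSyncForDevice (aligned_buf, 0, 0);
aligned_buf->numFilled = 1;
// aligned_buf now holds the 112x112 result in GPU-accessible memory; if it has to be
// scaled or converted into another surface, NvBufSurfTransform accepts src and dst
// rectangles of different sizes.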

Hi,
I want to access the image array from the GStreamer buffer and feed it to a custom model, and I found this example.
One big problem with this example is that it copies the frame from the GPU buffer to a CPU buffer. I don't want to copy to a CPU buffer, because I then have to convert it to a tensor on CUDA and feed it to the model.
I want to get the image array in GPU memory without copying it to a CPU buffer.
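
One possible direction (a sketch, not tested against this sample): on dGPU, when the surface memory type is CUDA device or unified memory, surfaceList[i].dataPtr is a CUDA-accessible pointer, so it can be wrapped in a cv::cuda::GpuMat without any device-to-host copy. This assumes the frame is already packed RGBA (for example after an NvBufSurfTransform conversion, or by configuring nvvideoconvert to output RGBA), since planar NV12 does not map cleanly onto a single GpuMat; surface and frame_meta refer to the variables in the probe above.

// Sketch: wrap the frame's device memory in a GpuMat, no CPU copy.
// Requires <opencv2/core/cuda.hpp>, OpenCV built with CUDA, and an RGBA (CV_8UC4) surface.
NvBufSurfaceParams &p = surface->surfaceList[frame_meta->batch_id];
cv::cuda::GpuMat gpu_frame ((int) p.height, (int) p.width, CV_8UC4,
                            p.dataPtr,                 // device pointer into the NvBufSurface
                            p.pitch);
// gpu_frame aliases the NvBufSurface memory; cv::cuda operations or a conversion to a
// CUDA tensor can consume it directly, with no NvBufSurfaceMap / SyncForCpu needed.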

Hi LoveNvidia,

Please open a new topic if this is still an issue that needs support.

Thanks

@LoveNvidia

FWIW, I am in the same situation. I am not really interested in a cv::Mat, but a GpuMat would be great, ideally obtained directly from libArgus without GStreamer in the middle.

Basically Jetson Utils GstCamera, but with no GStreamer. I would imagine a simple camera class yielding a GpuMat would be very popular. If you post a new topic, please @ me.

Hi sergey.maltsev,

Please open a new topic if this is still an issue that needs support.

Thanks