How to feed an OpenCV frame into an NVIDIA DeepStream pipeline?

Hi everyone,
I am struggling to find a way to input a single cv::Mat frame into an NVIDIA DeepStream pipeline using C++. I tried the code below, but I received the following error message:

ERROR from element gstappsrc: Internal data stream error.

Error details: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:dst_opencv/GstAppSrc:source: streaming stopped, reason not-negotiated (-4)

Returned, stopping playback

Deleting pipeline

If anyone has an idea how to do it, or can show me where I am going wrong, I will be very thankful.

My Setup:
• Hardware Platform: GPU
• DeepStream Version: 4.0.1
• TensorRT Version: 5.1.2.2
• NVIDIA GPU Driver Version: 440.100

#include <gst/gst.h>
#include <glib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "gstnvdsmeta.h"
#include "nvdsmeta_schema.h"
#include <gst/app/gstappsrc.h>

/* The muxer output resolution must be set if the input streams will be of
 * different resolution. The muxer will scale all the input frames to this
 * resolution. */
#define MUXER_OUTPUT_WIDTH 1920
#define MUXER_OUTPUT_HEIGHT 1080

#define TILED_OUTPUT_WIDTH 1920
#define TILED_OUTPUT_HEIGHT 1080

/* Muxer batch formation timeout, in microseconds (4000000 usec = 4 sec here).
 * Should ideally be set based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 4000000

/* NVIDIA Decoder source pad memory feature. This feature signifies that source
 * pads having this capability will push GstBuffers containing cuda buffers. */
#define GST_CAPS_FEATURES_NVMM "memory:NVMM"

// detection models
#define MODEL_CONFIG "dstest3_pgie_config.txt"
//#define MODEL_CONFIG "yoloV2_pgie_config.txt"
//#define MODEL_CONFIG "fd_lpd_config.txt"

#define FPS_PRINT_INTERVAL 300

static gboolean bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_WARNING:
    {
      gchar *debug;
      GError *error;
      gst_message_parse_warning (msg, &error, &debug);
      g_printerr ("WARNING from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      g_free (debug);
      g_printerr ("Warning: %s\n", error->message);
      g_error_free (error);
      break;
    }
    case GST_MESSAGE_ERROR:
    {
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      if (debug)
        g_printerr ("Error details: %s\n", debug);
      g_free (debug);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}
//-------------------------------------------------------
static void cb_newpad (GstElement * decodebin, GstPad * decoder_src_pad, gpointer data)
{
  g_print ("In cb_newpad\n");
  GstCaps *caps = gst_pad_get_current_caps (decoder_src_pad);
  const GstStructure *str = gst_caps_get_structure (caps, 0);
  const gchar *name = gst_structure_get_name (str);
  GstElement *source_bin = (GstElement *) data;
  GstCapsFeatures *features = gst_caps_get_features (caps, 0);

  /* Need to check if the pad created by the decodebin is for video and not
   * audio. */
  if (!strncmp (name, "video", 5)) {
    /* Link the decodebin pad only if decodebin has picked nvidia
     * decoder plugin nvdec_*. We do this by checking if the pad caps contain
     * NVMM memory features. */
    if (gst_caps_features_contains (features, GST_CAPS_FEATURES_NVMM)) {
      /* Get the source bin ghost pad */
      GstPad *bin_ghost_pad = gst_element_get_static_pad (source_bin, "src");
      if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
              decoder_src_pad)) {
        g_printerr ("Failed to link decoder src pad to source bin ghost pad\n");
      }
      gst_object_unref (bin_ghost_pad);
    } else {
      g_printerr ("Error: Decodebin did not pick nvidia decoder plugin.\n");
    }
  }
}
//-------------------------------------------------------
static void decodebin_child_added (GstChildProxy * child_proxy, GObject * object,gchar * name, gpointer user_data)
{
  g_print ("Decodebin child added: %s\n", name);
  if (g_strrstr (name, "decodebin") == name) {
    g_signal_connect (G_OBJECT (object), "child-added",
        G_CALLBACK (decodebin_child_added), user_data);
  }
  if (g_strstr_len (name, -1, "nvv4l2decoder") == name) {
    g_print ("Seting bufapi_version\n");
    g_object_set (object, "bufapi-version", TRUE, NULL);
  }
}
//-------------------------------------------------------
void buffer_destroy(gpointer data) {cv::Mat* done = (cv::Mat*)data; delete done;}
//-----------------------------------------------------
static void cb_need_data (GstElement *appsrc, guint unused_size, gpointer user_data)
{
  g_print ("cb_need_data function\n");
  GstBuffer *buffer;
  GstMapInfo map;
  guint height, width, channels;
  GstFlowReturn ret;
  guchar *data1;

  g_print("userdata: %s \n",user_data);

  
  cv::Mat frame = cv::imread ((const char *) user_data, cv::IMREAD_COLOR);

  height    = frame.size().height;  
  width     = frame.size().width;
  channels  = frame.channels();
  data1      = (guchar *)frame.data;
  gsize sizeInBytes = height*width*channels;
 

  g_print("frame_height: %d \n",height);
  g_print("frame_width: %d \n",width);
  g_print("frame_channels: %d \n",channels);
  g_print("frame_size: %d \n",sizeInBytes);
  
  buffer = gst_buffer_new_allocate (NULL, sizeInBytes, NULL);
  gst_buffer_map (buffer, &map, GST_MAP_WRITE);
  memcpy ((guchar *) map.data, data1, gst_buffer_get_size (buffer));
  gst_buffer_unmap (buffer, &map);  /* unmap before pushing the buffer */

  g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
  gst_buffer_unref (buffer);  /* push-buffer takes its own reference */

  if (ret != GST_FLOW_OK) { g_print ("cv 2 gst got an error\n"); return; }
  g_print ("cv converted to gst\n");
}
//-------------------------------------------------------
static GstPadProbeReturn tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,gpointer u_data)
{
    char *msg;
    g_object_get(G_OBJECT(u_data),"last-message",&msg,NULL);
    if (msg!=NULL) {g_print("FPS =%s \n",msg);}
    return GST_PAD_PROBE_OK;
}
//-------------------------------------------------------
//------------------MAIN---------------------------------
//-------------------------------------------------------
int main (int argc, char **argv)
{
    GMainLoop *loop;
    GstElement *pipeline,*sink,*tiler,*nvvidconv,*nvosd,*nvsink,*pgie; //,*streammux
    GstElement* appsrc,*conv;
    GstBus *bus;
    guint bus_watch_id;
    GstPad *tiler_src_pad;
    guint num_sources;
    guint tiler_rows,tiler_columns;
    guint pgie_batch_size;
    GstCaps *caps;

    //check input args
    if(argc <2) {g_printerr("Usage: %s <uri1> [uri2] ... [uriN] \n", argv[0]); return -1;}
    num_sources=argc-1;
    
    //start gstreamer
    gst_init(&argc,&argv);
    loop=g_main_loop_new(NULL,FALSE);

    //Creating pipeline
    pipeline=gst_pipeline_new("dst_opencv");
    //streammux=gst_element_factory_make("nvstreammux","nvstream-muxer");

    if(!pipeline){g_printerr("pipeline could not be created"); return -1;}
    //if(!streammux){g_printerr("Streammux could not be created");}
    //gst_bin_add(GST_BIN(pipeline),streammux);

    // Creating bin with all sources

    appsrc=gst_element_factory_make("appsrc","source");
    conv=gst_element_factory_make("videoconvert","conv");

    g_object_set (G_OBJECT (appsrc), "caps",
        gst_caps_new_simple ("video/x-raw",
                     "format", G_TYPE_STRING, "I420",
                     "width", G_TYPE_INT, 1200,
                     "height", G_TYPE_INT, 600,
                     "pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
                     NULL), NULL);
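    /* NOTE: these appsrc caps must describe the buffers that cb_need_data
     * actually pushes. Here the caps advertise I420 at 1200x600 while
     * cb_need_data pushes packed BGR at the image's own resolution, which
     * is what triggers the not-negotiated (-4) error. */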
    g_object_set(G_OBJECT(appsrc),"stream-type",0,"format",GST_FORMAT_TIME,NULL);
 
     /* Use nvinfer to infer on batched frame. */
    pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");
    
    /* Use nvtiler to composite the batched frames into a 2D tiled array based
   * on the source of the frames. */
    tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");

    nvvidconv=gst_element_factory_make ("nvvideoconvert","nvvideo-converter");

    /* Use convertor to convert from NV12 to RGBA as required by nvosd */
   // nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
    nvosd=gst_element_factory_make("nvdsosd","nv-onscreendisplay");

    nvsink=gst_element_factory_make ("nveglglessink", "nvvideo-renderer"); //show on display
    //nvsink=gst_element_factory_make("fakesink","nvvideo-render"); //Dont show frames on screen
    sink=gst_element_factory_make("fpsdisplaysink","fps_display");
    //sink=gst_element_factory_make("autovideosink","videosink");

    //check if all plugins were created
    if(!appsrc){g_printerr("appsrc could not be created"); return -1;}
    if(!conv){g_printerr("conv could not be created"); return -1;}
    if(!tiler){g_printerr("tiler could not be created"); return -1;}
    if(!nvsink){g_printerr("nvsink could not be created"); return -1;}
    if(!sink){g_printerr("sink could not be created"); return -1;}
    if(!nvvidconv){g_printerr("nvvidconv could not be created"); return -1;}
    if(!pgie){g_printerr("pgie could not be created"); return -1;}
    if(!nvosd){g_printerr("nvosd could not be created"); return -1;}

    //set streammux
     

      /* Configure the nvinfer element using the nvinfer config file. */
     g_object_set (G_OBJECT (pgie),"config-file-path", MODEL_CONFIG, NULL);

    /* Override the batch-size set in the config file with the number of sources. */
    g_object_get (G_OBJECT (pgie), "batch-size", &pgie_batch_size, NULL);
    if (pgie_batch_size != num_sources) {
        g_printerr("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",pgie_batch_size, num_sources);
        g_object_set (G_OBJECT (pgie), "batch-size", num_sources, NULL);}

    //g_print("Flag \n"); 

    //set tiler 
    tiler_rows = (guint) sqrt (num_sources);
    tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
    /* we set the tiler properties here */
    g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
      "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);

    /* we add a message handler */
    bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
    bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
    gst_object_unref (bus);

   //set fps sink
    g_object_set (G_OBJECT (sink), "text-overlay", FALSE, "video-sink", nvsink, "sync", FALSE, NULL);
    
    //linking all elements
    gst_bin_add_many(GST_BIN(pipeline),appsrc,conv,pgie,tiler,nvvidconv,nvosd,sink,NULL);
    if (!gst_element_link_many(appsrc,conv,pgie,tiler,nvvidconv,nvosd,sink,NULL)){g_printerr("Elements could not be linked"); return -1;}
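    /* NOTE: linking videoconvert straight into nvinfer is another source of
     * the not-negotiated error: nvinfer expects batched NVMM buffers coming
     * out of nvstreammux, which this pipeline no longer contains. */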
      
    tiler_src_pad = gst_element_get_static_pad (pgie, "src");
    if (!tiler_src_pad) {g_print ("Unable to get src pad\n");}
    else{gst_pad_add_probe (tiler_src_pad, GST_PAD_PROBE_TYPE_BUFFER,tiler_src_pad_buffer_probe, (gpointer)sink, NULL);}
  
    g_signal_connect (appsrc, "need-data", G_CALLBACK (cb_need_data),(gpointer)argv[1]);

    /* Set the pipeline to "playing" state */
    g_print ("Now playing:");
    for (int i = 0; i < num_sources; i++) {g_print (" %s,", argv[i + 1]);}
    g_print ("\n");
    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    /* Wait till pipeline encounters an error or EOS */
    g_print ("Running...\n");
    g_main_loop_run (loop);

    /* Out of the main loop, clean up nicely */
    g_print ("Returned, stopping playback\n");
    gst_element_set_state (pipeline, GST_STATE_NULL);
    g_print ("Deleting pipeline\n");
    gst_object_unref (GST_OBJECT (pipeline));
    g_source_remove (bus_watch_id);
    g_main_loop_unref (loop);
    return 0;
}

Hi @fischerabati
Please move to DS 5.0. With DS 5.0 you can use appsrc to feed data into GStreamer, then pass it to nvvideoconvert to convert it to an NVMM buffer so that it can be accepted by nvstreammux; you can wrap all of this in your application.

Thanks!
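
For reference, a rough sketch of that wiring could look like this (illustrative only, not tested; the 1280x720 resolution, the I420 input format, the element names, and the already-created `pipeline` are placeholders/assumptions):

/* appsrc (system memory) -> nvvideoconvert -> capsfilter (NVMM) -> nvstreammux */
GstElement *appsrc     = gst_element_factory_make ("appsrc", "source");
GstElement *nvconv     = gst_element_factory_make ("nvvideoconvert", "nvconv");
GstElement *capsfilter = gst_element_factory_make ("capsfilter", "nvmm-caps");
GstElement *streammux  = gst_element_factory_make ("nvstreammux", "mux");

/* appsrc caps must describe the buffers your need-data callback pushes */
GstCaps *src_caps = gst_caps_from_string (
    "video/x-raw, format=I420, width=1280, height=720, framerate=30/1");
g_object_set (G_OBJECT (appsrc), "caps", src_caps, NULL);
gst_caps_unref (src_caps);

/* force NVMM memory on the capsfilter so nvstreammux will accept it */
GstCaps *nvmm_caps = gst_caps_from_string (
    "video/x-raw(memory:NVMM), format=RGBA, width=1280, height=720");
g_object_set (G_OBJECT (capsfilter), "caps", nvmm_caps, NULL);
gst_caps_unref (nvmm_caps);

g_object_set (G_OBJECT (streammux), "batch-size", 1,
    "width", 1280, "height", 720, NULL);

gst_bin_add_many (GST_BIN (pipeline), appsrc, nvconv, capsfilter, streammux, NULL);
gst_element_link_many (appsrc, nvconv, capsfilter, NULL);

/* nvstreammux sink pads are request pads ("sink_%u"), so request one by hand */
GstPad *sinkpad = gst_element_get_request_pad (streammux, "sink_0");
GstPad *srcpad  = gst_element_get_static_pad (capsfilter, "src");
if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK)
  g_printerr ("Failed to link capsfilter to nvstreammux\n");
gst_object_unref (srcpad);
gst_object_unref (sinkpad);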

Also, you can use the cvtColor function to convert the BGR data to YUV inside cb_need_data(), since nvstreammux can only receive YUV data.

Thanks!
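
For example (a rough sketch only, not tested; it assumes the appsrc caps are set to video/x-raw, format=I420 with the same width/height as the frame, and that user_data is the image path as in your code):

static void cb_need_data (GstElement *appsrc, guint unused_size, gpointer user_data)
{
  static GstClockTime timestamp = 0;
  GstFlowReturn ret;
  GstMapInfo map;

  /* load BGR and convert to planar I420 (dimensions must be even for 4:2:0) */
  cv::Mat bgr = cv::imread ((const char *) user_data, cv::IMREAD_COLOR);
  cv::Mat i420;
  cv::cvtColor (bgr, i420, cv::COLOR_BGR2YUV_I420);

  gsize size = i420.total () * i420.elemSize ();  /* width * height * 3 / 2 */
  GstBuffer *buffer = gst_buffer_new_allocate (NULL, size, NULL);
  gst_buffer_map (buffer, &map, GST_MAP_WRITE);
  memcpy (map.data, i420.data, size);
  gst_buffer_unmap (buffer, &map);

  GST_BUFFER_PTS (buffer) = timestamp;
  GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 30);
  timestamp += GST_BUFFER_DURATION (buffer);

  g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
  gst_buffer_unref (buffer);
  if (ret != GST_FLOW_OK)
    g_printerr ("push-buffer failed\n");
}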

Hello, I am trying to make it work, but I do not really know why it is not working.
This is my code:

#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
#include <opencv2/core/core.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

static GMainLoop *loop;

static void
cb_need_data (GstElement *appsrc,
          guint       unused_size,
          gpointer    user_data)
{
  static GstClockTime timestamp = 0;
  guint size, height, width, channels;
  GstFlowReturn ret;
  guchar *data1;
  GstMapInfo map;

  cv::Mat imgMat = cv::imread ("cat.jpg", cv::IMREAD_COLOR);
  cv::cvtColor (imgMat, imgMat, cv::COLOR_BGR2YUV);

  height   = imgMat.rows;
  width    = imgMat.cols;
  channels = imgMat.channels ();
  data1    = (guchar *) imgMat.data;
  size     = height * width * channels;

  g_print ("frame_height: %u\n", height);
  g_print ("frame_width: %u\n", width);
  g_print ("frame_channels: %u\n", channels);
  g_print ("frame_size: %u\n", size);

  GstBuffer *buffer = gst_buffer_new_allocate (NULL, size, NULL);
  gst_buffer_map (buffer, &map, GST_MAP_WRITE);
  memcpy ((guchar *) map.data, data1, gst_buffer_get_size (buffer));
  gst_buffer_unmap (buffer, &map);  /* unmap before pushing */

  GST_BUFFER_PTS (buffer) = timestamp;
  GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 1);
  timestamp += GST_BUFFER_DURATION (buffer);

  g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
  gst_buffer_unref (buffer);  /* the signal takes its own reference */

  if (ret != GST_FLOW_OK) {
    g_print("quit");
    /* something wrong, stop pushing */
    g_main_loop_quit (loop);
  }
  //g_print("return");
}

gint
main (gint   argc,
      gchar *argv[])
{
  GstElement *pipeline, *appsrc, *conv, *videosink, *sink,*nvosd,*streammux;

  /* init GStreamer */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* setup pipeline */
  pipeline = gst_pipeline_new ("pipeline");
  appsrc = gst_element_factory_make ("appsrc", "source");
  conv = gst_element_factory_make ("videoconvert", "conv");
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
  //videosink = gst_element_factory_make("appsink","app-sink");

  /* setup */
  g_object_set (G_OBJECT (appsrc), "caps",
        gst_caps_new_simple ("video/x-raw",
                     "format", G_TYPE_STRING, "RGB",
                     "width", G_TYPE_INT, 640,
                     "height", G_TYPE_INT, 360,
                     "framerate", GST_TYPE_FRACTION, 1, 1,
                     NULL), NULL);
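  /* NOTE: these caps (RGB, 640x360, 1 fps) must match the buffers pushed
   * from cb_need_data; there the image is converted to YUV and keeps its
   * own resolution, so as written the caps and the data disagree. */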

  gst_bin_add_many (GST_BIN (pipeline), appsrc, conv, streammux, sink, NULL);
  if (!gst_element_link_many (appsrc, conv, streammux, sink, NULL))
    g_printerr ("Elements could not be linked\n");
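  /* NOTE: this link is expected to fail as written: nvstreammux consumes
   * NVMM buffers on request pads named "sink_%u", while videoconvert
   * outputs system memory. The usual pattern is conv -> nvvideoconvert ->
   * capsfilter (video/x-raw(memory:NVMM)) -> a requested streammux sink pad. */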
  //g_object_set (videosink, "device", "/dev/video0", NULL);

  /* setup appsrc */
  g_object_set (G_OBJECT (appsrc),
        "stream-type", 0,
        "format", GST_FORMAT_TIME, NULL);
  g_signal_connect (appsrc, "need-data", G_CALLBACK (cb_need_data), NULL);

  /* play */
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  g_main_loop_run (loop);

  /* clean up */
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (GST_OBJECT (pipeline));
  g_main_loop_unref (loop);

  return 0;
}

I would like to push a cv::Mat into a DeepStream pipeline to run inference with a YOLOv3 model.
I am an absolute beginner, so if you can show some code, that would be much better.

Thanks.

Hi g.gullo,

Please open a new topic for your issue. Thanks!