Program terminated with signal SIGSEGV when put nvinfer between nvstreammux and nvstreamdemux

[Thread debugging using libthread_db enabled]
Using host libthread_db library “/lib/x86_64-linux-gnu/libthread_db.so.1”.

warning: the debug information found in “/lib64/ld-2.23.so” does not match “/lib64/ld-linux-x86-64.so.2” (CRC mismatch).

Core was generated by `./deepstream-test1-app sample_720p.h264 sample_720p2.h264’.
Program terminated with signal SIGSEGV, Segmentation fault.
#0 0x00007fa685e588e4 in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
[Current thread is 1 (Thread 0x7fa628ffd700 (LWP 15077))]
(gdb) bt
#0 0x00007fa685e588e4 in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#1 0x00007fa685e598b9 in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#2 0x00007fa685d76e69 in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#3 0x00007fa685c989a5 in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#4 0x00007fa685c98a4e in ?? () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#5 0x00007fa685ddf3ca in cuMemsetD8_v2 () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#6 0x00007fa686b447a2 in ?? () from /usr/local/cuda-9.2/lib64/libcudart.so.9.2
#7 0x00007fa686b25142 in ?? () from /usr/local/cuda-9.2/lib64/libcudart.so.9.2
#8 0x00007fa686b4a8d5 in cudaMemset () from /usr/local/cuda-9.2/lib64/libcudart.so.9.2
#9 0x00007fa6849403ee in gst_nvstream_allocator_alloc () from /usr/lib/x86_64-linux-gnu/gstreamer-1.0/libgstnvmultistream.so
#10 0x00007fa689fc16a2 in gst_buffer_new_allocate (allocator=0x307e22b0, size=8, params=) at gstbuffer.c:767
#11 0x00007fa689fc4b68 in default_alloc_buffer (pool=, buffer=0x7fa628ffc960, params=)
at gstbufferpool.c:228
#12 0x00007fa689fc4c16 in do_alloc_buffer (pool=pool@entry=0x6bdfbd0, buffer=buffer@entry=0x7fa628ffc960, params=params@entry=0x0)
at gstbufferpool.c:268
#13 0x00007fa689fc50fc in default_start (pool=0x6bdfbd0) at gstbufferpool.c:320
#14 0x00007fa689fc54b1 in do_start (pool=0x6bdfbd0) at gstbufferpool.c:353
#15 gst_buffer_pool_set_active (pool=0x6bdfbd0, active=) at gstbufferpool.c:487
#16 0x00007fa68494504d in gst_nvstreammux_alloc_output_buffers () from /usr/lib/x86_64-linux-gnu/gstreamer-1.0/libgstnvmultistream.so
#17 0x00007fa684945d1e in gst_nvstreammux_sink_event () from /usr/lib/x86_64-linux-gnu/gstreamer-1.0/libgstnvmultistream.so
#18 0x00007fa689ff4367 in gst_pad_send_event_unchecked (pad=pad@entry=0x132b480, event=event@entry=0x7fa6040040e0,
type=, type@entry=GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM) at gstpad.c:5587
#19 0x00007fa689ff479e in gst_pad_push_event_unchecked (pad=pad@entry=0xf8fd80, event=0x7fa6040040e0,
type=type@entry=GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM) at gstpad.c:5245
#20 0x00007fa689ff4be0 in push_sticky (pad=pad@entry=0xf8fd80, ev=ev@entry=0x7fa628ffcd70, user_data=user_data@entry=0x7fa628ffcde0)
at gstpad.c:3790
#21 0x00007fa689ff288f in events_foreach (pad=pad@entry=0xf8fd80, func=func@entry=0x7fa689ff4a90 <push_sticky>,
user_data=user_data@entry=0x7fa628ffcde0) at gstpad.c:603
#22 0x00007fa689ffe924 in check_sticky (event=0x7fa6040040e0, pad=0xf8fd80) at gstpad.c:3847
#23 gst_pad_push_event (pad=0xf8fd80, event=event@entry=0x7fa6040040e0) at gstpad.c:5376
#24 0x00007fa68847dd13 in gst_queue_push_one (queue=0xfbc4a0) at gstqueue.c:1432
#25 gst_queue_loop (pad=) at gstqueue.c:1509
#26 0x00007fa68a027f31 in gst_task_func (task=0x6bdf3b0) at gsttask.c:332
#27 0x00007fa689a9454e in ?? () from /lib/x86_64-linux-gnu/libglib-2.0.so.0
#28 0x00007fa689a93bb5 in ?? () from /lib/x86_64-linux-gnu/libglib-2.0.so.0
#29 0x00007fa688b2a6ba in start_thread (arg=0x7fa628ffd700) at pthread_create.c:333
#30 0x00007fa68976041d in clone () at …/sysdeps/unix/sysv/linux/x86_64/clone.S:109
(gdb)

The pipeline is:

gst-launch-1.0 nvstreammux name=mux batch-size=2 ! nvinfer config-file-path=./dstest1_pgie_config.txt ! nvstreamdemux name=demux
filesrc location=./sample_720p.h264 ! h264parse ! nvdec_h264 ! queue ! mux.sink_0
filesrc location=./sample_720p2.h264 ! h264parse ! nvdec_h264 ! queue ! mux.sink_1
demux.src_0 ! “video/x-raw(memory:NVMM), format=NV12” ! queue ! nvvidconv ! “video/x-raw(memory:NVMM), format=RGBA” ! nvosd font-size=15 ! nvvidconv ! “video/x-raw, format=RGBA” ! videoconvert ! “video/x-raw, format=NV12” ! x264enc ! qtmux ! filesink location=./out3.mp4
demux.src_1 ! “video/x-raw(memory:NVMM), format=NV12” ! queue ! nvvidconv ! “video/x-raw(memory:NVMM), format=RGBA” ! nvosd font-size=15 ! nvvidconv ! “video/x-raw, format=RGBA” ! videoconvert ! “video/x-raw, format=NV12” ! x264enc ! qtmux ! filesink location=./out4.mp4

The gst-launch command works fine, but we get a core dump when running the equivalent C code.

/*
 * Copyright (c) 2018 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto.  Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 *
 */



#include <gst/gst.h>
#include <glib.h>

#include "gstnvdsmeta.h"

/* Maximum length (bytes) of the per-object display string drawn by nvosd. */
#define MAX_DISPLAY_LEN 64

/* Class ids emitted by the primary detector (see dstest1_pgie_config.txt). */
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

/* Running count of frames seen by the OSD probe (single probe thread). */
gint frame_number = 0;
/* Human-readable label for each detector class id; index must stay in [0,3]. */
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
  "Roadsign"
};

/* osd_sink_pad_buffer_probe  will extract metadata received on OSD sink pad
 * and update params for drawing rectangle, object information etc. */

static GstPadProbeReturn
osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{
  GstMeta *gst_meta = NULL;
  NvDsMeta *nvdsmeta = NULL;
  gpointer state = NULL;
  static GQuark _nvdsmeta_quark = 0;
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsFrameMeta *frame_meta = NULL;
  guint num_rects = 0, rect_index = 0;
  NvDsObjectParams *obj_meta = NULL;
  NvOSD_TextParams *txt_params = NULL;
  guint vehicle_count = 0;
  guint person_count = 0;

  if (!_nvdsmeta_quark)
    _nvdsmeta_quark = g_quark_from_static_string (NVDS_META_STRING);

  while ((gst_meta = gst_buffer_iterate_meta (buf, &state))) {
    if (gst_meta_api_type_has_tag (gst_meta->info->api, _nvdsmeta_quark)) {

      nvdsmeta = (NvDsMeta *) gst_meta;

      /* We are interested only in intercepting Meta of type
       * "NVDS_META_FRAME_INFO" as they are from our infer elements. */
      if (nvdsmeta->meta_type == NVDS_META_FRAME_INFO) {
        frame_meta = (NvDsFrameMeta *) nvdsmeta->meta_data;
        if (frame_meta == NULL) {
          g_print ("NvDS Meta contained NULL meta \n");
          return GST_PAD_PROBE_OK;
        }

        /* We reset num_strings here as we plan to iterate through the
         * detected objects and form our own strings; the pipeline
         * generated strings shall be discarded. */
        frame_meta->num_strings = 0;

        num_rects = frame_meta->num_rects;

        /* num_rects objects live in frame_meta->obj_params; build a label
         * string for each one. */
        for (rect_index = 0; rect_index < num_rects; rect_index++) {
          obj_meta = (NvDsObjectParams *) & frame_meta->obj_params[rect_index];

          txt_params = &(obj_meta->text_params);
          if (txt_params->display_text)
            g_free (txt_params->display_text);

          txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);

          /* FIX: clamp the class id before indexing pgie_classes_str[4].
           * The previous code read out of bounds for any class id > 3. */
          if (obj_meta->class_id >= 0 &&
              obj_meta->class_id < (gint) G_N_ELEMENTS (pgie_classes_str)) {
            g_snprintf (txt_params->display_text, MAX_DISPLAY_LEN, "%s ",
                pgie_classes_str[obj_meta->class_id]);
          } else {
            g_snprintf (txt_params->display_text, MAX_DISPLAY_LEN, "Unknown ");
          }

          if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE)
            vehicle_count++;
          if (obj_meta->class_id == PGIE_CLASS_ID_PERSON)
            person_count++;

          /* Place the label just above the bounding box.
           * FIX: rect_params.top - 25 underflows (offsets are unsigned)
           * for objects within 25 px of the top edge; clamp to 0. */
          txt_params->x_offset = obj_meta->rect_params.left;
          txt_params->y_offset =
              (obj_meta->rect_params.top >= 25) ?
              (obj_meta->rect_params.top - 25) : 0;

          /* Font, font-color and font-size */
          txt_params->font_params.font_name = "Arial";
          txt_params->font_params.font_size = 10;
          txt_params->font_params.font_color.red = 1.0;
          txt_params->font_params.font_color.green = 1.0;
          txt_params->font_params.font_color.blue = 1.0;
          txt_params->font_params.font_color.alpha = 1.0;

          /* Text background color: opaque black */
          txt_params->set_bg_clr = 1;
          txt_params->text_bg_clr.red = 0.0;
          txt_params->text_bg_clr.green = 0.0;
          txt_params->text_bg_clr.blue = 0.0;
          txt_params->text_bg_clr.alpha = 1.0;

          frame_meta->num_strings++;
        }
      }
    }
  }
  g_print ("Frame Number = %d Number of objects = %d "
      "Vehicle Count = %d Person Count = %d\n",
      frame_number, num_rects, vehicle_count, person_count);
  frame_number++;

  return GST_PAD_PROBE_OK;
}

/* Bus watch callback: stop the main loop on EOS or on the first error,
 * printing diagnostic details for the latter. Always returns TRUE so the
 * watch stays installed. */
static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *main_loop = (GMainLoop *) data;
  GstMessageType mtype = GST_MESSAGE_TYPE (msg);

  if (mtype == GST_MESSAGE_EOS) {
    g_print ("End of stream\n");
    g_main_loop_quit (main_loop);
  } else if (mtype == GST_MESSAGE_ERROR) {
    GError *err = NULL;
    gchar *dbg_info = NULL;

    gst_message_parse_error (msg, &err, &dbg_info);
    g_printerr ("ERROR from element %s: %s\n",
        GST_OBJECT_NAME (msg->src), err->message);
    g_free (dbg_info);
    g_printerr ("Error: %s\n", err->message);
    g_error_free (err);
    g_main_loop_quit (main_loop);
  }

  return TRUE;
}

/* Builds and runs a two-stream pipeline:
 *   filesrc -> h264parse -> nvdec_h264 -> queue -> mux.sink_N
 *   nvstreammux -> nvinfer -> nvstreamdemux
 *   demux.src_N -> capsfilter -> queue -> nvvidconv -> nvosd ->
 *     nvvidconv -> videoconvert -> x264enc -> qtmux -> filesink
 * Returns 0 on success, -1 on any setup failure. */
int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *source = NULL, *source2 = NULL,
      *h264parser = NULL, *h264parser2 = NULL, *nvstreamdemux = NULL,
      *nvstreammux = NULL, *queue = NULL, *queue2 = NULL, *queue3 = NULL,
      *queue4 = NULL, *videoconvert = NULL, *videoconvert2 = NULL,
      *x264enc = NULL, *x264enc2 = NULL, *qtmux = NULL, *qtmux2 = NULL,
      *decoder = NULL, *decoder2 = NULL, *sink = NULL, *sink2 = NULL,
      *pgie = NULL, *nvvidconv = NULL, *nvvidconv2 = NULL, *nvvidconv3 = NULL,
      *nvvidconv4 = NULL, *nvosd = NULL, *nvosd2 = NULL, *filter1 = NULL,
      *filter2 = NULL, *filter3 = NULL, *filter4 = NULL, *filter5 = NULL,
      *filter6 = NULL, *filter7 = NULL, *filter8 = NULL;
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstCaps *caps1 = NULL, *caps2 = NULL;
  GstCaps *caps3 = NULL, *caps4 = NULL;
  GstCaps *caps5 = NULL, *caps6 = NULL;
  GstCaps *caps7 = NULL, *caps8 = NULL;
  gulong osd_probe_id = 0;
  GstPad *osd_sink_pad = NULL;

  GstPad *mux_1_pad, *mux_2_pad;
  GstPad *queuemux_1_pad, *queuemux_2_pad;

  GstPad *mux_3_pad, *mux_4_pad;
  GstPad *queuemux_3_pad, *queuemux_4_pad;

  /* FIX: the argument check was commented out; argv[1]/argv[2] are
   * dereferenced below, so running without both filenames crashed. */
  if (argc != 3) {
    g_printerr ("Usage: %s <H264 filename 1> <H264 filename 2>\n", argv[0]);
    return -1;
  }

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest1-pipeline");

  /* Source elements for reading from the two files */
  source = gst_element_factory_make ("filesrc", "file-source");
  source2 = gst_element_factory_make ("filesrc", "file-source2");

  /* The input files are elementary h264 streams, so we need h264 parsers */
  h264parser = gst_element_factory_make ("h264parse", "h264-parser");
  h264parser2 = gst_element_factory_make ("h264parse", "h264-parser2");

  /* Use nvdec_h264 for hardware accelerated decode on GPU */
  decoder = gst_element_factory_make ("nvdec_h264", "nvh264-decoder");
  decoder2 = gst_element_factory_make ("nvdec_h264", "nvh264-decoder2");

  /* Queues decouple the decoder threads from the mux/demux branches */
  queue = gst_element_factory_make ("queue", "queue1");
  queue2 = gst_element_factory_make ("queue", "queue2");
  queue3 = gst_element_factory_make ("queue", "queue3");
  queue4 = gst_element_factory_make ("queue", "queue4");

  nvstreammux = gst_element_factory_make ("nvstreammux", "nvstreammux");
  nvstreamdemux = gst_element_factory_make ("nvstreamdemux", "nvstreamdemux");

  /* Use nvinfer to run inferencing on the batched output; behaviour of
   * inferencing is set through the config file */
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

  /* Converters to go NV12 -> RGBA (for nvosd) and back out to system memory */
  nvvidconv = gst_element_factory_make ("nvvidconv", "nvvideo-converter");
  nvvidconv2 = gst_element_factory_make ("nvvidconv", "nvvideo-converter2");
  nvvidconv3 = gst_element_factory_make ("nvvidconv", "nvvideo-converter3");
  nvvidconv4 = gst_element_factory_make ("nvvidconv", "nvvideo-converter4");

  videoconvert = gst_element_factory_make ("videoconvert", "videoconvert");
  videoconvert2 = gst_element_factory_make ("videoconvert", "videoconvert2");

  qtmux = gst_element_factory_make ("qtmux", "qtmux");
  qtmux2 = gst_element_factory_make ("qtmux", "qtmux2");

  x264enc = gst_element_factory_make ("x264enc", "x264enc");
  x264enc2 = gst_element_factory_make ("x264enc", "x264enc2");

  /* Create OSD elements to draw on the converted RGBA buffers */
  nvosd = gst_element_factory_make ("nvosd", "nv-onscreendisplay");
  nvosd2 = gst_element_factory_make ("nvosd", "nv-onscreendisplay2");

  /* Finally write the encoded output to files */
  sink = gst_element_factory_make ("filesink", "filesink");
  sink2 = gst_element_factory_make ("filesink", "filesink2");

  /* caps filters pinning the formats at each conversion step */
  filter1 = gst_element_factory_make ("capsfilter", "filter1");
  filter2 = gst_element_factory_make ("capsfilter", "filter2");
  filter3 = gst_element_factory_make ("capsfilter", "filter3");
  filter4 = gst_element_factory_make ("capsfilter", "filter4");
  filter5 = gst_element_factory_make ("capsfilter", "filter5");
  filter6 = gst_element_factory_make ("capsfilter", "filter6");
  filter7 = gst_element_factory_make ("capsfilter", "filter7");
  filter8 = gst_element_factory_make ("capsfilter", "filter8");
  if (!pipeline || !source || !h264parser || !decoder || !source2
      || !h264parser2 || !decoder2 || !nvstreammux || !nvstreamdemux || !pgie
      || !filter1 || !nvvidconv || !filter2 || !filter3 || !filter4
      || !filter5 || !filter6 || !filter7 || !filter8 || !queue || !queue2
      || !queue3 || !queue4 || !nvosd || !nvosd2 || !nvvidconv2 || !nvvidconv3
      || !nvvidconv4 || !videoconvert || !videoconvert2 || !qtmux || !qtmux2
      || !x264enc || !x264enc2 || !sink || !sink2) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* we set the input filenames on the source elements */
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);
  g_object_set (G_OBJECT (source2), "location", argv[2], NULL);
  /* BUG FIX: "batch-size" is an integer property. The original code passed
   * the string "2", which makes g_object_set() read the pointer value as an
   * int via varargs — the mux is then misconfigured with a garbage batch
   * size, which later crashes inside gst_nvstream_allocator_alloc()
   * (cudaMemset on a bogus allocation), matching the backtrace above.
   * gst-launch worked because it parses "batch-size=2" to an int itself. */
  g_object_set (G_OBJECT (nvstreammux), "batch-size", 2, NULL);
  g_object_set (G_OBJECT (sink), "location", "./out1.mp4", NULL);
  g_object_set (G_OBJECT (sink2), "location", "./out2.mp4", NULL);

  /* Set all the necessary properties of the nvinfer element */
  g_object_set (G_OBJECT (pgie),
      "config-file-path", "dstest1_pgie_config.txt", NULL);

  /* we set the osd properties here */
  g_object_set (G_OBJECT (nvosd), "font-size", 15, NULL);
  g_object_set (G_OBJECT (nvosd2), "font-size", 15, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* we add all elements into the pipeline */
  gst_bin_add_many (GST_BIN (pipeline),
      source, h264parser, decoder, source2, h264parser2, decoder2, pgie,
      nvstreammux, nvstreamdemux, videoconvert, videoconvert2, qtmux, qtmux2,
      x264enc, x264enc2, filter1, nvvidconv, nvvidconv2, nvvidconv3,
      nvvidconv4, filter2, filter3, filter4, filter5, filter6, filter7,
      filter8, nvosd, nvosd2, sink, sink2, queue, queue2, queue3, queue4,
      NULL);

  caps1 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12");
  g_object_set (G_OBJECT (filter1), "caps", caps1, NULL);
  gst_caps_unref (caps1);
  caps2 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=RGBA");
  g_object_set (G_OBJECT (filter2), "caps", caps2, NULL);
  gst_caps_unref (caps2);
  caps3 = gst_caps_from_string ("video/x-raw, format=RGBA");
  g_object_set (G_OBJECT (filter3), "caps", caps3, NULL);
  gst_caps_unref (caps3);
  caps4 = gst_caps_from_string ("video/x-raw, format=NV12");
  g_object_set (G_OBJECT (filter4), "caps", caps4, NULL);
  gst_caps_unref (caps4);
  caps5 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12");
  g_object_set (G_OBJECT (filter5), "caps", caps5, NULL);
  gst_caps_unref (caps5);
  caps6 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=RGBA");
  g_object_set (G_OBJECT (filter6), "caps", caps6, NULL);
  gst_caps_unref (caps6);
  caps7 = gst_caps_from_string ("video/x-raw, format=RGBA");
  g_object_set (G_OBJECT (filter7), "caps", caps7, NULL);
  gst_caps_unref (caps7);
  caps8 = gst_caps_from_string ("video/x-raw, format=NV12");
  g_object_set (G_OBJECT (filter8), "caps", caps8, NULL);
  gst_caps_unref (caps8);

  /* Link the per-file decode chains up to their queues */
  if (gst_element_link_many (source, h264parser, decoder, queue, NULL) != TRUE) {
    g_printerr ("1 Elements could not be linked.\n");
    gst_object_unref (pipeline);
    return -1;
  }
  if (gst_element_link_many (source2, h264parser2, decoder2, queue2,
          NULL) != TRUE) {
    g_printerr ("2 Elements could not be linked.\n");
    gst_object_unref (pipeline);
    return -1;
  }

  /* Link mux -> infer -> demux (the batched section) */
  if (gst_element_link_many (nvstreammux, pgie, nvstreamdemux, NULL) != TRUE) {
    g_printerr ("3 Elements could not be linked.\n");
    gst_object_unref (pipeline);
    return -1;
  }

  /* Link the two output branches (OSD draw, convert, encode, mux, sink) */
  if (gst_element_link_many (filter1, queue3, nvvidconv, filter2, nvosd,
          nvvidconv2, filter3, videoconvert, filter4, x264enc, qtmux, sink,
          NULL) != TRUE) {
    g_printerr ("4 Elements could not be linked.\n");
    gst_object_unref (pipeline);
    return -1;
  }
  if (gst_element_link_many (filter5, queue4, nvvidconv3, filter6, nvosd2,
          nvvidconv4, filter7, videoconvert2, filter8, x264enc2, qtmux2,
          sink2, NULL) != TRUE) {
    g_printerr ("5 Elements could not be linked.\n");
    gst_object_unref (pipeline);
    return -1;
  }

  /* Connect each decode queue to a requested mux sink pad */
  mux_1_pad = gst_element_get_static_pad (queue, "src");
  mux_2_pad = gst_element_get_static_pad (queue2, "src");
  queuemux_1_pad = gst_element_get_request_pad (nvstreammux, "sink_0");
  queuemux_2_pad = gst_element_get_request_pad (nvstreammux, "sink_1");
  if (gst_pad_link (mux_1_pad, queuemux_1_pad) != GST_PAD_LINK_OK) {
    g_printerr ("mux_1_pad, queuemux_1_pad could not be linked 1 .\n");
    gst_object_unref (pipeline);
    return -1;
  }
  if (gst_pad_link (mux_2_pad, queuemux_2_pad) != GST_PAD_LINK_OK) {
    g_printerr ("mux_2_pad, queuemux_2_pad could not be linked 1 .\n");
    gst_object_unref (pipeline);
    return -1;
  }
  gst_object_unref (mux_1_pad);
  gst_object_unref (mux_2_pad);
  /* FIX: drop the references returned by gst_element_get_request_pad();
   * the original code leaked them. */
  gst_object_unref (queuemux_1_pad);
  gst_object_unref (queuemux_2_pad);

  /* Connect each requested demux src pad to its output branch */
  mux_3_pad = gst_element_get_static_pad (filter1, "sink");
  mux_4_pad = gst_element_get_static_pad (filter5, "sink");
  queuemux_3_pad = gst_element_get_request_pad (nvstreamdemux, "src_0");
  queuemux_4_pad = gst_element_get_request_pad (nvstreamdemux, "src_1");

  if (gst_pad_link (queuemux_3_pad, mux_3_pad) != GST_PAD_LINK_OK) {
    g_printerr ("queuemux_3_pad , mux_3_pad could not be linked 1 .\n");
    gst_object_unref (pipeline);
    return -1;
  }
  if (gst_pad_link (queuemux_4_pad, mux_4_pad) != GST_PAD_LINK_OK) {
    g_printerr ("queuemux_4_pad , mux_4_pad could not be linked 1 .\n");
    gst_object_unref (pipeline);
    return -1;
  }
  gst_object_unref (mux_3_pad);
  gst_object_unref (mux_4_pad);
  gst_object_unref (queuemux_3_pad);
  gst_object_unref (queuemux_4_pad);

  /* Add a probe on the sink pad of the first OSD element to get informed of
   * the metadata generated; by that time the buffer has all the metadata. */
  osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
  if (!osd_sink_pad) {
    g_print ("Unable to get sink pad\n");
  } else {
    osd_probe_id = gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        osd_sink_pad_buffer_probe, NULL, NULL);
    /* FIX: release the pad reference; the original code leaked it. */
    gst_object_unref (osd_sink_pad);
  }
  (void) osd_probe_id;          /* probe stays installed for pipeline lifetime */

  /* Set the pipeline to "playing" state */
  g_print ("Now playing: %s %s\n", argv[1], argv[2]);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}
1 Like

We will provide a multiple-stream sample in DeepStream 3.0.