How to run deepstream-test3 with usb camera

Hi,
You should modify

deepstream_sdk_v4.0_jetson\sources\apps\sample_apps\deepstream-test3\deepstream_test3_app.c

deepstream_source_bin.c is for your reference.

Hi DaneLLL,

Thanks for your help, I create a function named “create_camera_source_bin” to replace “uridecodebin” as deepstream_source_bin.c does. But when I run it with command “./deepstream-test3-app v4l2src”(here the second parameter can be ignored), I encounter the following errors.

(deepstream-test3-app:15312): GStreamer-WARNING **: 20:06:48.863: Name 'src_cap_filter' is not unique in bin 'source-bin-00', not adding
Failed to link 'nvvidconv2, cap_filter'
Failed to create source bin. Exiting.

The error information shows link nvvidconv2 with cap_filter failed, but I have no idea about how to fix this problem, can you give me some advise?

Here is the code of “create_camera_source_bin” function I add to deepstream_test3_app.c.

static GstElement *
create_camera_source_bin (guint index, gchar * uri)
{
  GstElement *bin = NULL;
  GstCaps *caps = NULL;
  gboolean ret = FALSE;

  gchar bin_name[16] = { };
  g_snprintf (bin_name, 15, "source-bin-%02d", index);
  bin = gst_bin_new (bin_name);

  GstElement *src_elem = gst_element_factory_make ("v4l2src", "src_elem");
  if (!src_elem)
  {
    g_printerr ("Could not create 'src_elem'\n");
    return NULL;
  }

  GstElement *cap_filter = gst_element_factory_make ("capsfilter", "src_cap_filter");
  if (!cap_filter){
    g_printerr ("Could not create 'src_cap_filter'\n");
    return NULL;
  }

  caps = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING, "YUY2", 
		  "width", G_TYPE_INT, 848, "height", G_TYPE_INT, 480, 
		  "framerate", GST_TYPE_FRACTION, 30, 1, NULL);

  GstElement *nvvidconv1, *nvvidconv2;
  GstCapsFeatures *feature = NULL;

  nvvidconv1 = gst_element_factory_make ("videoconvert", "nvvidconv1");
  if (!nvvidconv1){
    g_printerr ("Failed to create 'nvvidcovn1'\n");
    return NULL;
  }

  feature = gst_caps_features_new ("memory:NVMM", NULL);
  gst_caps_set_features (caps, 0, feature);
  g_object_set (G_OBJECT (cap_filter), "caps", caps, NULL);

  nvvidconv2 = gst_element_factory_make ("nvvideoconvert", "nvvidconv2");
  if (!nvvidconv2){
    g_printerr ("Failed to create 'nvvidcovn2'\n");
    return NULL;
  }

  g_object_set (G_OBJECT (nvvidconv2), "gpu-id", 0, NULL);

  gst_bin_add_many (GST_BIN (bin), src_elem, cap_filter, nvvidconv1, nvvidconv2, cap_filter, NULL);

  //NVGSTDS_LINK_ELEMENT (src_elem, nvvidconv1);
  //NVGSTDS_LINK_ELEMENT (nvvidconv1, nvvidconv2);
  //NVGSTDS_LINK_ELEMENT (nvvidconv2, cap_filter);
  if (!gst_element_link (src_elem,nvvidconv1)){
    g_printerr ("Failed to link 'src_elem, nvvidcovn1'\n");
    return NULL;
  }
  if (!gst_element_link (nvvidconv1,nvvidconv2)){
    g_printerr ("Failed to link 'nvvidcovn1, nvvidcovn2'\n");
    return NULL;
  }
  if (!gst_element_link (nvvidconv2, cap_filter)){
    g_printerr ("Failed to link 'nvvidcovn2, cap_filter'\n");
    return NULL;
  }

  // NVGSTDS_BIN_ADD_GHOST_PAD (bin, cap_filter, "src");
  GstPad *gstpad = gst_element_get_static_pad (cap_filter, "src");
  gst_element_add_pad (bin, gst_ghost_pad_new ("src", gstpad));
  gst_object_unref(gstpad);

  gchar device[64];
  g_snprintf (device, sizeof (device), "/dev/video%d", 0);
  g_object_set (G_OBJECT (src_elem), "device", device, NULL);

  return bin;
}

And I replace line 325 in deepstream_test3_app.c

GstElement *source_bin = create_source_bin (i, argv[i + 1]);

with

GstElement *source_bin = create_camera_source_bin (i, argv[i + 1]);

Thanks,
Newman97

Hi,
Do you run ‘v4l2-ctl’ command to get all modes? ‘848x480 YUYV 30fps’ is an example and you have to adapt it to your camera output.

1 Like

Hi DaneLLL,

Yes I have run ‘v4l2-ctl’ command and hav tried different resolution and format config (my usb camera have ‘MJPG’ and ‘YUYV’ pixel format). Finally I solved the problem by replacing ‘YUY2’ format with ‘NV12’ format in line 25~27.

caps = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING, "YUY2", 
		  "width", G_TYPE_INT, 848, "height", G_TYPE_INT, 480, 
		  "framerate", GST_TYPE_FRACTION, 30, 1, NULL);

with

caps = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING, "NV12", 
		  "width", G_TYPE_INT, 848, "height", G_TYPE_INT, 480, 
		  "framerate", GST_TYPE_FRACTION, 30, 1, NULL);

It works now, but the video is slow and I am trying to solve it with the two reference post.

Thanks for your help.

Best regards,
Newman97

Hi,
You should create two cap_filter in the implementation. One is for v4l2src and the other is for nvvideoconvert.

For v4l2src, pick one supported mode:

video/x-raw,format=YUY2,width=848,height=480,framerate=30/1

For nvvideoconvert, it is

video/x-raw(memory:NVMM),format=NV12

It looks like you only create one ‘video/x-raw(memory:NVMM),format=YUY2,width=848,height=480,framerate=30/1’. It should not work.

Hi,
newman97
Could I get your code?

Thanks.

Hi DaneLLL,
I am trying to use USB camera on Jetson Nano. It runs smoothly with deepstream-app. I also tried using test-1 sample app with the code below but output is choppy. Could you please indicate what I am doing wrong?
Best regards.

static GstElement *
add_source_bin (void)
{
  GstElement *src_bin = NULL;
  gchar srcbin_name[16] = { };
  GstCaps *caps = NULL;
  GstCaps *lnvcaps = NULL;  
  src_bin = gst_bin_new (srcbin_name);
  GstElement *src_elem = gst_element_factory_make (NVDS_ELEM_SRC_CAMERA_V4L2, "src_elem");
  gchar device[64];
  g_snprintf (device, sizeof (device), "/dev/video%d", 0);
  g_object_set (G_OBJECT (src_elem), "device", device, NULL);
  GstElement *cap_filter = gst_element_factory_make (NVDS_ELEM_CAPS_FILTER, "src_cap_filter");  
  GstElement *nvvidconv1, *nvvidconv2;
  GstCapsFeatures *feature = NULL;
  nvvidconv1 = gst_element_factory_make ("videoconvert", "nvvidconv1");
  nvvidconv2 = gst_element_factory_make (NVDS_ELEM_VIDEO_CONV, "nvvidconv2");
  g_object_set (G_OBJECT (nvvidconv2), "gpu-id", 0, "nvbuf-memory-type", 0, NULL);
  caps = gst_caps_new_simple("video/x-raw","format", G_TYPE_STRING, "YUY2",  
		  "width", G_TYPE_INT, 800, "height", G_TYPE_INT, 600, 
		  "framerate", GST_TYPE_FRACTION, 30,1, NULL);  
  lnvcaps = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING,"NV12", 
		  "width", G_TYPE_INT, 480, "height", G_TYPE_INT, 272, 
		  "framerate", GST_TYPE_FRACTION, 30,1, NULL);
  feature = gst_caps_features_new ("memory:NVMM", NULL);
  gst_caps_set_features (lnvcaps, 0, feature);
  g_object_set (G_OBJECT (cap_filter), "caps", lnvcaps, NULL);
  gst_bin_add_many (GST_BIN (src_bin), src_elem, nvvidconv1, nvvidconv2, cap_filter, NULL);
  if (!gst_element_link_filtered (src_elem, nvvidconv1, caps)){
    g_printerr ("Failed to link 'src_elem, nvvidconv1'\n");
    return NULL;
  }
  if (!gst_element_link (nvvidconv1,nvvidconv2)){
    g_printerr ("Failed to link 'nvvidconv1, nvvidconv2'\n");
    return NULL;
  }
  if (!gst_element_link (nvvidconv2, cap_filter)){
    g_printerr ("Failed to link 'nvvidconv2, cap_filter'\n");
    return NULL;
  }
  GstPad *gstpad = gst_element_get_static_pad (cap_filter, "src");
  gst_element_add_pad (src_bin, gst_ghost_pad_new ("src", gstpad));
  gst_object_unref(gstpad);
  gst_caps_unref (caps);
  gst_caps_unref (lnvcaps);
  return src_bin;
}
int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *pgie = NULL, *nvvidconv = NULL,
      *nvosd = NULL;
  GstElement *transform = NULL;
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *osd_sink_pad = NULL;
  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);
  /* Create gstreamer elements */
  pipeline = gst_pipeline_new ("dstest1-pipeline");
  GstElement *source = add_source_bin();
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");
  nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
  g_object_set (G_OBJECT (nvvidconv), "gpu-id", 0, NULL);
  nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");
  transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
  g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
      MUXER_OUTPUT_HEIGHT, "batch-size", 1,
      "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);
  g_object_set (G_OBJECT (pgie),
      "config-file-path", "dstest1_pgie_config.txt", NULL);
  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);
  /* Set up the pipeline */
  gst_bin_add_many (GST_BIN (pipeline),
      source, streammux, pgie, nvvidconv, nvosd, transform, sink, NULL);
  GstPad *sinkpad, *srcpad;
  gchar pad_name_sink[16] = "sink_0";
  gchar pad_name_src[16] = "src";
  sinkpad = gst_element_get_request_pad (streammux, pad_name_sink);
  if (!sinkpad) {
    g_printerr ("Streammux request sink pad failed. Exiting.\n");
    return -1;
  }
  srcpad = gst_element_get_static_pad (source, pad_name_src);
  if (!srcpad) {
    g_printerr ("Source request src pad failed. Exiting.\n");
    return -1;
  }
  if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr ("Failed to link decoder to stream muxer. Exiting.\n");
      return -1;
  }
  gst_object_unref (sinkpad);
  gst_object_unref (srcpad);
  /* we link the elements together */
  if (!gst_element_link_many (streammux, pgie,
      nvvidconv, nvosd, transform, sink, NULL)) {
    g_printerr ("Elements could not be linked: 2. Exiting.\n");
    return -1;
  }
  /* Set the pipeline to "playing" state */
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  /* Wait till pipeline encounters an error or EOS */
  g_main_loop_run (loop);
  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}

Hi,
Please check this patch

Now it works smoothly.
Thanks a lot

Hi @DaneLLL,

I am having trouble applying that patch to this code. Things are different enough that I am lost.

Here is the function that works for me for two cameras, but it is slow as expected.

static GstElement *
create_camera_source_bin (guint index, gchar * uri)
{
  GstElement *bin = NULL;
  GstCaps *caps = NULL;
  gboolean ret = FALSE;

  gchar bin_name[16] = { };
  g_snprintf (bin_name, 15, "source-bin-%02d", index);
  bin = gst_bin_new (bin_name);

  GstElement *src_elem = gst_element_factory_make ("v4l2src", "src_elem");
  if (!src_elem)
  {
    g_printerr ("Could not create 'src_elem'\n");
    return NULL;
  }

  gchar cap_filt_char[64];
  g_snprintf (cap_filt_char, sizeof (cap_filt_char), "src_cap_filter_%d", index);

  GstElement *cap_filter = gst_element_factory_make ("capsfilter", cap_filt_char);
  if (!cap_filter){
    g_printerr ("Could not create 'src_cap_filter'\n");
    return NULL;
  }

  caps = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING, "NV12",
		  "width", G_TYPE_INT, 1920, "height", G_TYPE_INT, 1080,
		  "framerate", GST_TYPE_FRACTION, 60, 1, NULL);

  GstElement *nvvidconv1, *nvvidconv2;
  GstCapsFeatures *feature = NULL;

  nvvidconv1 = gst_element_factory_make ("videoconvert", "nvvidconv1");
  if (!nvvidconv1){
    g_printerr ("Failed to create 'nvvidcovn1'\n");
    return NULL;
  }

  feature = gst_caps_features_new ("memory:NVMM", NULL);
  gst_caps_set_features (caps, 0, feature);
  g_object_set (G_OBJECT (cap_filter), "caps", caps, NULL);

  nvvidconv2 = gst_element_factory_make ("nvvideoconvert", "nvvidconv2");
  if (!nvvidconv2){
    g_printerr ("Failed to create 'nvvidcovn2'\n");
    return NULL;
  }

  g_object_set (G_OBJECT (nvvidconv2), "gpu-id", 0, NULL);

  gst_bin_add_many (GST_BIN (bin), src_elem, cap_filter, nvvidconv1, nvvidconv2, cap_filter, NULL);

  //NVGSTDS_LINK_ELEMENT (src_elem, nvvidconv1);
  //NVGSTDS_LINK_ELEMENT (nvvidconv1, nvvidconv2);
  //NVGSTDS_LINK_ELEMENT (nvvidconv2, cap_filter);
  if (!gst_element_link (src_elem,nvvidconv1)){
    g_printerr ("Failed to link 'src_elem, nvvidcovn1'\n");
    return NULL;
  }
  if (!gst_element_link (nvvidconv1,nvvidconv2)){
    g_printerr ("Failed to link 'nvvidcovn1, nvvidcovn2'\n");
    return NULL;
  }
  if (!gst_element_link (nvvidconv2, cap_filter)){
    g_printerr ("Failed to link 'nvvidcovn2, cap_filter'\n");
    return NULL;
  }

  // NVGSTDS_BIN_ADD_GHOST_PAD (bin, cap_filter, "src");
  GstPad *gstpad = gst_element_get_static_pad (cap_filter, "src");
  gst_element_add_pad (bin, gst_ghost_pad_new ("src", gstpad));
  gst_object_unref(gstpad);

  gchar device[64];
  g_snprintf (device, sizeof (device), "/dev/video%d", index);
  g_object_set (G_OBJECT (src_elem), "device", device, NULL);

  return bin;
}

How would I apply that patch to this example? I find all this gstreamer code to be pretty non-intuitive. My cameras are Econsystems USB3 cameras with UYVY output. I am using DS5.0 and the Jetson Nano.

Thank you!

And here is my attempt to merge in the patch. It does not work, throwing the error “streaming stopped, reason not-linked”.

static GstElement *
create_camera_source_bin (guint index, gchar * uri)
{
  GstElement *bin = NULL;
  GstCaps *caps = NULL;

  GstCapsFeatures *feature = NULL;
  gboolean ret = FALSE;

//  GstElement *nvvidconv1, *nvvidconv2;
  GstElement *conv1 = NULL, *conv2 = NULL, *nvconv = NULL;
  GstCaps *caps_uyvy = NULL, *caps_nv12_nvmm = NULL, *caps_nv12 = NULL;

  gchar bin_name[16] = { };
  g_snprintf (bin_name, 15, "source-bin-%02d", index);
  bin = gst_bin_new (bin_name);

  GstElement *src_elem = gst_element_factory_make ("v4l2src", "src_elem");
  gchar cap_filt_char[64];
  g_snprintf (cap_filt_char, sizeof (cap_filt_char), "src_cap_filter_%d", index);
  GstElement *cap_filter = gst_element_factory_make ("capsfilter", cap_filt_char);

  conv1 = gst_element_factory_make ("nvvidconv", "conv1");
  conv2 = gst_element_factory_make ("nvvidconv", "conv2");

  caps_uyvy = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "UYVY",
      "width", G_TYPE_INT, 1920, "height", G_TYPE_INT,
      1080, "framerate", GST_TYPE_FRACTION,
      60, 1, NULL);
  caps_nv12_nvmm = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "NV12", NULL);
  feature = gst_caps_features_new ("memory:NVMM", NULL);
  gst_caps_set_features (caps_nv12_nvmm, 0, feature);
  g_object_set (G_OBJECT (cap_filter), "caps", caps_nv12_nvmm, NULL);
  caps_nv12 = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "NV12", NULL);

  //nvvidconv2 = gst_element_factory_make ("nvvideoconvert", "nvvidconv2");
  nvconv = gst_element_factory_make ("nvvideoconvert", "nvconv");

  gchar device[64];
  g_snprintf (device, sizeof (device), "/dev/video%d", index);
  g_object_set (G_OBJECT (src_elem), "device", device, NULL);


//  g_object_set (G_OBJECT (nvvidconv2), "gpu-id", 0, NULL);
  gst_bin_add_many (GST_BIN (bin), src_elem, cap_filter, conv1, nvconv, NULL);

  gst_element_link_filtered(src_elem, conv1, caps_uyvy);
  gst_bin_add (GST_BIN (bin), conv2);
  gst_element_link_filtered(conv1, conv2, caps_nv12_nvmm);
  gst_element_link_filtered(conv2, nvconv, caps_nv12);
  gst_element_link_filtered(nvconv, cap_filter, caps_nv12_nvmm);


  // NVGSTDS_BIN_ADD_GHOST_PAD (bin, cap_filter, "src");
  GstPad *gstpad = gst_element_get_static_pad (cap_filter, "src");
  gst_element_add_pad (bin, gst_ghost_pad_new ("src", gstpad));
  gst_object_unref(gstpad);

  return bin;
}

This code is merged in to deepstream_test3_app.c from DS5.0. As mentioned above you must also replace line 325 in deepstream_test3_app.c

GstElement *source_bin = create_source_bin (i, argv[i + 1]);

with

GstElement *source_bin = create_camera_source_bin (i, argv[i + 1]);

Any help is appreciated!

Hi,
By default it is uridecodebin in deepstream-test3. You need to replace it with

v4l2src ! video/x-raw,format=_SOURCE_FORMAT_,width=_SOURCE_WIDTH_,height=SOURCE_HEIGHT_,framerate=_SOURCE_FRAMERATR_ ! videoconvert ! nvvideoconvert ! video/x-raw(memory:NVMM),format=NV12

You can refer to this post to set precise format,width,height,framerate.

Thank you, but I don’t understand how to convert these pipeline commands in to C++ code. I wasn’t able to understand it from reading the code. I suppose I will have to find a tutorial that explains it. Do you know of such a tutorial?

I did succeed in getting fast video processing with one camera using the recommended modifications to deepstream-test-1, I just do not get how to update deepstream-test-3 to do the same with multiple cameras.

Is there anyway to skip videoconvert plugin?

Hi,

We will have enhancement in next release. Please wait for DS 5.0 GA.

1 Like

How can i do this in deepstream 5

Hi,
deepstream-test5 and deepstream-app are with common code in source group. You can configure it in config file. Please look at
DeepStream Reference Application - deepstream-app — DeepStream 6.1.1 Release documentation

Hey @DaneLLL @tlalexander

I have done some modification in the DS-test3 python script… But am not able to get good fps. I need to use this pipeline for 2 USB cameras. Please take a look in the script and give me your suggestions.

def create_source_bin(index,uri):
    print("Creating source bin")

    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name="source-bin-%02d" %index
    print(bin_name)
    nbin=Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")

    usb_cam_source=Gst.ElementFactory.make("v4l2src", "source")
    usb_cam_source.set_property("device",uri)



    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    if not caps_v4l2src:
        sys.stderr.write(" Unable to create v4l2src capsfilter \n")
    
    # videoconvert to make sure a superset of raw formats are supported
    vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
    if not vidconvsrc:
        sys.stderr.write(" Unable to create videoconvert \n")


    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    if not nvvidconvsrc:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")
    
    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_vidconvsrc:
        sys.stderr.write(" Unable to create capsfilter \n")
        
    caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
    caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM), format=NV12"))

    print('adding element to source bin')
    Gst.Bin.add(nbin,usb_cam_source)
    #Gst.Bin.add(nbin,jpegDec)
    
    Gst.Bin.add(nbin,caps_v4l2src)
    Gst.Bin.add(nbin,vidconvsrc)
    
    Gst.Bin.add(nbin,nvvidconvsrc)
    Gst.Bin.add(nbin,caps_vidconvsrc)

    print('linking elemnent in source bin')
    usb_cam_source.link(caps_v4l2src)
    caps_v4l2src.link(vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    
    nvvidconvsrc.link(caps_vidconvsrc)

    pad = caps_vidconvsrc.get_static_pad("src")
    ghostpad = Gst.GhostPad.new("src",pad)
    bin_pad=nbin.add_pad(ghostpad)
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin


def main(args):
 # Check input arguments
if len(args) < 2:
    sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
    sys.exit(1)

for i in range(0,len(args)-1):
    fps_streams["stream{0}".format(i)]=GETFPS(i)
number_sources=len(args)-1

# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)

# Create gstreamer elements */
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
is_live = False

if not pipeline:
    sys.stderr.write(" Unable to create Pipeline \n")
print("Creating streamux \n ")

# Create nvstreammux instance to form batches from one or more sources.
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
    sys.stderr.write(" Unable to create NvStreamMux \n")

pipeline.add(streammux)
for i in range(number_sources):
    print("Creating source_bin ",i," \n ")
    usb_cam=args[i+1]
    # if uri_name.find("rtsp://") == 0 :
    #     is_live = True
    source_bin=create_source_bin(i, usb_cam)
    if not source_bin:
        sys.stderr.write("Unable to create source bin \n")
    pipeline.add(source_bin)
    padname="sink_%u" %i
    sinkpad= streammux.get_request_pad(padname) 
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin \n")
    srcpad=source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin \n")
    srcpad.link(sinkpad)
queue1=Gst.ElementFactory.make("queue","queue1")
queue2=Gst.ElementFactory.make("queue","queue2")
queue3=Gst.ElementFactory.make("queue","queue3")
queue4=Gst.ElementFactory.make("queue","queue4")
queue5=Gst.ElementFactory.make("queue","queue5")
pipeline.add(queue1)
pipeline.add(queue2)
pipeline.add(queue3)
pipeline.add(queue4)
pipeline.add(queue5)
print("Creating Pgie \n ")
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
    sys.stderr.write(" Unable to create pgie \n")
print("Creating tiler \n ")
tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
if not tiler:
    sys.stderr.write(" Unable to create tiler \n")
print("Creating nvvidconv \n ")
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
    sys.stderr.write(" Unable to create nvvidconv \n")
print("Creating nvosd \n ")
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
    sys.stderr.write(" Unable to create nvosd \n")
nvosd.set_property('process-mode',OSD_PROCESS_MODE)
nvosd.set_property('display-text',OSD_DISPLAY_TEXT)
if(is_aarch64()):
    print("Creating transform \n ")
    transform=Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    if not transform:
        sys.stderr.write(" Unable to create transform \n")

print("Creating EGLSink \n")
sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
if not sink:
    sys.stderr.write(" Unable to create egl sink \n")

# if is_live:
#     print("Atleast one of the sources is live")
#     streammux.set_property('live-source', 1)

streammux.set_property('width', 1920)
streammux.set_property('height', 1080)
streammux.set_property('batch-size', number_sources)
streammux.set_property('batched-push-timeout', 4000000)
pgie.set_property('config-file-path', "dstest3_pgie_config.txt")
pgie_batch_size=pgie.get_property("batch-size")
if(pgie_batch_size != number_sources):
    print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
    pgie.set_property("batch-size",number_sources)
tiler_rows=int(math.sqrt(number_sources))
tiler_columns=int(math.ceil((1.0*number_sources)/tiler_rows))
tiler.set_property("rows",tiler_rows)
tiler.set_property("columns",tiler_columns)
tiler.set_property("width", TILED_OUTPUT_WIDTH)
tiler.set_property("height", TILED_OUTPUT_HEIGHT)
sink.set_property("qos",0)
sink.set_property('sync', False)

print("Adding elements to Pipeline \n")
pipeline.add(pgie)
pipeline.add(tiler)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
if is_aarch64():
    pipeline.add(transform)
pipeline.add(sink)

print("Linking elements in the Pipeline \n")
streammux.link(queue1)
queue1.link(pgie)
pgie.link(queue2)
queue2.link(tiler)
tiler.link(queue3)
queue3.link(nvvidconv)
nvvidconv.link(queue4)
queue4.link(nvosd)
if is_aarch64():
    nvosd.link(queue5)
    queue5.link(transform)
    transform.link(sink)
else:
    nvosd.link(queue5)
    queue5.link(sink)
1 Like

Hi ajith_ABD,

I will suggest to open a new topic if it’s still an issue.
This is an old thread.