How to run two pipelines correctly?

• Hardware Platform (dGPU)
• DeepStream Version 5.0

How to run two pipelines correctly? One pipeline works on video, and the second pipeline is switched on by an event and works on images. At the moment I get an error when changing the state of the second pipeline to PLAYING.
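
For reference, the setup I am aiming for looks roughly like the following minimal sketch (an assumed setup, not my real code; it uses stock GStreamer elements instead of the DeepStream ones): two independent pipelines sharing one GMainLoop, with the second pipeline kept in the NULL state until an event switches it to PLAYING.

  /* Minimal sketch: two independent pipelines driven by one GMainLoop.
   * The video pipeline runs from the start; the image pipeline stays in
   * NULL until an "event" (here just a 5-second timer) switches it to
   * PLAYING. Stock elements are used so the sketch runs on its own. */
  #include <gst/gst.h>

  static GstElement *image_pipeline;

  /* Stand-in for the real application event that should start pipeline 2. */
  static gboolean
  on_event (gpointer user_data)
  {
    g_print ("Event received, starting image pipeline\n");
    gst_element_set_state (image_pipeline, GST_STATE_PLAYING);
    return G_SOURCE_REMOVE;   /* one-shot timeout */
  }

  int
  main (int argc, char *argv[])
  {
    gst_init (&argc, &argv);
    GMainLoop *loop = g_main_loop_new (NULL, FALSE);

    /* Pipeline 1: continuously running video pipeline. */
    GstElement *video_pipeline =
        gst_parse_launch ("videotestsrc is-live=true ! autovideosink", NULL);

    /* Pipeline 2: built up front, left in NULL until the event arrives. */
    image_pipeline =
        gst_parse_launch ("videotestsrc num-buffers=1 ! fakesink", NULL);

    gst_element_set_state (video_pipeline, GST_STATE_PLAYING);
    g_timeout_add_seconds (5, on_event, NULL);

    g_main_loop_run (loop);

    gst_element_set_state (video_pipeline, GST_STATE_NULL);
    gst_element_set_state (image_pipeline, GST_STATE_NULL);
    gst_object_unref (video_pipeline);
    gst_object_unref (image_pipeline);
    g_main_loop_unref (loop);
    return 0;
  }

In my real application the timer callback is replaced by the actual event handler, and the pipelines are the DeepStream ones shown below.
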
pipeline for images:


pipeline for video:

gdb error when loading the primary detector engine of the second pipeline:

0:00:48.991474828 15955 0x555555e72ad0 INFO                 nvinfer gstnvinfer.cpp:619:gst_nvinfer_logger:<secondary3-nvinference-engine> NvDsInferContext[UID 4]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1701> [UID = 4]: deserialized trt engine from :/media/daliel/hdd/from_main/deepstream/python/apps/deepstream-app-3/ocr_ctc_17.onnx_b4_gpu0_fp32.engine
INFO: [FullDims Engine Info]: layers num: 2
0   INPUT  kFLOAT Placeholder:0   3x133x133       min: 1x3x133x133     opt: 4x3x133x133     Max: 4x3x133x133     
1   OUTPUT kFLOAT Reshape:0       880             min: 0               opt: 0               Max: 0               

ERROR: [TRT]: INVALID_ARGUMENT: Cannot find binding of given name: Reshape
0:00:48.991540664 15955 0x555555e72ad0 WARN                 nvinfer gstnvinfer.cpp:616:gst_nvinfer_logger:<secondary3-nvinference-engine> NvDsInferContext[UID 4]: Warning from NvDsInferContextImpl::checkBackendParams() <nvdsinfer_context_impl.cpp:1669> [UID = 4]: Could not find output layer 'Reshape' in engine
0:00:48.991547118 15955 0x555555e72ad0 INFO                 nvinfer gstnvinfer.cpp:619:gst_nvinfer_logger:<secondary3-nvinference-engine> NvDsInferContext[UID 4]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:1805> [UID = 4]: Use deserialized engine model: /media/daliel/hdd/from_main/deepstream/python/apps/deepstream-app-3/ocr_ctc_17.onnx_b4_gpu0_fp32.engine
[New Thread 0x7fff27a15700 (LWP 19824)]
[New Thread 0x7fff617fe700 (LWP 19825)]
[New Thread 0x7fff60ffd700 (LWP 19826)]
0:00:48.993186706 15955 0x555555e72ad0 INFO                 nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<secondary3-nvinference-engine> [UID 4]: Load new model:numbers_sgie3_config.txt sucessfully

Thread 1 "deepstream-test" received signal SIGSEGV, Segmentation fault.
0x00007fffa577aac0 in ?? () from /usr/lib/x86_64-linux-gnu/libnvinfer.so.7
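
As a side note, the "Cannot find binding of given name: Reshape" warning suggests that output-blob-names in numbers_sgie3_config.txt does not exactly match the output binding reported by the engine ("Reshape:0" in the FullDims info above). The full config is not shown here, so this is only a guess at the relevant lines:

  [property]
  # must match the engine's output binding name exactly (see the engine info above)
  output-blob-names=Reshape:0
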

The code:

  gst_init (&argc, &argv);
  
  loop = g_main_loop_new (NULL, FALSE);
  
  pipeline = gst_pipeline_new ("dstest3-pipeline");
  
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
 
  if (!pipeline || !streammux) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }
  gst_bin_add (GST_BIN (pipeline), streammux);
  
  for (i = 0; i < num_sources; i++) {
    GstPad *sinkpad, *srcpad;
    gchar pad_name[16] = { };
    //GstElement *source_bin = create_source_bin (i, uri.c_str());
    GstElement *source_bin = create_source_bin (i, uri);

    if (!source_bin) {
      g_printerr ("Failed to create source bin. Exiting.\n");
      return -1;
    }

    gst_bin_add (GST_BIN (pipeline), source_bin);

    g_snprintf (pad_name, 15, "sink_%u", i);
    sinkpad = gst_element_get_request_pad (streammux, pad_name);
    if (!sinkpad) {
      g_printerr ("Streammux request sink pad failed. Exiting.\n");
      return -1;
    }

    srcpad = gst_element_get_static_pad (source_bin, "src");
    if (!srcpad) {
      g_printerr ("Failed to get src pad of source bin. Exiting.\n");
      return -1;
    }

    if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
      return -1;
    }

    gst_object_unref (srcpad);
    gst_object_unref (sinkpad);
  }
  
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");
  if (!pgie) {
    g_printerr ("Failed to create primary inference pipeline 1 element. Exiting.\n");
    return -1;
  }
  // Use tracker
  nvtracker = gst_element_factory_make ("nvtracker", "tracker");

  //use secondary infer for classification
  sgie1 = gst_element_factory_make("nvinfer", "secondary1-nvinference-engine");
  if (!sgie1) {
    g_printerr ("Failed to create secondary inference pipeline 1 element. Exiting.\n");
    return -1;
  }
  
  tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");

  /* Use convertor to convert from NV12 to RGBA as required by nvosd */
  nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
  if (save_result){
    nvvidconv2 = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter1");
    if (!nvvidconv2) {
      g_printerr ("Failed to create nvvideo-converter2 element. Exiting.\n");
      return -1;
    }
  }
  caps_filter = gst_element_factory_make ("capsfilter", NULL);
  if (!caps_filter) {
    g_printerr ("Failed to create capsfilter element. Exiting.\n");
    return -1;
  }

  GstCaps *caps = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "RGBA", NULL);
  GstCapsFeatures *feature = gst_caps_features_new (GST_CAPS_FEATURES_NVMM, NULL);
  gst_caps_set_features (caps, 0, feature);
  g_object_set (G_OBJECT (caps_filter), "caps", caps, NULL);
  
  /* Create OSD to draw on the converted RGBA buffer */
  nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");

  /* Finally render the osd output */
#ifdef PLATFORM_TEGRA
  transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
#endif
  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
 

  if (!pgie || !nvtracker || !tiler || !nvvidconv || !nvosd || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }
  g_object_set(G_OBJECT(nvosd), "nvbuf-memory-type", NVBUF_MEM_CUDA_UNIFIED, NULL);
  

#ifdef PLATFORM_TEGRA
  if(!transform) {
    g_printerr ("One tegra element could not be created. Exiting.\n");
    return -1;
  }
#endif
  g_printerr("setup pipeline\n");
  g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
      MUXER_OUTPUT_HEIGHT, "batch-size", num_sources,
      "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

  /* Configure the nvinfer element using the nvinfer config file. */
  g_object_set (G_OBJECT (pgie),
      "config-file-path", "DeepStream/config_infer_primary_yoloV4.txt", NULL);
  g_object_set (G_OBJECT (pgie), "interval", 9, NULL);
  g_object_set (G_OBJECT (sgie1),"config-file-path", "sgie_car_models.txt", NULL);

  /* Override the batch-size set in the config file with the number of sources. */
  g_object_get (G_OBJECT (pgie), "batch-size", &pgie_batch_size, NULL);
  if (pgie_batch_size != num_sources) {
    g_printerr
        ("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
        pgie_batch_size, num_sources);
    g_object_set (G_OBJECT (pgie), "batch-size", num_sources, NULL);
  }

  if (!set_tracker_properties(nvtracker, GPUID)) {
    g_printerr ("Failed to set tracker properties. Exiting.\n");
    return -1;
  }
  g_object_set (G_OBJECT (nvosd),
      "nvbuf-memory-type", 3, NULL);
  tiler_rows = (guint) sqrt (num_sources);
  tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
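  /* e.g. num_sources = 3 gives rows = 1, columns = 3; num_sources = 4 gives rows = 2, columns = 2 */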
  /* we set the tiler properties here */
  g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
      "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);

  //set GPU id
  g_object_set (G_OBJECT (pgie), "gpu-id", GPUID, NULL);
  g_object_set (G_OBJECT (streammux), "gpu-id", GPUID, NULL);
  g_object_set (G_OBJECT (nvtracker), "gpu-id", GPUID, NULL);
  g_object_set (G_OBJECT (sgie1), "gpu-id", GPUID, NULL);
  
  g_object_set (G_OBJECT (tiler), "gpu-id", GPUID, NULL);
  g_object_set (G_OBJECT (nvvidconv), "gpu-id", GPUID, NULL);
  
  g_object_set (G_OBJECT (nvosd), "gpu-id", GPUID, NULL);

  g_printerr ("Create pipeline.\n");
  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* we add all elements into the pipeline */
  
  gst_bin_add_many (GST_BIN (pipeline), pgie, nvtracker, sgie1, tiler, nvvidconv, caps_filter, nvosd, sink,
      NULL);
  //link queue process
  if (!gst_element_link_many (streammux, pgie, nvtracker, sgie1, tiler, nvvidconv, caps_filter, nvosd, sink, NULL)) {
    g_printerr ("Elements process could not be linked. Exiting.\n");
    return -1;
  }
  
  tiler_src_pad = gst_element_get_static_pad (tiler, "sink");
  if (!tiler_src_pad)
    g_print ("Unable to get src pad\n");
  else
    gst_pad_add_probe (tiler_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
        tiler_src_pad_buffer_probe, NULL, NULL);

  /* Set the pipeline to "playing" state */
  GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "video-pipeline");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  
    g_print ("Running...\n");
  
  g_main_loop_run (loop);
  
    g_print ("Returned, stopping playback\n");
  
  gst_element_set_state (pipeline, GST_STATE_NULL);
  
    g_print ("Deleting pipeline\n");
  
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  //gst_deinit();
  
  //second pipe
  //gst_init (&argc, &argv);
  
  loop = g_main_loop_new (NULL, FALSE);
  pipeline = gst_pipeline_new ("dstest3-pipeline");
  if (!pipeline){
    g_printerr("Failed to create pipeline2. Exiting.\n");
    return -1;
  }
  /* Create nvstreammux instance to form batches from one or more sources. */
  
  streammux2 = gst_element_factory_make ("nvstreammux", "stream-muxer2");
  if (!streammux2){
    g_printerr("Failed to create streammux2 element. Exiting.\n");
    return -1;
  }
  g_object_set(G_OBJECT (streammux2), "width", 416, NULL);
  g_object_set(G_OBJECT (streammux2), "height", 416, NULL);
  g_object_set(G_OBJECT (streammux2), "batch-size", 1, NULL);
  g_object_set(G_OBJECT (streammux2), "batched-push-timeout", 4000000, NULL);
  
  if(!gst_bin_add (GST_BIN (pipeline), streammux2)){
    g_print("Failed add streammux2 to pipeline for image. Exiting.\n");
    return -1;
  }
  //source image input
  char images_uri[500];
  char test[3] = "%d";
  snprintf(images_uri, 500, "%s/images/%s.jpg", TMP_DIR, test);
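  /* images_uri now holds e.g. "<TMP_DIR>/images/%d.jpg"; the literal "%d" is kept
   * as a frame-index placeholder, presumably consumed by a multifilesrc-style
   * element inside create_source_image_bin(). */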

  GstElement *source_bin_image = create_source_image_bin (10, images_uri);

  if (!source_bin_image) {
    g_printerr ("Failed to create source bin image. Exiting.\n");
    return -1;
  }

  if(!gst_bin_add (GST_BIN (pipeline), source_bin_image)){
    g_print("Failed add source bin to pipeline for image. Exiting.\n");
    return -1;
  }
  GstPad *sinkpad2, *srcpad2;
  
  sinkpad2 = gst_element_get_request_pad (streammux2, "sink_0");
  if (!sinkpad2) {
    g_printerr ("Streammux2 request sink pad failed. Exiting.\n");
    return -1;
  }

  srcpad2 = gst_element_get_static_pad (source_bin_image, "src");
  if (!srcpad2) {
    g_printerr ("Failed to get src pad of source bin image. Exiting.\n");
    return -1;
  }

  if (gst_pad_link (srcpad2, sinkpad2) != GST_PAD_LINK_OK) {
    g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
    return -1;
  }

  gst_object_unref (srcpad2);
  gst_object_unref (sinkpad2);
  
  fakesink = gst_element_factory_make("fakesink", "fakesink_image");
  sgie2 = gst_element_factory_make("nvinfer", "primary2-nvinference-engine");
  if (!sgie2) {
    g_printerr ("Failed to create primary inference pipeline 2 element. Exiting.\n");
    return -1;
  }
  sgie3 = gst_element_factory_make("nvinfer", "secondary3-nvinference-engine");
  if (!sgie3) {
    g_printerr ("Failed to create secondary inference pipeline 2 element. Exiting.\n");
    return -1;
  }
  g_object_set (G_OBJECT (sgie2),"config-file-path", "DeepStream/config_infer_primary_yoloV4-tiny.txt", NULL);
  g_object_get (G_OBJECT (sgie2), "batch-size", &spgie_batch_size, NULL);
  if (spgie_batch_size != 1) {
    g_printerr("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n", pgie_batch_size, num_sources);
    g_object_set (G_OBJECT (sgie2), "batch-size", 1, NULL);
  }
  g_object_set (G_OBJECT (sgie3),"config-file-path", "numbers_sgie3_config.txt", NULL);
  g_object_set (G_OBJECT (sgie2), "gpu-id", GPUID, NULL);
  g_object_set (G_OBJECT (sgie3), "gpu-id", GPUID, NULL);
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);
  //pipeline2 for image decoding
  gst_bin_add_many (GST_BIN (pipeline), sgie2, sgie3, fakesink, NULL);
  if (!gst_element_link_many(streammux2, sgie2, sgie3, fakesink, NULL)){
    g_printerr ("Elements pipeline2 could not be linked. Exiting.\n");
    return -1;
  }
  sgie_sink_pad = gst_element_get_static_pad (sgie3, "sink");
  if (!sgie_sink_pad)
    g_print ("Unable to get sink pad\n");
  else
    gst_pad_add_probe (sgie_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        sgie_sink_pad_buffer_probe, NULL, NULL);

  GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "image-pipeline");
  
      g_print("Starting pipeline image\n");
      g_print("reading files %s\n", images_uri);
    
    gst_element_set_state (pipeline, GST_STATE_PLAYING);
    
      g_print ("Running...\n");
    g_main_loop_run (loop);
    
      g_print ("Returned, stopping playback\n");
    
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);

The model cannot be handled by TensorRT.

When I run only the image-processing pipeline, I also see this error, but the networks are loaded and the pipeline works as intended. In my case the primary detector for the image pipeline is not loaded, even when I exclude the secondary classifier from the pipeline in the code. If I run only one of the pipelines, it works.

0:01:08.706019628 26183 0x555555f40c00 INFO                 nvinfer gstnvinfer.cpp:619:gst_nvinfer_logger:<secondary3-nvinference-engine> NvDsInferContext[UID 4]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1701> [UID = 4]: deserialized trt engine from :/media/daliel/hdd/from_main/deepstream/python/apps/deepstream-app-3/ocr_ctc_17.onnx_b4_gpu0_fp32.engine
INFO: [FullDims Engine Info]: layers num: 2
0   INPUT  kFLOAT Placeholder:0   3x133x133       min: 1x3x133x133     opt: 4x3x133x133     Max: 4x3x133x133     
1   OUTPUT kFLOAT Reshape:0       880             min: 0               opt: 0               Max: 0               

0:01:08.706136068 26183 0x555555f40c00 INFO                 nvinfer gstnvinfer.cpp:619:gst_nvinfer_logger:<secondary3-nvinference-engine> NvDsInferContext[UID 4]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:1805> [UID = 4]: Use deserialized engine model: /media/daliel/hdd/from_main/deepstream/python/apps/deepstream-app-3/ocr_ctc_17.onnx_b4_gpu0_fp32.engine
[New Thread 0x7fff545e4700 (LWP 622)]
[New Thread 0x7fff54de5700 (LWP 623)]
[New Thread 0x7fff557fe700 (LWP 624)]
0:01:08.706981735 26183 0x555555f40c00 INFO                 nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<secondary3-nvinference-engine> [UID 4]: Load new model:numbers_sgie3_config.txt sucessfully

Thread 1 "deepstream-test" received signal SIGSEGV, Segmentation fault.
0x00007fffa577aac0 in ?? () from /usr/lib/x86_64-linux-gnu/libnvinfer.so.7

This message tells me that the network was loaded successfully:

0:01:08.706981735 26183 0x555555f40c00 INFO                 nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus:<secondary3-nvinference-engine> [UID 4]: Load new model:numbers_sgie3_config.txt sucessfully

Can you share your model and the nvinfer config file for us to reproduce the issue?

I created the engine using GitHub - marcoslucianops/DeepStream-Yolo: NVIDIA DeepStream SDK 6.1 / 6.0.1 / 6.0 configuration for YOLO models.
The second pipeline now starts, but I get a segmentation fault at a random time while the first pipeline is running. Sometimes everything works fine.
