Deepstream_parallel_inference_app add rtsp reconnects

What method do you use to achieve RTSP reconnection functionality?
Because I noticed that your configuration file doesn’t include the rtsp-reconnect-attempts setting.

Just modify the source code in the deepstream_source_bin.c like below.

static gboolean
create_rtsp_src_bin (NvDsSourceConfig * config, NvDsSrcBin * bin)
{
...
  //bin->rtsp_reconnect_interval_sec = config->rtsp_reconnect_interval_sec;
  bin->rtsp_reconnect_interval_sec = 10;
  //bin->rtsp_reconnect_attempts = config->rtsp_reconnect_attempts;
  bin->rtsp_reconnect_attempts = -1;
...

I would like to ask why you are using the following configuration:

sink0:
  enable: 0
  #Type - 1=FakeSink 2=EglSink 3=File 7=nv3dsink (Jetson only)
  type: 2
  sync: 1
  source-id: 0
  gpu-id: 0
  nvbuf-memory-type: 0

sink2:
  enable: 1
  #Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming
  type: 4
  #1=h264 2=h265
  codec: 1
  #encoder type 0=Hardware 1=Software
  enc-type: 0
  sync: 0
  bitrate: 2000000
  #H264 Profile - 0=Baseline 2=Main 4=High
  #H265 Profile - 0=Main 1=Main10
  profile: 0
  # set below properties in case of RTSPStreaming
  rtsp-port: 8554

Would there be any impact on the results if I used this configuration instead?

sink0:
  enable: 1
  #Type - 1=FakeSink 2=EglSink 3=File 7=nv3dsink (Jetson only)
  type: 2
  sync: 1
  source-id: 0
  gpu-id: 0
  nvbuf-memory-type: 0

I noticed that this is the only difference between our settings.

This is not directly related to the reconnection issue. Since I work on a remote server, I use the RTSPStreaming sink to output the result as an RTSP stream, so I can check the result with a media player on my side.

So when your RTSP stream disconnects and reconnects, you haven’t encountered the issue where the detection stops working, correct?

Yes. The detection works properly after the reconnection.

No matter how I test it, I still encounter the issue where the model’s detection stops working after RTSP reconnection. Are there any differences between the RTSP stream from a real surveillance camera and the RTSP stream set up using the instructions from the Build rtsp server? If this issue persists, does it mean that the problem cannot be resolved?

After continuous testing, I found that if I use the following function, the model detection remains functional after reconnection. What could be the reason for this?

#if 0
/* Stock version of create_parallel_infer_bin (kept disabled in this build).
 *
 * Builds the "parallel_infer_bin": an input tee fans the batched stream out
 * to one inference branch per sub-bin, and an nvdsmetamux element merges the
 * per-branch metadata back into a single output stream.
 *
 * Per-branch layout (upstream -> downstream):
 *   tee -> nvvideoconvert -> capsfilter (1920x1082, NVMM) -> [preprocess] ->
 *   primary GIE (or videotemplate) -> metamux sink pad i
 *
 * Each branch is assembled back-to-front: `sink_elem` always holds the
 * current most-upstream element (the branch's input), `src_elem` the branch
 * output that feeds the metamux.
 *
 * NOTE(review): the NVGSTDS_* helper macros are assumed to log and jump to
 * `done` on failure — confirm against deepstream_common.h before
 * restructuring this function.
 *
 * Returns TRUE on success, FALSE (after logging) on any failure.
 */
static gboolean
create_parallel_infer_bin (guint num_sub_bins, NvDsConfig *config,
    NvDsParallelGieBin *bin, AppCtx *appCtx)
{
  gboolean ret = FALSE;
  GstElement *sink_elem = NULL;   /* current branch input (attach point) */
  GstElement *src_elem = NULL;    /* branch output fed to the metamux */
  GstElement *nvvidconv = NULL, *caps_filter = NULL;
  GstCaps *caps = NULL;
  GstCapsFeatures *feature = NULL;
  gchar name[50];
  guint i = 0;

  bin->bin = gst_bin_new ("parallel_infer_bin");
  if (!bin->bin) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'parallel_infer_bin'");
    goto done;
  }

  /* Tee duplicates the incoming batched buffers for every branch. */
  bin->tee = gst_element_factory_make (NVDS_ELEM_TEE, "infer_bin_tee");
  if (!bin->tee) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'infer_bin_tee'");
    goto done;
  }
  gst_bin_add (GST_BIN (bin->bin), bin->tee);

  /* Metamux merges metadata from all branches; rules come from its
   * config file. */
  bin->muxer = gst_element_factory_make ("nvdsmetamux", "infer_bin_muxer");
  if (!bin->muxer) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'infer_bin_muxer'");
    goto done;
  }
  g_object_set (G_OBJECT (bin->muxer), "config-file",
		 GET_FILE_PATH (config->meta_mux_config.config_file_path), NULL);

  /* App-level probe on the metamux output to consume merged metadata. */
  NVGSTDS_ELEM_ADD_PROBE (bin->muxer_buffer_probe_id, bin->muxer, "src",
      body_pose_gie_src_pad_buffer_probe, GST_PAD_PROBE_TYPE_BUFFER,
      appCtx);

  gst_bin_add (GST_BIN (bin->bin), bin->muxer);

  for (i = 0; i < num_sub_bins; i++) {
    /* Primary inference stage: either a videotemplate bin or a regular
     * primary GIE bin, depending on per-branch config. */
    if (config->primary_gie_sub_bin_config[i].enable
		    || config->video_template_sub_bin_config[i].enable) {
      if (config->video_template_sub_bin_config[i].enable) {
        if (!create_primary_gie_videotemplate_bin (&config->video_template_sub_bin_config[i],
              &bin->primary_gie_bin[i])) {
          goto done;
        }
      } else {
        if (!create_primary_gie_bin (&config->primary_gie_sub_bin_config[i],
              &bin->primary_gie_bin[i])) {
          goto done;
        }
      }
      g_snprintf (name, sizeof (name), "primary_gie_%d_bin", i);
      gst_element_set_name (bin->primary_gie_bin[i].bin, name);
      gst_bin_add (GST_BIN (bin->bin), bin->primary_gie_bin[i].bin);

      sink_elem = bin->primary_gie_bin[i].bin;
      src_elem = bin->primary_gie_bin[i].bin;
    }

    /* Optional preprocess stage, placed upstream of the GIE. */
    if (config->pre_process_sub_bin_config[i].enable) {
      if (!create_preprocess_bin (&config->pre_process_sub_bin_config[i],
            &bin->preprocess_bin[i])) {
        g_print ("creating preprocess bin failed\n");
        goto done;
      }
      g_snprintf (name, sizeof (name), "preprocess_%d_bin", i);
      gst_element_set_name (bin->preprocess_bin[i].bin, name);
      gst_bin_add (GST_BIN (bin->bin), bin->preprocess_bin[i].bin);

      if (sink_elem) {
        NVGSTDS_LINK_ELEMENT (bin->preprocess_bin[i].bin, sink_elem);
      }

      sink_elem = bin->preprocess_bin[i].bin;
    }

    /* Add video convert to avoid parallel infer operate on the same batch meta */
    /* NOTE(review): height 1082 (not 1080) appears deliberate — the odd size
     * forces nvvideoconvert to produce a new buffer/batch-meta per branch. */
    nvvidconv = gst_element_factory_make ("nvvideoconvert", NULL);
    caps_filter = gst_element_factory_make ("capsfilter", NULL);
    caps =
        gst_caps_new_simple ("video/x-raw",
        "width", G_TYPE_INT, 1920,
        "height", G_TYPE_INT, 1082,
        NULL);
    feature = gst_caps_features_new ("memory:NVMM", NULL);
    gst_caps_set_features (caps, 0, feature);
    g_object_set (G_OBJECT (caps_filter), "caps", caps, NULL);
    gst_bin_add (GST_BIN (bin->bin), nvvidconv);
    gst_bin_add (GST_BIN (bin->bin), caps_filter);
    NVGSTDS_LINK_ELEMENT (nvvidconv, caps_filter);
    NVGSTDS_LINK_ELEMENT (caps_filter, sink_elem);
    sink_elem = nvvidconv;

    /* Hook the completed branch between the tee and the metamux. */
    link_element_to_tee_src_pad (bin->tee, sink_elem);
    link_element_to_metamux_sink_pad (bin->muxer, src_elem, i);
  }

  /* Expose the bin's external pads: tee input, metamux output. */
  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->tee, "sink");

  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->muxer, "src");

  ret = TRUE;
done:
  if (!ret) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ret;
}
#endif

Your config file may not be the same as mine because the image you attached only shows one video. So you can refer to our FAQ to get the GStreamer Pipeline Graph to compare the differences.
Also could you use our latest version 7.0 to try your scenario?

Alright, I will try testing with version 7.0. Thanks for your suggestions and help.
Are there any differences in RTSP stream setup and reconnection in version 7.0?

Yes. This is more of an issue with Gstreamer rtspsrc plugin. DS6.3 uses Gstreamer 1.16 and DS7.0 uses Gstreamer 1.20.

I encountered the same issue when testing with version 7.0. When I used the following code:

/* Modified create_parallel_infer_bin: adds per-branch source selection via
 * nvstreamdemux + nvstreammux, plus optional secondary GIE, analytics and
 * tracker stages per branch.
 *
 * Topology built here:
 *   tee -> queue -> metamux sink pad 0            (pass-through base branch)
 *   tee -> queue -> nvstreamdemux
 *   nvstreamdemux -> nvstreammux[i] -> [preprocess] -> primary GIE ->
 *     [tracker] -> [analytics] -> [secondary GIE] -> metamux sink pad i+1
 *
 * Branches are assembled back-to-front: `sink_elem` is the current branch
 * input, `src_elem` the branch output that feeds the metamux.
 *
 * NOTE(review): the NVGSTDS_* helper macros are assumed to log and jump to
 * `done` on failure — confirm against deepstream_common.h.
 * NOTE(review): the `sink_elem = bin->muxer;` assignment below is a dead
 * store (immediately overwritten), and `j` is unused.
 *
 * Returns TRUE on success, FALSE (after logging) on any failure.
 */
static gboolean
create_parallel_infer_bin (guint num_sub_bins, NvDsConfig *config,
    NvDsParallelGieBin *bin, AppCtx *appCtx)
{
  gboolean ret = FALSE;
  GstElement *sink_elem = NULL;   /* current branch input (attach point) */
  GstElement *src_elem = NULL;    /* branch output fed to the metamux */
  GstElement *queue = NULL;
  GstElement *nvvidconv = NULL, *caps_filter = NULL;
  GstCaps *caps = NULL;
  GstCapsFeatures *feature = NULL;
  gchar name[50];
  guint i, j;                     /* NOTE(review): j is unused */
  std::string str;                /* raw src-ids string from config */
  std::vector<std::string> vec;   /* src-ids split into tokens */
  guint src_id_num;               /* number of sources routed to branch i */

  bin->bin = gst_bin_new ("parallel_infer_bin");
  if (!bin->bin) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'parallel_infer_bin'");
    goto done;
  }

  /* Tee duplicates the incoming batched buffers for every branch. */
  bin->tee = gst_element_factory_make (NVDS_ELEM_TEE, "infer_bin_tee");
  if (!bin->tee) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'infer_bin_tee'");
    goto done;
  }
  gst_bin_add (GST_BIN (bin->bin), bin->tee);

  /* Metamux merges metadata from all branches; rules come from its
   * config file. */
  bin->muxer = gst_element_factory_make ("nvdsmetamux", "infer_bin_muxer");
  if (!bin->muxer) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'infer_bin_muxer'");
    goto done;
  }
  g_object_set (G_OBJECT (bin->muxer), "config-file",
		 GET_FILE_PATH (config->meta_mux_config.config_file_path), NULL);

  /* App-level probe on the metamux output to consume merged metadata. */
  NVGSTDS_ELEM_ADD_PROBE (bin->muxer_buffer_probe_id, bin->muxer, "src",
      body_pose_gie_src_pad_buffer_probe, GST_PAD_PROBE_TYPE_BUFFER,
      appCtx);

  gst_bin_add (GST_BIN (bin->bin), bin->muxer);
  sink_elem = bin->muxer;   /* NOTE(review): dead store, overwritten below */

  /* Base branch: tee -> queue -> metamux pad 0 (unprocessed stream). */
  queue = gst_element_factory_make (NVDS_ELEM_QUEUE, NULL);
  if (!queue) {
    NVGSTDS_ERR_MSG_V ("Could not create 'queue'");
    goto done;
  }
  gst_bin_add (GST_BIN (bin->bin), queue);
  link_element_to_metamux_sink_pad (bin->muxer, queue, 0);
  sink_elem = queue;

  link_element_to_tee_src_pad (bin->tee, sink_elem);

  /* Demuxer splits the batch back into per-source streams so each branch
   * can pick its own subset of sources. */
  bin->demuxer =
      gst_element_factory_make (NVDS_ELEM_STREAM_DEMUX, NULL);
  if (!bin->demuxer) {
    NVGSTDS_ERR_MSG_V ("Failed to create element 'demuxer'");
    goto done;
  }
  /* Forward EOS per stream rather than aggregating it. */
  g_object_set (G_OBJECT (bin->demuxer), "per-stream-eos", TRUE, NULL);
  gst_bin_add (GST_BIN (bin->bin), bin->demuxer);
  sink_elem = bin->demuxer;

  /* Second tee branch: tee -> queue -> demuxer. */
  queue = gst_element_factory_make (NVDS_ELEM_QUEUE, NULL);
  if (!queue) {
    NVGSTDS_ERR_MSG_V ("Could not create 'queue'");
    goto done;
  }
  gst_bin_add (GST_BIN (bin->bin), queue);
  NVGSTDS_LINK_ELEMENT (queue, sink_elem);
  sink_elem = queue;

  link_element_to_tee_src_pad (bin->tee, sink_elem);


  for (i = 0; i < num_sub_bins; i++) {
    sink_elem = src_elem = NULL;  /* restart branch assembly */

    /* Optional secondary GIE stage (branch output end). */
    if (config->primary_gie_sub_bin_config[i].enable
                    || config->video_template_sub_bin_config[i].enable) {

      if (config->num_secondary_gie_sub_bins > 0 && config->num_secondary_gie_num[i] > 0) {
         if (!create_secondary_gie_bin (config->num_secondary_gie_num[i],
              config->primary_gie_sub_bin_config[i].unique_id,
              config->secondary_gie_sub_bin_config[i],
              &bin->secondary_gie_bin[i])) {
           g_print("create_secondary_gie_bin failed");
           goto done;
         }
         g_snprintf (name, sizeof (name), "sgie_%d_bin", i);
         gst_element_set_name (bin->secondary_gie_bin[i].bin, name);
      	 gst_bin_add (GST_BIN (bin->bin), bin->secondary_gie_bin[i].bin);
         sink_elem = bin->secondary_gie_bin[i].bin;
         src_elem = bin->secondary_gie_bin[i].bin;
       }  
    }
    //add analysis
    /* Analytics requires the tracker to be enabled as well. */
    if (config->tracker_config[i].enable && config->dsanalytics_config[i].enable) {
      if (!create_dsanalytics_bin (&config->dsanalytics_config[i],
              &bin->dsanalytics_bin[i])) {
        g_print ("creating dsanalytics bin failed\n");
        goto done;
      }
      
      g_snprintf (name, sizeof (name), "analytics_%d_bin", i);
      gst_element_set_name (bin->dsanalytics_bin[i].bin, name);
      gst_bin_add (GST_BIN (bin->bin), bin->dsanalytics_bin[i].bin);
      if (sink_elem) {
        NVGSTDS_LINK_ELEMENT (bin->dsanalytics_bin[i].bin, sink_elem);
      }
      sink_elem = bin->dsanalytics_bin[i].bin;
      if (!src_elem) {
          src_elem = bin->dsanalytics_bin[i].bin;
      }
    }
    //add tracker
    if (config->tracker_config[i].enable) {
      if (!create_tracking_bin (&config->tracker_config[i],
              &bin->tracker_bin[i])) {
        g_print ("creating tracker bin failed\n");
        goto done;
      }
      
      g_snprintf (name, sizeof (name), "tracking_%d_bin", i);
      gst_element_set_name (bin->tracker_bin[i].bin, name);
      gst_bin_add (GST_BIN (bin->bin),
          bin->tracker_bin[i].bin);

      if (sink_elem) {
        NVGSTDS_LINK_ELEMENT (bin->tracker_bin[i].bin, sink_elem);
      }
      sink_elem = bin->tracker_bin[i].bin;     
      if (!src_elem) {
         src_elem = bin->tracker_bin[i].bin;
      }
    }
	  
    /* Primary inference stage: videotemplate bin or regular primary GIE. */
    if (config->primary_gie_sub_bin_config[i].enable
		    || config->video_template_sub_bin_config[i].enable) {
      if (config->video_template_sub_bin_config[i].enable) {
        if (!create_primary_gie_videotemplate_bin (&config->video_template_sub_bin_config[i],
              &bin->primary_gie_bin[i])) {
          goto done;
        }
      } else {
        if (!create_primary_gie_bin (&config->primary_gie_sub_bin_config[i],
              &bin->primary_gie_bin[i])) {
          goto done;
        }
      }
      g_snprintf (name, sizeof (name), "primary_gie_%d_bin", i);
      gst_element_set_name (bin->primary_gie_bin[i].bin, name);
      gst_bin_add (GST_BIN (bin->bin), bin->primary_gie_bin[i].bin);

      if (sink_elem) {
        NVGSTDS_LINK_ELEMENT (bin->primary_gie_bin[i].bin, sink_elem);
      }
     
      sink_elem = bin->primary_gie_bin[i].bin;
      if (!src_elem) {
         src_elem = bin->primary_gie_bin[i].bin;
      }
    }

    /* Optional preprocess stage, upstream of the GIE. */
    if (config->pre_process_sub_bin_config[i].enable) {
      if (!create_preprocess_bin (&config->pre_process_sub_bin_config[i],
            &bin->preprocess_bin[i])) {
        g_print ("creating preprocess bin failed\n");
        goto done;
      }
      g_snprintf (name, sizeof (name), "preprocess_%d_bin", i);
      gst_element_set_name (bin->preprocess_bin[i].bin, name);
      gst_bin_add (GST_BIN (bin->bin), bin->preprocess_bin[i].bin);

      if (sink_elem) {
        NVGSTDS_LINK_ELEMENT (bin->preprocess_bin[i].bin, sink_elem);
      }

      sink_elem = bin->preprocess_bin[i].bin;
    }

    /* streamdemux and streammux to select source to inference */
    bin->streammux[i] =
        gst_element_factory_make (NVDS_ELEM_STREAM_MUX, NULL);
    if (!bin->streammux[i]) {
      NVGSTDS_ERR_MSG_V ("Failed to create element 'streammux'");
      goto done;
    }
    gst_bin_add (GST_BIN (bin->bin), bin->streammux[i]);
     if (config->streammux_config.is_parsed){
      if(!set_streammux_properties (&config->streammux_config,
          bin->streammux[i])){
           NVGSTDS_WARN_MSG_V("Failed to set streammux properties");
      }
    }

    /* Batch size = number of sources routed to this branch (parsed from
     * the branch's src-ids config string). */
    str = config->srcids_config[i].src_ids;
    vec = split_string (str);
    src_id_num = vec.size();
    g_print("i:%d, src_id_num:%d\n", i, src_id_num);
    g_object_set (G_OBJECT (bin->streammux[i]), "batch-size", src_id_num, NULL);

    /* Wire the selected demuxer src pads into this branch's streammux. */
    if(!link_streamdemux_to_streammux(config, bin, i)){
        goto done;
    }
    
    NVGSTDS_LINK_ELEMENT (bin->streammux[i], sink_elem);
    
    /* Pad 0 is the pass-through branch, so infer branches start at i+1. */
    link_element_to_metamux_sink_pad (bin->muxer, src_elem, i+1);
  }

  /* Expose the bin's external pads: tee input, metamux output. */
  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->tee, "sink");

  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->muxer, "src");

  ret = TRUE;
done:
  if (!ret) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ret;
}

After executing a reconnection, the following messages appear, and after reconnection, model recognition disappears:

streaming stopped, reason error (-5)
nvstreammux: Successfully handled EOS for source_id=0
nvstreammux: Successfully handled EOS for source_id=0
** WARN: <watch_source_status:761>: No data from source 0 since last 10 sec. Trying reconnection
** INFO: <reset_source_pipeline:1719>: Resetting source 0
ERROR from element src_elem0: Could not write to resource.
Error details: ../gst/rtsp/gstrtspsrc.c(6607): gst_rtspsrc_try_send (): /GstPipeline:deepstream-tensorrt-openpose-pipeline/GstBin:multi_src_bin/GstBin:src_sub_bin0/GstRTSPSrc:src_elem0:
Could not send message. (Received end-of-file)
ERROR from element src_elem0: Could not write to resource.
Error details: ../gst/rtsp/gstrtspsrc.c(9034): gst_rtspsrc_pause (): /GstPipeline:deepstream-tensorrt-openpose-pipeline/GstBin:multi_src_bin/GstBin:src_sub_bin0/GstRTSPSrc:src_elem0:
Could not send message. (Received end-of-file)
ERROR from element src_elem0: Unauthorized
Error details: ../gst/rtsp/gstrtspsrc.c(6740): gst_rtspsrc_send (): /GstPipeline:deepstream-tensorrt-openpose-pipeline/GstBin:multi_src_bin/GstBin:src_sub_bin0/GstRTSPSrc:src_elem0:
Unauthorized (401)
ERROR from element src_elem0: Could not write to resource.
Error details: ../gst/rtsp/gstrtspsrc.c(8346): gst_rtspsrc_close (): /GstPipeline:deepstream-tensorrt-openpose-pipeline/GstBin:multi_src_bin/GstBin:src_sub_bin0/GstRTSPSrc:src_elem0:
Could not send message. (Generic error)

There are two occurrences of nvstreammux: Successfully handled EOS for source_id=0 right after the disconnection.

However, when I use the following code:

#if 1
/* Stock version of create_parallel_infer_bin (currently compiled in).
 *
 * Builds the "parallel_infer_bin": an input tee fans the batched stream out
 * to one inference branch per sub-bin, and an nvdsmetamux element merges the
 * per-branch metadata back into a single output stream.
 *
 * Per-branch layout (upstream -> downstream):
 *   tee -> nvvideoconvert -> capsfilter (1920x1082, NVMM) -> [preprocess] ->
 *   primary GIE (or videotemplate) -> metamux sink pad i
 *
 * Branches are assembled back-to-front: `sink_elem` is the current branch
 * input, `src_elem` the branch output that feeds the metamux.
 *
 * NOTE(review): the NVGSTDS_* helper macros are assumed to log and jump to
 * `done` on failure — confirm against deepstream_common.h before
 * restructuring this function.
 *
 * Returns TRUE on success, FALSE (after logging) on any failure.
 */
static gboolean
create_parallel_infer_bin (guint num_sub_bins, NvDsConfig *config,
    NvDsParallelGieBin *bin, AppCtx *appCtx)
{
  gboolean ret = FALSE;
  GstElement *sink_elem = NULL;   /* current branch input (attach point) */
  GstElement *src_elem = NULL;    /* branch output fed to the metamux */
  GstElement *nvvidconv = NULL, *caps_filter = NULL;
  GstCaps *caps = NULL;
  GstCapsFeatures *feature = NULL;
  gchar name[50];
  guint i = 0;

  bin->bin = gst_bin_new ("parallel_infer_bin");
  if (!bin->bin) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'parallel_infer_bin'");
    goto done;
  }

  /* Tee duplicates the incoming batched buffers for every branch. */
  bin->tee = gst_element_factory_make (NVDS_ELEM_TEE, "infer_bin_tee");
  if (!bin->tee) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'infer_bin_tee'");
    goto done;
  }
  gst_bin_add (GST_BIN (bin->bin), bin->tee);

  /* Metamux merges metadata from all branches; rules come from its
   * config file. */
  bin->muxer = gst_element_factory_make ("nvdsmetamux", "infer_bin_muxer");
  if (!bin->muxer) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'infer_bin_muxer'");
    goto done;
  }
  g_object_set (G_OBJECT (bin->muxer), "config-file",
		 GET_FILE_PATH (config->meta_mux_config.config_file_path), NULL);

  /* App-level probe on the metamux output to consume merged metadata. */
  NVGSTDS_ELEM_ADD_PROBE (bin->muxer_buffer_probe_id, bin->muxer, "src",
      body_pose_gie_src_pad_buffer_probe, GST_PAD_PROBE_TYPE_BUFFER,
      appCtx);

  gst_bin_add (GST_BIN (bin->bin), bin->muxer);

  for (i = 0; i < num_sub_bins; i++) {
    /* Primary inference stage: videotemplate bin or regular primary GIE. */
    if (config->primary_gie_sub_bin_config[i].enable
		    || config->video_template_sub_bin_config[i].enable) {
      if (config->video_template_sub_bin_config[i].enable) {
        if (!create_primary_gie_videotemplate_bin (&config->video_template_sub_bin_config[i],
              &bin->primary_gie_bin[i])) {
          goto done;
        }
      } else {
        if (!create_primary_gie_bin (&config->primary_gie_sub_bin_config[i],
              &bin->primary_gie_bin[i])) {
          goto done;
        }
      }
      g_snprintf (name, sizeof (name), "primary_gie_%d_bin", i);
      gst_element_set_name (bin->primary_gie_bin[i].bin, name);
      gst_bin_add (GST_BIN (bin->bin), bin->primary_gie_bin[i].bin);

      sink_elem = bin->primary_gie_bin[i].bin;
      src_elem = bin->primary_gie_bin[i].bin;
    }

    /* Optional preprocess stage, upstream of the GIE. */
    if (config->pre_process_sub_bin_config[i].enable) {
      if (!create_preprocess_bin (&config->pre_process_sub_bin_config[i],
            &bin->preprocess_bin[i])) {
        g_print ("creating preprocess bin failed\n");
        goto done;
      }
      g_snprintf (name, sizeof (name), "preprocess_%d_bin", i);
      gst_element_set_name (bin->preprocess_bin[i].bin, name);
      gst_bin_add (GST_BIN (bin->bin), bin->preprocess_bin[i].bin);

      if (sink_elem) {
        NVGSTDS_LINK_ELEMENT (bin->preprocess_bin[i].bin, sink_elem);
      }

      sink_elem = bin->preprocess_bin[i].bin;
    }

    /* Add video convert to avoid parallel infer operate on the same batch meta */
    /* NOTE(review): height 1082 (not 1080) appears deliberate — the odd size
     * forces nvvideoconvert to produce a new buffer/batch-meta per branch. */
    nvvidconv = gst_element_factory_make ("nvvideoconvert", NULL);
    caps_filter = gst_element_factory_make ("capsfilter", NULL);
    caps =
        gst_caps_new_simple ("video/x-raw",
        "width", G_TYPE_INT, 1920,
        "height", G_TYPE_INT, 1082,
        NULL);
    feature = gst_caps_features_new ("memory:NVMM", NULL);
    gst_caps_set_features (caps, 0, feature);
    g_object_set (G_OBJECT (caps_filter), "caps", caps, NULL);
    gst_bin_add (GST_BIN (bin->bin), nvvidconv);
    gst_bin_add (GST_BIN (bin->bin), caps_filter);
    NVGSTDS_LINK_ELEMENT (nvvidconv, caps_filter);
    NVGSTDS_LINK_ELEMENT (caps_filter, sink_elem);
    sink_elem = nvvidconv;

    /* Hook the completed branch between the tee and the metamux. */
    link_element_to_tee_src_pad (bin->tee, sink_elem);
    link_element_to_metamux_sink_pad (bin->muxer, src_elem, i);
  }

  /* Expose the bin's external pads: tee input, metamux output. */
  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->tee, "sink");

  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->muxer, "src");

  ret = TRUE;
done:
  if (!ret) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ret;
}
#endif

After executing a reconnection, the following messages appear, and after reconnection, model recognition works correctly:

streaming stopped, reason error (-5)
nvstreammux: Successfully handled EOS for source_id=0
** WARN: <watch_source_status:761>: No data from source 0 since last 10 sec. Trying reconnection
** INFO: <reset_source_pipeline:1719>: Resetting source 0
ERROR from element src_elem0: Could not write to resource.
Error details: ../gst/rtsp/gstrtspsrc.c(6607): gst_rtspsrc_try_send (): /GstPipeline:deepstream-tensorrt-openpose-pipeline/GstBin:multi_src_bin/GstBin:src_sub_bin0/GstRTSPSrc:src_elem0:
Could not send message. (Received end-of-file)
ERROR from element src_elem0: Could not write to resource.
Error details: ../gst/rtsp/gstrtspsrc.c(9034): gst_rtspsrc_pause (): /GstPipeline:deepstream-tensorrt-openpose-pipeline/GstBin:multi_src_bin/GstBin:src_sub_bin0/GstRTSPSrc:src_elem0:
Could not send message. (Received end-of-file)
ERROR from element src_elem0: Unauthorized
Error details: ../gst/rtsp/gstrtspsrc.c(6740): gst_rtspsrc_send (): /GstPipeline:deepstream-tensorrt-openpose-pipeline/GstBin:multi_src_bin/GstBin:src_sub_bin0/GstRTSPSrc:src_elem0:
Unauthorized (401)
ERROR from element src_elem0: Could not write to resource.
Error details: ../gst/rtsp/gstrtspsrc.c(8346): gst_rtspsrc_close (): /GstPipeline:deepstream-tensorrt-openpose-pipeline/GstBin:multi_src_bin/GstBin:src_sub_bin0/GstRTSPSrc:src_elem0:
Could not send message. (Generic error)

In this case, there is only one occurrence of nvstreammux: Successfully handled EOS for source_id=0 right after the disconnection.

I’m not sure if this will have any structural impact. I am still testing this further…

OK. It’s weird that it worked on my side. Have you tried to compare the pipelines of the 2 scenarios by referring to the FAQ I attached before?

I set up an RTSP stream using your method, but after disconnection and reconnection, the following messages appear:

** WARN: <watch_source_status:761>: No data from source 0 since last 10 sec. Trying reconnection
** INFO: <reset_source_pipeline:1719>: Resetting source 0
** WARN: <watch_source_status:761>: No data from source 0 since last 10 sec. Trying reconnection
** INFO: <reset_source_pipeline:1719>: Resetting source 0
ERROR from element src_elem0: Could not open resource for reading.
Error details: ../gst/rtsp/gstrtspsrc.c(6427): gst_rtspsrc_setup_auth (): /GstPipeline:deepstream-tensorrt-openpose-pipeline/GstBin:multi_src_bin/GstBin:src_sub_bin0/GstRTSPSrc:src_elem0:
No supported authentication protocol was found
ERROR from element src_elem0: Not found
Error details: ../gst/rtsp/gstrtspsrc.c(6736): gst_rtspsrc_send (): /GstPipeline:deepstream-tensorrt-openpose-pipeline/GstBin:multi_src_bin/GstBin:src_sub_bin0/GstRTSPSrc:src_elem0:
Not Found (404)
** INFO: <reset_source_pipeline:1719>: Resetting source 0

Additionally, the model recognition still works fine. The message nvstreammux: Successfully handled EOS for source_id=0 does not appear, but I am unsure if this is causing any issues.

Do you know where this message originates from?

It seems like using a self-built RTSP stream and a real surveillance RTSP stream might have some differences.

Yes. It’s from the Gstreamer plugin rtspsrc gstrtspsrc.c. But if you have properly handled the error message in the bus_call, you can ignore this log and wait until the reconnection succeeds.

Why does the following message not appear when using a self-built RTSP stream after disconnection?

nvstreammux: Successfully handled EOS for source_id=0

I tried forwarding the RTSP stream from the surveillance camera through the same method to create a self-built RTSP stream. After disconnection, this message still doesn’t appear, and the model recognition continues to work after reconnection. However, when using the RTSP stream directly from the surveillance camera, the model recognition stops working after disconnection. Why does this issue occur?

Test results show that when you first stop the running RTSP server container with the command:

docker run --rm -it --network=host aler9/rtsp-simple-server

The following message appears:

nvstreammux: Successfully handled EOS for source_id=0
nvstreammux: Successfully handled EOS for source_id=0

Then, after restarting the RTSP server container, the issue of the model recognition disappearing reoccurs.

The EOS was sent by the RTSP source, so this behavior is determined by gstrtspsrc. Understanding the difference between your two operations requires debugging the GStreamer source code.

Have you ever tried to just stop the ffmpeg command?

I recommend debugging the nvinfer source code to check whether the data is sent to the TensorRT. You can add some log info in the sources\libs\nvdsinfer\nvdsinfer_context_impl.cpp.

NvDsInferStatus
NvDsInferContextImpl::queueInputBatch(NvDsInferContextBatchInput &batchInput)

If you only stop the ffmpeg command and then restart it, the RTSP stream reconnects, and the model recognition remains functional. However, if you stop the RTSP server and then restart it, when the RTSP stream disconnects and reconnects, the model recognition disappears. You can try it out yourself.

Could you describe that step by step from the beginning?

I used your method to set up an RTSP server to forward my RTSP stream. If I directly stop and restart the RTSP stream, the model recognition works fine after the RTSP disconnects and reconnects. However, if I stop and restart the RTSP server, the model recognition disappears after the RTSP disconnects and reconnects.
RTSP server is docker run --rm -it --network=host aler9/rtsp-simple-server


The issue caused by shutting down the RTSP server is the same as when I directly pull the RTSP stream from the camera and reconnect after a disconnection.

Additionally, because I think this topic might be more challenging, I adopted the following code. The goal is to perform parallel inference using multiple models without needing to branch and distinguish which model to use for each stream. Initially, both models could recognize objects correctly. However, after the RTSP stream disconnects and reconnects, only the first model continues to produce recognition results, while the second model stops working. I suspect this issue might be related to the RTSP disconnection and reconnection, similar to the previous situation.

#if 0
/* Third copy of the stock create_parallel_infer_bin (disabled), quoted for
 * the multi-model-per-stream experiment: every branch sees every stream via
 * the tee, so no per-branch source selection is performed.
 *
 * Per-branch layout (upstream -> downstream):
 *   tee -> nvvideoconvert -> capsfilter (1920x1082, NVMM) -> [preprocess] ->
 *   primary GIE (or videotemplate) -> metamux sink pad i
 *
 * Branches are assembled back-to-front: `sink_elem` is the current branch
 * input, `src_elem` the branch output that feeds the metamux.
 *
 * NOTE(review): the NVGSTDS_* helper macros are assumed to log and jump to
 * `done` on failure — confirm against deepstream_common.h.
 *
 * Returns TRUE on success, FALSE (after logging) on any failure.
 */
static gboolean
create_parallel_infer_bin (guint num_sub_bins, NvDsConfig *config,
    NvDsParallelGieBin *bin, AppCtx *appCtx)
{
  gboolean ret = FALSE;
  GstElement *sink_elem = NULL;   /* current branch input (attach point) */
  GstElement *src_elem = NULL;    /* branch output fed to the metamux */
  GstElement *nvvidconv = NULL, *caps_filter = NULL;
  GstCaps *caps = NULL;
  GstCapsFeatures *feature = NULL;
  gchar name[50];
  guint i = 0;

  bin->bin = gst_bin_new ("parallel_infer_bin");
  if (!bin->bin) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'parallel_infer_bin'");
    goto done;
  }

  /* Tee duplicates the incoming batched buffers for every branch. */
  bin->tee = gst_element_factory_make (NVDS_ELEM_TEE, "infer_bin_tee");
  if (!bin->tee) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'infer_bin_tee'");
    goto done;
  }
  gst_bin_add (GST_BIN (bin->bin), bin->tee);

  /* Metamux merges metadata from all branches; rules come from its
   * config file. */
  bin->muxer = gst_element_factory_make ("nvdsmetamux", "infer_bin_muxer");
  if (!bin->muxer) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'infer_bin_muxer'");
    goto done;
  }
  g_object_set (G_OBJECT (bin->muxer), "config-file",
		 GET_FILE_PATH (config->meta_mux_config.config_file_path), NULL);

  /* App-level probe on the metamux output to consume merged metadata. */
  NVGSTDS_ELEM_ADD_PROBE (bin->muxer_buffer_probe_id, bin->muxer, "src",
      body_pose_gie_src_pad_buffer_probe, GST_PAD_PROBE_TYPE_BUFFER,
      appCtx);

  gst_bin_add (GST_BIN (bin->bin), bin->muxer);
  //g_print (".......................................num_sub_bins: %d \n",num_sub_bins);
  for (i = 0; i < num_sub_bins; i++) {
    /* Primary inference stage: videotemplate bin or regular primary GIE. */
    if (config->primary_gie_sub_bin_config[i].enable
		    || config->video_template_sub_bin_config[i].enable) {
      if (config->video_template_sub_bin_config[i].enable) {
        if (!create_primary_gie_videotemplate_bin (&config->video_template_sub_bin_config[i],
              &bin->primary_gie_bin[i])) {
          goto done;
        }
      } else {
        if (!create_primary_gie_bin (&config->primary_gie_sub_bin_config[i],
              &bin->primary_gie_bin[i])) {
          goto done;
        }
      }
      g_snprintf (name, sizeof (name), "primary_gie_%d_bin", i);
      gst_element_set_name (bin->primary_gie_bin[i].bin, name);
      gst_bin_add (GST_BIN (bin->bin), bin->primary_gie_bin[i].bin);

      sink_elem = bin->primary_gie_bin[i].bin;
      src_elem = bin->primary_gie_bin[i].bin;
    }

    /* Optional preprocess stage, upstream of the GIE. */
    if (config->pre_process_sub_bin_config[i].enable) {
      if (!create_preprocess_bin (&config->pre_process_sub_bin_config[i],
            &bin->preprocess_bin[i])) {
        g_print ("creating preprocess bin failed\n");
        goto done;
      }
      g_snprintf (name, sizeof (name), "preprocess_%d_bin", i);
      gst_element_set_name (bin->preprocess_bin[i].bin, name);
      gst_bin_add (GST_BIN (bin->bin), bin->preprocess_bin[i].bin);

      if (sink_elem) {
        NVGSTDS_LINK_ELEMENT (bin->preprocess_bin[i].bin, sink_elem);
      }

      sink_elem = bin->preprocess_bin[i].bin;
    }

    /* Add video convert to avoid parallel infer operate on the same batch meta */
    /* NOTE(review): height 1082 (not 1080) appears deliberate — the odd size
     * forces nvvideoconvert to produce a new buffer/batch-meta per branch. */
    nvvidconv = gst_element_factory_make ("nvvideoconvert", NULL);
    caps_filter = gst_element_factory_make ("capsfilter", NULL);
    caps =
        gst_caps_new_simple ("video/x-raw",
        "width", G_TYPE_INT, 1920,
        "height", G_TYPE_INT, 1082,
        NULL);
    feature = gst_caps_features_new ("memory:NVMM", NULL);
    gst_caps_set_features (caps, 0, feature);
    g_object_set (G_OBJECT (caps_filter), "caps", caps, NULL);
    gst_bin_add (GST_BIN (bin->bin), nvvidconv);
    gst_bin_add (GST_BIN (bin->bin), caps_filter);
    NVGSTDS_LINK_ELEMENT (nvvidconv, caps_filter);
    NVGSTDS_LINK_ELEMENT (caps_filter, sink_elem);
    sink_elem = nvvidconv;

    /* Hook the completed branch between the tee and the metamux. */
    link_element_to_tee_src_pad (bin->tee, sink_elem);
    link_element_to_metamux_sink_pad (bin->muxer, src_elem, i);
  }

  /* Expose the bin's external pads: tee input, metamux output. */
  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->tee, "sink");

  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->muxer, "src");

  ret = TRUE;
done:
  if (!ret) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ret;
}