Issues in modifying deepstream_sink_bin.c in order to add support for rtmpsink

I’m trying to add support for rtmpsink on the deepstream pipeline setup on my T4 instance.

I’ve already referred to the following posts: 1 2 3
And I could successfully compile the following with deepstream-app (under the sample_apps).

/**
 * create_rtmpsink_bin:
 * @config: encoder settings (codec, encoder type, bitrate, profile,
 *          iframe interval, gpu-id) plus the RTMP publish URL.
 * @bin:    sink sub-bin whose elements are created and linked here.
 *
 * Builds a sink sub-bin that publishes the stream to an RTMP server:
 *
 *   queue -> videoconvert -> capsfilter(I420) -> encoder -> h26xparse
 *         -> flvmux -> rtmpsink
 *
 * Returns: TRUE on success, FALSE on any failure (partially created
 * elements are owned by @bin and cleaned up by the caller's teardown).
 */
static gboolean
create_rtmpsink_bin (NvDsSinkEncoderConfig * config, NvDsSinkBinSubBin * bin)
{
  GstCaps *caps = NULL;
  gboolean ret = FALSE;
  gchar elem_name[50];
  gchar encode_name[50];

  /* Per-instance suffix: every element name below must be unique across
   * all sink sub-bins, otherwise gst_bin_add_many() / pipeline assembly
   * fails silently for the duplicate. */
  uid++;

  g_snprintf (elem_name, sizeof (elem_name), "sink_sub_bin%d", uid);
  bin->bin = gst_bin_new (elem_name);
  if (!bin->bin) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }

  g_snprintf (elem_name, sizeof (elem_name), "sink_sub_bin_queue%d", uid);
  bin->queue = gst_element_factory_make (NVDS_ELEM_QUEUE, elem_name);
  if (!bin->queue) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }

  g_snprintf (elem_name, sizeof (elem_name), "sink_sub_bin_transform%d", uid);
  bin->transform = gst_element_factory_make (NVDS_ELEM_VIDEO_CONV, elem_name);
  if (!bin->transform) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }

  g_snprintf (elem_name, sizeof (elem_name), "sink_sub_bin_cap_filter%d", uid);
  bin->cap_filter = gst_element_factory_make (NVDS_ELEM_CAPS_FILTER, elem_name);
  if (!bin->cap_filter) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }

  /* Force I420 between the converter and the encoder. SW encoders
   * (x264enc/x265enc) need system memory; HW encoders take NVMM. */
  if (config->enc_type == NV_DS_ENCODER_TYPE_SW)
    caps = gst_caps_from_string ("video/x-raw, format=I420");
  else
    caps = gst_caps_from_string ("video/x-raw(memory:NVMM), format=I420");

  g_object_set (G_OBJECT (bin->cap_filter), "caps", caps, NULL);

  g_snprintf (encode_name, sizeof (encode_name), "sink_sub_bin_encoder%d", uid);

  switch (config->codec) {
    case NV_DS_ENCODER_H264:
      g_snprintf (elem_name, sizeof (elem_name), "sink_sub_bin_parser%d", uid);
      bin->codecparse = gst_element_factory_make ("h264parse", elem_name);
      /* Create the encoder exactly once; the earlier version also made an
       * unconditional HW-encoder instance first, leaking it when the
       * if/else overwrote bin->encoder. */
      if (config->enc_type == NV_DS_ENCODER_TYPE_SW)
        bin->encoder = gst_element_factory_make (NVDS_ELEM_ENC_H264_SW, encode_name);
      else
        bin->encoder = gst_element_factory_make (NVDS_ELEM_ENC_H264_HW, encode_name);
      break;
    case NV_DS_ENCODER_H265:
      g_snprintf (elem_name, sizeof (elem_name), "sink_sub_bin_parser%d", uid);
      bin->codecparse = gst_element_factory_make ("h265parse", elem_name);
      if (config->enc_type == NV_DS_ENCODER_TYPE_SW)
        bin->encoder = gst_element_factory_make (NVDS_ELEM_ENC_H265_SW, encode_name);
      else
        bin->encoder = gst_element_factory_make (NVDS_ELEM_ENC_H265_HW, encode_name);
      break;
    default:
      goto done;
  }

  if (!bin->encoder) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", encode_name);
    goto done;
  }

  if (config->enc_type == NV_DS_ENCODER_TYPE_SW) {
    /* bitrate is in kbits/sec for software encoders x264enc and x265enc */
    g_object_set (G_OBJECT (bin->encoder), "bitrate", config->bitrate / 1000, NULL);
  } else {
    g_object_set (G_OBJECT (bin->encoder), "bitrate", config->bitrate, NULL);
    g_object_set (G_OBJECT (bin->encoder), "profile", config->profile, NULL);
    g_object_set (G_OBJECT (bin->encoder), "iframeinterval",
        config->iframeinterval, NULL);
  }

#ifdef IS_TEGRA
  g_object_set (G_OBJECT (bin->encoder), "preset-level", 1, NULL);
  g_object_set (G_OBJECT (bin->encoder), "insert-sps-pps", 1, NULL);
  g_object_set (G_OBJECT (bin->encoder), "bufapi-version", 1, NULL);
#else
  g_object_set (G_OBJECT (bin->transform), "gpu-id", config->gpu_id, NULL);
#endif

  /* Give the mux and the sink their own unique names. Previously both
   * reused the stale "sink_sub_bin_cap_filter%d" string, so two elements
   * in the same bin carried the identical name and the second
   * gst_bin_add_many() entry was rejected — data never reached rtmpsink. */
  g_snprintf (elem_name, sizeof (elem_name), "sink_sub_bin_mux%d", uid);
  bin->flvmux = gst_element_factory_make ("flvmux", elem_name);
  if (!bin->flvmux) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }
  /* Do NOT reset "name" to a fixed string ("mux") here: with multiple
   * sink bins that would collide again. streamable=TRUE is required for
   * live RTMP output. */
  g_object_set (G_OBJECT (bin->flvmux), "streamable", TRUE, NULL);

  g_snprintf (elem_name, sizeof (elem_name), "sink_sub_bin_sink%d", uid);
  bin->sink = gst_element_factory_make ("rtmpsink", elem_name);
  if (!bin->sink) {
    NVGSTDS_ERR_MSG_V ("Failed to create '%s'", elem_name);
    goto done;
  }
  g_object_set (G_OBJECT (bin->sink), "location", config->rtmp_location, NULL);

  g_print ("%s: streaming to '%s'\n", __func__, config->rtmp_location);

  /* The caps filter is now part of the chain; the earlier version created
   * and configured it but never added or linked it, so the I420 caps
   * constraint was never applied. */
  gst_bin_add_many (GST_BIN (bin->bin), bin->queue, bin->transform,
      bin->cap_filter, bin->encoder, bin->codecparse, bin->flvmux,
      bin->sink, NULL);

  NVGSTDS_LINK_ELEMENT (bin->queue, bin->transform);
  NVGSTDS_LINK_ELEMENT (bin->transform, bin->cap_filter);
  NVGSTDS_LINK_ELEMENT (bin->cap_filter, bin->encoder);
  NVGSTDS_LINK_ELEMENT (bin->encoder, bin->codecparse);
  NVGSTDS_LINK_ELEMENT (bin->codecparse, bin->flvmux);
  NVGSTDS_LINK_ELEMENT (bin->flvmux, bin->sink);
  NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->queue, "sink");

  ret = TRUE;
  g_print ("%s: Started streaming RTMP\n", __func__);

done:
  if (caps) {
    gst_caps_unref (caps);
  }
  if (!ret) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ret;
}

When I start the pipeline, I get the following output:

**PERF: 29.26 (4.21)
**PERF: 0.00 (1.82)
**PERF: 0.00 (1.16)
**PERF: 0.00 (0.85)
**PERF: 0.00 (0.67)
**PERF: 0.00 (0.56)
**PERF: 0.00 (0.47)
**PERF: 0.00 (0.41)
**PERF: 0.00 (0.37)

There seems to be an issue with the rtmpsink, as the stream can't flow through the pipeline.
Any kind of help would be appreciated.

• Hardware Platform (Jetson / GPU)
T4
• DeepStream Version
5.0
• TensorRT Version
7.0.0.11

The codes seem OK. Can you provide more information about the failure?

@Fiona.Chen Thanks for your response.

  • After making the above changes, the deepstream-app under sample_apps compiles with the following output:
$ make
cc -c -o ../../apps-common/src/deepstream_sink_bin.o -I../../apps-common/includes -I../../../includes -DDS_VERSION_MINOR=0 -DDS_VERSION_MAJOR=5 `pkg-config --cflags gstreamer-1.0 gstre
amer-video-1.0 x11` ../../apps-common/src/deepstream_sink_bin.c
cc -o deepstream-app deepstream_app.o deepstream_app_main.o deepstream_app_config_parser.o ../../apps-common/src/deepstream_tracker_bin.o ../../apps-common/src/deepstream_primary_gie_b
in.o ../../apps-common/src/deepstream_source_bin.o ../../apps-common/src/deepstream_c2d_msg.o ../../apps-common/src/deepstream_config_file_parser.o ../../apps-common/src/deepstream_com
mon.o ../../apps-common/src/deepstream_sink_bin.o ../../apps-common/src/deepstream_perf.o ../../apps-common/src/deepstream_dewarper_bin.o ../../apps-common/src/deepstream_dsexample.o .
./../apps-common/src/deepstream_secondary_gie_bin.o ../../apps-common/src/deepstream_tiled_display_bin.o ../../apps-common/src/deepstream_osd_bin.o ../../apps-common/src/deepstream_str
eammux.o -L/opt/nvidia/deepstream/deepstream-5.0/lib/ -lnvdsgst_meta -lnvds_meta -lnvdsgst_helper -lnvdsgst_smartrecord -lnvds_utils -lm -lgstrtspserver-1.0 -ldl -Wl,-rpath,/opt/nvidia
/deepstream/deepstream-5.0/lib/ `pkg-config --libs gstreamer-1.0 gstreamer-video-1.0 x11`
  • Then I proceed to run the binary with the deepstream_app_config_yoloV2.txt, I get the following output:
create_rtmpsink_bin: Started streaming RTMP
Warn: 'threshold' parameter has been deprecated. Use 'pre-cluster-threshold' instead.
0:00:01.509149698  3424 0x556ce6954b00 INFO                 nvinfer gstnvinfer.cpp:602:gst_nvinfer_logger:<primary_gie> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::buildModel() <nvdsinfer_context_impl.cpp:1591> [UID = 1]: Trying to create engine from model files
Loading pre-trained weights...
Loading weights of yolov2 complete!
Total Number of weights read : 50983561
Loading pre-trained weights...
Loading weights of yolov2 complete!
Total Number of weights read : 50983561
Loading weights of yolov2 complete!
Total Number of weights read : 50983561
Building Yolo network...
      layer               inp_size            out_size       weightPtr
(0)   conv-bn-leaky     3 x 608 x 608      32 x 608 x 608    992   
(1)   maxpool          32 x 608 x 608      32 x 304 x 304    992   
(2)   conv-bn-leaky    32 x 304 x 304      64 x 304 x 304    19680 
(3)   maxpool          64 x 304 x 304      64 x 152 x 152    19680 
(4)   conv-bn-leaky    64 x 152 x 152     128 x 152 x 152    93920 
(5)   conv-bn-leaky   128 x 152 x 152      64 x 152 x 152    102368
(6)   conv-bn-leaky    64 x 152 x 152     128 x 152 x 152    176608
(7)   maxpool         128 x 152 x 152     128 x  76 x  76    176608
(8)   conv-bn-leaky   128 x  76 x  76     256 x  76 x  76    472544
(9)   conv-bn-leaky   256 x  76 x  76     128 x  76 x  76    505824
(10)  conv-bn-leaky   128 x  76 x  76     256 x  76 x  76    801760
(11)  maxpool         256 x  76 x  76     256 x  38 x  38    801760
(12)  conv-bn-leaky   256 x  38 x  38     512 x  38 x  38    1983456
(13)  conv-bn-leaky   512 x  38 x  38     256 x  38 x  38    2115552
(14)  conv-bn-leaky   256 x  38 x  38     512 x  38 x  38    3297248
(15)  conv-bn-leaky   512 x  38 x  38     256 x  38 x  38    3429344
(16)  conv-bn-leaky   256 x  38 x  38     512 x  38 x  38    4611040
(17)  maxpool         512 x  38 x  38     512 x  19 x  19    4611040
(18)  conv-bn-leaky   512 x  19 x  19    1024 x  19 x  19    9333728
(19)  conv-bn-leaky  1024 x  19 x  19     512 x  19 x  19    9860064
(20)  conv-bn-leaky   512 x  19 x  19    1024 x  19 x  19    14582752
(21)  conv-bn-leaky  1024 x  19 x  19     512 x  19 x  19    15109088
(22)  conv-bn-leaky   512 x  19 x  19    1024 x  19 x  19    19831776
(23)  conv-bn-leaky  1024 x  19 x  19    1024 x  19 x  19    29273056
(24)  conv-bn-leaky  1024 x  19 x  19    1024 x  19 x  19    38714336
(25)  route                  -            512 x  38 x  38    38714336
(26)  conv-bn-leaky   512 x  38 x  38      64 x  38 x  38    38747360
(27)  reorg            64 x  38 x  38     256 x  19 x  19    38747360
(28)  route                  -           1280 x  19 x  19    38747360
(29)  conv-bn-leaky  1280 x  19 x  19    1024 x  19 x  19    50547936
(30)  conv-linear    1024 x  19 x  19     425 x  19 x  19    50983561
(31)  region          425 x  19 x  19     425 x  19 x  19    50983561
Anchors are being converted to network input resolution i.e. Anchors x 32 (stride)
Output yolo blob names :
region_32
Total number of yolo layers: 76
Building yolo network complete!
Building the TensorRT Engine...
INFO: ../nvdsinfer/nvdsinfer_func_utils.cpp:37 [TRT]: Detected 1 inputs and 1 output network tensors.
Building complete!
0:01:01.142986737  3424 0x556ce6954b00 INFO                 nvinfer gstnvinfer.cpp:602:gst_nvinfer_logger:<primary_gie> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::buildModel() <nvdsinfer_context_impl.cpp:1624> [UID = 1]: serialize cuda engine to file: /opt/nvidia/deepstream/deepstream-5.0/sources/objectDetector_Yolo/model_b1_gpu0_fp32.engine successfully
WARNING: ../nvdsinfer/nvdsinfer_func_utils.cpp:34 [TRT]: Current optimization profile is: 0. Please ensure there are no enqueued operations pending in this context prior to switching profiles
INFO: ../nvdsinfer/nvdsinfer_model_builder.cpp:685 [Implicit Engine Info]: layers num: 2
0   INPUT  kFLOAT data            3x608x608       
1   OUTPUT kFLOAT region_32       425x19x19       

0:01:01.148922390  3424 0x556ce6954b00 INFO                 nvinfer gstnvinfer_impl.cpp:311:notifyLoadModelStatus:<primary_gie> [UID 1]: Load new model:/opt/nvidia/deepstream/deepstream-5.0/sources/objectDetector_Yolo/config_infer_primary_yoloV2.txt sucessfully

Runtime commands:
        h: Print this help
        q: Quit

        p: Pause
        r: Resume

NOTE: To expand a source in the 2D tiled display and view object details, left-click on the source.
      To go back to the tiled display, right-click anywhere on the window.


**PERF: FPS 0 (Avg)
**PERF: 0.00 (0.00)
** INFO: <bus_callback:181>: Pipeline ready

**PERF: 0.00 (0.00)
** INFO: <bus_callback:167>: Pipeline running

**PERF: 47.35 (3.93)
**PERF: 0.00 (1.63)
**PERF: 0.00 (1.03)
**PERF: 0.00 (0.75)
  • There’s no failure until this point, I can monitor the stream entering the system and works well if I use the rtspsink, but the above issue when run with rtmpsink. (You see the stream doesn’t penetrate through the pipeline)

  • The source config:

[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP
type=2
uri=rtmp://localhost:1935/show/stream_0
num-sources=1
#drop-frame-interval=2
gpu-id=0
# (0): memtype_device   - Memory type Device
# (1): memtype_pinned   - Memory type Host Pinned
# (2): memtype_unified  - Memory type Unified
cudadec-memtype=0

Please have a look and let me know in case I missed something out.

Thanks!

@Fiona.Chen Any updates on this?

I’ve tried your code and configuration, the source is rtmp and sink is also rtmp, it seems work well.

1 Like

Not sure what might have led to the issue that I’m facing. Can you share the config that you’re using? That might help I guess.

Thanks for the confirmation.

Just using the modified source4_1080p_dec_infer-resnet_tracker_sgie_tiled_display_int8.txt in Deepstream-app sample. The source and sink changed as your configuration.

1 Like

I noticed that after making a few changes to the config, the fps would later increase to come in sync with the source.

Thanks for all the help.