RTSP capability for the redaction app

Output:

Failed to link pads. Exiting.

The following code has been adapted from the sample at the link below, since the original doesn’t support RTSP. I hate to sound desperate, but I need to finish this by the end of the week.

https://github.com/NVIDIA-AI-IOT/redaction_with_deepstream/blob/master/deepstream_redaction_app.c

#include <gst/gst.h>
#include <gst/gstmessage.h>
#include <glib.h>
#include <stdio.h>
#include <time.h>
#include <gstnvdsmeta.h>

/* Define global variables:
 * `frame_number` & `pgie_classes_str` are used for writing meta to KITTI;
 * `pgie_config` & `input_mp4` are file paths parsed from the command line
 * (`output_mp4` & `output_kitti` remain from the original app). */
clock_t t_start;
clock_t t_end;

GstElement *pipeline, *video_full_processing_bin;
gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "face", "license_plate", "make", "model" };

gchar *pgie_config = NULL;
gchar *input_mp4 = NULL;
gchar *output_mp4 = NULL;
gchar *output_kitti = NULL;

/* initialize our gstreamer components for the app */

GMainLoop *loop = NULL;
GstElement *source = NULL, *decoder = NULL, *streammux = NULL,
    *queue_pgie = NULL, *pgie = NULL, *nvvidconv_osd =
    NULL, *osd = NULL, *queue_sink = NULL, *nvvidconv_sink =
    NULL, *filter_sink = NULL, *videoconvert = NULL, *encoder = NULL, *muxer =
    NULL, *sink = NULL, *nvvidconv_src = NULL, *vidconv_src=NULL, *filter_src=NULL;

#ifdef PLATFORM_TEGRA
  GstElement *transform = NULL;
#endif
GstBus *bus = NULL;
guint bus_watch_id;
GstCaps *caps_filter_osd = NULL, *caps_filter_sink = NULL, *caps_filter_src = NULL;
gulong osd_probe_id = 0;
GstPad *osd_sink_pad = NULL, *videopad = NULL;

/* osd_sink_pad_buffer_probe extracts the metadata received on the OSD sink pad,
 * updates the params for drawing the rectangles, and writes bboxes to a KITTI file. */
static GstPadProbeReturn
osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{

  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsObjectMeta *obj_meta = NULL;
  NvDsMetaList * l_frame = NULL;
  NvDsMetaList * l_obj = NULL;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
  FILE *bbox_params_dump_file = NULL;
  gchar bbox_file[1024] = { 0 };

  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {

    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);

    if (frame_meta == NULL) {
      g_print ("NvDS Meta contained NULL meta \n");
      frame_number++;
      return GST_PAD_PROBE_OK;
    }

    if (output_kitti) {
      g_snprintf (bbox_file, sizeof (bbox_file) - 1, "%s/%06d.txt",
          output_kitti, frame_number);
      bbox_params_dump_file = fopen (bbox_file, "w");
    }

    for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
        l_obj = l_obj->next) {
      obj_meta = (NvDsObjectMeta *) (l_obj->data);

      NvOSD_RectParams * rect_params = &(obj_meta->rect_params);
      NvOSD_TextParams * text_params = &(obj_meta->text_params);

      if (text_params->display_text) {
        text_params->set_bg_clr = 0;
        text_params->font_params.font_size = 0;
      }

      /* Draw black patch to cover license plates (class_id = 1) */
      if (obj_meta->class_id == 1) {
        rect_params->border_width = 0;
        rect_params->has_bg_color = 1;
        rect_params->bg_color.red = 0.0;
        rect_params->bg_color.green = 0.0;
        rect_params->bg_color.blue = 0.0;
        rect_params->bg_color.alpha = 1.0;
      }
      /* Draw skin-color patch to cover faces (class_id = 0) */
      if (obj_meta->class_id == 0) {
        rect_params->border_width = 0;
        rect_params->has_bg_color = 1;
        rect_params->bg_color.red = 0.92;
        rect_params->bg_color.green = 0.75;
        rect_params->bg_color.blue = 0.56;
        rect_params->bg_color.alpha = 1.0;
      }
      if (bbox_params_dump_file) {
        int left = (int) (rect_params->left);
        int top = (int) (rect_params->top);
        int right = left + (int) (rect_params->width);
        int bottom = top + (int) (rect_params->height);
        int class_index = obj_meta->class_id;
        char *text = pgie_classes_str[obj_meta->class_id];
        fprintf (bbox_params_dump_file,
            "%s 0.0 0 0.0 %d.00 %d.00 %d.00 %d.00 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n",
            text, left, top, right, bottom);
      }

    }
    if (bbox_params_dump_file) {
      fclose (bbox_params_dump_file);
      bbox_params_dump_file = NULL;
    }
  }
  frame_number++;
  return GST_PAD_PROBE_OK;
}

/* This bus callback detects errors and state changes on the main pipeline,
 * then prints messages, exports pipeline graphs, or terminates the pipeline accordingly. */
static gboolean bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  GstMessageType msgType = GST_MESSAGE_TYPE (msg);
  if(msgType==GST_MESSAGE_EOS)
  {
      g_print ("End of stream\n");
      t_end = clock();
      clock_t t = t_end - t_start;
      double time_taken = ((double)t)/CLOCKS_PER_SEC; // in seconds
      double fps = frame_number/time_taken;
      g_print("\nThe program took %.2f seconds to redact %d frames, pref = %.2f fps \n\n", time_taken,frame_number,fps);

      g_main_loop_quit (loop);
  }
  else if(msgType==GST_MESSAGE_ERROR)
  {
      gchar *debug = NULL;
      GError *error = NULL;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n", GST_OBJECT_NAME (msg->src), error->message);
      if (debug)
        g_printerr ("Debug info: %s\n", debug);
      g_free (debug);
      g_error_free (error);
      g_main_loop_quit (loop);
  }
  else if(msgType==GST_MESSAGE_STATE_CHANGED)
  {
      GstState oldstate, newstate;
      gst_message_parse_state_changed (msg, &oldstate, &newstate, NULL);
      if (GST_ELEMENT (GST_MESSAGE_SRC (msg)) == pipeline)
      {
        switch (newstate)
        {
          case GST_STATE_PLAYING:
            g_print ("Pipeline running\n");
            GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipeline),
                GST_DEBUG_GRAPH_SHOW_ALL, "ds-app-playing");
            t_start = clock();
            break;
          case GST_STATE_PAUSED:
            if (oldstate == GST_STATE_PLAYING) {
              g_print ("Pipeline paused\n");
            }
            break;
          case GST_STATE_READY:
            // GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipeline), GST_DEBUG_GRAPH_SHOW_ALL,"ds-app-ready");
            if (oldstate == GST_STATE_NULL)
            {
              g_print ("Pipeline ready\n");
            } else {
              g_print ("Pipeline stopped\n");
            }
            break;
          default:
            break;
        }
      }
  }
  return TRUE;
}

/* This callback is invoked when decodebin exposes a video pad for the input mp4 file;
 * it then links that pad to the sink of the following video_full_processing_bin.
 * The link is made dynamically. */
static void
cb_newpad (GstElement * decodebin, GstPad * pad, gpointer data)
{
  GstCaps *caps;
  GstStructure *str;
  GstPad *videopad;
  /*only link once */
  videopad = gst_element_get_static_pad (video_full_processing_bin, "sink");
  if (GST_PAD_IS_LINKED (videopad)) {
    g_object_unref (videopad);
    return;
  }
  /* check media type */
  caps = gst_pad_query_caps (pad, NULL);
  str = gst_caps_get_structure (caps, 0);
  if (!g_strrstr (gst_structure_get_name (str), "video")) {
    gst_caps_unref (caps);
    gst_object_unref (videopad);
    return;
  }
  gst_caps_unref (caps);
  /* link’n’play */
  gst_pad_link (pad, videopad);
  g_object_unref (videopad);
}

static void
cb_new_rtspsrc_pad (GstElement * element, GstPad * pad, gpointer data)
{
    gchar *name;
    GstCaps * p_caps;
    gchar * description;
    GstElement *p_rtph264depay;

    name = gst_pad_get_name(pad);
    g_print("A new pad %s was created\n", name);

    /* set up a link for the newly created pad: the rtph264depay element
     * passed in as user data needs to be connected to it */
    p_caps = gst_pad_get_pad_template_caps (pad);

    description = gst_caps_to_string (p_caps);
    g_print ("Pad caps: %s\n", description);
    gst_caps_unref (p_caps);
    g_free (description);

    p_rtph264depay = GST_ELEMENT (data);

    /* try to link the pads */
    if (!gst_element_link_pads (element, name, p_rtph264depay, "sink"))
    {
        g_printerr ("Failed to link rtspsrc pad to rtph264depay\n");
    }

    g_free(name);
}

/* In the main function, we create the elements, configure them, assemble them into the pipeline, and run it */
int
main (int argc, char *argv[])
{
  /* initialize the app using GOption */
  GOptionContext *ctx;
  GError *err = NULL;
  GOptionEntry entries[] = {
    {"pgie_config", 'c', 0, G_OPTION_ARG_STRING, &pgie_config,
          "(required) configuration file for the nvinfer detector (primary gie)",
        NULL},
    {"input_mp4", 'i', 0, G_OPTION_ARG_STRING, &input_mp4,
          "(optional) path to input mp4 file. If this is unset then usb webcam will be used as input", NULL},
    {NULL},
  };
  ctx = g_option_context_new ("\n\n  Fast Video Redaction App using NVIDIA DeepStream. Contact: Shuo Wang (shuow@nvidia.com)");
  g_option_context_add_main_entries (ctx, entries, NULL);
  g_option_context_add_group (ctx, gst_init_get_option_group ());
  if (!g_option_context_parse (ctx, &argc, &argv, &err))
  {
    g_print ("Failed to initialize: %s\n", err->message);
    g_clear_error (&err);
    g_option_context_free (ctx);
    return 1;
  }
  g_option_context_free (ctx);

  /* Check input arguments */
  if (!pgie_config) {
    g_printerr ("missing pgie configuration file\n");
    g_printerr ("run: %s --help for usage instruction\n", argv[0]);
    return -1;
  }

  /* Create gstreamer loop */
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer pipeline */
  pipeline = gst_pipeline_new ("ds-redaction-pipeline");

  /* Create components for the input source */
  if (input_mp4) {
    source = gst_element_factory_make ("filesrc", "file-source");
    g_object_set (G_OBJECT (source), "location", input_mp4, NULL);
    decoder = gst_element_factory_make ("decodebin", "decoder");
    g_signal_connect (decoder, "pad-added", G_CALLBACK (cb_newpad), NULL);

    gst_bin_add_many (GST_BIN (pipeline), source, decoder, NULL);
    gst_element_link (source, decoder);
  }
  else
  {
    /*
    source = gst_element_factory_make ("v4l2src", "camera-source");
    g_object_set (G_OBJECT (source), "device", "/dev/video0", NULL);
    vidconv_src = gst_element_factory_make ("videoconvert", "vidconv_src");
    nvvidconv_src = gst_element_factory_make ("nvvideoconvert", "nvvidconv_src");
    filter_src = gst_element_factory_make ("capsfilter", "filter_src");
    g_object_set (G_OBJECT (nvvidconv_src), "nvbuf-memory-type", 0, NULL);
    caps_filter_src = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12, width=1280, height=720, framerate=30/1");
    g_object_set (G_OBJECT (filter_src), "caps", caps_filter_src, NULL);
    gst_caps_unref (caps_filter_src);
    gst_bin_add_many (GST_BIN (pipeline), source, vidconv_src, nvvidconv_src, filter_src, NULL);
    gst_element_link_many (source, vidconv_src, nvvidconv_src, filter_src, NULL);*/

    source = gst_element_factory_make ("rtspsrc","source");
    g_object_set (G_OBJECT (source), "latency",2000,NULL);
    g_object_set(GST_OBJECT(source),"location","rtsp://ashutosh:ashu1234@192.168.1.250:554/cam/realmonitor?channel=1",NULL);

    GstElement *rtppay = gst_element_factory_make ("rtph264depay", "depayl");
    GstElement *parse = gst_element_factory_make ("h264parse", "parse");
    filter_src = gst_element_factory_make ("capsfilter", "filter");
    GstElement *decodebin = gst_element_factory_make ("nvv4l2decoder", "decode");
    //GstElement *sink = gst_element_factory_make ("nveglglessink", "sink");
    //g_object_set (G_OBJECT (sink), "sync", FALSE, NULL);

    GstCaps *filtercaps = gst_caps_from_string ("application/x-rtp");
    g_object_set (G_OBJECT (filter_src), "caps", filtercaps, NULL);
    gst_caps_unref (filtercaps);

    /* rtspsrc creates its src pads dynamically, so connect the depayloader
     * through the pad-added callback instead of linking it statically */
    g_signal_connect (source, "pad-added", G_CALLBACK (cb_new_rtspsrc_pad), rtppay);

    gst_bin_add_many (GST_BIN (pipeline), source, rtppay, parse, decodebin, NULL);
    gst_element_link_many (rtppay, parse, decodebin, NULL);
  }

  /* Create main processing bin */
  video_full_processing_bin = gst_bin_new ("video-process-bin");

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
  g_object_set (G_OBJECT (streammux), "width", 1280, "height", 720, "batch-size", 1, "batched-push-timeout", 40000, NULL);
  g_object_set (G_OBJECT (streammux), "nvbuf-memory-type", 0, NULL);
  gchar pad_name_sink[16] = "sink_0";
  videopad = gst_element_get_request_pad (streammux, pad_name_sink);

  /* Use nvinfer to run inferencing on decoder's output,
   * behaviour of inferencing is set through config file.
   * Create components for the detection */
  queue_pgie = gst_element_factory_make ("queue", "queue_pgie");
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");
  g_object_set (G_OBJECT (pgie), "config-file-path", pgie_config, NULL);

  /* Use nvosd to render bbox/text on top of the input video.
   * Create components for the rendering */
  nvvidconv_osd = gst_element_factory_make ("nvvideoconvert", "nvvidconv_osd");
  osd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");

  /* Create components for the output */
  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
  g_object_set (sink, "sync", FALSE, "max-lateness", -1, "async", FALSE, "qos", TRUE, NULL);

  if (input_mp4)
  {
    g_object_set (sink, "sync", TRUE, NULL);
  }

  /* Check all components */
  if (!pipeline || !source || !video_full_processing_bin
      || !queue_pgie || !pgie || !nvvidconv_osd
      || !osd || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  gst_bin_add_many (GST_BIN (video_full_processing_bin), streammux, queue_pgie, pgie, nvvidconv_osd, osd, NULL);
  /* link the elements together */
  gst_element_link_many (streammux, queue_pgie, pgie, nvvidconv_osd, osd, NULL);
  gst_element_add_pad (video_full_processing_bin, gst_ghost_pad_new ("sink", videopad));
  gst_object_unref (videopad);
  gst_bin_add (GST_BIN (pipeline), video_full_processing_bin);

  /* link the source and video_full_processing_bin */
  if (!input_mp4){

    GstPad *sinkpad, *srcpad;

    sinkpad = gst_element_get_static_pad (video_full_processing_bin, "sink");
    if (!sinkpad) {
      g_printerr ("video_full_processing_bin request sink pad failed. Exiting.\n");
      return -1;
    }

    srcpad = gst_element_get_static_pad (filter_src, "src");
    if (!srcpad) {
      g_printerr ("filter_src request src pad failed. Exiting.\n");
      return -1;
    }
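    /* NOTE: filter_src is created above but never added to the pipeline or
     * linked to the decoder output, so the gst_pad_link call below fails
     * with a wrong-hierarchy error, producing the "Failed to link pads.
     * Exiting." message shown at the top of this post. */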

    if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
        g_printerr ("Failed to link pads. Exiting.\n");
        return -1;
    }

    gst_object_unref (sinkpad);
    gst_object_unref (srcpad);
  }

  gst_bin_add_many (GST_BIN (video_full_processing_bin), sink, NULL);
  gst_element_link (osd, sink);

  /* add probe to get informed of the meta data generated, we add probe to
   * the sink pad of the osd element, since by that time, the buffer would have
   * had got all the metadata. */
  osd_sink_pad = gst_element_get_static_pad (osd, "sink");
  if (!osd_sink_pad)
    g_print ("Unable to get sink pad\n");
  else
    osd_probe_id = gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, osd_sink_pad_buffer_probe, NULL, NULL);

  /* Set the pipeline to "playing" state */
  if (input_mp4)
  {
  	g_print ("Now playing: %s\n", input_mp4);
  }
  else
  {
  	g_print ("Now playing from webcam\n");
  }

  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_main_loop_run (loop);
  GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "ds-app-playing");

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;

}

Hi,
In the redaction_with_deepstream sample, the source is ‘filesrc ! decodebin’. You may replace it with uridecodebin. The code for using uridecodebin as a source is in

deepstream_sdk_v4.0.2_jetson\sources\apps\sample_apps\deepstream-test3
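
For context, a minimal sketch (untested) of that swap in this app, assuming the existing cb_newpad callback is kept; the URI below is a placeholder for your camera:

source = gst_element_factory_make ("uridecodebin", "uri-decode-bin");
g_object_set (G_OBJECT (source), "uri",
    "rtsp://user:pass@192.168.1.250:554/cam/realmonitor?channel=1", NULL);
/* uridecodebin exposes decoded pads dynamically, so link them to
 * video_full_processing_bin from the pad-added callback */
g_signal_connect (source, "pad-added", G_CALLBACK (cb_newpad), NULL);
gst_bin_add (GST_BIN (pipeline), source);

This replaces the entire rtspsrc ! rtph264depay ! h264parse ! nvv4l2decoder chain, since uridecodebin selects the depayloader and decoder automatically.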

I am honestly confused. Help me out a bit here… everything seems to be hidden away somewhere else.

a) Where is the code for drawing the boxes?
b) How can I fetch frames based on detections and crop them?
c) How can I skip frames, like OpenCV's grab method, to run the stream at 12 fps when the original stream is running at 24 fps?

Thanks.

Hi,
It is easier to run deepstream-app. You can modify the config file to switch to an RTSP source. The documentation is at
https://docs.nvidia.com/metropolis/deepstream/dev-guide/index.html

You may run the default config first, which uses a file source, and then refer to the following post to modify it for an RTSP source:
https://devtalk.nvidia.com/default/topic/1058086/deepstream-sdk/how-to-run-rtp-camera-in-deepstream-on-nano/post/5366807/#5366807
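
For reference, the change amounts to editing the [source0] group of the config file. A sketch based on the DeepStream 4.x source-group options (the URI is a placeholder):

[source0]
enable=1
# type=4 selects an RTSP source in the deepstream-app config
type=4
uri=rtsp://user:pass@192.168.1.250:554/cam/realmonitor?channel=1
num-sources=1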

The boxes are drawn by the nvdsosd plugin, which is not open source.

You can leverage the dsexample plugin. Please check

deepstream_sdk_v4.0.2_jetson\sources\gst-plugins\gst-dsexample\README
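
If you only need the crop rectangles, the coordinates are already in the object metadata walked by osd_sink_pad_buffer_probe above. A sketch of deriving them (the g_print is illustrative; actual pixel cropping requires mapping the NvBufSurface, which is what gst-dsexample demonstrates):

      /* inside the per-object loop of osd_sink_pad_buffer_probe:
       * derive a crop rectangle from the detector's bbox metadata */
      NvOSD_RectParams *r = &obj_meta->rect_params;
      gint crop_left = (gint) r->left;
      gint crop_top = (gint) r->top;
      gint crop_width = (gint) r->width;
      gint crop_height = (gint) r->height;
      g_print ("crop candidate %s: %dx%d at (%d,%d)\n",
          pgie_classes_str[obj_meta->class_id],
          crop_width, crop_height, crop_left, crop_top);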

Do you mean to play like 2x slow motion?

It’s not within the week you asked for, but I have a working version of what you’re asking for in my newest app’s dev branch:

https://github.com/mdegans/nvalhalla/tree/dev

It supports both input from RTSP streams (uridecodebin) and output to RTSP (Gst.RtspServer). Part of it (the redaction callback) is based on the deepstream_redaction_app. The RTSP server part is actually ported from a Python example.

It’s written in Genie, a Python-like syntax for Vala, which is recommended by the GStreamer project for those who want syntactic sugar on top of their C. It looks like Python, except for the callback that draws the boxes, which is copied and pasted directly from the redaction app.

It certainly is, but in my case I wanted to learn GStreamer. If that’s not your goal, deepstream-app is certainly the better and more mature option.