Dynamically take video+audio based on inference

Hi, I’m trying to dynamically take a capture based on whether someone is in the frame or not. Unfortunately it looks like I can’t use smart record as it does not support audio, and hence I need to add and remove elements to my pipeline dynamically.

I actually got this to work, with a catch. My original pipeline was as follows:

// nvarguscamerasrc ->nvvideoconver->capsfilter->nvstreammux->nvinfer->nvvideoconvert->nvdsosd->nvvideoconvert->| tee_post_osd->queue_pre_sink->nvegltransform->nveglglessink

This worked fine when I dynamically attached to the tee_post_osd, however, given that my FPS = 120, the video output is slightly “delayed” - this causes problems since my audio and video will no longer be in sync.

My other thought was to create the following pipeline,

camera -> tee-> queue -> nvvideoconvert -> caps -> streammux -> nvinfer->nvvideoconvert->nvdsosd->nvvideoconvert->nvegltransform->nveglglessink (x monitor)

This crashes with the following output whenever I try to dynamically attach to the tee:

NvMMLiteBlockCreate : Block : BlockType = 8
Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute:656 Failed to create CaptureSession

(misty-west-sw:12344): GStreamer-CRITICAL **: 00:52:32.037: gst_mini_object_set_qdata: assertion ‘object != NULL’ failed
CONSUMER: Done Success
NVMEDIA: H265 : Profile : 1
NVMEDIA_ENC: bBlitMode is set to TRUE

I have attached the important parts of the code below, is there a way I can do what I want?

#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include <cuda_runtime_api.h>
#include "gstnvdsmeta.h"
#include "gst-nvdssr.h"
#include "pthread.h"
#include "time.h"
#include <unistd.h>

GST_DEBUG_CATEGORY (NVDS_APP);

//----------------------------------- Statics 
/* Whether bounding boxes are drawn in recorded files (CLI flag -e).
 * NOTE(review): G_OPTION_ARG_INT writes an int; gboolean is an int in
 * GLib so this works, but G_OPTION_ARG_NONE is the usual choice for a flag. */
static gboolean bbox_enabled = TRUE;
static GOptionEntry entries[] = {
  {"enable", 'e', 0, G_OPTION_ARG_INT, &bbox_enabled,
      "Display Bounding box in recorded files", NULL}
  ,
  {NULL}
  ,
};

/* Frame counter and class-id -> label table (unused in the visible code). */
static gint frame_number = 0;
static gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
  "Roadsign"
};

/* DeepStream smart-record context — unused here since smart record does
 * not support audio (the reason for the manual dynamic branch below). */
static NvDsSRContext *ctx = NULL;

/* Pipeline-wide handles shared between main() and the dynamic
 * attach/detach callbacks (start_recording/stop_recording/unlink_cb). */
static GMainLoop *loop;
static GstElement *pipeline, *tee_smart_cap, *mmux, *tee_aud, *fakesink, *audioconvert, *vorbisenc, *p_nvvidconv, 
     *encoder, *parser, *filesink_a, *filesink_v, *queue_record, *queue_aud, *matmux_a, *matmux_v;
static GstBus *bus;
/* Request pads obtained from the video/audio tees while recording. */
static GstPad *teepad, *teepad_aud;
static gboolean recording = FALSE;
/* Suffix for the recorded file names (never incremented in visible code). */
static int counter;
GstElement *source = NULL;

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  /* Bus watch: quit the main loop on end-of-stream or on the first error,
   * printing the error and any debug details. */
  GMainLoop *main_loop = (GMainLoop *) data;
  GstMessageType type = GST_MESSAGE_TYPE (msg);

  if (type == GST_MESSAGE_EOS) {
    g_print ("End of stream\n");
    g_main_loop_quit (main_loop);
  } else if (type == GST_MESSAGE_ERROR) {
    GError *err = NULL;
    gchar *dbg_info = NULL;

    gst_message_parse_error (msg, &err, &dbg_info);
    g_printerr ("ERROR from element %s: %s\n",
        GST_OBJECT_NAME (msg->src), err->message);
    if (dbg_info)
      g_printerr ("Error details: %s\n", dbg_info);
    g_free (dbg_info);
    g_error_free (err);
    g_main_loop_quit (main_loop);
  }

  return TRUE;
}

/* IDLE pad-probe callback: detaches and tears down the dynamic recording
 * branches (video: queue->encoder->parser->mux->filesink, audio:
 * queue->vorbisenc->mux->filesink).  Runs while the tee src pad is idle so
 * no buffer is in flight.  Returns GST_PAD_PROBE_REMOVE so the probe fires
 * only once. */
static GstPadProbeReturn
unlink_cb (GstPad * pad, GstPadProbeInfo * info, gpointer user_data)
{
  GstPad *sinkpad;

  g_print ("Unlinking...");

  /* Detach both branches from their tees. */
  sinkpad = gst_element_get_static_pad (queue_record, "sink");
  gst_pad_unlink (teepad, sinkpad);
  gst_object_unref (sinkpad);

  sinkpad = gst_element_get_static_pad (queue_aud, "sink");
  gst_pad_unlink (teepad_aud, sinkpad);
  gst_object_unref (sinkpad);

  /* Push EOS into both encoders so the matroska muxers finalize their
   * files — without this the .mkv output would be unreadable. */
  gst_element_send_event (vorbisenc, gst_event_new_eos ());
  gst_element_send_event (encoder, gst_event_new_eos ());

  /* NOTE(review): sleeping is racy — EOS may not have reached the
   * filesinks yet.  Waiting for GST_MESSAGE_EOS from the sinks on the bus
   * would be the reliable way to know the files are closed. */
  usleep (150000);

  /* Bug fix: bring the branch elements to NULL *before* removing and
   * unreffing them, so no streaming thread is still active inside them.
   * The original removed/NULLed/unreffed in a hazardous order. */
  gst_element_set_state (queue_record, GST_STATE_NULL);
  gst_element_set_state (queue_aud, GST_STATE_NULL);
  gst_element_set_state (encoder, GST_STATE_NULL);
  gst_element_set_state (parser, GST_STATE_NULL);
  gst_element_set_state (vorbisenc, GST_STATE_NULL);
  gst_element_set_state (matmux_a, GST_STATE_NULL);
  gst_element_set_state (matmux_v, GST_STATE_NULL);
  gst_element_set_state (filesink_a, GST_STATE_NULL);
  gst_element_set_state (filesink_v, GST_STATE_NULL);

  gst_bin_remove (GST_BIN (pipeline), queue_record);
  gst_bin_remove (GST_BIN (pipeline), queue_aud);
  gst_bin_remove (GST_BIN (pipeline), encoder);
  gst_bin_remove (GST_BIN (pipeline), parser);
  gst_bin_remove (GST_BIN (pipeline), matmux_a);
  gst_bin_remove (GST_BIN (pipeline), matmux_v);
  gst_bin_remove (GST_BIN (pipeline), filesink_a);
  gst_bin_remove (GST_BIN (pipeline), filesink_v);
  gst_bin_remove (GST_BIN (pipeline), vorbisenc);

  /* gst_bin_remove() dropped the bin's reference; these unrefs release
   * the extra gst_object_ref() taken when the branch was added in
   * start_recording(). */
  gst_object_unref (queue_record);
  gst_object_unref (queue_aud);
  gst_object_unref (encoder);
  gst_object_unref (parser);
  gst_object_unref (filesink_a);
  gst_object_unref (filesink_v);
  gst_object_unref (vorbisenc);
  gst_object_unref (matmux_a);
  gst_object_unref (matmux_v);

  /* Return the tee request pads and drop our references to them. */
  gst_element_release_request_pad (tee_aud, teepad_aud);
  gst_element_release_request_pad (tee_smart_cap, teepad);
  gst_object_unref (teepad);
  gst_object_unref (teepad_aud);

  return GST_PAD_PROBE_REMOVE;
}

/* Schedules teardown of the dynamic recording branches.  The actual
 * unlinking is deferred to an IDLE pad probe (unlink_cb) so that it runs
 * while no buffer is in flight on the tee's src pad. */
static void
stop_recording (void)
{
  g_print ("stop Recording\n");
  /* Bug fix: user_data is NULL, so the original `(GDestroyNotify) g_free`
   * was a misleading no-op — pass NULL for the destroy notify instead. */
  gst_pad_add_probe (teepad, GST_PAD_PROBE_TYPE_IDLE, unlink_cb, NULL, NULL);
}

static
void start_recording() {
	g_print("start Recording\n");
	GstPad *sinkpad;
	GstPadTemplate *templ;
	
  /* get video T pad */
  templ = gst_element_class_get_pad_template(GST_ELEMENT_GET_CLASS(tee_smart_cap), "src_%u");
	teepad = gst_element_request_pad(tee_smart_cap, templ, NULL, NULL);

  /* get audio T pad */
	templ      = gst_element_class_get_pad_template(GST_ELEMENT_GET_CLASS(tee_aud), "src_%u");
	teepad_aud = gst_element_request_pad(tee_aud, templ, NULL, NULL);

  /* Dynamic Video pipeline */
	queue_record = gst_element_factory_make( "queue"         , "queue_record");
	encoder      = gst_element_factory_make( "nvv4l2h265enc" , NULL          );
	parser       = gst_element_factory_make( "h265parse"     , NULL          );
       matmux_v     = gst_element_factory_make( "matroskamux"   , "muxy-mat-vid"    );
	filesink_v   = gst_element_factory_make( "filesink"      , NULL          );

  /* Dynamic audio pipeline */
     queue_aud    = gst_element_factory_make( "queue"         , "que-ad"      );
      vorbisenc    = gst_element_factory_make( "vorbisenc"     , "vorb-1"      );
      matmux_a     = gst_element_factory_make( "matroskamux"   , "muxy-mat-aud"    );
      filesink_a   = gst_element_factory_make( "filesink"      , NULL          );
 
	char *file_name = (char*) malloc(255 * sizeof(char));
	sprintf(file_name, "%s%daudio.mkv", "/home/sam/merge/", counter);
	g_object_set(filesink_a, "location", file_name, NULL);
	sprintf(file_name, "%s%dvideo.mkv", "/home/sam/merge/", counter);
	g_object_set(filesink_v, "location", file_name, NULL);
	free(file_name);
	
  g_object_set(filesink_v, "sync", false, NULL);

	gst_bin_add_many(GST_BIN(pipeline),
                   gst_object_ref(queue_record),
                   gst_object_ref(queue_aud),
                   gst_object_ref(encoder),
                   gst_object_ref(parser),
                   gst_object_ref(vorbisenc),
                   gst_object_ref(matmux_a),
                   gst_object_ref(matmux_v),
                   gst_object_ref(filesink_a),
                   gst_object_ref(filesink_v),
                   NULL);

  /* add the video and audio elements */
	gst_element_link_many(queue_record, encoder, parser, matmux_v, filesink_v, NULL);
	if (!gst_element_link_many(queue_aud, vorbisenc, matmux_a, filesink_a, NULL)){
    g_print("Failed! to link elements");
  } 

	gst_element_sync_state_with_parent(queue_record);
	gst_element_sync_state_with_parent(queue_aud);
	gst_element_sync_state_with_parent(encoder);
	gst_element_sync_state_with_parent(parser);
        gst_element_sync_state_with_parent(vorbisenc);
	gst_element_sync_state_with_parent(matmux_a);
	gst_element_sync_state_with_parent(matmux_v);
	gst_element_sync_state_with_parent(filesink_a);
	gst_element_sync_state_with_parent(filesink_v);
 
	sinkpad = gst_element_get_static_pad(queue_record, "sink");
	gst_pad_link(teepad, sinkpad);
	gst_object_unref(sinkpad);

	sinkpad = gst_element_get_static_pad(queue_aud, "sink");
	gst_pad_link(teepad_aud, sinkpad);
	gst_object_unref(sinkpad);

  g_print("Recording!");
	recording = TRUE;
}

void sigintHandler(int unused) {
	g_print("You ctrl-c!\n");
  static bool rec;
	if (rec)
		stop_recording();
	else{
		start_recording();
    rec = true;
  }
}

// Video pipeline
// camera -> tee|-> queue -> nvvideoconvert -> caps -> streammux -> nvinfer->nvvideoconvert->nvdsosd->nvvideoconvert->nvegltransform->nveglglessink (x monitor)
//    (dynamic) |-> queue -> encoder -> parser -> matroskamux -> filesink
int
main (int argc, char *argv[])
{
	signal(SIGINT, sigintHandler);
  
  GMainLoop *loop = NULL;
  GstElement  *h264parser = NULL,
      *decoder = NULL, *streammux = NULL, *sink = NULL, *pgie = NULL, *nvvidconv = NULL,
      *nvosd = NULL;

  GstElement *transform = NULL;
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *osd_sink_pad = NULL;
  GstCaps *caps = NULL, *caps_filter_src = NULL;
  GstElement *vidconvsrc, *nvvidconvsrc, *caps_nvvidconvsrc, *queue_post_osd, *queue_post_tee;  
  GstElement *encoder_post_osd, *parser_post_osd, *nvvidconv2;

  int current_device = -1;
  cudaGetDevice(&current_device);
  struct cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, current_device);

  GOptionContext *gctx = NULL;
  GOptionGroup *group = NULL;
  GError *error = NULL;

  NvDsSRInitParams params = { 0 };

  gctx = g_option_context_new ("Nvidia DeepStream Test-SR app");
  group = g_option_group_new ("SR_test", NULL, NULL, NULL, NULL);
  g_option_group_add_entries (group, entries);

  g_option_context_set_main_group (gctx, group);
  g_option_context_add_group (gctx, gst_init_get_option_group ());

  GST_DEBUG_CATEGORY_INIT (NVDS_APP, "NVDS_APP", 0, NULL);

  if (!g_option_context_parse (gctx, &argc, &argv, &error)) {
    g_printerr ("%s", error->message);
    g_print ("%s", g_option_context_get_help (gctx, TRUE, NULL));
    return -1;
  }

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest1-pipeline");

  /* Source element for reading from the camera */
  source            = gst_element_factory_make ( "nvarguscamerasrc", "nv-nvarguscamerasrc" );
  vidconvsrc        = gst_element_factory_make ( "videoconvert"    , "convertor_src1"      );
  nvvidconvsrc      = gst_element_factory_make ( "nvvideoconvert"  , "convertor_src2"      );
  caps_nvvidconvsrc = gst_element_factory_make ( "capsfilter"      , "nvmm_caps"           );
  p_nvvidconv       = gst_element_factory_make ( "nvvideoconvert" , "nvvidconv-1"          );

  tee_smart_cap     = gst_element_factory_make ( "tee"             , "tee-post-osd"        );
  queue_post_tee    = gst_element_factory_make ( "queue"           , "queue-pre-osd"       );
  nvvidconv2        = gst_element_factory_make ( "nvvideoconvert"  , "nvvideo-converter2"  );
  

  caps_filter_src = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12, width=1280, height=720, framerate=120/1");
  g_object_set (G_OBJECT (caps_nvvidconvsrc ), "caps", caps_filter_src, NULL);
  gst_caps_unref (caps_filter_src);

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

  if (!pipeline || !streammux) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* audio pipeline */
  GstElement *asource, *aqueue2, *audioconvert, *capsfilter, *vorb;
  asource            = gst_element_factory_make ( "alsasrc"      , "audio-source"   );
  tee_aud            = gst_element_factory_make ( "tee"          , "tee-aud"        );
  audioconvert    = gst_element_factory_make ( "audioconvert" , "audio-conv"     );
  aqueue2            = gst_element_factory_make ( "queue"        , "audio-queue2"   );
  capsfilter         = gst_element_factory_make ( "capsfilter"   , "capsfilter"     );
  fakesink           = gst_element_factory_make ( "fakesink"     , "fakesnink-1"    );

  g_object_set (G_OBJECT (asource), "device", "hw:tegrasndt210ref,0", NULL);
  caps = gst_caps_from_string("audio/x-raw,format=S32LE,rate=96000,channnels=2");
  g_object_set (capsfilter, "caps", caps, NULL); 
 
 gst_bin_add_many (GST_BIN (pipeline),  asource,  capsfilter,  audioconvert, tee_aud, aqueue2, fakesink, NULL);
 if (!gst_element_link_many (           asource,  capsfilter,  audioconvert, tee_aud, aqueue2, fakesink, NULL)) {
    g_printerr ("Elements could not be linked: 1. Exiting.\n");
    return -1;
  }

  pgie              = gst_element_factory_make ("nvinfer"        , "primary-nvinference-engine");
  nvvidconv   = gst_element_factory_make ("nvvideoconvert" , "nvvideo-converter");
  nvosd           = gst_element_factory_make ("nvdsosd"        , "nv-onscreendisplay");
  transform = gst_element_factory_make ("nvegltransform" , "nvegl-transform");
  sink              = gst_element_factory_make ("nveglglessink"  , "nvvideo-renderer");
 
  /* set GOBJECTS params */
  g_object_set (G_OBJECT (source)   , "bufapi-version", 1, NULL);
  g_object_set (G_OBJECT (streammux), "width", 1280, NULL);
  g_object_set (G_OBJECT (streammux), "height", 720, NULL);
  g_object_set (G_OBJECT (streammux), "batch-size", 1, NULL);
  g_object_set (G_OBJECT (streammux), "live-source", 1, NULL);
 

  /* Set all the necessary properties of the nvinfer element,
   * the necessary ones are : */
  g_object_set (G_OBJECT (pgie), "config-file-path", "dstest1_pgie_config.txt", NULL);
  g_object_set (G_OBJECT (sink), "sync", 0, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* we add all elements into the pipeline */
  if(prop.integrated) {
    gst_bin_add_many (GST_BIN (pipeline),  
        source, tee_smart_cap, queue_post_tee, vidconvsrc, nvvidconvsrc, caps_nvvidconvsrc, streammux, pgie, p_nvvidconv,
        nvvidconv, nvosd, nvvidconv2,  transform, sink, NULL);
  }

  GstPad *sinkpad, *srcpad;
  gchar pad_name_sink[16] = "sink_0";
  gchar pad_name_src[16] = "src";

  sinkpad = gst_element_get_request_pad (streammux, pad_name_sink);
  if (!sinkpad) {
    g_printerr ("Streammux request sink pad failed. Exiting.\n");
    return -1;
  }

  srcpad = gst_element_get_static_pad (caps_nvvidconvsrc, pad_name_src);
  if (!srcpad) {
    g_printerr ("Decoder request src pad failed. Exiting.\n");
    return -1;
  }

  if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr ("Failed to link decoder to stream muxer. Exiting.\n");
      return -1;
  }

  gst_object_unref (sinkpad);
  gst_object_unref (srcpad);


#if 1 /* this configuration fails to dynamically link */
  if (!gst_element_link_many (source, tee_smart_cap, queue_post_tee, p_nvvidconv, caps_nvvidconvsrc, NULL)) {
    g_printerr ("Elements could not be linked: 1. Exiting.\n");
    return -1;
  }

  if (!gst_element_link_many (streammux, pgie,
      nvvidconv, nvosd, nvvidconv2, transform, sink, NULL)) {
    g_printerr ("Elements could not be linked: 2. Exiting.\n");
    return -1;
  }
#else /* this configuration works, but output is skewed */ 
  if (!gst_element_link_many (source, p_nvvidconv, caps_nvvidconvsrc, NULL)) {
    g_printerr ("Elements could not be linked: 1. Exiting.\n");
    return -1;
  }

  if (!gst_element_link_many (streammux, pgie,
      nvvidconv, nvosd, nvvidconv2, tee_smart_cap, queue_post_tee, transform, sink, NULL)) {
    g_printerr ("Elements could not be linked: 2. Exiting.\n");
    return -1;
  }
#endif 

  /* Set the pipeline to "playing" state */
  g_print ("Now playing: %s\n", argv[1]);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}

Sorry for late response, it may take some time to investigate your code.

Hi Fiona,

I ended up getting it to work using GST interpipe,

Thanks for the reply though!

This topic was automatically closed 60 days after the last reply. New replies are no longer allowed.