This is how the pipeline is constructed:
/* Create gstreamer elements */
/* Create Pipeline element that will form a connection of other elements */
pipeline = gst_pipeline_new ("dstest1-pipeline");
/* Source element for reading from the file */
source = gst_element_factory_make ("filesrc", "file-source");
/* Since the data format in the input file is elementary h264 stream,
* we need a h264parser */
h264parser = gst_element_factory_make ("h264parse", "h264-parser");
/* Use nvv4l2decoder for hardware accelerated decode on GPU */
decoder = gst_element_factory_make ("nvv4l2decoder", "nvv4l2-decoder");
/* Create nvstreammux instance to form batches from one or more sources. */
streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
/* Use nvinfer to run inferencing on decoder's output,
* behaviour of inferencing is set through config file */
pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");
/* We need to have a tracker to track the identified objects */
nvtracker = gst_element_factory_make ("nvtracker", "tracker");
/* Use converter to convert from NV12 to RGBA as required by nvosd */
nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
/* Create OSD to draw on the converted RGBA buffer */
nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");
/* Second converter plus videoconvert to move the frames out of NVMM
 * (GPU) memory into a format the software encoder accepts */
nvvidconv1 = gst_element_factory_make ("nvvidconv", "nvvideo-converter1");
videoconvert = gst_element_factory_make ("videoconvert", "converter");
/* Encode to H264 in software and mux into an MP4 (QuickTime) container */
x264enc = gst_element_factory_make ("x264enc", "h264-encoder");
qtmux = gst_element_factory_make ("qtmux", "muxer");
/* sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer"); */
/* Write the encoded stream to a file instead of rendering it on screen */
sink = gst_element_factory_make ("filesink", "filesink");
/* caps filters to force the required formats at each conversion step:
 * nvosd expects RGBA input in NVMM memory, while the software encoding
 * path needs system-memory RGBA and then NV12 */
filter1 = gst_element_factory_make ("capsfilter", "filter1");
filter2 = gst_element_factory_make ("capsfilter", "filter2");
filter3 = gst_element_factory_make ("capsfilter", "filter3");
filter4 = gst_element_factory_make ("capsfilter", "filter4");
/* we set the input filename to the source element */
g_object_set (G_OBJECT (source), "location", argv[1], NULL);
/* we set the output filename on the file sink */
g_object_set (G_OBJECT (sink), "location", "out.mp4", NULL);
g_object_set (G_OBJECT (streammux), "batch-size", 1, NULL);
g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH,
    "height", MUXER_OUTPUT_HEIGHT,
    "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);
/* Set the necessary properties of the nvinfer element; the inference
 * behaviour is configured through the config file */
g_object_set (G_OBJECT (pgie),
    "config-file-path", "dstest1_pgie_config.txt", NULL);
/* Add all the elements into the pipeline */
gst_bin_add_many (GST_BIN (pipeline),
    source, h264parser, decoder, streammux, pgie, nvtracker,
    filter1, nvvidconv, filter2, nvosd, nvvidconv1, filter3,
    videoconvert, filter4, x264enc, qtmux, sink, NULL);
caps1 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12");
g_object_set (G_OBJECT (filter1), "caps", caps1, NULL);
gst_caps_unref (caps1);
caps2 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=RGBA");
g_object_set (G_OBJECT (filter2), "caps", caps2, NULL);
gst_caps_unref (caps2);
caps3 = gst_caps_from_string ("video/x-raw, format=RGBA");
g_object_set (G_OBJECT (filter3), "caps", caps3, NULL);
gst_caps_unref (caps3);
caps4 = gst_caps_from_string ("video/x-raw, format=NV12");
g_object_set (G_OBJECT (filter4), "caps", caps4, NULL);
gst_caps_unref (caps4);
if (!gst_element_link_many (source, h264parser, decoder, NULL)) {
  g_printerr ("Elements could not be linked: 1. Exiting.\n");
  return -1;
}
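/* Note (not shown in this snippet): unless it is done elsewhere in the
 * application, the decoder's src pad still has to be linked to a request
 * sink pad on nvstreammux (sink_0). The following is a minimal sketch of
 * the standard deepstream-test1 pattern; the sinkpad/srcpad variables are
 * local GstPad pointers introduced here for illustration. */
GstPad *sinkpad, *srcpad;
sinkpad = gst_element_get_request_pad (streammux, "sink_0");
if (!sinkpad) {
  g_printerr ("Streammux request sink pad failed. Exiting.\n");
  return -1;
}
srcpad = gst_element_get_static_pad (decoder, "src");
if (!srcpad) {
  g_printerr ("Decoder src pad failed. Exiting.\n");
  return -1;
}
if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
  g_printerr ("Failed to link decoder to stream muxer. Exiting.\n");
  return -1;
}
gst_object_unref (sinkpad);
gst_object_unref (srcpad);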
if (!gst_element_link_many (streammux, pgie, nvtracker,
        nvvidconv, filter2, nvosd, nvvidconv1, filter3,
        videoconvert, filter4, x264enc, qtmux, sink, NULL)) {
  /* nvvidconv, nvosd, transform, sink, NULL)) { */
  g_printerr ("Elements could not be linked: 2. Exiting.\n");
  return -1;
}
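With both chains linked and the decoder attached to the muxer, the rest of the application follows the usual deepstream-test1 flow (bus watch, setting the pipeline to PLAYING, running the main loop). Assuming the binary keeps the sample's deepstream-test1-app name, running it with an elementary H264 stream as the argument, e.g. ./deepstream-test1-app file.h264, writes the annotated video to out.mp4 in the working directory.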