@mchi So, I am using this sample app => deepstream_reference_apps/back-to-back-detectors at master · NVIDIA-AI-IOT/deepstream_reference_apps · GitHub, and I have added code to write the output to a file:
int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL,
      *decoder = NULL, *streammux = NULL, *sink = NULL, *primary_detector = NULL,
      *secondary_detector = NULL, *nvvidconv = NULL, *nvosd = NULL,
      *queue_sink = NULL, *nvvidconv_sink = NULL, *filter_sink = NULL,
      *videoconvert = NULL, *encoder = NULL, *muxer = NULL;
  GstCaps *caps_filter_sink = NULL;
#ifdef PLATFORM_TEGRA
  GstElement *transform = NULL;
#endif
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *osd_sink_pad = NULL;

  /* Check input arguments. The output filename is optional: without it,
   * the pipeline renders to the screen instead of writing a file. */
  if (argc != 2 && argc != 3) {
    g_printerr ("Usage: %s <H264 filename> [output filename]\n", argv[0]);
    return -1;
  }
  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("pipeline");

  /* Source element for reading from the file */
  source = gst_element_factory_make ("filesrc", "file-source");

  /* Since the data format in the input file is elementary h264 stream,
   * we need a h264parser */
  h264parser = gst_element_factory_make ("h264parse", "h264-parser");

  /* Use nvv4l2decoder for hardware accelerated decode on GPU */
  decoder = gst_element_factory_make ("nvv4l2decoder", "nvv4l2-decoder");

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

  if (!pipeline || !streammux) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }
  /* Create two nvinfer instances for the two back-to-back detectors */
  primary_detector = gst_element_factory_make ("nvinfer",
      "primary-nvinference-engine1");
  secondary_detector = gst_element_factory_make ("nvinfer",
      "primary-nvinference-engine2");

  /* Use convertor to convert from NV12 to RGBA as required by nvosd */
  nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");

  /* Create OSD to draw on the converted RGBA buffer */
  nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");

  if (!source || !h264parser || !decoder || !primary_detector
      || !secondary_detector || !nvvidconv || !nvosd) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  if (argv[2]) {
    /* File output: convert to I420, encode with avenc_mpeg4, mux into a
     * QuickTime container and write it to the given location. */
    queue_sink = gst_element_factory_make ("queue", "queue_sink");
    nvvidconv_sink = gst_element_factory_make ("nvvideoconvert", "nvvidconv_sink");
    filter_sink = gst_element_factory_make ("capsfilter", "filter_sink");
    videoconvert = gst_element_factory_make ("videoconvert", "videoconverter");
    encoder = gst_element_factory_make ("avenc_mpeg4", "mp4-encoder");
    muxer = gst_element_factory_make ("qtmux", "muxer");
    sink = gst_element_factory_make ("filesink", "nvvideo-renderer");

    if (!queue_sink || !nvvidconv_sink || !filter_sink || !videoconvert
        || !encoder || !muxer || !sink) {
      g_printerr ("One element could not be created. Exiting.\n");
      return -1;
    }

    caps_filter_sink = gst_caps_from_string ("video/x-raw, format=I420");
    g_object_set (G_OBJECT (filter_sink), "caps", caps_filter_sink, NULL);
    gst_caps_unref (caps_filter_sink);
    g_object_set (G_OBJECT (encoder), "bitrate", 1000000, NULL);
    g_object_set (G_OBJECT (sink), "location", argv[2], NULL);
  } else {
    /* Finally render the osd output */
#ifdef PLATFORM_TEGRA
    transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
#endif
    sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");

    if (!sink) {
      g_printerr ("One element could not be created. Exiting.\n");
      return -1;
    }
#ifdef PLATFORM_TEGRA
    if (!transform) {
      g_printerr ("One tegra element could not be created. Exiting.\n");
      return -1;
    }
#endif
  }
  /* we set the input filename to the source element */
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);

  g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
      MUXER_OUTPUT_HEIGHT, "batch-size", 1,
      "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

  /* Set the config files for the two detectors. We demonstrate this by using
   * the same detector model twice but making them act as vehicle-only and
   * person-only detectors by adjusting the bbox confidence thresholds in the
   * two separate config files. */
  g_object_set (G_OBJECT (primary_detector), "config-file-path",
      "primary_detector_config.txt", "unique-id", PRIMARY_DETECTOR_UID, NULL);
  g_object_set (G_OBJECT (secondary_detector), "config-file-path",
      "secondary_detector_config.txt", "unique-id", SECONDARY_DETECTOR_UID,
      "process-mode", SECOND_DETECTOR_IS_SECONDARY ? 2 : 1, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);
  /* Set up the pipeline */
  /* we add all elements into the pipeline */
  if (argv[2]) {
    gst_bin_add_many (GST_BIN (pipeline),
        source, h264parser, decoder, streammux, primary_detector,
        secondary_detector, nvvidconv, nvosd, queue_sink, nvvidconv_sink,
        filter_sink, videoconvert, encoder, muxer, sink, NULL);
  } else {
#ifdef PLATFORM_TEGRA
    gst_bin_add_many (GST_BIN (pipeline),
        source, h264parser, decoder, streammux, primary_detector,
        secondary_detector, nvvidconv, nvosd, transform, sink, NULL);
#else
    gst_bin_add_many (GST_BIN (pipeline),
        source, h264parser, decoder, streammux, primary_detector,
        secondary_detector, nvvidconv, nvosd, sink, NULL);
#endif
  }
  GstPad *sinkpad, *srcpad;
  gchar pad_name_sink[16] = "sink_0";
  gchar pad_name_src[16] = "src";

  sinkpad = gst_element_get_request_pad (streammux, pad_name_sink);
  if (!sinkpad) {
    g_printerr ("Streammux request sink pad failed. Exiting.\n");
    return -1;
  }

  srcpad = gst_element_get_static_pad (decoder, pad_name_src);
  if (!srcpad) {
    g_printerr ("Decoder request src pad failed. Exiting.\n");
    return -1;
  }

  if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
    g_printerr ("Failed to link decoder to stream muxer. Exiting.\n");
    return -1;
  }

  gst_object_unref (sinkpad);
  gst_object_unref (srcpad);
  /* we link the elements together:
   * file-source -> h264-parser -> nvv4l2-decoder -> streammux ->
   * primary-detector -> secondary-detector -> nvvidconv -> nvosd ->
   * (file-writing branch or video-renderer) */
  if (!gst_element_link_many (source, h264parser, decoder, NULL)) {
    g_printerr ("Elements could not be linked: 1. Exiting.\n");
    return -1;
  }

  if (argv[2]) {
    if (!gst_element_link_many (streammux, primary_detector, secondary_detector,
            nvvidconv, nvosd, queue_sink, nvvidconv_sink, filter_sink,
            videoconvert, encoder, muxer, sink, NULL)) {
      g_printerr ("Elements could not be linked: 2. Exiting.\n");
      return -1;
    }
  } else {
#ifdef PLATFORM_TEGRA
    if (!gst_element_link_many (streammux, primary_detector, secondary_detector,
            nvvidconv, nvosd, transform, sink, NULL)) {
      g_printerr ("Elements could not be linked: 2. Exiting.\n");
      return -1;
    }
#else
    if (!gst_element_link_many (streammux, primary_detector, secondary_detector,
            nvvidconv, nvosd, sink, NULL)) {
      g_printerr ("Elements could not be linked: 2. Exiting.\n");
      return -1;
    }
#endif
  }
  /* Let's add a probe to get informed of the generated metadata; we add the
   * probe to the sink pad of the osd element, since by that time the buffer
   * will have all the metadata. */
  osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
  if (!osd_sink_pad)
    g_print ("Unable to get sink pad\n");
  else
    gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        osd_sink_pad_buffer_probe, NULL, NULL);

  /* Set the pipeline to "playing" state */
  g_print ("Now playing: %s\n", argv[1]);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till the pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}
But running this code gives me the following error:
Now playing: ../video.mp4
Creating LL OSD context new
0:00:00.983780642 593 0x55f0669096d0 WARN nvinfer gstnvinfer.cpp:515:gst_nvinfer_logger:<primary-nvinference-engine2> NvDsInferContext[UID 2]:useEngineFile(): Failed to read from model engine file
0:00:00.983806454 593 0x55f0669096d0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<primary-nvinference-engine2> NvDsInferContext[UID 2]:initialize(): Trying to create engine from model files
0:00:00.984057506 593 0x55f0669096d0 ERROR nvinfer gstnvinfer.cpp:511:gst_nvinfer_logger:<primary-nvinference-engine2> NvDsInferContext[UID 2]:generateTRTModel(): Cannot access caffemodel file '/root/deepstream_sdk_v4.0.2_x86_64/sources/apps/sample_apps/deepstream_reference_apps/back-to-back-detectors/../../../../samples/models/Secondary_FaceDetect/fd_lpd.caffemodel'
0:00:00.984082150 593 0x55f0669096d0 ERROR nvinfer gstnvinfer.cpp:511:gst_nvinfer_logger:<primary-nvinference-engine2> NvDsInferContext[UID 2]:initialize(): Failed to create engine from model files
0:00:00.984111980 593 0x55f0669096d0 WARN nvinfer gstnvinfer.cpp:692:gst_nvinfer_start:<primary-nvinference-engine2> error: Failed to create NvDsInferContext instance
0:00:00.984123949 593 0x55f0669096d0 WARN nvinfer gstnvinfer.cpp:692:gst_nvinfer_start:<primary-nvinference-engine2> error: Config file path: secondary_detector_config.txt, NvDsInfer Error: NVDSINFER_CONFIG_FAILED
Running...
ERROR from element primary-nvinference-engine2: Failed to create NvDsInferContext instance
Error details: gstnvinfer.cpp(692): gst_nvinfer_start (): /GstPipeline:pipeline/GstNvInfer:primary-nvinference-engine2:
Config file path: secondary_detector_config.txt, NvDsInfer Error: NVDSINFER_CONFIG_FAILED
Returned, stopping playback
Deleting pipeline
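From the error, nvinfer seems to resolve the model paths relative to the directory containing the config file, so I assume the relevant lines of secondary_detector_config.txt look something like this (reconstructed from the path in the log, not copied from my actual file):

[property]
model-file=../../../../samples/models/Secondary_FaceDetect/fd_lpd.caffemodel
proto-file=../../../../samples/models/Secondary_FaceDetect/fd_lpd.prototxt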
I did not update the config file as you asked, but rather used the code for writing an output file from this repository => redaction_with_deepstream/deepstream_redaction_app.c at master · NVIDIA-AI-IOT/redaction_with_deepstream · GitHub. I don't really understand what is going wrong; I know I am probably doing something wrong, as I have only been doing this for a couple of days. Any help is welcome. Thanks.
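In case it helps to narrow this down, I am thinking of adding a fail-fast check before building the pipeline, so that a missing model file aborts with a clear message instead of failing inside nvinfer (a minimal sketch using GLib's g_file_test; the helper name is mine and the path is copied from the error log above):

#include <glib.h>
#include <stdlib.h>

/* Hypothetical helper: fail fast when a model file referenced by a
 * detector config is missing, instead of waiting for nvinfer to fail
 * while going to PLAYING. */
static void
check_model_file (const gchar *path)
{
  if (!g_file_test (path, G_FILE_TEST_EXISTS)) {
    g_printerr ("Model file not found: %s\n", path);
    exit (-1);
  }
}

This could be called from main () right after gst_init (), e.g.
check_model_file ("../../../../samples/models/Secondary_FaceDetect/fd_lpd.caffemodel");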