Hi DaneLLL
gst-inspect-1.0 is helpful, thank you. I also came across gstreamer basic tutorial 3, and your previous posts for similar problems as mine:
https://devtalk.nvidia.com/default/topic/1058754/deepstream-sdk/how-to-change-source-pixel-format-from-yuyv-to-mjpg/post/5369809/#5369809
https://devtalk.nvidia.com/default/topic/1061855/how-to-run-deepstream-test3-with-usb-camera/
(Also realised my mistake of commenting out gst_element_link_many method call)
From your previous advice of pipeline:
nvarguscamerasrc ! capsfilter caps='video/x-raw(memory:NVMM),width=1920,height=1080,format=NV12' ! nvvideoconvert ! nvstreammux ! nvinfer ! ...
My understanding is that the nvvideoconvert is required for to convert the data into (memory:NVMM) and NV12 formats for the sink pad at nvstreammux.
If the capsfilter is left out, the nvvideoconvert knows how about the required format for nvstreammux without any specification? Hence it only needs to be linked(with gst_element_link_many)?
Regardless, my attempt at including capsfilter and nvvideoconvert into the pipeline:
static gboolean
link_elements_with_filter (GstElement *element1, GstElement *element2)
{
gboolean link_ok;
GstCaps *caps;
caps = gst_caps_new_simple ("video/x-raw(memory:NVMM)",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"format", G_TYPE_STRING, "NV12",
"framerate", GST_TYPE_FRACTION, 120, 1,
NULL);
link_ok = gst_element_link_filtered (element1, element2, caps);
gst_caps_unref (caps);
if (!link_ok) {
g_warning ("Failed to link element1 and element2!");
}
return link_ok;
}
int
main (int argc, char *argv[])
{
GMainLoop *loop = NULL;
GstElement *pipeline = NULL, *source = NULL, *filter1 = NULL,
*sink = NULL, *pgie = NULL, *nvvidconv1 = NULL, *nvvidconv2 = NULL,
*nvosd = NULL, *nvstreammux;
GstElement *msgconv = NULL, *msgbroker = NULL, *tee = NULL;
GstElement *queue1 = NULL, *queue2 = NULL;
#ifdef PLATFORM_TEGRA
GstElement *transform = NULL;
#endif
GstBus *bus = NULL;
guint bus_watch_id;
GstPad *osd_sink_pad = NULL;
GstPad *tee_render_pad = NULL;
GstPad *tee_msg_pad = NULL;
GstPad *sink_pad = NULL;
GstPad *src_pad = NULL;
GOptionContext *ctx = NULL;
GOptionGroup *group = NULL;
GError *error = NULL;
GstCaps *caps1 = NULL;
ctx = g_option_context_new ("Nvidia DeepStream Test4");
group = g_option_group_new ("test4", NULL, NULL, NULL, NULL);
g_option_group_add_entries (group, entries);
g_option_context_set_main_group (ctx, group);
g_option_context_add_group (ctx, gst_init_get_option_group ());
if (!g_option_context_parse (ctx, &argc, &argv, &error)) {
g_option_context_free (ctx);
g_printerr ("%s", error->message);
return -1;
}
g_option_context_free (ctx);
/*if (!proto_lib || !input_file) {
g_printerr("missing arguments\n");
g_printerr ("Usage: %s -i <H264 filename> -p <Proto adaptor library> --conn-str=<Connection string>\n", argv[0]);
return -1;
} */
loop = g_main_loop_new (NULL, FALSE);
/* Create gstreamer elements */
/* Create Pipeline element that will form a connection of other elements */
pipeline = gst_pipeline_new ("dstest4-pipeline");
source = gst_element_factory_make ("nvarguscamerasrc","source");
nvvidconv1 = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter1");
nvstreammux = gst_element_factory_make ("nvstreammux", "nvstreammux");
/* Use nvinfer to run inferencing on decoder's output,
* behaviour of inferencing is set through config file */
pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");
/* Use convertor to convert from NV12 to RGBA as required by nvosd */
nvvidconv2 = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter2");
/* Create OSD to draw on the converted RGBA buffer */
nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");
/* Create msg converter to generate payload from buffer metadata */
msgconv = gst_element_factory_make ("nvmsgconv", "nvmsg-converter");
/* Create msg broker to send payload to server */
msgbroker = gst_element_factory_make ("nvmsgbroker", "nvmsg-broker");
/* Create tee to render buffer and send message simultaneously*/
tee = gst_element_factory_make ("tee", "nvsink-tee");
/* Create queues */
queue1 = gst_element_factory_make ("queue", "nvtee-que1");
queue2 = gst_element_factory_make ("queue", "nvtee-que2");
/* Finally render the osd output */
if (display_off) {
sink = gst_element_factory_make ("fakesink", "nvvideo-renderer");
} else {
sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
#ifdef PLATFORM_TEGRA
transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
if (!transform) {
g_printerr ("nvegltransform element could not be created. Exiting.\n");
return -1;
}
#endif
}
if (!pipeline || !source || !nvvidconv1 || !nvstreammux || !pgie
|| !nvvidconv2 || !nvosd || !msgconv || !msgbroker || !tee
|| !queue1 || !queue2 || !sink) {
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}
g_object_set (G_OBJECT (nvstreammux), "width", MUXER_OUTPUT_WIDTH, "height",
MUXER_OUTPUT_HEIGHT, "batch-size", 1,
"batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);
/* Set all the necessary properties of the nvinfer element,
* the necessary ones are : */
g_object_set (G_OBJECT (pgie),
"config-file-path", PGIE_CONFIG_FILE, NULL);
g_object_set (G_OBJECT(msgconv), "config", MSCONV_CONFIG_FILE, NULL);
g_object_set (G_OBJECT(msgconv), "payload-type", schema_type, NULL);
g_object_set (G_OBJECT(msgbroker), "proto-lib", proto_lib,
"conn-str", conn_str, "config", cfg_file, "topic", topic, "sync", FALSE, NULL);
g_object_set (G_OBJECT (sink), "sync", TRUE, NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
/* Set up the pipeline */
/* we add all elements into the pipeline */
gst_bin_add_many (GST_BIN (pipeline),
source, nvvidconv1, nvstreammux, pgie,
nvvidconv2, nvosd, tee, queue1, queue2, msgconv,
msgbroker, sink, NULL);
#ifdef PLATFORM_TEGRA
if (!display_off)
gst_bin_add (GST_BIN (pipeline), transform);
#endif
/* we link the elements together */
/* file-source -> nvvidconv1 -> nvstreammux ->
* nvinfer -> nvvidconv2 -> nvosd -> tee -> video-renderer
* |
* |-> msgconv -> msgbroker */
if(link_elements_with_filter(source,nvvidconv1) != TRUE){
g_printerr ("Elements source,sink could not be linked.\n");
return -1;
}
if (!gst_element_link_many (nvstreammux, pgie, nvvidconv2, nvosd, tee, NULL)) {
g_printerr ("2Elements could not be linked. Exiting.\n");
return -1;
}
if (!gst_element_link_many (queue1, msgconv, msgbroker, NULL)) {
g_printerr ("3Elements could not be linked. Exiting.\n");
return -1;
}
#ifdef PLATFORM_TEGRA
if (!display_off) {
if (!gst_element_link_many (queue2, transform, sink, NULL)) {
g_printerr ("Elements could not be linked. Exiting.\n");
return -1;
}
} else {
if (!gst_element_link (queue2, sink)) {
g_printerr ("Elements could not be linked. Exiting.\n");
return -1;
}
}
#else
if (!gst_element_link (queue2, sink)) {
g_printerr ("4Elements could not be linked. Exiting.\n");
return -1;
}
#endif
sink_pad = gst_element_get_static_pad (queue1, "sink");
tee_msg_pad = gst_element_get_request_pad (tee, "src_%u");
tee_render_pad = gst_element_get_request_pad (tee, "src_%u");
if (!tee_msg_pad || !tee_render_pad) {
g_printerr ("Unable to get request pads\n");
return -1;
}
if (gst_pad_link (tee_msg_pad, sink_pad) != GST_PAD_LINK_OK) {
g_printerr ("Unable to link tee and message converter\n");
gst_object_unref (sink_pad);
return -1;
}
gst_object_unref (sink_pad);
sink_pad = gst_element_get_static_pad (queue2, "sink");
if (gst_pad_link (tee_render_pad, sink_pad) != GST_PAD_LINK_OK) {
g_printerr ("Unable to link tee and render\n");
gst_object_unref (sink_pad);
return -1;
}
gst_object_unref (sink_pad);
/* Lets add probe to get informed of the meta data generated, we add probe to
* the sink pad of the osd element, since by that time, the buffer would have
* had got all the metadata. */
osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
if (!osd_sink_pad)
g_print ("Unable to get sink pad\n");
else
gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
osd_sink_pad_buffer_probe, NULL, NULL);
/* Set the pipeline to "playing" state */
g_print ("Now playing: %s\n", input_file);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Wait till pipeline encounters an error or EOS */
g_print ("Running...\n");
g_main_loop_run (loop);
/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
g_free (cfg_file);
g_free (input_file);
g_free (topic);
g_free (conn_str);
g_free (proto_lib);
/* Release the request pads from the tee, and unref them */
gst_element_release_request_pad (tee, tee_msg_pad);
gst_element_release_request_pad (tee, tee_render_pad);
gst_object_unref (tee_msg_pad);
gst_object_unref (tee_render_pad);
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
Presently the code compiles but ends with video not showing and data stream error. I am not sure what im doing wrongly. Please help!