Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) Nvidia Xavier
• DeepStream Version 5.1
• JetPack Version (valid for Jetson only)
• TensorRT Version
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type (questions, new requirements, bugs) Question
• How to reproduce the issue? (This is for bugs. Include which sample app is used, the content of the configuration files, the command line used, and other details for reproducing.)
Hello,
I'm trying to crop the camera video before it is fed into nvstreammux.
I am using deepstream-opencv-test as a base.
When I run:
gst-launch-1.0 uridecodebin uri="file:///opt/nvidia/deepstream/deepstream-5.1/samples/streams/sample_1080p_h264.mp4" ! queue ! nvvideoconvert src-crop=500:500:300:300 ! "video/x-raw(memory:NVMM),format=RGBA" ! queue ! nvstreammux0.sink_0 nvstreammux name=nvstreammux0 batch-size=1 live-source=TRUE width=500 height=500 ! queue ! nvdsosd process-mode=HW_MODE ! queue ! nvoverlaysink sync=false
everything works fine (nvinfer is omitted for simplicity).
But when I change the code in main(), I get this error:
Elements could not be linked: 0. Exiting.
which comes from this error check:
if (!gst_element_link_many (nvvidconv1, caps_filter1, streammux, pgie, nvvidconv, caps_filter, nvosd, transform, sink, NULL)) {
  g_printerr ("Elements could not be linked: 0. Exiting.\n");
  return -1;
}
With --gst-debug=4 I get:
0:00:00.236314673 8061 0x555ca0f6c0 ERROR nvstreammux gstnvstreammux.c:1222:gst_nvstreammux_request_new_pad: Pad should be named 'sink_%u' when requesting a pad
0:00:00.236414933 8061 0x555ca0f6c0 INFO GST_ELEMENT_PADS gstutils.c:1227:gst_element_get_compatible_pad: Could not find a compatible pad to link to capsfilter1:src
How can I solve this?
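From the log, my guess is that nvstreammux only exposes request pads named sink_%u, so gst_element_link_many() cannot create the link from caps_filter1 to streammux on its own. Maybe I have to request the pad myself and link caps_filter1 by hand, something like this untested sketch (the pad name sink_0 is my assumption):

/* Guess: request a sink pad from the muxer and link caps_filter1 to it
 * manually, then let gst_element_link_many() handle the rest of the chain. */
GstPad *mux_sinkpad = gst_element_get_request_pad (streammux, "sink_0");
GstPad *cf1_srcpad = gst_element_get_static_pad (caps_filter1, "src");
if (!mux_sinkpad || !cf1_srcpad ||
    gst_pad_link (cf1_srcpad, mux_sinkpad) != GST_PAD_LINK_OK) {
  g_printerr ("Failed to link caps_filter1 to streammux manually.\n");
  return -1;
}
gst_object_unref (cf1_srcpad);
gst_object_unref (mux_sinkpad);

But I am not sure this is the right approach, since in my code sink_0 is already requested for the source bin (see below).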
Here is my code snippet:
GMainLoop *loop = NULL;
GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *pgie = NULL,
    *nvvidconv = NULL, *nvvidconv1 = NULL, *caps_filter = NULL, *caps_filter1 = NULL,
    *nvosd = NULL;
GstElement *transform = NULL;
GstBus *bus = NULL;
guint bus_watch_id;
GstPad *osd_sink_pad = NULL;
int current_device = -1;
cudaGetDevice(&current_device);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, current_device);
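/* prop.integrated is non-zero on Jetson (Tegra); it is used below to decide
 * whether nvegltransform must sit in front of nveglglessink. */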
// Check input arguments
if (argc != 2) {
  g_printerr ("Usage: %s \n", argv[0]);
  return -1;
}
// Standard GStreamer initialization
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");
nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
nvvidconv1 = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter1");
caps_filter = gst_element_factory_make ("capsfilter", NULL);
caps_filter1 = gst_element_factory_make ("capsfilter", NULL);
nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");
// Finally render the osd output
if (prop.integrated) {
  transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
}
sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
if (!pgie || !nvvidconv || !nvvidconv1 || !caps_filter || !caps_filter1 || !nvosd || !sink) {
  g_printerr ("One element could not be created. Exiting.\n");
  return -1;
}
// Create gstreamer elements. Create Pipeline element that will form a connection of other elements
pipeline = gst_pipeline_new ("deepstream-opencv-test-pipeline");
// Create nvstreammux instance to form batches from one or more sources.
streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
if (!pipeline || !streammux) {
  g_printerr ("One element could not be created. Exiting.\n");
  return -1;
}
gst_bin_add (GST_BIN (pipeline), streammux);
GstElement *source_bin = create_source_bin (0, argv[1]);
if (!source_bin) {
  g_printerr ("Failed to create source bin. Exiting.\n");
  return -1;
}
gst_bin_add (GST_BIN (pipeline), source_bin);
GstPad *sinkpad, *srcpad;
gchar pad_name_sink[16] = "sink_0";
gchar pad_name_src[16] = "src";
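/* nvstreammux sink pads are request pads named sink_%u; sink_0 is
 * requested here for the source bin. */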
sinkpad = gst_element_get_request_pad (streammux, pad_name_sink);
if (!sinkpad) {
  g_printerr ("Streammux request sink pad failed. Exiting.\n");
  return -1;
}
srcpad = gst_element_get_static_pad (source_bin, pad_name_src);
if (!srcpad) {
  g_printerr ("Failed to get src pad of source bin. Exiting.\n");
  return -1;
}
if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
  g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
  return -1;
}
gst_object_unref (srcpad);
gst_object_unref (sinkpad);
if (!transform && prop.integrated) {
  g_printerr ("One tegra element could not be created. Exiting.\n");
  return -1;
}
g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
    MUXER_OUTPUT_HEIGHT, "batch-size", 1, "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);
// Set all the necessary properties of the nvinfer element
g_object_set (G_OBJECT (pgie), "config-file-path", "dsopencvtest_pgie_config.txt", NULL);
if (!prop.integrated) {
  // nvvideoconvert requires unified CUDA memory for OpenCV blurring on the CPU
  g_object_set (G_OBJECT (nvvidconv), "nvbuf-memory-type", 1, NULL);
  g_object_set (G_OBJECT (nvvidconv1), "nvbuf-memory-type", 1, NULL);
}
// Set properties of the caps_filter element
GstCaps *caps = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "RGBA", NULL);
GstCapsFeatures *feature = gst_caps_features_new (MEMORY_FEATURES, NULL);
gst_caps_set_features (caps, 0, feature);
g_object_set (G_OBJECT (caps_filter), "caps", caps, NULL);
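/* caps1 pins nvvidconv1's output to 500x500 RGBA in NVMM memory, the same
 * caps the working gst-launch pipeline negotiates after the crop. */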
GstCaps *caps1 = gst_caps_from_string ("video/x-raw(memory:NVMM),format=RGBA,width=500,height=500");
GstCapsFeatures *feature1 = gst_caps_features_new (MEMORY_FEATURES, NULL);
gst_caps_set_features (caps1, 0, feature1);
g_object_set (G_OBJECT (caps_filter1), "caps", caps1, NULL);
// Set properties of the sink element
g_object_set (G_OBJECT (sink), "sync", FALSE, NULL);
// we add a message handler
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
// Set up the pipeline: we add all elements into the pipeline
gst_bin_add_many (GST_BIN (pipeline), nvvidconv1, caps_filter1, pgie, nvvidconv, caps_filter, nvosd, NULL);
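/* Note: nvvidconv1 and caps_filter1 are in the bin, but nothing feeds them
 * yet; source_bin was linked straight to streammux's sink_0 above. */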
if (prop.integrated) {
  gst_bin_add_many (GST_BIN (pipeline), transform, sink, NULL);
}
else {
  gst_bin_add_many (GST_BIN (pipeline), sink, NULL);
}
// we link the elements together: file-source -> h264-parser -> nvh264-decoder -> nvinfer -> nvvidconv -> nvosd -> video-renderer
if (prop.integrated) {
  if (!gst_element_link_many (nvvidconv1, caps_filter1, streammux, pgie, nvvidconv, caps_filter, nvosd, transform, sink, NULL)) {
    g_printerr ("Elements could not be linked: 0. Exiting.\n");
    return -1;
  }
}
else {
  if (!gst_element_link_many (nvvidconv1, caps_filter1,
      streammux, pgie, nvvidconv, caps_filter, nvosd, sink, NULL)) {
    g_printerr ("Elements could not be linked: 2. Exiting.\n");
    return -1;
  }
}
// Let's add a probe to get informed of the generated metadata. We add the probe to
// the sink pad of the osd element, since by that time the buffer would have got all the metadata.
osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
if (!osd_sink_pad)
  g_print ("Unable to get sink pad\n");
else
  gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, osd_sink_pad_buffer_probe, NULL, NULL);
gst_object_unref (osd_sink_pad);
// Set the pipeline to "playing" state
g_print ("Now playing: %s\n", argv[1]);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
// Wait till pipeline encounters an error or EOS
g_print ("Running...\n");
g_main_loop_run (loop);
// Out of the main loop, clean up nicely
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
What did I do wrong?