Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) : GPU
• DeepStream Version : 6.1
0:03:51.707872721 7069 0x557e0d453400 WARN nvinfer gstnvinfer.cpp:1388:convert_batch_and_push_to_input_thread:<secondary-infer-engine2> error: NvBufSurfTransform failed with error -3 while converting buffer
ERROR from element secondary-infer-engine2: NvBufSurfTransform failed with error -3 while converting buffer
Error details: gstnvinfer.cpp(1388): convert_batch_and_push_to_input_thread (): /GstPipeline:ANPR-pipeline/GstNvInfer:secondary-infer-engine2
0:03:51.733218929 7069 0x557e0d453460 WARN nvinfer gstnvinfer.cpp:2299:gst_nvinfer_output_loop:<secondary-infer-engine1> error: Internal data stream error.
0:03:51.733243723 7069 0x557e0d453460 WARN nvinfer gstnvinfer.cpp:2299:gst_nvinfer_output_loop:<secondary-infer-engine1> error: streaming stopped, reason error (-5)
I’m running 3 models: pgie and sgie1 for detection, and sgie2 for classification. However, after running for about 4000 frames, the application suddenly crashes with the above error. Below is my config file:
source-list:
list: file:///home/mainak/../../../
streammux:
batch-size: 1
batched-push-timeout: 40000
width: 1280
height: 736
attach-sys-ts : 1
live-source : 1
osd:
process-mode: 0
display-text: 0
#If there is ROI
analytics-config:
#filename: config_nvdsanalytics.txt
triton:
## 0:disable 1:enable
enable: 0
##0: triton-native 1: triton-grpc
type: 0
##car mode, 1:US car plate model|2: Chinese car plate model
car-mode: 1
output:
## 1: file output 2: fake output 3: eglsink output
type: 1
## 0: H264 encoder 1:H265 encoder
enc: 0
bitrate: 4000000
##The file name without suffix
filename: anpr
primary-gie:
##For car detection
config-file-path: ./pgie_config.yml
unique-id: 1
secondary-gie-0:
##For US car plate
config-file-path: ./sgie1_config.yml
##For China mainland car plate
#config-file-path: lpd_ccpd_yolov4-tiny_config.yml
unique-id: 2
process-mode: 2
secondary-gie-1:
##For US car plate recognition
config-file-path: ./lpr_config_sgie_us.yml
##For China mainland car plate recognition
#config-file-path: lpr_config_sgie_ch.yml
unique-id: 3
process-mode: 2
I noticed that changing the streammux width and height affects how long the pipeline runs before crashing; however, it always crashes eventually.
Below is my pipeline:
/* --- Element creation -----------------------------------------------------
 * NOTE(review): this is a fragment of a larger function (likely main());
 * pgie, sgie1, sink, etc. are declared outside this excerpt. */
/* Use nvinfer to infer on batched frame. */
pgie = gst_element_factory_make("nvinfer", "primary-nvinference-engine");
sgie1 = gst_element_factory_make("nvinfer", "secondary-infer-engine1");
/* NOTE(review): the runtime error in the log ("NvBufSurfTransform failed with
 * error -3") is raised by this element's name. For a secondary nvinfer this
 * commonly points to an invalid crop rectangle (e.g. a detected object with
 * zero/negative width/height or a bbox partially outside the frame) being
 * scaled for inference — confirm against the sgie1 detector's output. */
sgie2 = gst_element_factory_make("nvinfer", "secondary-infer-engine2");
tracker = gst_element_factory_make("nvtracker", "nvtracker");
/* Add queue elements between every two elements */
queue1 = gst_element_factory_make("queue", "queue1");
queue2 = gst_element_factory_make("queue", "queue2");
queue3 = gst_element_factory_make("queue", "queue3");
queue4 = gst_element_factory_make("queue", "queue4");
queue5 = gst_element_factory_make("queue", "queue5");
queue6 = gst_element_factory_make("queue", "queue6");
queue7 = gst_element_factory_make("queue", "queue7");
queue8 = gst_element_factory_make("queue", "queue8");
/* Use nvdslogger for perf measurement. */
nvdslogger = gst_element_factory_make("nvdslogger", "nvdslogger");
/* Use nvtiler to composite the batched frames into a 2D tiled array based
 * on the source of the frames. */
// tiler = gst_element_factory_make("nvmultistreamtiler", "nvtiler");
/* Use convertor to convert from NV12 to RGBA as required by nvosd */
nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter");
/* Create OSD to draw on the converted RGBA buffer */
nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay");
/* Create Sink*/
// sink = gst_element_factory_make("fakesink", "nvvideo-renderer");
sink = gst_element_factory_make("nveglglessink", "nvvideo-renderer"); // for display
// sink = gst_element_factory_make("fakesink", "nvvideo-renderer");
/* BUG(review): sgie1, sgie2, tracker and the eight queues are never
 * NULL-checked here. If any of those factory calls failed, the failure
 * surfaces later as a confusing add/link error instead of this message. */
if (!pgie || !nvdslogger || !nvvidconv || !nvosd || !sink)
{
g_printerr("One element could not be created. Exiting.\n");
return -1;
}
/* --- Configuration --------------------------------------------------------
 * Two paths: a YAML application config (parsed with the nvds_parse_* helpers)
 * or legacy behavior with hard-coded config files and compile-time muxer
 * dimensions. */
if (g_str_has_suffix(argv[1], ".yml") || g_str_has_suffix(argv[1], ".yaml"))
{
nvds_parse_streammux(streammux, argv[1], "streammux");
nvds_parse_gie(pgie, argv[1], "primary-gie");
nvds_parse_gie(sgie1, argv[1], "secondary-gie-0");
nvds_parse_gie(sgie2, argv[1], "secondary-gie-1");
/* Force the pgie batch size to match the number of sources, overriding
 * whatever the infer config file specified. */
g_object_get(G_OBJECT(pgie), "batch-size", &pgie_batch_size, NULL);
if (pgie_batch_size != num_sources)
{
g_printerr("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
pgie_batch_size, num_sources);
g_object_set(G_OBJECT(pgie), "batch-size", num_sources, NULL);
}
nvds_parse_osd(nvosd, argv[1], "osd");
// tiler_rows = (guint)sqrt(num_sources);
// tiler_columns = (guint)ceil(1.0 * num_sources / tiler_rows);
// g_object_set(G_OBJECT(tiler), "rows", tiler_rows, "columns", tiler_columns, NULL);
// nvds_parse_tiler(tiler, argv[1], "tiler");
// nvds_parse_egl_sink(sink, argv[1], "sink");
/* Disable QoS and clock sync on the sink so slow rendering does not drop
 * or throttle buffers. */
g_object_set(G_OBJECT(sink), "qos", 0, NULL);
g_object_set(G_OBJECT(sink), "sync", 0, NULL);
}
else
{
g_object_set(G_OBJECT(streammux), "batch-size", num_sources, NULL);
g_object_set(G_OBJECT(streammux), "width", MUXER_OUTPUT_WIDTH, "height",
MUXER_OUTPUT_HEIGHT,
"batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);
/* Configure the nvinfer element using the nvinfer config file. */
g_object_set(G_OBJECT(pgie),
"config-file-path", "./models/ped_pgie_config.txt", NULL);
/* Override the batch-size set in the config file with the number of sources. */
g_object_get(G_OBJECT(pgie), "batch-size", &pgie_batch_size, NULL);
if (pgie_batch_size != num_sources)
{
g_printerr("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
pgie_batch_size, num_sources);
g_object_set(G_OBJECT(pgie), "batch-size", num_sources, NULL);
}
// tiler_rows = (guint)sqrt(num_sources);
// tiler_columns = (guint)ceil(1.0 * num_sources / tiler_rows);
// /* we set the tiler properties here */
// g_object_set(G_OBJECT(tiler), "rows", tiler_rows, "columns", tiler_columns,
// "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);
// g_object_set(G_OBJECT(nvosd), "process-mode", OSD_PROCESS_MODE,
// "display-text", OSD_DISPLAY_TEXT, NULL);
/* NOTE(review): unlike the YAML branch, "sync" is not disabled here —
 * confirm whether that asymmetry is intentional. */
g_object_set(G_OBJECT(sink), "qos", 0, NULL);
}
/* Configure the tracker in both branches. NOTE(review): tracker dimensions
 * are tied to the compile-time muxer constants even in the YAML branch,
 * where the muxer size comes from the config file — verify they agree
 * (the post's config uses 1280x736). Underscored property names
 * ("gpu_id", "enable_batch_process") are canonicalized by GObject, so
 * they resolve the same as the dashed forms. */
g_object_set(
G_OBJECT(tracker), "tracker-width", MUXER_OUTPUT_WIDTH, "tracker-height", MUXER_OUTPUT_HEIGHT,
"gpu_id", 0, "ll-lib-file",
"/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so",
"ll-config-file", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml", "enable_batch_process", 1,
NULL);
// /*Use this for multifilesink*/
// g_object_set(
// G_OBJECT(sink), "location", "/home/mainak/ms/C++/bbpl/pedestrian/output/image_%02d.png", "async", 0, NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
gst_object_unref(bus);
/* Set up the pipeline */
/* we add all elements into the pipeline */
if (transform)
{
/* Jetson-style path (egl transform present): no tracker/sgies in this
 * branch — only pgie runs. */
gst_bin_add_many(GST_BIN(pipeline), queue1, pgie, queue2, nvdslogger,
queue3, nvvidconv, queue4, nvosd, queue5, transform, sink, NULL);
/* we link the elements together
 * nvstreammux -> nvinfer -> nvdslogger -> nvtiler -> nvvidconv -> nvosd
 * -> video-renderer */
if (!gst_element_link_many(streammux, queue1, pgie, queue2, nvdslogger,
queue3, nvvidconv, queue4, nvosd, queue5, transform, sink, NULL))
{
g_printerr("Elements could not be linked. Exiting.\n");
return -1;
}
}
else
{
gst_bin_add_many(GST_BIN(pipeline), queue1, pgie, queue2, tracker, queue3, sgie1, queue4,
sgie2, queue5, nvdslogger, queue6, nvvidconv, queue7, nvosd, queue8, sink, NULL);
/* we link the elements together
 * nvstreammux -> nvinfer -> nvdslogger -> nvtiler -> nvvidconv -> nvosd
 * -> video-renderer */
/* BUG(review): queue6 is added to the bin above but omitted from this
 * link chain (nvdslogger links directly to nvvidconv), leaving an
 * unlinked element dangling in the pipeline. Either insert queue6
 * between nvdslogger and nvvidconv or drop it from gst_bin_add_many. */
if (!gst_element_link_many(streammux, queue1, pgie, queue2, tracker, queue3, sgie1, queue4,
sgie2, queue5, nvdslogger, nvvidconv, queue7, nvosd, queue8, sink, NULL))
{
g_printerr("Elements could not be linked. Exiting.\n");
return -1;
}
}
/*Creat Context for Object Encoding */
/* Handle used by the pad probes below to JPEG-encode detected objects. */
NvDsObjEncCtxHandle obj_ctx_handle = nvds_obj_enc_create_context();
if (!obj_ctx_handle)
{
g_print("Unable to create context\n");
return -1;
}
if (save_img == 1)
{
/* Lets add probe to get informed of the meta data generated, we add probe to
 * the sink pad of the osd element, since by that time, the buffer would have
 * had got all the metadata. */
gie_src_pad = gst_element_get_static_pad(tracker, "src");
if (!gie_src_pad)
g_print("Unable to get src pad\n");
else
gst_pad_add_probe(gie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
gie_src_pad_buffer_probe, (gpointer)obj_ctx_handle, NULL);
/* NOTE(review): if the pad lookup failed, this unrefs NULL, which
 * triggers a GLib critical warning — move the unref into the else. */
gst_object_unref(gie_src_pad);
}
/* Lets add probe to get informed of the meta data generated, we add probe to
 * the sink pad of the osd element, since by that time, the buffer would have
 * had got all the metadata. */
/* NOTE(review): despite the comment, this probe is on the SRC pad of sgie2
 * — the same element whose input-conversion step fails in the log. Any
 * metadata mutation done in osd_src_pad_buffer_probe happens after sgie2,
 * so it is not the cause; look instead at bboxes produced upstream. */
osd_src_pad = gst_element_get_static_pad(sgie2, "src");
if (!osd_src_pad)
g_print("Unable to get sink pad\n");
else
gst_pad_add_probe(osd_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
osd_src_pad_buffer_probe, (gpointer)obj_ctx_handle, NULL);
/* NOTE(review): same potential NULL-unref as above. */
gst_object_unref(osd_src_pad);
/* Set the pipeline to "playing" state */
if (g_str_has_suffix(argv[1], ".yml") || g_str_has_suffix(argv[1], ".yaml"))
{
g_print("Using file: %s\n", argv[1]);
}
else
{
g_print("Now playing:");
for (i = 0; i < num_sources; i++)
{
g_print(" %s,", argv[i + 1]);
}
g_print("\n");
}
gst_element_set_state(pipeline, GST_STATE_PLAYING);
/* Wait till pipeline encounters an error or EOS */
g_print("Running...\n");
g_main_loop_run(loop);
/* Out of the main loop, clean up nicely */
g_print("Returned, stopping playback\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(pipeline));
g_source_remove(bus_watch_id);
g_main_loop_unref(loop);
/* NOTE(review): obj_ctx_handle is never released — presumably
 * nvds_obj_enc_destroy_context should be called here; confirm against the
 * object-encoder API. */
/** Paho MQTT*/
g_print("Disconnecting MQTT Client\n");
if ((rc = MQTTClient_disconnect(client, 10000)) != MQTTCLIENT_SUCCESS)
printf("Failed to disconnect, return code %d\n", rc);
MQTTClient_destroy(&client);
g_print("Destroying MQTT Client\n");
return 0;