Getting an error in deepstream-test1-app

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU) - Jetson Nano
• DeepStream Version - 6.0.1
• JetPack Version (valid for Jetson only) - 4.6.3
• TensorRT Version - 8.4
• NVIDIA GPU Driver Version (valid for GPU only) - 10.2
• Issue Type( questions, new requirements, bugs) - bug: getting an error when I try to execute deepstream-test1-app
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)

This is the error:

jetson@ubuntu:~/Documents/deepstream-test1-app$ ./deepstream-test1-app "rtsp://admin:Sieora123@192.168.1.108:554/cam/realmonitor?channel=3&subtype=0"
Now playing: rtsp://admin:Sieora123@192.168.1.108:554/cam/realmonitor?channel=3&subtype=0

Using winsys: x11
Opening in BLOCKING MODE
WARNING: [TRT]: Using an engine plan file across different models of devices is not recommended and is likely to affect performance or even cause errors.
0:00:06.672492933 9511 0x557f616b00 INFO nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger: NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:1900> [UID = 1]: deserialized trt engine from :/home/jetson/Documents/deepstream-test1-app/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp16.engine
INFO: [Implicit Engine Info]: layers num: 3
0 INPUT kFLOAT input_1 3x368x640
1 OUTPUT kFLOAT conv2d_bbox 16x23x40
2 OUTPUT kFLOAT conv2d_cov/Sigmoid 4x23x40

0:00:06.673918986 9511 0x557f616b00 INFO nvinfer gstnvinfer.cpp:638:gst_nvinfer_logger: NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2004> [UID = 1]: Use deserialized engine model: /home/jetson/Documents/deepstream-test1-app/Primary_Detector/resnet10.caffemodel_b1_gpu0_fp16.engine
0:00:06.728030265 9511 0x557f616b00 INFO nvinfer gstnvinfer_impl.cpp:313:notifyLoadModelStatus: [UID 1]: Load new model:dstest1_pgie_config.txt sucessfully
Running...
ERROR from element rtsp-source: Resource not found.
Error details: gstfilesrc.c(533): gst_file_src_start (): /GstPipeline:dstest1-pipeline/GstFileSrc:rtsp-source:
No such file "rtsp://admin:Sieora123@192.168.1.108:554/cam/realmonitor?channel=3&subtype=0"
Returned, stopping playback
Deleting pipeline

This is the configuration file (dstest1_pgie_config.txt) of deepstream-test1-app:

[property]
gpu-id=0
net-scale-factor=0.0039215697906911373
model-file=Primary_Detector/resnet10.caffemodel
proto-file=Primary_Detector/resnet10.prototxt
model-engine-file=Primary_Detector/resnet10.caffemodel_b1_gpu0_fp16.engine
labelfile-path=Primary_Detector/labels.txt
int8-calib-file=Primary_Detector/cal_trt.bin
force-implicit-batch-dim=1
batch-size=1
network-mode=1
num-detected-classes=4
interval=0
gie-unique-id=1
output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid
#scaling-filter=0
#scaling-compute-hw=0
cluster-mode=2

[class-attrs-all]
pre-cluster-threshold=0.2
topk=20
nms-iou-threshold=0.5
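
(For reference: net-scale-factor=0.0039215697906911373 is just 1/255, i.e. nvinfer multiplies input pixel values by this factor to scale them from [0,255] down to [0,1] before inference. It is unrelated to the error above.)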

This is the source code of deepstream-test1-app:

#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include <cuda_runtime_api.h>
#include "gstnvdsmeta.h"

#define MAX_DISPLAY_LEN 64

#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

/* The muxer output resolution must be set if the input streams will be of
 * different resolution. The muxer will scale all the input frames to this
 * resolution. */
#define MUXER_OUTPUT_WIDTH 1920
#define MUXER_OUTPUT_HEIGHT 1080

/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
 * based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 40000
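/* (Worked example, assuming a 25 fps source: a frame arrives every
 * 1/25 s = 40 ms, so 40000 usec gives the muxer roughly one frame
 * interval to form a batch.) */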

gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
"Roadsign"
};

/* osd_sink_pad_buffer_probe will extract metadata received on OSD sink pad
 * and update params for drawing rectangle, object information etc. */

static GstPadProbeReturn
osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
gpointer u_data)
{
GstBuffer *buf = (GstBuffer *) info->data;
guint num_rects = 0;
NvDsObjectMeta *obj_meta = NULL;
guint vehicle_count = 0;
guint person_count = 0;
NvDsMetaList * l_frame = NULL;
NvDsMetaList * l_obj = NULL;
NvDsDisplayMeta *display_meta = NULL;

NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
  l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
    int offset = 0;
    for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
            l_obj = l_obj->next) {
        obj_meta = (NvDsObjectMeta *) (l_obj->data);
        if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) {
            vehicle_count++;
            num_rects++;
        }
        if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
            person_count++;
            num_rects++;
        }
    }
    display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
    NvOSD_TextParams *txt_params  = &display_meta->text_params[0];
    display_meta->num_labels = 1;
    txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
    offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ", person_count);
    /* remaining buffer size must account for the bytes already written */
    offset += snprintf(txt_params->display_text + offset, MAX_DISPLAY_LEN - offset, "Vehicle = %d ", vehicle_count);

    /* Now set the offsets where the string should appear */
    txt_params->x_offset = 10;
    txt_params->y_offset = 12;

    /* Font , font-color and font-size */
    txt_params->font_params.font_name = "Serif";
    txt_params->font_params.font_size = 10;
    txt_params->font_params.font_color.red = 1.0;
    txt_params->font_params.font_color.green = 1.0;
    txt_params->font_params.font_color.blue = 1.0;
    txt_params->font_params.font_color.alpha = 1.0;

    /* Text background color */
    txt_params->set_bg_clr = 1;
    txt_params->text_bg_clr.red = 0.0;
    txt_params->text_bg_clr.green = 0.0;
    txt_params->text_bg_clr.blue = 0.0;
    txt_params->text_bg_clr.alpha = 1.0;

    nvds_add_display_meta_to_frame(frame_meta, display_meta);
}

g_print ("Frame Number = %d Number of objects = %d "
        "Vehicle Count = %d Person Count = %d\n",
        frame_number, num_rects, vehicle_count, person_count);
frame_number++;
return GST_PAD_PROBE_OK;

}

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR:{
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_printerr ("ERROR from element %s: %s\n",
GST_OBJECT_NAME (msg->src), error->message);
if (debug)
g_printerr ("Error details: %s\n", debug);
g_free (debug);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}

int
main (int argc, char *argv[])
{
GMainLoop *loop = NULL;
GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL,
*decoder = NULL, *streammux = NULL, *sink = NULL, *pgie = NULL, *nvvidconv = NULL,
*nvosd = NULL;

GstElement *transform = NULL;
GstBus *bus = NULL;
guint bus_watch_id;
GstPad *osd_sink_pad = NULL;

int current_device = -1;
cudaGetDevice(&current_device);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, current_device);
/* Check input arguments */
if (argc != 2) {
g_printerr ("Usage: %s <H264 filename>\n", argv[0]);
return -1;
}

/* Standard GStreamer initialization */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);

/* Create gstreamer elements */
/* Create Pipeline element that will form a connection of other elements */
pipeline = gst_pipeline_new ("dstest1-pipeline");

/* Source element for reading from the file */
source = gst_element_factory_make ("filesrc", "rtsp-source");

/* Since the data format in the input file is elementary h264 stream,
 * we need a h264parser */
h264parser = gst_element_factory_make ("h265parse", "h264-parser");

/* Use nvdec_h264 for hardware accelerated decode on GPU */
decoder = gst_element_factory_make ("nvv4l2decoder", "nvv4l2-decoder");

/* Create nvstreammux instance to form batches from one or more sources. */
streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

if (!pipeline || !streammux) {
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}

/* Use nvinfer to run inferencing on decoder's output,
 * behaviour of inferencing is set through config file */
pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

/* Use convertor to convert from NV12 to RGBA as required by nvosd */
nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");

/* Create OSD to draw on the converted RGBA buffer */
nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");

/* Finally render the osd output */
if(prop.integrated) {
transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
}
sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");

if (!source || !h264parser || !decoder || !pgie
|| !nvvidconv || !nvosd || !sink) {
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}

if(!transform && prop.integrated) {
g_printerr ("One tegra element could not be created. Exiting.\n");
return -1;
}

/* we set the input filename to the source element */
g_object_set (G_OBJECT (source), "location", argv[1], NULL);

g_object_set (G_OBJECT (streammux), "batch-size", 1, NULL);

g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
MUXER_OUTPUT_HEIGHT,
"batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

/* Set all the necessary properties of the nvinfer element,
 * the necessary ones are : */
g_object_set (G_OBJECT (pgie),
"config-file-path", "dstest1_pgie_config.txt", NULL);

/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);

/* Set up the pipeline */
/* we add all elements into the pipeline */
if(prop.integrated) {
gst_bin_add_many (GST_BIN (pipeline),
source, h264parser, decoder, streammux, pgie,
nvvidconv, nvosd, transform, sink, NULL);
}
else {
gst_bin_add_many (GST_BIN (pipeline),
source, h264parser, decoder, streammux, pgie,
nvvidconv, nvosd, sink, NULL);
}

GstPad *sinkpad, *srcpad;
gchar pad_name_sink[16] = "sink_0";
gchar pad_name_src[16] = "src";

sinkpad = gst_element_get_request_pad (streammux, pad_name_sink);
if (!sinkpad) {
g_printerr ("Streammux request sink pad failed. Exiting.\n");
return -1;
}

srcpad = gst_element_get_static_pad (decoder, pad_name_src);
if (!srcpad) {
g_printerr ("Decoder request src pad failed. Exiting.\n");
return -1;
}

if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
g_printerr ("Failed to link decoder to stream muxer. Exiting.\n");
return -1;
}

gst_object_unref (sinkpad);
gst_object_unref (srcpad);

/* we link the elements together */
/* file-source -> h264-parser -> nvh264-decoder ->
 * nvinfer -> nvvidconv -> nvosd -> video-renderer */

if (!gst_element_link_many (source, h264parser, decoder, NULL)) {
g_printerr ("Elements could not be linked: 1. Exiting.\n");
return -1;
}

if(prop.integrated) {
if (!gst_element_link_many (streammux, pgie,
nvvidconv, nvosd, transform, sink, NULL)) {
g_printerr ("Elements could not be linked: 2. Exiting.\n");
return -1;
}
}
else {
if (!gst_element_link_many (streammux, pgie,
nvvidconv, nvosd, sink, NULL)) {
g_printerr ("Elements could not be linked: 2. Exiting.\n");
return -1;
}
}

/* Lets add probe to get informed of the meta data generated, we add probe to
 * the sink pad of the osd element, since by that time, the buffer would have
 * had got all the metadata. */
osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
if (!osd_sink_pad)
g_print ("Unable to get sink pad\n");
else
gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
osd_sink_pad_buffer_probe, NULL, NULL);
gst_object_unref (osd_sink_pad);

/* Set the pipeline to "playing" state */
g_print ("Now playing: %s\n", argv[1]);
gst_element_set_state (pipeline, GST_STATE_PLAYING);

/* Wait till pipeline encounters an error or EOS */
g_print ("Running...\n");
g_main_loop_run (loop);

/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}

Judging from your pipeline, your code needs to be changed to:

source = gst_element_factory_make ("rtspsrc", "rtsp-source");
h264parser = gst_element_factory_make ("h264parse", "h264-parser");
// Please set the parser according to the encoding type of the RTSP stream
// h265parser = gst_element_factory_make ("h265parse", "h265-parser");

You need some knowledge of GStreamer and multimedia.
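
For reference, below is a rough sketch of what the source section could look like with rtspsrc. rtspsrc creates its src pads dynamically ("sometimes" pads), so the depayloader must be linked from a pad-added callback. The rtph264depay element, the depay variable and the callback name here are illustrative assumptions for an H264 stream, not a tested drop-in fix:

/* Sketch only: link the depayloader when rtspsrc announces its pad */
static void
cb_rtspsrc_pad_added (GstElement * src, GstPad * new_pad, gpointer data)
{
GstElement *depay = (GstElement *) data;
GstPad *sinkpad = gst_element_get_static_pad (depay, "sink");
/* rtspsrc may also expose an audio pad; extra link attempts simply fail */
if (!gst_pad_is_linked (sinkpad))
gst_pad_link (new_pad, sinkpad);
gst_object_unref (sinkpad);
}

/* in main(), replacing the filesrc section: */
source = gst_element_factory_make ("rtspsrc", "rtsp-source");
depay = gst_element_factory_make ("rtph264depay", "h264-depay"); /* assumes H264 */
h264parser = gst_element_factory_make ("h264parse", "h264-parser");

/* rtspsrc also takes the RTSP URI through its "location" property */
g_object_set (G_OBJECT (source), "location", argv[1], NULL);
g_signal_connect (source, "pad-added", G_CALLBACK (cb_rtspsrc_pad_added), depay);

/* add depay to the bin, then link statically from the depayloader onward:
 * depay -> h264parse -> nvv4l2decoder -> (streammux via request pad) */
if (!gst_element_link_many (depay, h264parser, decoder, NULL)) {
g_printerr ("Elements could not be linked: 1. Exiting.\n");
return -1;
}

Alternatively, you can use uridecodebin as in deepstream-test3, which selects the depayloader, parser and decoder automatically for any URI.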

**There is no update from you for a period, assuming this is not an issue anymore. Hence we are closing this topic.**
If you need further support, please open a new one. Thanks.
