Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) NVIDIA GeForce RTX 3090 Ti
• DeepStream Version 7.1
• JetPack Version (valid for Jetson only)
• TensorRT Version 10.3.0.26
• NVIDIA GPU Driver Version (valid for GPU only) 550.163.01
• Issue Type( questions, new requirements, bugs) questions
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
I’m using a Docker container and want to test nvmultiurisrcbin to understand how to add or remove streams in a DeepStream pipeline. I create the nvmultiurisrcbin element with the following code:
GstElement *SourceBin::create_nv_multi_urisrc_bin(guint index, std::string filenames){
  GstElement *nvmultiurisrcbin = NULL;
  gchar nvmultiurisrcbin_name[32] = {};
  g_print("Creating nvmultiurisrcbin for stream_id %d or stream %s \n", index,
          filenames.c_str());
  g_snprintf(nvmultiurisrcbin_name, sizeof(nvmultiurisrcbin_name),
             "nvmultiurisrc-bin-%02d", index);
  nvmultiurisrcbin = gst_element_factory_make("nvmultiurisrcbin", nvmultiurisrcbin_name);
  if (!nvmultiurisrcbin) {
    std::cerr << "Failed to create nvmultiurisrcbin" << std::endl;
    return NULL;
  }
  g_object_set(G_OBJECT(nvmultiurisrcbin), "uri-list", filenames.c_str(), NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "max-batch-size", 20, NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "live-source", 1, NULL); // 1 for RTSP/camera, 0 for file
  g_object_set(G_OBJECT(nvmultiurisrcbin), "batched-push-timeout", 33000, NULL); // Mux push timeout in microseconds; 33000 us is roughly one frame at 30 fps
  // g_object_set(G_OBJECT(nvmultiurisrcbin), "rtsp-reconnect-interval", 5, NULL);
  // g_object_set(G_OBJECT(nvmultiurisrcbin), "rtsp-reconnect-attempts", 10, NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "drop-pipeline-eos", TRUE, NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "drop-frame-interval", 5, NULL); // Decoder outputs every 5th frame and drops the rest
  g_object_set(G_OBJECT(nvmultiurisrcbin), "file-loop", FALSE, NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "gpu-id", GPU_ID, NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "width", 1920, NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "height", 1080, NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "cudadec-memtype", 0, NULL); // Memory type for CUDA decoding (0=default, 1=NVBUF_MEM_CUDA_PINNED, 2=NVBUF_MEM_CUDA_DEVICE, 3=NVBUF_MEM_CUDA_UNIFIED)
  g_object_set(G_OBJECT(nvmultiurisrcbin), "latency", 200, NULL); // Network jitter-buffer latency in milliseconds; used for RTSP
  g_object_set(G_OBJECT(nvmultiurisrcbin), "sensor-id-list", "", NULL); // e.g. "UniqueSensorId1"
  g_object_set(G_OBJECT(nvmultiurisrcbin), "sensor-name-list", "UniqueSensorName1", NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "buffer-pool-size", 16, NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "ip-address", "localhost", NULL);
  g_object_set(G_OBJECT(nvmultiurisrcbin), "port", "3190", NULL); // Default: "9000"
  g_object_set(G_OBJECT(nvmultiurisrcbin), "disable-audio", TRUE, NULL);
  return nvmultiurisrcbin;
}
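For completeness, here is roughly how I wire the bin into the rest of the pipeline. This is a minimal sketch, assuming the same nvmultistreamtiler/fakesink tail as the gst-launch test further below; source_bin stands for my SourceBin instance and error handling is trimmed:

GstElement *pipeline = gst_pipeline_new("ds-pipeline");
GstElement *srcbin = source_bin.create_nv_multi_urisrc_bin(0, "file:///root/Put.mp4");
GstElement *tiler = gst_element_factory_make("nvmultistreamtiler", "tiler");
GstElement *sink = gst_element_factory_make("fakesink", "sink");
gst_bin_add_many(GST_BIN(pipeline), srcbin, tiler, sink, NULL);
// nvmultiurisrcbin exposes a single muxed src pad, so it links like a regular element.
if (!gst_element_link_many(srcbin, tiler, sink, NULL))
  std::cerr << "Failed to link nvmultiurisrcbin -> tiler -> sink" << std::endl;
gst_element_set_state(pipeline, GST_STATE_PLAYING);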
Simpler scenario:
GstElement *SourceBin::create_nv_multi_urisrc_bin(guint index, std::string filenames){
  (void)index;
  (void)filenames;
  GstElement *nvmultiurisrcbin =
      gst_element_factory_make("nvmultiurisrcbin", "nvmultiurisrc-bin");
  if (!nvmultiurisrcbin) {
    std::cerr << "Failed to create nvmultiurisrcbin" << std::endl;
    return NULL;
  }
  gchar *file_uri = g_strdup("file:///root/Put.mp4");
  g_object_set(G_OBJECT(nvmultiurisrcbin),
               "uri-list", file_uri,
               "max-batch-size", 20,
               "sensor-id-list", "UniqueSensorId1",
               "width", 1920,
               "height", 1080,
               "sensor-name-list", "",
               "port", "3190",
               "batched-push-timeout", 33000,
               NULL);
  g_free(file_uri);
  return nvmultiurisrcbin;
}
I then attempt to add a new stream using the REST API:
curl -v -XPOST 'http://localhost:3190/api/v1/stream/add' -d '{
  "key": "sensor",
  "value": {
    "camera_id": "uniqueSensorID1",
    "camera_name": "front_door",
    "camera_url": "file:///root/P.mp4",
    "change": "camera_add",
    "metadata": {
      "resolution": "1920x1080",
      "codec": "h264",
      "framerate": 30
    }
  },
  "headers": {
    "source": "vst",
    "created_at": "2021-06-01T14:34:13.417Z"
  }
}'
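For the removal half of what I want to test, my understanding of the nvmultiurisrcbin REST API is that the same payload shape with "change": "camera_remove" is POSTed to /api/v1/stream/remove. I have not gotten far enough to exercise this yet, so treat it as a sketch:

curl -v -XPOST 'http://localhost:3190/api/v1/stream/remove' -d '{
  "key": "sensor",
  "value": {
    "camera_id": "uniqueSensorID1",
    "camera_name": "front_door",
    "camera_url": "file:///root/P.mp4",
    "change": "camera_remove",
    "metadata": {
      "resolution": "1920x1080",
      "codec": "h264",
      "framerate": 30
    }
  },
  "headers": {
    "source": "vst",
    "created_at": "2021-06-01T14:34:13.417Z"
  }
}'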
However, when running this command in the terminal, the program crashes with:
terminate called after throwing an instance of 'std::logic_error'
what(): basic_string::_M_construct null not valid
Aborted (core dumped)
So the pipeline aborts with a core dump as soon as the add request is received.
This may be related to two other topics I asked: "Service-maker deepstream_test5_app Unable to set the pipeline to the playing state error" and "Unable to add streams to DeepStream Server, API endpoints returning 404".
As you suggested in "The deepstream-server use rest api after adding streaming", where I found that deepstream-server does not run inference, I tried to verify this using gst-launch-1.0:
gst-launch-1.0 nvmultiurisrcbin port=9000 ip-address=localhost \
batched-push-timeout=33333 max-batch-size=10 drop-pipeline-eos=1 \
rtsp-reconnect-interval=1 live-source=1 width=1920 height=1080 ! \
nvmultistreamtiler ! fakesink async=false
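To check the no-inference question directly, I assume an nvinfer element can be spliced into the same test pipeline. A sketch (the config-file-path below is a placeholder, not my actual config):

gst-launch-1.0 nvmultiurisrcbin port=9000 ip-address=localhost \
batched-push-timeout=33333 max-batch-size=10 drop-pipeline-eos=1 \
rtsp-reconnect-interval=1 live-source=1 width=1920 height=1080 ! \
nvinfer config-file-path=/path/to/config_infer_primary.txt ! \
nvmultistreamtiler ! fakesink async=false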
Then I attempted to add a stream via the REST API:
curl -v -XPOST 'http://localhost:9000/api/v1/stream/add' -d '{
  "key": "sensor",
  "value": {
    "camera_id": "uniqueSensorID1",
    "camera_name": "front_door",
    "camera_url": "file:///root/P.mp4",
    "change": "camera_add",
    "metadata": {
      "resolution": "1920x1080",
      "codec": "h264",
      "framerate": 30
    }
  },
  "headers": {
    "source": "vst",
    "created_at": "2021-06-01T14:34:13.417Z"
  }
}'
However, I received the following error:
Note: Unnecessary use of -X or --request, POST is already inferred.
* Trying 127.0.0.1:9000...
* Connected to localhost (127.0.0.1) port 9000 (#0)
> POST /api/v1/stream/add HTTP/1.1
> Host: localhost:9000
> User-Agent: curl/7.81.0
> Accept: */*
> Content-Length: 419
> Content-Type: application/x-www-form-urlencoded
>
* Mark bundle as not supporting multiuse
< HTTP/1.1 404 Not Found
< Content-Length: 21
< Content-Type: text/plain
<
* Connection #0 to host localhost left intact
{"error":"Not Found"}