I am using DeepStream SDK 6.2, D455 Realsense Camera, Jetson Orin Nano with 5.1.3 Jetpack.
I successfully connected one camera and got output after running the DeepStream_3d_depth_camera sample application (c++).
Then I modified the code for two cameras: I created 2 data loaders, 2 data renders, and 2 DepthCameraApp instances in the application code (in main).
Then I modified the ds_3d_realsense_depth_capture.yaml file to include configurations for 2 data loaders and 2 data renders; in this case I get the error “multiple data render found keep only one”.
However, when I keep only 1 data render in the yaml file, only one camera's output is visible.
My YAML code:
# Configuration for Camera 1
- name: realsense_dataloader_cam1
  type: ds3d::dataloader
  out_caps: ds3d/datamap
  custom_lib_path: libnvds_3d_dataloader_realsense.so
  custom_create_function: createRealsenseDataloader
  config_body:
    streams: ["color", "depth"]
    framerate: 0

- name: depth-render_cam1
  type: ds3d::datarender
  in_caps: ds3d/datamap
  custom_lib_path: libnvds_3d_gl_datarender.so
  custom_create_function: createDepthStreamDataRender
  gst_properties:
    sync: false
    async: false
    drop: false
  config_body:
    streams: ["depth", "color"]
    width: 1920
    height: 544
    title: "ds3d-camera-app-cam1"
    block: true
    min_depth: 0.3 # in meters
    max_depth: 2.0 # in meters
    min_depth_color: [255, 128, 0] # RGB color value for minimum depth
    max_depth_color: [0, 128, 255] # RGB color value for maximum depth

# Configuration for Camera 2
- name: realsense_dataloader_cam2
  type: ds3d::dataloader
  out_caps: ds3d/datamap
  custom_lib_path: libnvds_3d_dataloader_realsense.so
  custom_create_function: createRealsenseDataloader
  config_body:
    streams: ["color", "depth"]
    framerate: 0

- name: depth-render_cam2
  type: ds3d::datarender
  in_caps: ds3d/datamap
  custom_lib_path: libnvds_3d_gl_datarender.so
  custom_create_function: createDepthStreamDataRender
  gst_properties:
    sync: false
    async: false
    drop: false
  config_body:
    streams: ["depth", "color"]
    width: 1920
    height: 544
    title: "ds3d-camera-app-cam2"
    block: true
    min_depth: 0.3 # in meters
    max_depth: 2.0 # in meters
    min_depth_color: [255, 128, 0] # RGB color value for minimum depth
    max_depth_color: [0, 128, 255] # RGB color value for maximum depth

# User application
- name: debugdump
  type: ds3d::userapp
My Main Code:
int main(int argc, char* argv[])
{
std::vector<gst::DataLoaderSrc> loaderSrcs(2);
std::vector<gst::DataRenderSink> renderSinks(2);
std::vector<std::shared_ptr<DepthCameraApp>> appCtxs(2);
std::vector<ConfigList> componentConfigs(2);
std::string configPath;
std::string configContent;
/* Standard GStreamer initialization */
gst_init(&argc, &argv);
/* setup signal handler */
_intr_setup();
/* Parse program arguments */
opterr = 0;
int c = -1;
while ((c = getopt(argc, argv, "hc:")) != -1) {
switch (c) {
case 'c': // get config file path
configPath = optarg;
break;
case 'h':
help(argv[0]);
return 0;
case '?':
default:
help(argv[0]);
return -1;
}
}
if (configPath.empty()) {
LOG_ERROR("config file is not set!");
help(argv[0]);
return -1;
}
CHECK_ERROR(readFile(configPath, configContent), "read file: %s failed", configPath.c_str());
// Parse configuration for both cameras
for (int i = 0; i < 2; ++i) {
// parse all components in config file
ErrCode code = CatchConfigCall(config::parseFullConfig, configContent, configPath, componentConfigs[i]);
CHECK_ERROR(isGood(code), "parse config failed for camera %d", i);
// Order all parsed component configs into config table
std::map<config::ComponentType, ConfigList> configTable;
for (const auto& c : componentConfigs[i]) {
configTable[c.type].emplace_back(c);
}
appCtxs[i] = std::make_shared<DepthCameraApp>();
// update userapp configuration
if (configTable.count(config::ComponentType::kUserApp)) {
CHECK_ERROR(
isGood(appCtxs[i]->initUserAppProfiling(configTable[config::ComponentType::kUserApp][0])),
"parse userapp data failed");
}
// Initialize app context with main loop and pipelines
appCtxs[i]->setMainloop(g_main_loop_new(NULL, FALSE));
CHECK_ERROR(appCtxs[i]->mainLoop(), "set main loop failed");
CHECK_ERROR(isGood(appCtxs[i]->init("deepstream-depth-camera-pipeline")), "init pipeline failed");
bool startLoaderDirectly = true;
bool startRenderDirectly = true;
// Ensure separate configurations for each camera instance
CHECK_ERROR(
isGood(CreateLoaderSourceForMultiple(configTable[config::ComponentType::kDataLoader], loaderSrcs[i], startLoaderDirectly)),
"create dataloader source failed for camera %d", i);
CHECK_ERROR(
isGood(CreateRenderSinkForMultiple(configTable[config::ComponentType::kDataRender], renderSinks[i], startRenderDirectly)),
"create datarender sink failed for camera %d", i);
appCtxs[i]->setDataloaderSrc(loaderSrcs[i]);
appCtxs[i]->setDataRenderSink(renderSinks[i]);
DS_ASSERT(loaderSrcs[i].gstElement);
DS_ASSERT(renderSinks[i].gstElement);
/* create and add all filters */
bool hasFilters = configTable.count(config::ComponentType::kDataFilter);
/* link all pad/elements together */
code = CatchVoidCall([&loaderSrcs, &renderSinks, hasFilters, &configTable, i, appCtxs]() {
gst::ElePtr lastEle = loaderSrcs[i].gstElement;
if (hasFilters) {
auto& filterConfigs = configTable[config::ComponentType::kDataFilter];
DS_ASSERT(filterConfigs.size());
for (size_t j = 0; j < filterConfigs.size(); ++j) {
auto queue = gst::elementMake("queue", ("filterQueue" + std::to_string(j) + "_cam" + std::to_string(i)).c_str());
DS_ASSERT(queue);
auto filter =
gst::elementMake(kDs3dFilterPluginName, ("filter" + std::to_string(j) + "_cam" + std::to_string(i)).c_str());
DS3D_THROW_ERROR_FMT(
filter, ErrCode::kGst, "gst-plugin: %s is not found", kDs3dFilterPluginName);
g_object_set(
G_OBJECT(filter.get()), "config-content", filterConfigs[j].rawContent.c_str(),
nullptr);
appCtxs[i]->add(queue).add(filter);
lastEle.link(queue).link(filter);
lastEle = filter;
}
}
lastEle.link(renderSinks[i].gstElement);
});
CHECK_ERROR(isGood(code), "Link pipeline elements failed for camera %d", i);
/* Add probe to get informed of the meta data generated, we add probe to
* gstappsrc src pad of the dataloader */
gst::PadPtr srcPad = loaderSrcs[i].gstElement.staticPad("src");
CHECK_ERROR(srcPad, "appsrc src pad is not detected for camera %d", i);
srcPad.addProbe(GST_PAD_PROBE_TYPE_BUFFER, appsrcBufferProbe, appCtxs[i].get(), NULL);
srcPad.reset();
/* Add probe to get informed of the meta data generated, we add probe to
* gstappsink sink pad of the datrender */
if (renderSinks[i].gstElement) {
gst::PadPtr sinkPad = renderSinks[i].gstElement.staticPad("sink");
CHECK_ERROR(sinkPad, "appsink sink pad is not detected for camera %d", i);
sinkPad.addProbe(GST_PAD_PROBE_TYPE_BUFFER, appsinkBufferProbe, appCtxs[i].get(), NULL);
sinkPad.reset();
}
CHECK_ERROR(isGood(appCtxs[i]->play()), "app context play failed for camera %d", i);
LOG_INFO("Play camera instance %d...", i);
// get window system and set close event callback
if (renderSinks[i].customProcessor) {
GuardWindow win = renderSinks[i].customProcessor.getWindow();
if (win) {
GuardCB<abiWindow::CloseCB> windowClosedCb;
windowClosedCb.setFn<>(WindowClosed);
win->setCloseCallback(windowClosedCb.abiRef());
}
}
}
/* Wait till pipelines encounter an error or EOS */
for (auto& appCtx : appCtxs) {
appCtx->runMainLoop();
}
for (auto& appCtx : appCtxs) {
appCtx->stop();
appCtx->deinit();
}
return 0;
}
I am stuck here and cannot figure out what the issue is; any suggestions would be appreciated.