Pose Estimation: how to use a webcam as the detection source

• Hardware Platform (Jetson / GPU): Xavier NX
• DeepStream Version: 5.0
• JetPack Version (valid for Jetson only): 4.5
• TensorRT Version: 7.1.3
• Issue Type( questions, new requirements, bugs): questions

Hi guys,
I'm a newbie in DeepStream, and I'm trying to use a webcam to detect poses. I am adjusting deepstream_pose_estimation_app.cpp.

I have looked over the deepstream-test1 to deepstream-test5 app.c samples, but I still don't understand how to start writing this.

I have also searched for information, but everything I find is about Python; I would like a C/C++ example.

Hi,
You can refer to create_uridecode_src_bin in sources/apps/apps-common/src/deepstream_source_bin.c
for how to create a uridecodebin, which accepts any type of input (e.g. RTSP/file).
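
For illustration, here is a minimal, untested sketch of the core idea behind create_uridecode_src_bin, reduced to plain GStreamer calls: create a uridecodebin, give it a URI (the path below is only a placeholder), and link its dynamically created pads from a "pad-added" callback.

/* Called once uridecodebin has figured out the stream type and exposed a
 * source pad; link that pad to the downstream element here (e.g. the bin's
 * ghost pad or the nvstreammux sink pad). */
static void
cb_newpad (GstElement * decodebin, GstPad * pad, gpointer user_data)
{
  /* gst_pad_link (pad, <downstream sink pad>); */
}

/* Inside pipeline setup: */
GstElement *uri_decode_bin =
    gst_element_factory_make ("uridecodebin", "uri-decode-bin");
/* Works for any supported input, e.g. a file:// path or an rtsp:// URI
 * (the URI below is only a placeholder). */
g_object_set (G_OBJECT (uri_decode_bin), "uri", "file:///path/to/video.mp4",
    NULL);
g_signal_connect (G_OBJECT (uri_decode_bin), "pad-added",
    G_CALLBACK (cb_newpad), NULL);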

I use a USB camera, so I am referring to the following.


GST_DEBUG_CATEGORY_EXTERN (NVDS_APP);

static gboolean
set_camera_csi_params (NvDsSourceConfig * config, NvDsSrcBin * bin)
{
  g_object_set (G_OBJECT (bin->src_elem), "sensor-id",
      config->camera_csi_sensor_id, NULL);

  // GST_CAT_DEBUG (NVDS_APP, "Setting csi camera params successful");

  return TRUE;
}

static gboolean
set_camera_v4l2_params (NvDsSourceConfig * config, NvDsSrcBin * bin)
{
  gchar device[64];

  g_snprintf (device, sizeof (device), "/dev/video%d",
      config->camera_v4l2_dev_node);
  g_object_set (G_OBJECT (bin->src_elem), "device", device, NULL);

  // GST_CAT_DEBUG (NVDS_APP, "Setting v4l2 camera params successful");

  return TRUE;
}

static gboolean
create_camera_source_bin (NvDsSourceConfig * config, NvDsSrcBin * bin)
{
  GstCaps *caps = NULL, *caps1 = NULL, *convertCaps = NULL;
  gboolean ret = FALSE;

  switch (config->type) {
    case NV_DS_SOURCE_CAMERA_CSI:
      bin->src_elem =
          gst_element_factory_make (NVDS_ELEM_SRC_CAMERA_CSI, "src_elem");
      g_object_set (G_OBJECT (bin->src_elem), "bufapi-version", TRUE, NULL);
      g_object_set (G_OBJECT (bin->src_elem), "maxperf", TRUE, NULL);
      break;
    case NV_DS_SOURCE_CAMERA_V4L2:
      bin->src_elem =
          gst_element_factory_make (NVDS_ELEM_SRC_CAMERA_V4L2, "src_elem");
      bin->cap_filter1 =
          gst_element_factory_make (NVDS_ELEM_CAPS_FILTER, "src_cap_filter1");
      if (!bin->cap_filter1) {
        NVGSTDS_ERR_MSG_V ("Could not create 'src_cap_filter1'");
        goto done;
      }
      caps1 = gst_caps_new_simple ("video/x-raw",
          "width", G_TYPE_INT, config->source_width, "height", G_TYPE_INT,
          config->source_height, "framerate", GST_TYPE_FRACTION,
          config->source_fps_n, config->source_fps_d, NULL);
      break;
    default:
      NVGSTDS_ERR_MSG_V ("Unsupported source type");
      goto done;
  }

  if (!bin->src_elem) {
    NVGSTDS_ERR_MSG_V ("Could not create 'src_elem'");
    goto done;
  }

  bin->cap_filter =
      gst_element_factory_make (NVDS_ELEM_CAPS_FILTER, "src_cap_filter");
  if (!bin->cap_filter) {
    NVGSTDS_ERR_MSG_V ("Could not create 'src_cap_filter'");
    goto done;
  }
  caps = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "NV12",
      "width", G_TYPE_INT, config->source_width, "height", G_TYPE_INT,
      config->source_height, "framerate", GST_TYPE_FRACTION,
      config->source_fps_n, config->source_fps_d, NULL);

  if (config->type == NV_DS_SOURCE_CAMERA_CSI) {
    GstCapsFeatures *feature = NULL;
    feature = gst_caps_features_new ("memory:NVMM", NULL);
    gst_caps_set_features (caps, 0, feature);
  }

  if (config->type == NV_DS_SOURCE_CAMERA_V4L2) {
    GstElement *nvvidconv2;
    GstCapsFeatures *feature = NULL;

#ifndef IS_TEGRA
    GstElement *nvvidconv1;
    nvvidconv1 = gst_element_factory_make ("videoconvert", "nvvidconv1");
    if (!nvvidconv1) {
      NVGSTDS_ERR_MSG_V ("Failed to create 'nvvidconv1'");
      goto done;
    }
#endif

    feature = gst_caps_features_new ("memory:NVMM", NULL);
    gst_caps_set_features (caps, 0, feature);
    g_object_set (G_OBJECT (bin->cap_filter), "caps", caps, NULL);

    g_object_set (G_OBJECT (bin->cap_filter1), "caps", caps1, NULL);

    nvvidconv2 = gst_element_factory_make (NVDS_ELEM_VIDEO_CONV, "nvvidconv2");
    if (!nvvidconv2) {
      NVGSTDS_ERR_MSG_V ("Failed to create 'nvvidconv2'");
      goto done;
    }

    g_object_set (G_OBJECT (nvvidconv2), "gpu-id", config->gpu_id,
        "nvbuf-memory-type", config->nvbuf_memory_type, NULL);

#ifndef IS_TEGRA
    gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem, bin->cap_filter1,
        nvvidconv1, nvvidconv2, bin->cap_filter, NULL);
#else
    gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem, bin->cap_filter1,
        nvvidconv2, bin->cap_filter, NULL);
#endif

    NVGSTDS_LINK_ELEMENT (bin->src_elem, bin->cap_filter1);

#ifndef IS_TEGRA
    NVGSTDS_LINK_ELEMENT (bin->cap_filter1, nvvidconv1);
    NVGSTDS_LINK_ELEMENT (nvvidconv1, nvvidconv2);
#else
    NVGSTDS_LINK_ELEMENT (bin->cap_filter1, nvvidconv2);
#endif

    NVGSTDS_LINK_ELEMENT (nvvidconv2, bin->cap_filter);

    NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->cap_filter, "src");

  } else {

    g_object_set (G_OBJECT (bin->cap_filter), "caps", caps, NULL);

    gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem, bin->cap_filter, NULL);

    NVGSTDS_LINK_ELEMENT (bin->src_elem, bin->cap_filter);

    NVGSTDS_BIN_ADD_GHOST_PAD (bin->bin, bin->cap_filter, "src");
  }

  switch (config->type) {
    case NV_DS_SOURCE_CAMERA_CSI:
      if (!set_camera_csi_params (config, bin)) {
        NVGSTDS_ERR_MSG_V ("Could not set CSI camera properties");
        goto done;
      }
      break;
    case NV_DS_SOURCE_CAMERA_V4L2:
      if (!set_camera_v4l2_params (config, bin)) {
        NVGSTDS_ERR_MSG_V ("Could not set V4L2 camera properties");
        goto done;
      }
      break;
    default:
      NVGSTDS_ERR_MSG_V ("Unsupported source type");
      goto done;
  }

  ret = TRUE;

  // GST_CAT_DEBUG (NVDS_APP, "Created camera source bin successfully");

done:
  if (caps)
    gst_caps_unref (caps);

  if (convertCaps)
    gst_caps_unref (convertCaps);

  if (!ret) {
    NVGSTDS_ERR_MSG_V ("%s failed", __func__);
  }
  return ret;
}

gboolean
create_source_bin (NvDsSourceConfig * config, NvDsSrcBin * bin)
{
  static guint bin_cnt = 0;
  gchar bin_name[64];

  g_snprintf (bin_name, 64, "src_bin_%d", bin_cnt++);
  bin->bin = gst_bin_new (bin_name);
  if (!bin->bin) {
    NVGSTDS_ERR_MSG_V ("Failed to create 'src_bin'");
    return FALSE;
  }

  switch (config->type) {
    case NV_DS_SOURCE_CAMERA_V4L2:
      if (!create_camera_source_bin (config, bin)) {
        return FALSE;
      }
      break;
    case NV_DS_SOURCE_URI:
      // if (!create_uridecode_src_bin (config, bin)) {
      //   return FALSE;
      // }
      // bin->live_source = config->live_source;
      break;
    case NV_DS_SOURCE_RTSP:
      // if (!create_rtsp_src_bin (config, bin)) {
      //   return FALSE;
      // }
      break;
    default:
      NVGSTDS_ERR_MSG_V ("Source type not yet implemented!\n");
      return FALSE;
  }

  // GST_CAT_DEBUG (NVDS_APP, "Source bin created");

  return TRUE;
}


And then I add the following to main(). But I don't understand what to pass as the arguments of create_source_bin(): the first argument needs an NvDsSourceConfig and the second needs an NvDsSrcBin, so what exactly should I send?

GstElement *source_bin = create_source_bin (pgie, argv[1]);

if (!source_bin) {
  g_printerr ("Failed to create source bin. Exiting.\n");
  return -1;
}
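
From the code above, I understand the call would need to look roughly like the following minimal, untested sketch (the device node, 640x480, and 30 fps values are just example assumptions), but please correct me if this is wrong:

/* Sketch only: fill a config and an empty bin struct, then pass both to
 * create_source_bin(). Field names are taken from the code pasted above;
 * the concrete values are only example assumptions. */
NvDsSourceConfig config = { 0 };
NvDsSrcBin src_bin = { 0 };

config.type = NV_DS_SOURCE_CAMERA_V4L2;   /* USB camera via v4l2src */
config.camera_v4l2_dev_node = 0;          /* /dev/video0 */
config.source_width = 640;
config.source_height = 480;
config.source_fps_n = 30;
config.source_fps_d = 1;
config.gpu_id = 0;
config.nvbuf_memory_type = 0;             /* default memory type */

if (!create_source_bin (&config, &src_bin)) {
  g_printerr ("Failed to create source bin. Exiting.\n");
  return -1;
}

/* src_bin.bin is the GstElement (a GstBin) that would then be added to the
 * pipeline and linked to the nvstreammux sink pad. */
gst_bin_add (GST_BIN (pipeline), src_bin.bin);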

deepstream_pose_estimation_app.zip (6.8 KB)

Hi,
You can follow that idea. For simple usage, refer to the Python sample deepstream-test1-usbcam; follow deepstream_python_apps/HOWTO.md at master · NVIDIA-AI-IOT/deepstream_python_apps · GitHub to download the Python samples.
You do not need to use the whole create_source_bin function from deepstream-app, since it relies on some deepstream-app structures that are not necessary for your case.
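
For illustration, the source chain that sample builds looks roughly like this in C (a minimal, untested sketch; /dev/video0, 30 fps, and the element names are the usual defaults, and linking into nvstreammux plus the rest of the pose pipeline is omitted):

/* Sketch of the simplified USB-camera source chain used by the Python
 * deepstream-test1-usbcam sample, written in C. Only the source side is
 * shown; the downstream pose-estimation pipeline is unchanged. */
GstElement *source    = gst_element_factory_make ("v4l2src", "usb-cam-source");
GstElement *caps_raw  = gst_element_factory_make ("capsfilter", "caps_raw");
GstElement *vidconv   = gst_element_factory_make ("videoconvert", "convertor_src1");
GstElement *nvconv    = gst_element_factory_make ("nvvideoconvert", "convertor_src2");
GstElement *caps_nvmm = gst_element_factory_make ("capsfilter", "caps_nvmm");

if (!source || !caps_raw || !vidconv || !nvconv || !caps_nvmm) {
  g_printerr ("One element could not be created. Exiting.\n");
  return -1;
}

/* Pick the webcam device node (assumed /dev/video0 here). */
g_object_set (G_OBJECT (source), "device", "/dev/video0", NULL);

/* Constrain the raw camera output, then request NV12 in NVMM memory so the
 * buffers can feed nvstreammux. */
GstCaps *caps1 = gst_caps_from_string ("video/x-raw, framerate=30/1");
GstCaps *caps2 = gst_caps_from_string ("video/x-raw(memory:NVMM), format=NV12");
g_object_set (G_OBJECT (caps_raw), "caps", caps1, NULL);
g_object_set (G_OBJECT (caps_nvmm), "caps", caps2, NULL);
gst_caps_unref (caps1);
gst_caps_unref (caps2);

gst_bin_add_many (GST_BIN (pipeline), source, caps_raw, vidconv, nvconv,
    caps_nvmm, NULL);
if (!gst_element_link_many (source, caps_raw, vidconv, nvconv, caps_nvmm,
        NULL)) {
  g_printerr ("Elements could not be linked. Exiting.\n");
  return -1;
}

/* The src pad of caps_nvmm is then linked to the "sink_0" request pad of
 * nvstreammux, the same way the existing file-source code in
 * deepstream_pose_estimation_app.cpp does. */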

Thank you; with that I can use the webcam.

Hi MingGatsby,
I have the same problem; may I have a look at your code, please?
Or could you give me some suggestions?