[JETSON XAVIER] information deepstream-segmentation-test

I’m running deepstream-segmentation-test and I’m wondering if it is possible to get some of the information that is drawn on the screen.

For example:

I want to get the recognized classes from the frame.
I want to know the coordinates of the class’s pixels in the frame.

Is it possible to obtain this information by editing the deepstream-segmentation-test code?

My code is deepstream_segmentation_test.c. I use the config file dstest_segmentation_config_semantic.txt.

/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include "gstnvdsinfer.h"
#include "gstnvdsmeta.h"
#include "nvbufsurface.h"
#ifndef PLATFORM_TEGRA
#include "gst-nvmessage.h"

#endif
#define getName(var)  #var
#define MAX_DISPLAY_LEN 64
#define DUMP(varname) fprintf(stderr, "%s = %x", #varname, varname);
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

/* The muxer output resolution must be set if the input streams will be of
 * different resolution. The muxer will scale all the input frames to this
 * resolution. */
#define MUXER_OUTPUT_WIDTH 1280
#define MUXER_OUTPUT_HEIGHT 720

/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
 * based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 4000000

#define TILED_OUTPUT_WIDTH 512
#define TILED_OUTPUT_HEIGHT 512

/* tiler_src_pad_buffer_probe extracts the segmentation metadata that nvinfer
 * attaches to buffers on its src pad.
 *
 * Each frame's frame_user_meta_list carries NvDsUserMeta entries whose
 * user_meta_data points at an NvDsInferSegmentationMeta.  The class map is a
 * row-major width*height array of per-pixel class ids
 * (class_map[y * width + x]); the probability map is laid out as
 * [class][y][x], i.e. classes * width * height floats.
 *
 * Fixes over the original:
 *  - frame_meta was dereferenced before its NULL check; guard first instead.
 *  - the four probability printfs all used the same 'classe' index because
 *    the increments were commented out; index by class explicitly.
 *  - removed unused locals (obj_meta, bbox_file, user_meta, ...) and the
 *    experimental write of 25 into every class_map entry, which clobbered
 *    the visualization downstream.
 */
static GstPadProbeReturn
tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsMetaList *l_frame = NULL;
  NvDsMetaList *l_user = NULL;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

  /* Without batch meta there is nothing to inspect. */
  if (batch_meta == NULL) {
    g_print ("NvDS Meta contained NULL meta \n");
    return GST_PAD_PROBE_OK;
  }

  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);

    if (frame_meta == NULL)
      continue;

    for (l_user = frame_meta->frame_user_meta_list; l_user != NULL;
        l_user = l_user->next) {
      NvDsUserMeta *user_meta = (NvDsUserMeta *) l_user->data;
      NvDsInferSegmentationMeta *segmeta =
          (NvDsInferSegmentationMeta *) user_meta->user_meta_data;

      if (segmeta == NULL || segmeta->class_map == NULL)
        continue;

      /* Per-pixel class ids: this is the "recognized class + pixel
       * coordinates" information the application wants. */
      for (guint x = 0; x < segmeta->width; x++) {
        for (guint y = 0; y < segmeta->height; y++) {
          printf ("class at (%u, %u): %i \n", x, y,
              segmeta->class_map[y * segmeta->width + x]);
        }
      }

      /* Probability map indexed [class][y][x]; print the probability of
       * every class at pixel (0, 0) as a sample. */
      if (segmeta->class_probabilities_map != NULL) {
        for (guint c = 0; c < segmeta->classes; c++) {
          printf ("probability of class %u at (0, 0): %f \n", c,
              segmeta->class_probabilities_map[c * segmeta->width *
                  segmeta->height + 0 * segmeta->width + 0]);
        }
      }

      printf (" classes : %i \n", segmeta->classes);
    }
  }

  return GST_PAD_PROBE_OK;
}

 
/* Bus watch callback: reacts to messages posted on the pipeline bus.
 * Quits the main loop on EOS (after a short pause so the last rendered
 * frame stays visible) and on errors; warnings are only printed.
 * Returning TRUE keeps the watch installed. */
static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *main_loop = (GMainLoop *) data;
  GstMessageType msg_type = GST_MESSAGE_TYPE (msg);

  if (msg_type == GST_MESSAGE_EOS) {
    g_print ("End of stream\n");
    /* Keep the rendered result on screen for a moment before quitting. */
    usleep (2000000);
    g_main_loop_quit (main_loop);
  } else if (msg_type == GST_MESSAGE_WARNING) {
    GError *err = NULL;
    gchar *dbg_info = NULL;

    gst_message_parse_warning (msg, &err, &dbg_info);
    g_printerr ("WARNING from element %s: %s\n",
        GST_OBJECT_NAME (msg->src), err->message);
    g_free (dbg_info);
    g_printerr ("Warning: %s\n", err->message);
    g_error_free (err);
  } else if (msg_type == GST_MESSAGE_ERROR) {
    GError *err = NULL;
    gchar *dbg_info = NULL;

    gst_message_parse_error (msg, &err, &dbg_info);
    g_printerr ("ERROR from element %s: %s\n",
        GST_OBJECT_NAME (msg->src), err->message);
    if (dbg_info)
      g_printerr ("Error details: %s\n", dbg_info);
    g_free (dbg_info);
    g_error_free (err);
    g_main_loop_quit (main_loop);
  }
#ifndef PLATFORM_TEGRA
  else if (msg_type == GST_MESSAGE_ELEMENT) {
    /* DeepStream elements post a custom element message per-stream EOS. */
    if (gst_nvmessage_is_stream_eos (msg)) {
      guint stream_id;
      if (gst_nvmessage_parse_stream_eos (msg, &stream_id)) {
        g_print ("Got EOS from stream %d\n", stream_id);
      }
    }
  }
#endif

  return TRUE;
}

/* Creates a source bin (filesrc -> jpegparse -> nvv4l2decoder) whose decoder
 * src pad is exposed through a "src" ghost pad, ready to be linked to the
 * stream muxer.
 *
 * @param index  zero-based source index, only used to build a unique bin name.
 * @param uri    path to the JPEG/MJPEG input file.
 * @return the new source bin, or NULL on failure.
 */
static GstElement *
create_source_bin (guint index, gchar * uri)
{
  GstElement *bin = NULL;
  gchar bin_name[16] = { };

  g_snprintf (bin_name, 15, "source-bin-%02d", index);
  /* Create a source GstBin to abstract this bin's content from the rest of
   * the pipeline */
  bin = gst_bin_new (bin_name);

  GstElement *source, *jpegparser, *decoder;

  source = gst_element_factory_make ("filesrc", "source");

  jpegparser = gst_element_factory_make ("jpegparse", "jpeg-parser");

  decoder = gst_element_factory_make ("nvv4l2decoder", "nvv4l2-decoder");

  if (!source || !jpegparser || !decoder)
  {
    g_printerr ("One element could not be created. Exiting.\n");
    return NULL;
  }
  g_object_set (G_OBJECT (source), "location", uri, NULL);

  /* Enable MJPEG mode on the Tegra decoder when the file extension says so.
   * Guard against file names without any '.' — the original dereferenced
   * dot + 1 even when strrchr returned NULL (undefined behavior). */
  const char *dot = strrchr (uri, '.');
  if (dot && ((!strcmp (dot + 1, "mjpeg")) || (!strcmp (dot + 1, "mjpg"))))
  {
#ifdef PLATFORM_TEGRA
    g_object_set (G_OBJECT (decoder), "mjpeg", 1, NULL);
#endif
  }

  gst_bin_add_many (GST_BIN (bin), source, jpegparser, decoder, NULL);

  /* The original ignored the link result; a failed link would only surface
   * later as a confusing runtime error. */
  if (!gst_element_link_many (source, jpegparser, decoder, NULL)) {
    g_printerr ("Elements in source bin could not be linked. Exiting.\n");
    return NULL;
  }

  /* Ghost pad that proxies the decoder's src pad out of the bin. */
  if (!gst_element_add_pad (bin, gst_ghost_pad_new_no_target ("src",
              GST_PAD_SRC))) {
    g_printerr ("Failed to add ghost pad in source bin\n");
    return NULL;
  }

  GstPad *srcpad = gst_element_get_static_pad (decoder, "src");
  if (!srcpad) {
    g_printerr ("Failed to get src pad of source bin. Exiting.\n");
    return NULL;
  }
  GstPad *bin_ghost_pad = gst_element_get_static_pad (bin, "src");
  if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
        srcpad)) {
    g_printerr ("Failed to link decoder src pad to source bin ghost pad\n");
  }
  /* gst_element_get_static_pad returns new references; the original leaked
   * both of them. */
  gst_object_unref (bin_ghost_pad);
  gst_object_unref (srcpad);

  return bin;
}

int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *seg = NULL,
             *nvsegvisual = NULL, *tiler = NULL;
#ifdef PLATFORM_TEGRA
  GstElement *transform = NULL;
#endif
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *seg_src_pad = NULL;
  GstPad *nvsegvisual_sink_pad = NULL;
  guint i, num_sources;
  guint tiler_rows, tiler_columns;
  guint pgie_batch_size;

  /* Check input arguments */
  if (argc < 3) {
    g_printerr ("Usage: %s config_file <file1> [file2] ... [fileN] \n", argv[0]);
    return -1;
  }
  num_sources = argc - 2;

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest-image-decode-pipeline");

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

  if (!pipeline || !streammux) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }
  gst_bin_add (GST_BIN (pipeline), streammux);

  for (i = 0; i < num_sources; i++) {
    GstPad *sinkpad, *srcpad;
    gchar pad_name[16] = { };
    GstElement *source_bin = create_source_bin (i, argv[i + 2]);

    if (!source_bin) {
      g_printerr ("Failed to create source bin. Exiting.\n");
      return -1;
    }

    gst_bin_add (GST_BIN (pipeline), source_bin);

    g_snprintf (pad_name, 15, "sink_%u", i);
    sinkpad = gst_element_get_request_pad (streammux, pad_name);
    if (!sinkpad) {
      g_printerr ("Streammux request sink pad failed. Exiting.\n");
      return -1;
    }

    srcpad = gst_element_get_static_pad (source_bin, "src");
    if (!srcpad) {
      g_printerr ("Failed to get src pad of source bin. Exiting.\n");
      return -1;
    }

    if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
      return -1;
    }

    gst_object_unref (srcpad);
    gst_object_unref (sinkpad);
  }

  /* Use nvinfer to infer on batched frame. */
  seg = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

  nvsegvisual = gst_element_factory_make ("nvsegvisual", "nvsegvisual");

  /* Use nvtiler to composite the batched frames into a 2D tiled array based
   * on the source of the frames. */
  tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");

#ifdef PLATFORM_TEGRA
  transform = gst_element_factory_make ("nvegltransform", "transform");
#endif

  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");

  if (!seg || !nvsegvisual || !tiler || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

#ifdef PLATFORM_TEGRA
  if(!transform) {
    g_printerr ("One tegra element could not be created. Exiting.\n");
    return -1;
  }
#endif

  g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
      MUXER_OUTPUT_HEIGHT, "batch-size", num_sources,
      "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

  /* Configure the nvinfer element using the nvinfer config file. */
  g_object_set (G_OBJECT (seg), "config-file-path", argv[1], NULL);

  /* Override the batch-size set in the config file with the number of sources. */
  g_object_get (G_OBJECT (seg), "batch-size", &pgie_batch_size, NULL);
  if (pgie_batch_size != num_sources) {
    g_printerr
        ("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
        pgie_batch_size, num_sources);
    g_object_set (G_OBJECT (seg), "batch-size", num_sources, NULL);
  }

  g_object_set (G_OBJECT (nvsegvisual), "batch-size", num_sources, NULL);
  g_object_set (G_OBJECT (nvsegvisual), "width", 512, NULL);
  g_object_set (G_OBJECT (nvsegvisual), "height", 512, NULL);

  tiler_rows = (guint) sqrt (num_sources);
  tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
  /* we set the tiler properties here */
  g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
      "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);

  g_object_set(G_OBJECT(sink), "async", FALSE, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* Add all elements into the pipeline */
#ifdef PLATFORM_TEGRA
  gst_bin_add_many (GST_BIN (pipeline), seg, nvsegvisual, tiler, transform, sink, NULL);
  /* we link the elements together
   * nvstreammux -> nvinfer -> nvsegvidsual -> nvtiler -> transform -> video-renderer */
  if (!gst_element_link_many (streammux, seg, nvsegvisual, tiler, transform, sink, NULL))
  {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
#else
  gst_bin_add_many (GST_BIN (pipeline), seg, nvsegvisual, tiler, sink, NULL);
  /* Link the elements together
   * nvstreammux -> nvinfer -> nvsegvisual -> nvtiler -> video-renderer */
  if (!gst_element_link_many (streammux, seg, nvsegvisual, tiler, sink, NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
#endif

  /* Lets add probe to get informed of the meta data generated, we add probe to
   * the src pad of the nvseg element, since by that time, the buffer would have
   * had got all the segmentation metadata. */
  seg_src_pad = gst_element_get_static_pad (seg, "src");
  
  if (!seg_src_pad)
    g_print ("Unable to get src pad\n");
  else
    gst_pad_add_probe (seg_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
        tiler_src_pad_buffer_probe, NULL, NULL);
	
/*  
nvsegvisual_sink_pad = gst_element_get_static_pad (nvsegvisual, "sink");
  if (!nvsegvisual_sink_pad)
    g_print ("Unable to get src pad\n");
  else
    gst_pad_add_probe (nvsegvisual_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        nvsegvisual_sink_pad_buffer_probe, NULL, NULL);
*/
  /* Set the pipeline to "playing" state */
  g_print ("Now playing:");
  for (i = 0; i < num_sources; i++) {
    g_print (" %s,", argv[i + 2]);
  }
  g_print ("\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}

deepstream-app --version-all

deepstream-app version 4.0.2
DeepStreamSDK 4.0.2
CUDA Driver Version: 10.0
CUDA Runtime Version: 10.0
TensorRT Version: 6.0
cuDNN Version: 7.6
libNVWarp360 Version: 2.0.0d5

The recognized classes are stored in the struct field NvDsObjectMeta::class_id
https://docs.nvidia.com/metropolis/deepstream/dev-guide/DeepStream%20Development%20Guide/baggage/struct__NvDsObjectMeta.html
and you can refer to the sample code sources/apps/sample_apps/deepstream-infer-tensor-meta-test/deepstream_infer_tensor_meta_test.cpp::osd_sink_pad_buffer_probe for how to access it.
The coordinates of the object are stored in NvDsObjectMeta::rect_params; access them in a similar way.

→ please ignore this post, it’s for bbox coordinates

Hi, amycao

I copied the function osd_sink_pad_buffer_probe from the deepstream_infer_tensor_meta_test.cpp file and added it to my deepstream_segmentation_test.c file. My deepstream_segmentation_test.c:

/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include "gstnvdsinfer.h"
#include "gstnvdsmeta.h"
#include "nvbufsurface.h"
#ifndef PLATFORM_TEGRA
#include "gst-nvmessage.h"

#endif
#define getName(var)  #var
#define MAX_DISPLAY_LEN 64
#define DUMP(varname) fprintf(stderr, "%s = %x", #varname, varname);
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

/* The muxer output resolution must be set if the input streams will be of
 * different resolution. The muxer will scale all the input frames to this
 * resolution. */
#define MUXER_OUTPUT_WIDTH 1280
#define MUXER_OUTPUT_HEIGHT 720

/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
 * based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 4000000

#define TILED_OUTPUT_WIDTH 512
#define TILED_OUTPUT_HEIGHT 512

gint frame_number = 0;

/* osd_sink_pad_buffer_probe counts vehicle/person objects in the batch's
 * obj_meta_list and overlays a "Person = N Vehicle = M" label on each frame.
 *
 * NOTE(review): this probe was copied from a detector sample; a pure
 * segmentation model does not populate obj_meta_list, so the counts will
 * stay 0 unless a detector is also in the pipeline.
 *
 * Fixes over the original:
 *  - the second snprintf wrote at display_text + offset but still passed
 *    MAX_DISPLAY_LEN as the size, which can write past the end of the
 *    MAX_DISPLAY_LEN-byte buffer; pass the remaining space instead.
 *  - guard against a missing batch meta before dereferencing it.
 */
static GstPadProbeReturn
osd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  guint num_rects = 0;
  NvDsObjectMeta *obj_meta = NULL;
  guint vehicle_count = 0;
  guint person_count = 0;
  NvDsMetaList *l_frame = NULL;
  NvDsMetaList *l_obj = NULL;
  NvDsDisplayMeta *display_meta = NULL;

  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
  printf ("osd_sink_pad_buffer_probe \n");
  if (batch_meta == NULL)
    return GST_PAD_PROBE_OK;

  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
    int offset = 0;

    /* Count detected objects by class. */
    for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
      obj_meta = (NvDsObjectMeta *) (l_obj->data);
      printf ("class_id: %i\n", obj_meta->class_id);
      if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) {
        vehicle_count++;
        num_rects++;
      }
      if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
        person_count++;
        num_rects++;
      }
    }

    /* Build the on-screen text. display_text is freed by the OSD pipeline
     * when the display meta is consumed. */
    display_meta = nvds_acquire_display_meta_from_pool (batch_meta);
    NvOSD_TextParams *txt_params = &display_meta->text_params[0];
    display_meta->num_labels = 1;
    txt_params->display_text = (gchar *) g_malloc0 (MAX_DISPLAY_LEN);
    offset =
        snprintf (txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ",
        person_count);
    if (offset < 0 || offset >= MAX_DISPLAY_LEN)
      offset = MAX_DISPLAY_LEN - 1;
    /* Append into the REMAINING space only (original passed the full
     * buffer size here, risking an out-of-bounds write). */
    snprintf (txt_params->display_text + offset, MAX_DISPLAY_LEN - offset,
        "Vehicle = %d ", vehicle_count);

    /* Now set the offsets where the string should appear */
    txt_params->x_offset = 10;
    txt_params->y_offset = 12;

    /* Font , font-color and font-size */
    txt_params->font_params.font_name = (gchar *) "Serif";
    txt_params->font_params.font_size = 10;
    txt_params->font_params.font_color.red = 1.0;
    txt_params->font_params.font_color.green = 1.0;
    txt_params->font_params.font_color.blue = 1.0;
    txt_params->font_params.font_color.alpha = 1.0;

    /* Text background color */
    txt_params->set_bg_clr = 1;
    txt_params->text_bg_clr.red = 0.0;
    txt_params->text_bg_clr.green = 0.0;
    txt_params->text_bg_clr.blue = 0.0;
    txt_params->text_bg_clr.alpha = 1.0;

    nvds_add_display_meta_to_frame (frame_meta, display_meta);
  }

  g_print ("Frame Number = %d Number of objects = %d "
      "Vehicle Count = %d Person Count = %d\n",
      frame_number, num_rects, vehicle_count, person_count);
  frame_number++;
  return GST_PAD_PROBE_OK;
}


/* tiler_src_pad_buffer_probe extracts the segmentation metadata received on
 * the nvinfer src pad.
 *
 * Fixes over the original:
 *  - frame_user_meta_list entries are NvDsUserMeta, NOT NvDsObjectMeta; the
 *    original additionally cast l_obj->data to NvDsObjectMeta and printed
 *    obj_meta->class_id, which read garbage memory.  For a segmentation
 *    model the per-pixel class ids live in segmeta->class_map instead.
 *  - frame_meta was dereferenced before its NULL check; guard first.
 *  - removed the fully commented-out loops that did nothing per iteration.
 */
static GstPadProbeReturn
tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsMetaList *l_frame = NULL;
  NvDsMetaList *l_user = NULL;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

  if (batch_meta == NULL) {
    g_print ("NvDS Meta contained NULL meta \n");
    return GST_PAD_PROBE_OK;
  }

  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);

    if (frame_meta == NULL)
      continue;

    for (l_user = frame_meta->frame_user_meta_list; l_user != NULL;
        l_user = l_user->next) {
      NvDsUserMeta *user_meta = (NvDsUserMeta *) l_user->data;
      NvDsInferSegmentationMeta *segmeta =
          (NvDsInferSegmentationMeta *) user_meta->user_meta_data;

      if (segmeta == NULL || segmeta->class_map == NULL)
        continue;

      /* class_map[y * width + x] holds the class id recognized at pixel
       * (x, y) — exactly the class + coordinate information wanted. */
      for (guint y = 0; y < segmeta->height; y++) {
        for (guint x = 0; x < segmeta->width; x++) {
          gint class_id = segmeta->class_map[y * segmeta->width + x];
          printf ("pixel (%u, %u) -> class %d\n", x, y, class_id);
        }
      }
    }
  }

  return GST_PAD_PROBE_OK;
}

 
/* GStreamer bus watch callback.
 *
 * Handles messages posted on the pipeline bus: quits the main loop on EOS
 * (after a short delay so the last rendered frame stays visible) and on
 * errors; warnings are only printed.  On non-Tegra builds it also reports
 * the per-stream EOS element messages posted by DeepStream elements.
 *
 * @param bus   the pipeline bus (unused directly).
 * @param msg   the message to handle.
 * @param data  the GMainLoop to quit on EOS/error.
 * @return TRUE so the watch stays installed.
 */
static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      /* Keep the rendered result on screen briefly before quitting. */
      usleep(2000000);
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_WARNING:
    {
      gchar *debug;
      GError *error;
      gst_message_parse_warning (msg, &error, &debug);
      g_printerr ("WARNING from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      g_free (debug);
      g_printerr ("Warning: %s\n", error->message);
      g_error_free (error);
      break;
    }
    case GST_MESSAGE_ERROR:
    {
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      if (debug)
        g_printerr ("Error details: %s\n", debug);
      g_free (debug);
      g_error_free (error);
      /* Errors are fatal for this sample: stop the main loop. */
      g_main_loop_quit (loop);
      break;
    }
#ifndef PLATFORM_TEGRA
    case GST_MESSAGE_ELEMENT:
    {
      /* DeepStream posts a custom element message when one stream of the
       * batch reaches EOS before the others. */
      if (gst_nvmessage_is_stream_eos (msg)) {
        guint stream_id;
        if (gst_nvmessage_parse_stream_eos (msg, &stream_id)) {
          g_print ("Got EOS from stream %d\n", stream_id);
        }
      }
      break;
    }
#endif
    default:
      break;
  }
  return TRUE;
}

/* Creates a source bin (filesrc -> jpegparse -> nvv4l2decoder) whose decoder
 * src pad is exposed through a "src" ghost pad, ready to be linked to the
 * stream muxer.
 *
 * @param index  zero-based source index, only used to build a unique bin name.
 * @param uri    path to the JPEG/MJPEG input file.
 * @return the new source bin, or NULL on failure.
 */
static GstElement *
create_source_bin (guint index, gchar * uri)
{
  GstElement *bin = NULL;
  gchar bin_name[16] = { };

  g_snprintf (bin_name, 15, "source-bin-%02d", index);
  /* Create a source GstBin to abstract this bin's content from the rest of
   * the pipeline */
  bin = gst_bin_new (bin_name);

  GstElement *source, *jpegparser, *decoder;

  source = gst_element_factory_make ("filesrc", "source");

  jpegparser = gst_element_factory_make ("jpegparse", "jpeg-parser");

  decoder = gst_element_factory_make ("nvv4l2decoder", "nvv4l2-decoder");

  if (!source || !jpegparser || !decoder)
  {
    g_printerr ("One element could not be created. Exiting.\n");
    return NULL;
  }
  g_object_set (G_OBJECT (source), "location", uri, NULL);

  /* Enable MJPEG mode on the Tegra decoder when the file extension says so.
   * Guard against file names without any '.' — the original dereferenced
   * dot + 1 even when strrchr returned NULL (undefined behavior). */
  const char *dot = strrchr (uri, '.');
  if (dot && ((!strcmp (dot + 1, "mjpeg")) || (!strcmp (dot + 1, "mjpg"))))
  {
#ifdef PLATFORM_TEGRA
    g_object_set (G_OBJECT (decoder), "mjpeg", 1, NULL);
#endif
  }

  gst_bin_add_many (GST_BIN (bin), source, jpegparser, decoder, NULL);

  /* The original ignored the link result; a failed link would only surface
   * later as a confusing runtime error. */
  if (!gst_element_link_many (source, jpegparser, decoder, NULL)) {
    g_printerr ("Elements in source bin could not be linked. Exiting.\n");
    return NULL;
  }

  /* Ghost pad that proxies the decoder's src pad out of the bin. */
  if (!gst_element_add_pad (bin, gst_ghost_pad_new_no_target ("src",
              GST_PAD_SRC))) {
    g_printerr ("Failed to add ghost pad in source bin\n");
    return NULL;
  }

  GstPad *srcpad = gst_element_get_static_pad (decoder, "src");
  if (!srcpad) {
    g_printerr ("Failed to get src pad of source bin. Exiting.\n");
    return NULL;
  }
  GstPad *bin_ghost_pad = gst_element_get_static_pad (bin, "src");
  if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
        srcpad)) {
    g_printerr ("Failed to link decoder src pad to source bin ghost pad\n");
  }
  /* gst_element_get_static_pad returns new references; the original leaked
   * both of them. */
  gst_object_unref (bin_ghost_pad);
  gst_object_unref (srcpad);

  return bin;
}

int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *seg = NULL,
             *nvsegvisual = NULL, *tiler = NULL;
#ifdef PLATFORM_TEGRA
  GstElement *transform = NULL;
#endif
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *seg_src_pad = NULL;
  GstPad *seg_src_pad2 = NULL;
  GstPad *nvsegvisual_sink_pad = NULL;
  guint i, num_sources;
  guint tiler_rows, tiler_columns;
  guint pgie_batch_size;

  /* Check input arguments */
  if (argc < 3) {
    g_printerr ("Usage: %s config_file <file1> [file2] ... [fileN] \n", argv[0]);
    return -1;
  }
  num_sources = argc - 2;

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest-image-decode-pipeline");

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

  if (!pipeline || !streammux) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }
  gst_bin_add (GST_BIN (pipeline), streammux);

  for (i = 0; i < num_sources; i++) {
    GstPad *sinkpad, *srcpad;
    gchar pad_name[16] = { };
    GstElement *source_bin = create_source_bin (i, argv[i + 2]);

    if (!source_bin) {
      g_printerr ("Failed to create source bin. Exiting.\n");
      return -1;
    }

    gst_bin_add (GST_BIN (pipeline), source_bin);

    g_snprintf (pad_name, 15, "sink_%u", i);
    sinkpad = gst_element_get_request_pad (streammux, pad_name);
    if (!sinkpad) {
      g_printerr ("Streammux request sink pad failed. Exiting.\n");
      return -1;
    }

    srcpad = gst_element_get_static_pad (source_bin, "src");
    if (!srcpad) {
      g_printerr ("Failed to get src pad of source bin. Exiting.\n");
      return -1;
    }

    if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
      return -1;
    }

    gst_object_unref (srcpad);
    gst_object_unref (sinkpad);
  }

  /* Use nvinfer to infer on batched frame. */
  seg = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

  nvsegvisual = gst_element_factory_make ("nvsegvisual", "nvsegvisual");

  /* Use nvtiler to composite the batched frames into a 2D tiled array based
   * on the source of the frames. */
  tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");

#ifdef PLATFORM_TEGRA
  transform = gst_element_factory_make ("nvegltransform", "transform");
#endif

  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");

  if (!seg || !nvsegvisual || !tiler || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

#ifdef PLATFORM_TEGRA
  if(!transform) {
    g_printerr ("One tegra element could not be created. Exiting.\n");
    return -1;
  }
#endif

  g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
      MUXER_OUTPUT_HEIGHT, "batch-size", num_sources,
      "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

  /* Configure the nvinfer element using the nvinfer config file. */
  g_object_set (G_OBJECT (seg), "config-file-path", argv[1], NULL);

  /* Override the batch-size set in the config file with the number of sources. */
  g_object_get (G_OBJECT (seg), "batch-size", &pgie_batch_size, NULL);
  if (pgie_batch_size != num_sources) {
    g_printerr
        ("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
        pgie_batch_size, num_sources);
    g_object_set (G_OBJECT (seg), "batch-size", num_sources, NULL);
  }

  g_object_set (G_OBJECT (nvsegvisual), "batch-size", num_sources, NULL);
  g_object_set (G_OBJECT (nvsegvisual), "width", 512, NULL);
  g_object_set (G_OBJECT (nvsegvisual), "height", 512, NULL);

  tiler_rows = (guint) sqrt (num_sources);
  tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
  /* we set the tiler properties here */
  g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
      "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);

  g_object_set(G_OBJECT(sink), "async", FALSE, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* Add all elements into the pipeline */
#ifdef PLATFORM_TEGRA
  gst_bin_add_many (GST_BIN (pipeline), seg, nvsegvisual, tiler, transform, sink, NULL);
  /* we link the elements together
   * nvstreammux -> nvinfer -> nvsegvidsual -> nvtiler -> transform -> video-renderer */
  if (!gst_element_link_many (streammux, seg, nvsegvisual, tiler, transform, sink, NULL))
  {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
#else
  gst_bin_add_many (GST_BIN (pipeline), seg, nvsegvisual, tiler, sink, NULL);
  /* Link the elements together
   * nvstreammux -> nvinfer -> nvsegvisual -> nvtiler -> video-renderer */
  if (!gst_element_link_many (streammux, seg, nvsegvisual, tiler, sink, NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
#endif

  /* Lets add probe to get informed of the meta data generated, we add probe to
   * the src pad of the nvseg element, since by that time, the buffer would have
   * had got all the segmentation metadata. */
  /*
  seg_src_pad = gst_element_get_static_pad (seg, "src");
  
  if (!seg_src_pad){
    g_print ("Unable to get src pad\n");
  }else{
    gst_pad_add_probe (seg_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
        tiler_src_pad_buffer_probe, NULL, NULL);
    gst_pad_add_probe (seg_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
        osd_sink_pad_buffer_probe, NULL, NULL);	
  }*/

  seg_src_pad2 = gst_element_get_static_pad (seg, "src");
  
  if (!seg_src_pad2){
    g_print ("Unable to get src pad\n");
  }else{
    gst_pad_add_probe (seg_src_pad2, GST_PAD_PROBE_TYPE_BUFFER,
        osd_sink_pad_buffer_probe, NULL, NULL);
    /*gst_pad_add_probe (seg_src_pad2, GST_PAD_PROBE_TYPE_BUFFER,
        tiler_src_pad_buffer_probe, NULL, NULL);*/	
  }	
	
/*  
nvsegvisual_sink_pad = gst_element_get_static_pad (nvsegvisual, "sink");
  if (!nvsegvisual_sink_pad)
    g_print ("Unable to get src pad\n");
  else
    gst_pad_add_probe (nvsegvisual_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        nvsegvisual_sink_pad_buffer_probe, NULL, NULL);
*/
  /* Set the pipeline to "playing" state */
  g_print ("Now playing:");
  for (i = 0; i < num_sources; i++) {
    g_print (" %s,", argv[i + 2]);
  }
  g_print ("\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}

The output of the command `./deepstream-segmentation-app dstest_segmentation_config_semantic.txt /opt/nvidia/deepstream/deepstream-4.0/samples/streams/sample_720p.mjpeg` is:

Now playing: /opt/nvidia/deepstream/deepstream-4.0/samples/streams/sample_720p.mjpeg,

Using winsys: x11 
Opening in BLOCKING MODE 
0:00:01.599288356 18529   0x559ba502a0 INFO                 nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<primary-nvinference-engine> NvDsInferContext[UID 1]:initialize(): Trying to create engine from model files
0:00:42.180645329 18529   0x559ba502a0 INFO                 nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<primary-nvinference-engine> NvDsInferContext[UID 1]:generateTRTModel(): Storing the serialized cuda engine to file at /opt/nvidia/deepstream/deepstream-4.0/samples/models/Segmentation/semantic/unetres18_v4_pruned0.65_800_data.uff_b1_fp32.engine
Running...
NvMMLiteOpen : Block : BlockType = 277 
NVMEDIA: Reading vendor.tegra.display-size : status: 6 
NvMMLiteBlockCreate : Block : BlockType = 277 
in videoconvert caps = video/x-raw(memory:NVMM), format=(string)RGBA, framerate=(fraction)1/1, width=(int)512, height=(int)512
osd_sink_pad_buffer_probe 
Frame Number = 0 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 1 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 2 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 3 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 4 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 5 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 6 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 7 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 8 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 9 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 10 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 11 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 12 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 13 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 14 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 15 Number of objects = 0 Vehicle Count = 0 Person Count = 0
osd_sink_pad_buffer_probe 
Frame Number = 16 Number of objects = 0 Vehicle Count = 0 Person Count = 0

My problem is that the class_id is not shown. Note that network-type=2 is set in the file dstest_segmentation_config_semantic.txt.

the complete file dstest_segmentation_config_semantic.txt:

################################################################################
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################

# Following properties are mandatory when engine files are not specified:
#   int8-calib-file(Only in INT8), model-file-format
#   Caffemodel mandatory properties: model-file, proto-file, output-blob-names
#   UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
#   ONNX: onnx-file
#
# Mandatory properties for detectors:
#   num-detected-classes
#
# Optional properties for detectors:
#   enable-dbscan(Default=false), interval(Primary mode only, Default=0)
#   custom-lib-path,
#   parse-bbox-func-name
#
# Mandatory properties for classifiers:
#   classifier-threshold, is-classifier
#
# Optional properties for classifiers:
#   classifier-async-mode(Secondary mode only, Default=false)
#
# Optional properties in secondary mode:
#   operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
#   input-object-min-width, input-object-min-height, input-object-max-width,
#   input-object-max-height
#
# Following properties are always recommended:
#   batch-size(Default=1)
#
# Other optional properties:
#   net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
#   model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
#   mean-file, gie-unique-id(Default=0), offsets, gie-mode (Default=1 i.e. primary),
#   custom-lib-path, network-mode(Default=0 i.e FP32)
#
# The values in the config file are overridden by values set through GObject
# properties.

[property]
gpu-id=0
net-scale-factor=1.0
model-color-format=0
uff-file=../../../../samples/models/Segmentation/semantic/unetres18_v4_pruned0.65_800_data.uff
uff-input-dims=3;512;512;0
uff-input-blob-name=data
batch-size=1
## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=0
num-detected-classes=6
interval=0
gie-unique-id=1
network-type=2
output-blob-names=final_conv/BiasAdd
#model-engine-file=/opt/nvidia/deepstream/deepstream-4.0/samples/models/Segmentation/semantic/unetres18_v4_pruned0.65_800_data.uff_b1_fp32.engine
segmentation-threshold=0.0
#parse-bbox-func-name=NvDsInferParseCustomSSD
#custom-lib-path=nvdsinfer_custom_impl_ssd/libnvdsinfer_custom_impl_ssd.so

[class-attrs-all]
roi-top-offset=0
roi-bottom-offset=0
detected-min-w=0
detected-min-h=0
detected-max-w=0
detected-max-h=0

## Per class configuration
[class-attrs-2]
threshold=0.6
roi-top-offset=20
roi-bottom-offset=10
detected-min-w=40
detected-min-h=40
detected-max-w=400
detected-max-h=800

Hi
Please ignore comment 2,
please refer to attach_metadata_segmentation() :: class_map

I found the function attach_metadata_segmentation in the file /opt/nvidia/deepstream/deepstream-4.0/sources/gst-plugins/gst-nvinfer/gstnvinfer_meta_utils.cpp and now understand a little bit about the process of getting NvDsInferSegmentationMeta in deepstream_segmentation_test.cpp (I converted it to C++ because I want to call OpenCV functions).

So I save the contents of class_map as an image inside a folder to see the segmentation result, but in the images I am getting the segmentation is barely visible. Can you tell me if I am saving the image correctly? I save the image in the function tiler_src_pad_buffer_probe.

The command used:
./deepstream-segmentation-app dstest_segmentation_config_semantic.txt /opt/nvidia/deepstream/deepstream-4.0/samples/streams/sample_720p.mjpeg
my images imagens-result-segmentation-class_map

my code:

< Blockquote
/*

  • Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
  • Permission is hereby granted, free of charge, to any person obtaining a
  • copy of this software and associated documentation files (the “Software”),
  • to deal in the Software without restriction, including without limitation
  • the rights to use, copy, modify, merge, publish, distribute, sublicense,
  • and/or sell copies of the Software, and to permit persons to whom the
  • Software is furnished to do so, subject to the following conditions:
  • The above copyright notice and this permission notice shall be included in
  • all copies or substantial portions of the Software.
  • THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  • IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  • FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  • THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  • LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  • FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  • DEALINGS IN THE SOFTWARE.
    */

#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

#include “gstnvdsinfer.h”
#include “gstnvdsmeta.h”
#include “nvbufsurface.h”
//#ifndef PLATFORM_TEGRA
#include “gst-nvmessage.h”

#include <gst/gst.h>
#include <glib.h>

#include <math.h>

#include <stdio.h>
#include <string.h>
//#include “cuda_runtime_api.h”

//#include <opencv2/objdetect/objdetect.hpp>

/*
#include “gstnvdsmeta.h”
#include “gstnvdsinfer.h”
#include “nvdsinfer_custom_impl.h”
*/

#include “opencv2/core/core.hpp”
#include <opencv2/imgcodecs.hpp>
#include “opencv2/highgui/highgui.hpp”
#include <opencv2/imgproc/imgproc.hpp>
#include
#include <math.h>

#include <opencv2/core/types.hpp>

#include <opencv2/objdetect/objdetect.hpp>

//#endif

#include
#define getName(var) #var
#define MAX_DISPLAY_LEN 64
#define DUMP(varname) fprintf(stderr, “%s = %x”, #varname, varname);
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

/* The muxer output resolution must be set if the input streams will be of

  • different resolution. The muxer will scale all the input frames to this
  • resolution. */
    #define MUXER_OUTPUT_WIDTH 1280
    #define MUXER_OUTPUT_HEIGHT 720

/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set

  • based on the fastest source’s framerate. */
    #define MUXER_BATCH_TIMEOUT_USEC 4000000

#define TILED_OUTPUT_WIDTH 512
#define TILED_OUTPUT_HEIGHT 512

gint frame_number = 0;

static GstPadProbeReturn
tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
gpointer u_data)
{
GstBuffer *buf = (GstBuffer *) info->data;

NvDsMetaList * l_frame = NULL;
NvDsMetaList * l_obj = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
NvDsUserMeta *of_user_meta = NULL;

for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);

 for (l_obj = frame_meta->frame_user_meta_list; l_obj != NULL;
    l_obj = l_obj->next) {

	of_user_meta = (NvDsUserMeta *)l_obj->data;		

        NvDsInferSegmentationMeta *segmeta = (NvDsInferSegmentationMeta *) (of_user_meta->user_meta_data);		
	std::time_t t = std::time(0);
	std::string nametext;
	std::string tempo;
        tempo = std::to_string(t);
	
	nametext = "/home/teste/images/"+tempo;		
	nametext = nametext + ".jpg";
					
	cv::Mat img(segmeta->height, segmeta->width, CV_8UC4, segmeta->class_map);
	cv::imwrite(nametext, img);				
					
}

if (frame_meta == NULL) {
  g_print ("NvDS Meta contained NULL meta \n");

  return GST_PAD_PROBE_OK;
}

}
return GST_PAD_PROBE_OK;
}

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print (“End of stream\n”);
// Add the delay to show the result
usleep(2000000);
g_main_loop_quit (loop);
break;
case GST_MESSAGE_WARNING:
{
gchar *debug;
GError *error;
gst_message_parse_warning (msg, &error, &debug);
g_printerr (“WARNING from element %s: %s\n”,
GST_OBJECT_NAME (msg->src), error->message);
g_free (debug);
g_printerr (“Warning: %s\n”, error->message);
g_error_free (error);
break;
}
case GST_MESSAGE_ERROR:
{
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_printerr (“ERROR from element %s: %s\n”,
GST_OBJECT_NAME (msg->src), error->message);
if (debug)
g_printerr (“Error details: %s\n”, debug);
g_free (debug);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
#ifndef PLATFORM_TEGRA
case GST_MESSAGE_ELEMENT:
{
if (gst_nvmessage_is_stream_eos (msg)) {
guint stream_id;
if (gst_nvmessage_parse_stream_eos (msg, &stream_id)) {
g_print (“Got EOS from stream %d\n”, stream_id);
}
}
break;
}
#endif
default:
break;
}
return TRUE;
}

static GstElement *
create_source_bin (guint index, gchar * uri)
{
GstElement *bin = NULL;
gchar bin_name[16] = { };

g_snprintf (bin_name, 15, “source-bin-%02d”, index);
/* Create a source GstBin to abstract this bin’s content from the rest of the

  • pipeline */
    bin = gst_bin_new (bin_name);

GstElement *source, *jpegparser, *decoder;

source = gst_element_factory_make (“filesrc”, “source”);

jpegparser = gst_element_factory_make (“jpegparse”, “jpeg-parser”);

decoder = gst_element_factory_make (“nvv4l2decoder”, “nvv4l2-decoder”);

if (!source || !jpegparser || !decoder)
{
g_printerr (“One element could not be created. Exiting.\n”);
return NULL;
}
g_object_set (G_OBJECT (source), “location”, uri, NULL);
const char *dot = strrchr(uri, ‘.’);
if ((!strcmp (dot+1, “mjpeg”)) || (!strcmp (dot+1, “mjpg”)))
{
#ifdef PLATFORM_TEGRA
g_object_set (G_OBJECT (decoder), “mjpeg”, 1, NULL);
#endif
}

gst_bin_add_many (GST_BIN (bin), source, jpegparser, decoder, NULL);

gst_element_link_many (source, jpegparser, decoder, NULL);

/* We need to create a ghost pad for the source bin which will act as a proxy

  • for the video decoder src pad. The ghost pad will not have a target right
  • now. Once the decode bin creates the video decoder and generates the
  • cb_newpad callback, we will set the ghost pad target to the video decoder
  • src pad. */
    if (!gst_element_add_pad (bin, gst_ghost_pad_new_no_target (“src”,
    GST_PAD_SRC))) {
    g_printerr (“Failed to add ghost pad in source bin\n”);
    return NULL;
    }

GstPad *srcpad = gst_element_get_static_pad (decoder, “src”);
if (!srcpad) {
g_printerr (“Failed to get src pad of source bin. Exiting.\n”);
return NULL;
}
GstPad *bin_ghost_pad = gst_element_get_static_pad (bin, “src”);
if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
srcpad)) {
g_printerr (“Failed to link decoder src pad to source bin ghost pad\n”);
}

return bin;
}

int
main (int argc, char *argv)
{

GMainLoop *loop = NULL;
GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *seg = NULL,
*nvsegvisual = NULL, *tiler = NULL;
#ifdef PLATFORM_TEGRA
GstElement *transform = NULL;
#endif

GstBus *bus = NULL;
guint bus_watch_id;
GstPad *seg_src_pad = NULL;
GstPad *seg_src_pad2 = NULL;
GstPad *nvsegvisual_sink_pad = NULL;
guint i, num_sources;
guint tiler_rows, tiler_columns;
guint pgie_batch_size;

/* Check input arguments */
if (argc < 3) {
g_printerr (“Usage: %s config_file [file2] … [fileN] \n”, argv[0]);
return -1;
}
num_sources = argc - 2;

/* Standard GStreamer initialization */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);

/* Create gstreamer elements /
/
Create Pipeline element that will form a connection of other elements */
pipeline = gst_pipeline_new (“dstest-image-decode-pipeline”);

/* Create nvstreammux instance to form batches from one or more sources. */
streammux = gst_element_factory_make (“nvstreammux”, “stream-muxer”);

if (!pipeline || !streammux) {
g_printerr (“One element could not be created. Exiting.\n”);
return -1;
}
gst_bin_add (GST_BIN (pipeline), streammux);

for (i = 0; i < num_sources; i++) {
GstPad *sinkpad, *srcpad;
gchar pad_name[16] = { };
GstElement *source_bin = create_source_bin (i, argv[i + 2]);

if (!source_bin) {
  g_printerr ("Failed to create source bin. Exiting.\n");
  return -1;
}

gst_bin_add (GST_BIN (pipeline), source_bin);

g_snprintf (pad_name, 15, "sink_%u", i);
sinkpad = gst_element_get_request_pad (streammux, pad_name);
if (!sinkpad) {
  g_printerr ("Streammux request sink pad failed. Exiting.\n");
  return -1;
}

srcpad = gst_element_get_static_pad (source_bin, "src");
if (!srcpad) {
  g_printerr ("Failed to get src pad of source bin. Exiting.\n");
  return -1;
}

if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
  g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
  return -1;
}

gst_object_unref (srcpad);
gst_object_unref (sinkpad);

}

/* Use nvinfer to infer on batched frame. */
seg = gst_element_factory_make (“nvinfer”, “primary-nvinference-engine”);

nvsegvisual = gst_element_factory_make (“nvsegvisual”, “nvsegvisual”);

/* Use nvtiler to composite the batched frames into a 2D tiled array based

  • on the source of the frames. */
    tiler = gst_element_factory_make (“nvmultistreamtiler”, “nvtiler”);

#ifdef PLATFORM_TEGRA
transform = gst_element_factory_make (“nvegltransform”, “transform”);
#endif

sink = gst_element_factory_make (“nveglglessink”, “nvvideo-renderer”);

if (!seg || !nvsegvisual || !tiler || !sink) {
g_printerr (“One element could not be created. Exiting.\n”);
return -1;
}

#ifdef PLATFORM_TEGRA
if(!transform) {
g_printerr (“One tegra element could not be created. Exiting.\n”);
return -1;
}
#endif

g_object_set (G_OBJECT (streammux), “width”, MUXER_OUTPUT_WIDTH, “height”,
MUXER_OUTPUT_HEIGHT, “batch-size”, num_sources,
“batched-push-timeout”, MUXER_BATCH_TIMEOUT_USEC, NULL);

/* Configure the nvinfer element using the nvinfer config file. */
g_object_set (G_OBJECT (seg), “config-file-path”, argv[1], NULL);

/* Override the batch-size set in the config file with the number of sources. */
g_object_get (G_OBJECT (seg), “batch-size”, &pgie_batch_size, NULL);
if (pgie_batch_size != num_sources) {
g_printerr
(“WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n”,
pgie_batch_size, num_sources);
g_object_set (G_OBJECT (seg), “batch-size”, num_sources, NULL);
}

g_object_set (G_OBJECT (nvsegvisual), “batch-size”, num_sources, NULL);
g_object_set (G_OBJECT (nvsegvisual), “width”, 512, NULL);
g_object_set (G_OBJECT (nvsegvisual), “height”, 512, NULL);

tiler_rows = (guint) sqrt (num_sources);
tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
/* we set the tiler properties here */
g_object_set (G_OBJECT (tiler), “rows”, tiler_rows, “columns”, tiler_columns,
“width”, TILED_OUTPUT_WIDTH, “height”, TILED_OUTPUT_HEIGHT, NULL);

g_object_set(G_OBJECT(sink), “async”, FALSE, NULL);

/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);

/* Set up the pipeline /
/
Add all elements into the pipeline /
#ifdef PLATFORM_TEGRA
gst_bin_add_many (GST_BIN (pipeline), seg, nvsegvisual, tiler, transform, sink, NULL);
/
we link the elements together

  • nvstreammux → nvinfer → nvsegvidsual → nvtiler → transform → video-renderer /
    if (!gst_element_link_many (streammux, seg, nvsegvisual, tiler, transform, sink, NULL))
    {
    g_printerr (“Elements could not be linked. Exiting.\n”);
    return -1;
    }
    #else
    gst_bin_add_many (GST_BIN (pipeline), seg, nvsegvisual, tiler, sink, NULL);
    /
    Link the elements together
  • nvstreammux → nvinfer → nvsegvisual → nvtiler → video-renderer */
    if (!gst_element_link_many (streammux, seg, nvsegvisual, tiler, sink, NULL)) {
    g_printerr (“Elements could not be linked. Exiting.\n”);
    return -1;
    }
    #endif

/* Lets add probe to get informed of the meta data generated, we add probe to

  • the src pad of the nvseg element, since by that time, the buffer would have
  • had got all the segmentation metadata. /
    /

    seg_src_pad = gst_element_get_static_pad (seg, “src”);

if (!seg_src_pad){
g_print (“Unable to get src pad\n”);
}else{
gst_pad_add_probe (seg_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
tiler_src_pad_buffer_probe, NULL, NULL);
gst_pad_add_probe (seg_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
osd_sink_pad_buffer_probe, NULL, NULL);
}*/

// seg_src_pad2 = gst_element_get_static_pad (seg, “src”);
seg_src_pad2 = gst_element_get_static_pad (nvsegvisual, “sink”);

if (!seg_src_pad2){
g_print (“Unable to get src pad\n”);
}else{
/gst_pad_add_probe (seg_src_pad2, GST_PAD_PROBE_TYPE_BUFFER,
osd_sink_pad_buffer_probe, NULL, NULL);
/
gst_pad_add_probe (seg_src_pad2, GST_PAD_PROBE_TYPE_BUFFER,
tiler_src_pad_buffer_probe, NULL, NULL);
}

/*
nvsegvisual_sink_pad = gst_element_get_static_pad (nvsegvisual, “sink”);
if (!nvsegvisual_sink_pad)
g_print (“Unable to get src pad\n”);
else
gst_pad_add_probe (nvsegvisual_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
nvsegvisual_sink_pad_buffer_probe, NULL, NULL);
/
/
Set the pipeline to “playing” state */
g_print (“Now playing:”);
for (i = 0; i < num_sources; i++) {
g_print (" %s,“, argv[i + 2]);
}
g_print (”\n");
gst_element_set_state (pipeline, GST_STATE_PLAYING);

/* Wait till pipeline encounters an error or EOS */
g_print (“Running…\n”);
g_main_loop_run (loop);

/* Out of the main loop, clean up nicely */
g_print (“Returned, stopping playback\n”);
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print (“Deleting pipeline\n”);
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}

class_map stores the pixel-to-class mapping: for a pixel (x, y), its memory index y*width + x maps to a class id (for example, class 1). The pixels belonging to a class may be scattered across the frame, so you cannot directly use the buffer the way you did above.
I see you want to save an image with OpenCV for the detected class, is that right? If so, one approach is to first refer to the dsexample plugin for how to copy the GPU buffer to a CPU buffer for OpenCV access, then process the image pixel by pixel: if the class mapped to the current pixel is not the one you want to save, mask it as white.