Is code for nvsegvisual plugin available?

Hi,

Is the code for “nvsegvisual” plugin available?

Thank you,

Ondra

Can you share why you need it ?

With DeepStream I want to test a different use case.
My use case: the input is an image -> inference -> the output is an image (the same size or larger).
I thought I could reuse the segmentation use case…
Any thoughts on how to achieve this?

/**
 * Copyright (c) 2018-2019, NVIDIA CORPORATION.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto.  Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 *
 */

#include <string.h>
#include <string>
#include <sstream>
#include <iostream>
#include <ostream>
#include <fstream>
#include <sys/time.h>

#include "gstnvdsbufferpool.h"
#include "gstnvsegvisual.h"
#include "nvbufsurface.h"
#include "gstnvdsmeta.h"
#include "gstnvdsinfer.h"

/* Per-element GStreamer debug category; GST_CAT_DEFAULT routes this file's
 * GST_DEBUG/GST_INFO/GST_WARNING macros to it. */
GST_DEBUG_CATEGORY_STATIC (gst_nvseg_visual_debug);
#define GST_CAT_DEFAULT gst_nvseg_visual_debug

/* Quark used to recognize NvDsMeta entries while iterating a buffer's
 * GstMetas; initialized once in gst_nvseg_visual_init(). */
static GQuark _dsmeta_quark = 0;

/* Enum to identify properties */
enum
{
    PROP_0,
    PROP_UNIQUE_ID,
    PROP_GPU_DEVICE_ID,
    PROP_BATCH_SIZE,
    PROP_WIDTH,
    PROP_HEIGHT,
};

/* Default values for properties */
#define DEFAULT_UNIQUE_ID 0
#define DEFAULT_OUTPUT_WIDTH 1280
#define DEFAULT_OUTPUT_HEIGHT 720
#define DEFAULT_GPU_ID 0
#define DEFAULT_GRID_SIZE 0   /* NOTE(review): currently unused in this file */

/* By default NVIDIA Hardware allocated memory flows through the pipeline. We
 * will be processing on this type of memory only. */
#define GST_CAPS_FEATURE_MEMORY_NVMM "memory:NVMM"
/* Sink pad accepts NV12 or RGBA frames in NVMM (device) memory. */
static GstStaticPadTemplate gst_nvseg_visual_sink_template =
    GST_STATIC_PAD_TEMPLATE("sink",
                            GST_PAD_SINK,
                            GST_PAD_ALWAYS,
                            GST_STATIC_CAPS(GST_VIDEO_CAPS_MAKE_WITH_FEATURES(
                            "memory:NVMM",
                            "{ NV12, RGBA }")));

/* Source pad always produces RGBA (the colorized segmentation map) in NVMM
 * memory. */
static GstStaticPadTemplate gst_nvseg_visual_src_template =
    GST_STATIC_PAD_TEMPLATE("src",
                            GST_PAD_SRC,
                            GST_PAD_ALWAYS,
                            GST_STATIC_CAPS(GST_VIDEO_CAPS_MAKE_WITH_FEATURES(
                            "memory:NVMM",
                            "{ RGBA }")));

/* Define our element type. Standard GObject/GStreamer boilerplate stuff */
#define gst_nvseg_visual_parent_class parent_class
G_DEFINE_TYPE (GstNvSegVisual, gst_nvseg_visual, GST_TYPE_BASE_TRANSFORM);

/* GObject property accessors */
static void gst_nvseg_visual_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec);
static void gst_nvseg_visual_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec);

/* Caps-negotiation virtual methods of GstBaseTransform */
static gboolean gst_nvseg_visual_transform_size(GstBaseTransform* btrans,
        GstPadDirection dir, GstCaps *caps, gsize size, GstCaps* othercaps, gsize* othersize);

static GstCaps* gst_nvseg_visual_fixate_caps(GstBaseTransform* btrans,
        GstPadDirection direction, GstCaps* caps, GstCaps* othercaps);

static gboolean gst_nvseg_visual_set_caps (GstBaseTransform * btrans,
    GstCaps * incaps, GstCaps * outcaps);

static GstCaps* gst_nvseg_visual_transform_caps(GstBaseTransform* btrans, GstPadDirection dir,
    GstCaps* caps, GstCaps* filter);

/* Lifecycle virtual methods */
static gboolean gst_nvseg_visual_start (GstBaseTransform * btrans);
static gboolean gst_nvseg_visual_stop (GstBaseTransform * btrans);

/* Per-buffer processing virtual methods */
static GstFlowReturn gst_nvseg_visual_transform(GstBaseTransform* btrans,
    GstBuffer* inbuf, GstBuffer* outbuf);

static GstFlowReturn
gst_nvseg_visual_prepare_output_buffer (GstBaseTransform * trans,
    GstBuffer * inbuf, GstBuffer ** outbuf);

/* Install properties, set sink and src pad capabilities, override the required
 * functions of the base class, These are common to all instances of the
 * element.
 */
/* Install properties, set sink and src pad capabilities, override the required
 * functions of the base class. These are common to all instances of the
 * element.
 *
 * Fixes: spelling of "Segmentation" in the user-visible element description,
 * consistent (GParamFlags) cast style, and DEFAULT_GPU_ID used for the gpu-id
 * default instead of a bare literal (same value, consistent with siblings).
 */
static void
gst_nvseg_visual_class_init (GstNvSegVisualClass * klass)
{
    GObjectClass *gobject_class;
    GstElementClass *gstelement_class;
    GstBaseTransformClass *gstbasetransform_class;
    gobject_class = (GObjectClass *) klass;
    gstelement_class = (GstElementClass *) klass;
    gstbasetransform_class = (GstBaseTransformClass *) klass;

    /* Override base class functions */
    gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_nvseg_visual_set_property);
    gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_nvseg_visual_get_property);

    gstbasetransform_class->transform_size = GST_DEBUG_FUNCPTR(gst_nvseg_visual_transform_size);
    gstbasetransform_class->fixate_caps = GST_DEBUG_FUNCPTR (gst_nvseg_visual_fixate_caps);
    gstbasetransform_class->set_caps = GST_DEBUG_FUNCPTR (gst_nvseg_visual_set_caps);
    gstbasetransform_class->transform_caps = GST_DEBUG_FUNCPTR(gst_nvseg_visual_transform_caps);
    gstbasetransform_class->start = GST_DEBUG_FUNCPTR (gst_nvseg_visual_start);
    gstbasetransform_class->stop = GST_DEBUG_FUNCPTR (gst_nvseg_visual_stop);

    gstbasetransform_class->transform = GST_DEBUG_FUNCPTR (gst_nvseg_visual_transform);

    /* Output buffers come from our own NVMM pool, not the default allocator. */
    gstbasetransform_class->prepare_output_buffer = GST_DEBUG_FUNCPTR (gst_nvseg_visual_prepare_output_buffer);

    gstbasetransform_class->passthrough_on_same_caps = TRUE;

    /* Install properties */
    g_object_class_install_property (gobject_class, PROP_UNIQUE_ID,
        g_param_spec_uint ("unique-id",
            "Unique ID",
            "Unique ID for the element. Can be used to identify output of the"
            " element", 0, G_MAXUINT, DEFAULT_UNIQUE_ID, (GParamFlags)
            (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

    g_object_class_install_property (gobject_class, PROP_GPU_DEVICE_ID,
        g_param_spec_uint ("gpu-id",
            "Set GPU Device ID",
            "Set GPU Device ID", 0,
            G_MAXUINT, DEFAULT_GPU_ID,
            (GParamFlags)
            (G_PARAM_READWRITE |
                G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY)));

    g_object_class_install_property (gobject_class, PROP_BATCH_SIZE,
        g_param_spec_uint ("batch-size", "Batch Size",
            "Maximum batch size for inference",
            1, G_MAXUINT, 1,
            (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
                GST_PARAM_MUTABLE_READY)));

    g_object_class_install_property (gobject_class, PROP_WIDTH,
        g_param_spec_uint ("width", "Width",
            "Width of each frame in output batched buffer.",
            0, G_MAXUINT, DEFAULT_OUTPUT_WIDTH,
            (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

    g_object_class_install_property (gobject_class, PROP_HEIGHT,
        g_param_spec_uint ("height", "Height",
            "Height of each frame in output batched buffer.",
            0, G_MAXUINT, DEFAULT_OUTPUT_HEIGHT,
            (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

    /* Set sink and src pad capabilities */
    gst_element_class_add_pad_template (gstelement_class,
        gst_static_pad_template_get (&gst_nvseg_visual_src_template));
    gst_element_class_add_pad_template (gstelement_class,
        gst_static_pad_template_get (&gst_nvseg_visual_sink_template));

    /* Set metadata describing the element */
    gst_element_class_set_details_simple(gstelement_class,
          "nvsegvisual",
          "nvsegvisual",
          "Gstreamer NV Segmentation Visualization Plugin",
          "NVIDIA Corporation. Post on Deepstream for Jetson/Tesla forum for any queries "
          "@ https://devtalk.nvidia.com/default/board/209/");
}

/* Instance initializer: caches the pad-template caps, applies property
 * defaults, selects the platform memory type and makes sure the NvDsMeta
 * quark is available. */
static void
gst_nvseg_visual_init (GstNvSegVisual * segvisual)
{
    /* Cache the static pad template caps for later negotiation. */
    segvisual->sinkcaps =
      gst_static_pad_template_get_caps (&gst_nvseg_visual_sink_template);
    segvisual->srccaps =
      gst_static_pad_template_get_caps (&gst_nvseg_visual_src_template);

    /* Property defaults. */
    segvisual->unique_id     = DEFAULT_UNIQUE_ID;
    segvisual->gpu_id        = DEFAULT_GPU_ID;
    segvisual->batch_size    = 1;
    segvisual->output_width  = DEFAULT_OUTPUT_WIDTH;
    segvisual->output_height = DEFAULT_OUTPUT_HEIGHT;

#if defined(__aarch64__)
    /* Jetson builds use the default (surface-array) memory type. */
    segvisual->cuda_mem_type = NVBUF_MEM_DEFAULT;
#else
    /* dGPU builds use unified memory. */
    segvisual->cuda_mem_type = NVBUF_MEM_CUDA_UNIFIED;
#endif

    /* This quark is required to identify NvDsMeta when iterating through
     * the buffer metadatas; register it once per process. */
    if (_dsmeta_quark == 0)
      _dsmeta_quark = g_quark_from_static_string (NVDS_META_STRING);
}

/* Function called when a property of the element is set. Standard boilerplate.
 */
/* GObject property setter. Standard boilerplate; each property id maps
 * straight onto one instance field. */
static void
gst_nvseg_visual_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
    GstNvSegVisual *segvisual = GST_NV_SEG_VISUAL (object);

    if (prop_id == PROP_UNIQUE_ID) {
      segvisual->unique_id = g_value_get_uint (value);
    } else if (prop_id == PROP_GPU_DEVICE_ID) {
      segvisual->gpu_id = g_value_get_uint (value);
    } else if (prop_id == PROP_BATCH_SIZE) {
      segvisual->batch_size = g_value_get_uint (value);
    } else if (prop_id == PROP_WIDTH) {
      segvisual->output_width = g_value_get_uint (value);
    } else if (prop_id == PROP_HEIGHT) {
      segvisual->output_height = g_value_get_uint (value);
    } else {
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
    }
}

/* Function called when a property of the element is requested. Standard
 * boilerplate.
 */
/* GObject property getter. Mirrors gst_nvseg_visual_set_property: each
 * property id reads back the corresponding instance field. */
static void
gst_nvseg_visual_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec)
{
    GstNvSegVisual *segvisual = GST_NV_SEG_VISUAL (object);

    if (prop_id == PROP_UNIQUE_ID) {
      g_value_set_uint (value, segvisual->unique_id);
    } else if (prop_id == PROP_GPU_DEVICE_ID) {
      g_value_set_uint (value, segvisual->gpu_id);
    } else if (prop_id == PROP_BATCH_SIZE) {
      g_value_set_uint (value, segvisual->batch_size);
    } else if (prop_id == PROP_WIDTH) {
      g_value_set_uint (value, segvisual->output_width);
    } else if (prop_id == PROP_HEIGHT) {
      g_value_set_uint (value, segvisual->output_height);
    } else {
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
    }
}

/**
 * Called on NULL->READY. No resources are allocated here; the output buffer
 * pool is created lazily in gst_nvseg_visual_set_caps() once the negotiated
 * output caps are known.
 */
static gboolean
gst_nvseg_visual_start(GstBaseTransform *btrans)
{
  GstNvSegVisual *segvisual = GST_NV_SEG_VISUAL (btrans);
  GST_DEBUG_OBJECT (segvisual, "gst_nvseg_visual_start\n");
	return TRUE;
}

/**
 * Called on READY->NULL. Deactivates and releases the output buffer pool
 * created in gst_nvseg_visual_set_caps(), if any.
 */
static gboolean
gst_nvseg_visual_stop (GstBaseTransform * btrans)
{
  GstNvSegVisual *segvisual = GST_NV_SEG_VISUAL (btrans);

  if (segvisual->pool != NULL) {
    /* Deactivating returns all pooled buffers before the pool goes away. */
    gst_buffer_pool_set_active (segvisual->pool, FALSE);
    /* g_clear_object() unrefs and NULLs the pointer in one step. */
    g_clear_object (&segvisual->pool);
  }

  GST_DEBUG_OBJECT (segvisual, "gst_nvseg_visual_stop\n");
  return TRUE;
}

/* Reports the buffer size required on the opposite pad, derived entirely
 * from that pad's caps (the input size is irrelevant here). */
static gboolean
gst_nvseg_visual_transform_size(GstBaseTransform* btrans,
        GstPadDirection dir, GstCaps *caps, gsize size, GstCaps* othercaps, gsize* othersize)
{
    GstVideoInfo other_info;

    if (!gst_video_info_from_caps(&other_info, othercaps))
        return FALSE;

    *othersize = other_info.size;
    return TRUE;
}

/**
 * Proposes caps for the opposite pad. Going downstream (SINK direction) the
 * output is always RGBA; going upstream only the framerate constraint is
 * carried over. Both results are tagged with the NVMM memory feature.
 *
 * Fixes: the original dereferenced a NULL new_caps if direction was neither
 * GST_PAD_SINK nor GST_PAD_SRC, and passed a NULL GValue to
 * gst_structure_set_value() when fixed input caps carried no "framerate"
 * field.
 */
static GstCaps *
gst_nvseg_visual_transform_caps (GstBaseTransform * btrans,
    GstPadDirection direction, GstCaps * caps, GstCaps * filter)
{
  GstCapsFeatures *feature = NULL;
  GstCaps *new_caps = NULL;

  if (direction == GST_PAD_SINK)
  {
    /* The source pad always produces RGBA. */
    new_caps = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "RGBA",
          "width", GST_TYPE_INT_RANGE, 1, G_MAXINT, "height", GST_TYPE_INT_RANGE, 1, G_MAXINT, NULL);
  }
  else
  {
    /* Upstream: any size, format constrained only by the pad template. */
    new_caps = gst_caps_new_simple ("video/x-raw",
          "width", GST_TYPE_INT_RANGE, 1, G_MAXINT, "height", GST_TYPE_INT_RANGE, 1, G_MAXINT, NULL);
  }

  feature = gst_caps_features_new ("memory:NVMM", NULL);
  gst_caps_set_features (new_caps, 0, feature);

  if (gst_caps_is_fixed (caps))
  {
    GstStructure *fs = gst_caps_get_structure (caps, 0);
    const GValue *fps_value = gst_structure_get_value (fs, "framerate");

    /* We cannot change framerate; propagate it if the input carries one. */
    if (fps_value != NULL)
    {
      guint n = gst_caps_get_size (new_caps);
      for (guint i = 0; i < n; i++)
      {
        fs = gst_caps_get_structure (new_caps, i);
        gst_structure_set_value (fs, "framerate", fps_value);
      }
    }
  }
  return new_caps;
}

/* Fixate the caps on the other side: the output dimensions are always forced
 * to the configured "width"/"height" property values.
 *
 * Fixes: the original called gst_structure_fixate_field_nearest_int() and
 * then immediately removed the very fields it had just fixated before
 * re-setting them, and wrapped the return value in a redundant ref/unref
 * pair. Setting the fields directly is equivalent and clearer; we own
 * othercaps and return it as the result. */
static GstCaps* gst_nvseg_visual_fixate_caps(GstBaseTransform* btrans,
    GstPadDirection direction, GstCaps* caps, GstCaps* othercaps)
{
  GstNvSegVisual* segvisual = GST_NV_SEG_VISUAL(btrans);
  GstStructure *s;

  othercaps = gst_caps_truncate(othercaps);
  othercaps = gst_caps_make_writable(othercaps);
  s = gst_caps_get_structure(othercaps, 0);

  /* The dimension of the output heatmap is fixated to the property values. */
  gst_structure_set (s, "width", G_TYPE_INT, segvisual->output_width,
      "height", G_TYPE_INT, segvisual->output_height, NULL);

  GST_INFO_OBJECT(segvisual, "CAPS fixate: %" GST_PTR_FORMAT ", direction %d",
      othercaps, direction);

  return othercaps;
}

/**
 * Called when source / sink pad capabilities have been negotiated. Records
 * the negotiated video info and, on first negotiation, creates and activates
 * an NVMM buffer pool for the RGBA output surfaces.
 *
 * Fix: gst_caps_to_string() returns a newly allocated string that the
 * original leaked on every negotiation; it is now freed after printing.
 */
static gboolean
gst_nvseg_visual_set_caps (GstBaseTransform * btrans, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstNvSegVisual *segvisual = GST_NV_SEG_VISUAL (btrans);
  GstStructure *config = NULL;

  /* Save the input video information, since this will be required later. */
  gst_video_info_from_caps(&segvisual->video_info, incaps);

  if (segvisual->batch_size == 0)
  {
    g_print ("NvSegVisual: Received invalid batch_size i.e. 0\n");
    return FALSE;
  }

  if (!gst_video_info_from_caps (&segvisual->out_info, outcaps)) {
    GST_ERROR ("invalid output caps");
    return FALSE;
  }
  segvisual->output_fmt = GST_VIDEO_FORMAT_INFO_FORMAT (segvisual->out_info.finfo);

  if (!segvisual->pool)
  {
    segvisual->pool = gst_nvds_buffer_pool_new ();
    config = gst_buffer_pool_get_config (segvisual->pool);

    gchar *caps_str = gst_caps_to_string (outcaps);
    g_print ("in videoconvert caps = %s\n", caps_str);
    g_free (caps_str);

    gst_buffer_pool_config_set_params (config, outcaps, sizeof (NvBufSurface), 4, 4); // TODO: remove 4 hardcoding

    gst_structure_set (config,
        "memtype", G_TYPE_UINT, segvisual->cuda_mem_type,
        "gpu-id", G_TYPE_UINT, segvisual->gpu_id,
        "batch-size", G_TYPE_UINT, segvisual->batch_size, NULL);

    GST_INFO_OBJECT (segvisual, " %s Allocating Buffers in NVM Buffer Pool for Max_Views=%d\n",
        __func__, segvisual->batch_size);

    /* set config for the created buffer pool */
    if (!gst_buffer_pool_set_config (segvisual->pool, config)) {
      GST_WARNING ("bufferpool configuration failed");
      return FALSE;
    }

    gboolean is_active = gst_buffer_pool_set_active (segvisual->pool, TRUE);
    if (!is_active) {
      GST_WARNING (" Failed to allocate the buffers inside the output pool");
      return FALSE;
    } else {
      GST_DEBUG (" Output buffer pool (%p) successfully created",
                  segvisual->pool);
    }
  }

  return TRUE;
}

/**
 * Supplies the output buffer for each input buffer, acquired from the
 * element's own NVMM buffer pool (created in set_caps) instead of the base
 * class's default allocator.
 *
 * Fixes: the failure path previously logged the name of a non-existent
 * function ("gst_segvisual_prepare_output_buffer"); the debug log of the
 * acquired buffer now happens only after the acquire succeeded.
 */
static GstFlowReturn
gst_nvseg_visual_prepare_output_buffer (GstBaseTransform * trans,
    GstBuffer * inbuf, GstBuffer ** outbuf)
{
  GstBuffer *gstOutBuf = NULL;
  GstFlowReturn result = GST_FLOW_OK;
  GstNvSegVisual *segvisual = GST_NV_SEG_VISUAL (trans);

  result = gst_buffer_pool_acquire_buffer (segvisual->pool, &gstOutBuf, NULL);
  if (result != GST_FLOW_OK)
  {
    GST_ERROR_OBJECT (segvisual, "%s failed", __func__);
    return result;
  }

  GST_DEBUG_OBJECT (segvisual, "%s : Frame=%lu Gst-OutBuf=%p\n",
		  __func__, segvisual->frame_num, gstOutBuf);

  *outbuf = gstOutBuf;
  return result;
}

/* For segmentation visualization: lookup table of 22 BGR color triplets.
 * overlayColor() indexes it with (class_id + 3), so the first three entries
 * serve class ids -3..-1 — presumably reserved/background ids; verify
 * against the inference meta producer. */
static unsigned char class2BGR[] = {
  0, 0, 0,        0, 0, 128,      128, 128, 128,
  0, 128, 128,    128, 0, 0,      128, 0, 128,
  128, 128, 0,    0, 128, 0,      0, 0, 64,
  0, 0, 192,      0, 128, 64,     0, 128, 192,
  128, 0, 64,     128, 0, 192,    128, 128, 64,
  128, 128, 192,  0, 64, 0,       0, 64, 128,
  0, 192, 0,     0, 192, 128,    128, 64, 0,
  192, 192, 0
};

/**
 * Converts a per-pixel class-id map into a BGRA image using the class2BGR
 * lookup table. The alpha channel is set fully opaque.
 *
 * Fix: the original indexed class2BGR with (mask[pix_id] + 3) * 3 without
 * any bounds check — the table holds only 22 colors, so a class id above 18
 * (or below -3) read out of bounds. Out-of-range ids now fall back to the
 * first table entry (black).
 *
 * @param mask        class id per pixel, row-major, height*width entries
 * @param buffer      output BGRA buffer, 4*height*width bytes, caller-owned
 * @param stream_num  source stream index (used only by the optional dump)
 * @param frame_num   frame counter (used only by the optional dump)
 */
static void overlayColor(int* mask, unsigned char* buffer,
                             int height, int width,
                             int stream_num, int frame_num)
{
  const int num_colors = (int)(sizeof(class2BGR) / 3);

  for (int pix_id = 0; pix_id < width * height; pix_id++) {
    /* +3 offset: the first three table entries serve ids -3..-1. */
    int color_id = mask[pix_id] + 3;
    if (color_id < 0 || color_id >= num_colors)
      color_id = 0;   /* out-of-range class id: draw black */

    const unsigned char* color = class2BGR + color_id * 3;
    unsigned char* px = buffer + pix_id * 4;
    px[0] = color[0];   /* B */
    px[1] = color[1];   /* G */
    px[2] = color[2];   /* R */
    px[3] = 255;        /* A: fully opaque */
  }

#if 0
  /* Debug aid: dump the raw RGBA map per stream/frame. */
  char file_name[128];
  sprintf(file_name, "dump_map_stream%d_frame%03d.rgba", stream_num, frame_num);
  FILE* fp = fopen(file_name, "ab");
  fwrite(buffer, 4*height*width, 1, fp);
  fclose(fp);
#endif
}

/**
 * Core transform: locates the NvDsBatchMeta on the input buffer, and for each
 * frame carrying NVDSINFER_SEGMENTATION_META colorizes its class map to RGBA
 * and copies the result into the corresponding output NvBufSurface plane.
 *
 * Fixes: of_metadata_found was never set TRUE in the original, so the
 * "SEG METADATA NOT FOUND" warning fired on every buffer even on success;
 * malloc() was unchecked; the batch-meta-missing message said "optical flow"
 * (copy-paste from the optical-flow plugin) instead of segmentation.
 */
static GstFlowReturn
gst_nvseg_visual_transform_internal(GstBaseTransform *btrans,
                                       GstBuffer *inbuf, GstBuffer *outbuf)
{
  GstNvSegVisual *segvisual = GST_NV_SEG_VISUAL (btrans);
  GstFlowReturn flow_ret = GST_FLOW_OK;
  gpointer state = NULL;
  gboolean of_metadata_found = FALSE;
  GstMeta *gst_meta = NULL;
  NvDsMeta *dsmeta = NULL;
  NvDsBatchMeta *batch_meta = NULL;
  guint i = 0;

  /* Map once to obtain the NvBufSurface descriptor backing the output
   * buffer; the descriptor pointer is used after unmap — presumably it
   * remains valid while the GstBuffer holds the memory (TODO confirm). */
  GstMapInfo outmap;
  if (!gst_buffer_map (outbuf, &outmap, GST_MAP_WRITE))
  {
	  g_print ("%s output buf map failed\n", __func__);
	  return GST_FLOW_ERROR;
  }

  NvBufSurface *dstSurf = (NvBufSurface *)outmap.data;
  gst_buffer_unmap (outbuf, &outmap);

  // Required in the case of tiler
  if (!gst_buffer_copy_into (outbuf, inbuf, GST_BUFFER_COPY_META, 0, -1)) {
	  GST_DEBUG ("Buffer metadata copy failed \n");
  }

  if (cudaSetDevice(segvisual->gpu_id) != cudaSuccess)
  {
    g_printerr("Error: failed to set GPU to %d\n", segvisual->gpu_id);
    return GST_FLOW_ERROR;
  }

  /* Find the NvDsBatchMeta among the input buffer's GstMetas. */
  while ((gst_meta = gst_buffer_iterate_meta (inbuf, &state)))
  {
    if (gst_meta_api_type_has_tag(gst_meta->info->api, _dsmeta_quark))
    {
      dsmeta = (NvDsMeta *) gst_meta;
      if (dsmeta->meta_type == NVDS_BATCH_GST_META) {
        batch_meta = (NvDsBatchMeta *)dsmeta->meta_data;
        break;
      }
    }
  }

  if (batch_meta == NULL)
  {
    g_print ("batch_meta not found, skipping segmentation visual draw execution\n");
    return GST_FLOW_ERROR;
  }

  dstSurf->numFilled = batch_meta->num_frames_in_batch;

  // TODO: Improve by mapping and unmapping buffer at the time of buffer creation
  NvBufSurfaceMap (dstSurf, -1, -1, NVBUF_MAP_WRITE);

  /* NOTE(review): a static counter is shared across all element instances in
   * the process; it only feeds the optional raw-dump file names. */
  static int frame_n = 0;
  for (i = 0; i < batch_meta->num_frames_in_batch && flow_ret == GST_FLOW_OK; i++)
  {
    NvDsFrameMeta *frame_meta = nvds_get_nth_frame_meta (batch_meta->frame_meta_list, i);
    if (!frame_meta->frame_user_meta_list)
      continue;

    NvDsFrameMetaList *fmeta_list = NULL;
    NvDsUserMeta *of_user_meta = NULL;

    for (fmeta_list = frame_meta->frame_user_meta_list; fmeta_list != NULL; fmeta_list = fmeta_list->next)
    {
      of_user_meta = (NvDsUserMeta *)fmeta_list->data;
      if (!of_user_meta || of_user_meta->base_meta.meta_type != NVDSINFER_SEGMENTATION_META)
        continue;

      NvDsInferSegmentationMeta *segmeta = (NvDsInferSegmentationMeta *) (of_user_meta->user_meta_data);
      GST_DEBUG("classes/width/height=%d/%d/%d\n",
                segmeta->classes,
                segmeta->width,
                segmeta->height);
      GST_DEBUG("dstSurf [%d] dataSize=%d\n", i, dstSurf->surfaceList[i].dataSize);

      int rgba_bytes = 4;
      unsigned char* buffer = (unsigned char*)(malloc(rgba_bytes * segmeta->height * segmeta->width));
      if (buffer == NULL) {
        g_printerr ("%s: failed to allocate RGBA staging buffer\n", __func__);
        flow_ret = GST_FLOW_ERROR;
        break;
      }
      overlayColor(segmeta->class_map, buffer, segmeta->height, segmeta->width, i, frame_n);

#if defined(__aarch64__)
      /* Jetson: pitched surface — copy row by row through the CPU mapping. */
      for (unsigned int h = 0; h < segmeta->height; h++) {
        memcpy((char *)dstSurf->surfaceList[i].mappedAddr.addr[0] +
                   h * dstSurf->surfaceList[i].planeParams.pitch[0],
               buffer + h * segmeta->width * 4,
               segmeta->width * 4);
      }
#else
      /* dGPU: single contiguous copy into unified/device memory. */
      cudaMemcpy((void*)dstSurf->surfaceList[i].mappedAddr.addr[0],
                 (void*)buffer,
                 rgba_bytes * segmeta->height * segmeta->width,
                 cudaMemcpyHostToDevice);
#endif

      free(buffer);
      of_metadata_found = TRUE;
    }
  }
  frame_n++;

  NvBufSurfaceSyncForDevice (dstSurf, -1, -1);
  NvBufSurfaceUnMap (dstSurf, -1, -1);

  if (of_metadata_found == FALSE)
  {
    GST_WARNING_OBJECT (segvisual, "SEG METADATA NOT FOUND\n");
  }
  return flow_ret;
}

/**
 * Called when the plugin works in non-passthough mode.
 * Thin wrapper that delegates to gst_nvseg_visual_transform_internal().
 */
static GstFlowReturn
gst_nvseg_visual_transform(GstBaseTransform* btrans, GstBuffer* inbuf, GstBuffer* outbuf)
{
  return gst_nvseg_visual_transform_internal(btrans, inbuf, outbuf);
}

/**
 * Boiler plate for registering a plugin and an element.
 * Initializes this file's debug category and registers the "nvsegvisual"
 * element with PRIMARY rank.
 */
static gboolean
nvseg_visual_plugin_init (GstPlugin * plugin)
{
  GST_DEBUG_CATEGORY_INIT (gst_nvseg_visual_debug, "nvsegvisual", 0,
      "nvsegvisual plugin");

  return gst_element_register (plugin, "nvsegvisual", GST_RANK_PRIMARY,
          GST_TYPE_NV_SEG_VISUAL);
}

/* Plugin entry point; DESCRIPTION/LICENSE/BINARY_PACKAGE/URL are presumably
 * supplied by the build system — confirm against the Makefile. */
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
    GST_VERSION_MINOR,
    nvdsgst_segvisual,
    DESCRIPTION, nvseg_visual_plugin_init, "4.0", LICENSE, BINARY_PACKAGE, URL)

gstnvsegvisual.h (2.86 KB)

gstnvdsbufferpool.h (1.47 KB)

Thank you!

Hi, I am also trying to modify the nvsegvisual to make it display overlay instead of mask.
When I try to compile the code, I found I need some header files such as “gstnvdsbufferpool.h” and “gstnvsegvisual.h”. Can you share these files? Thanks.

Hi jason.1.wong,

Sorry for the late reply. If this is still an issue on your side, please file a new topic. Thanks

Hi, I have the same problem, where can I find the files “gstnvdsbufferpool.h” and “gstnvsegvisual.h”?
Thanks.

Hi fgarciat6h3j,

Please open a new topic for your issue. Thanks

Hi, kayccc

I run the above code of nvsegvisual plugin and reported the following error:

gstnvsegvisual.o: In function gst_nvseg_visual_set_caps(_GstBaseTransform*, _GstCaps*, _GstCaps*)': gstnvsegvisual.cpp:(.text+0xe98): undefined reference to gst_nvds_buffer_pool_new’
collect2: error: ld returned 1 exit status


I modified the gstnvsegvision.cpp file:

segvisual->pool = gst_buffer_pool_new ();
-> segvisual->pool = gst_nvds_buffer_pool_new ();

After modification can run deepstream-segmentation-test


However, the following error is reported:

In cb_newpad
In cb_newpad
in videoconvert caps = video/x-raw(memory:NVMM), format=(string)RGBA, framerate=(fraction)30/1, width=(int)512, height=(int)512
In cb_newpad
In cb_newpad
nvbufsurface: mapping of memory type (0) not supported
classes/width/height=4/512/512
Segmentation fault (core dumped)

How to solve this problem?
nvbufsurface: mapping of memory type (0) not supported, thanks

Hi tangfei11,

Let’s separate your issue from the original one, please open a new topic, thanks.

Using -lnvdsbufferpool solves the problem. (libnvdsbufferpool.so is located in …/deepstream-5.0/lib)