VPI Image wrapper around NvBufSurface on Jetson AGX Xavier

Hello. I have an image in an NvBufSurface, and the image is OK. I am trying to build a VPIImage object pointing to this memory in order to do some preprocessing before feeding the input to an AI model.

The image in the NvBufSurface is definitely OK, because I can access it with the OpenCV library. But it doesn’t work with the VPI library.

The code looks like:

// Repro fragment: wrap an NvBufSurface (from a mapped GstBuffer) in a VPIImage.
NvBufSurface *surface{};
surface = reinterpret_cast<NvBufSurface *>(map.data); // data was in map.data

// build VPI Image by wrapping the surface's dmabuf fd
VPIImage img_vpi = NULL;
VPIImageData img_data = {};
NvBufSurfaceMapParams buffer_params;
int result = NvBufSurfaceGetMapParams(surface, 0, &buffer_params);
assert(result == 0); // it is OK here
assert(buffer_params.fd != 0); // it is OK here

img_data.bufferType = VPI_IMAGE_BUFFER_NVBUFFER;
img_data.buffer.fd = buffer_params.fd;

err_vpi = vpiImageCreateWrapper(&img_data, nullptr, imgFlags, &img_vpi);
assert(err_vpi == VPI_SUCCESS); // it is OK here
std::cout << "img_data.bufferType = " << img_data.bufferType << std::endl; // prints 5 == VPI_IMAGE_BUFFER_NVBUFFER

// check some parameters reported by the wrapper
err_vpi = vpiImageGetSize(img_vpi, &width, &height);
assert(err_vpi == VPI_SUCCESS); // it is OK here
std::cout << "width = " << width << ", height = " << height << std::endl; // outputs correct values
VPIImageFormat img_fmt;
err_vpi = vpiImageGetFormat(img_vpi, &img_fmt);
assert(err_vpi == VPI_SUCCESS); // it is OK here
std::cout << vpiImageFormatGetDataType(img_fmt) << std::endl; // outputs 1 (unsigned int)
std::cout << vpiImageFormatGetColorModel(img_fmt) << std::endl; // outputs 2 (RGB)
std::cout << vpiImageFormatGetPlaneCount(img_fmt) << std::endl; // outputs 1
std::cout << vpiImageFormatGetChannelCount(img_fmt) << std::endl; // outputs 4

// Now try to access pixel data through a lock.
// NOTE(review): vpiImageLockData converts the data to the buffer type that is
// *requested* in its third argument, so getting CUDA_PITCH_LINEAR back here is
// the expected behavior, not a bug — CUDA_PITCH_LINEAR was asked for.
VPIImageData outData;
err_vpi = vpiImageLockData(img_vpi, VPI_LOCK_READ, VPI_IMAGE_BUFFER_CUDA_PITCH_LINEAR, &outData);
std::cout << "outData.bufferType = " << outData.bufferType << std::endl; // outputs 2 == VPI_IMAGE_BUFFER_CUDA_PITCH_LINEAR (the requested type)
assert(err_vpi == VPI_SUCCESS); // No error here
assert(outData.bufferType == VPI_IMAGE_BUFFER_CUDA_PITCH_LINEAR); // holds because that type was requested above
// try to make an output with OpenCV
// NOTE(review): vpiImageDataExportOpenCVMat appears to require host-accessible
// (CPU pitch-linear) data; exporting CUDA-device data likely explains the
// VPI_ERROR_INVALID_ARGUMENT — try locking with VPI_IMAGE_BUFFER_HOST_PITCH_LINEAR
// instead. TODO confirm against the VPI OpenCV interop documentation.
cv::Mat forOutput;
err_vpi = vpiImageDataExportOpenCVMat(outData, &forOutput);
std::cout << "vpiImageDataExportOpenCVMat status: " << vpiStatusGetName(err_vpi) << std::endl; // VPI_ERROR_INVALID_ARGUMENT
assert(err_vpi == VPI_SUCCESS); // error here

If I do not try to convert to OpenCV and try to use image in the preprocessing function, it is simply empty.

Could you please tell me what I am doing wrong? And why is the buffer type converted to CUDA_PITCH_LINEAR if I explicitly used VPI_IMAGE_BUFFER_NVBUFFER?

Hi,

Please find the below topic for info:

Thanks.

Thank you, but the forum thread you provided is actually my own thread ))) There I thought it was working because the VPI status was “VPI_SUCCESS”. However, the image is empty. That is why the problem in the provided link was not actually solved.

Hi,

Sorry, I didn’t notice that.

Would you mind sharing a complete runnable source code with us?
We need to reproduce it locally and check it further.

Thanks.

Hello, the code is below. I read images from a GStreamer pipeline into an NvBufSurface, and then try to build a VPIImage and perform some operations with it. But it doesn’t work.
However, when I use EGL frames and OpenCV, it works fine.

// NOTE(review): the forum formatting stripped the '#' from every include and
// swallowed four header names entirely; the four below are reconstructed from
// usage in the code (assert, std::filesystem, std::cout, std::string).
#include <cassert>
#include <filesystem>
#include <iostream>
#include <string>

#include <cudaEGL.h>
#include <EGL/eglext.h>
#include <gst/gst.h>
#include <nvbufsurface.h>

#include <vpi/Image.h>
#include <vpi/ImageFormat.h>
#include <vpi/OpenCVInterop.hpp>
#include <vpi/Status.h>
#include <vpi/Stream.h>

#include <opencv2/cudaimgproc.hpp>
GstElement* startPipeline(const std::string& pipeline_str);

/// GStreamer bus watch: logs errors, warnings, EOS, buffering progress and
/// (optionally) state changes coming from the pipeline.
/// @param bus       the pipeline bus (unused)
/// @param msg       the message being delivered
/// @param transform user data registered with gst_bus_add_watch (unused)
/// @return TRUE so the watch stays installed
/// BUGFIX: the format strings below contained typographic quotes (“ ”) from
/// forum copy/paste, which do not compile; restored plain ASCII quotes.
gboolean bus_callback(GstBus* bus, GstMessage* msg, void* transform)
{
    switch (GST_MESSAGE_TYPE(msg))
    {
    case GST_MESSAGE_ERROR:
    {
        GError* err;
        gchar* debug_info;
        gst_message_parse_error(msg, &err, &debug_info);
        g_printerr("Error received from element %s: %s\n", GST_OBJECT_NAME(msg->src), err->message);
        g_printerr("Debugging information: %s\n", debug_info ? debug_info : "none");
        g_clear_error(&err);
        g_free(debug_info);
        break;
    }
    case GST_MESSAGE_EOS:
        g_print("End-Of-Stream reached.\n");
        break;
    case GST_MESSAGE_WARNING:
    {
        GError* err;
        gchar* debug_info;
        gst_message_parse_warning(msg, &err, &debug_info);
        g_printerr("Warning received from element %s: %s\n", GST_OBJECT_NAME(msg->src), err->message);
        g_printerr("Debugging information: %s\n", debug_info ? debug_info : "none");
        g_clear_error(&err);
        g_free(debug_info);
        break;
    }
    // progress messages
    case GST_MESSAGE_BUFFERING:
    {
        gint percent = 0;
        gst_message_parse_buffering(msg, &percent);
        g_print("Buffering (%3d%%)\r", percent);
        break;
    }
    // state change messages (logging disabled; too noisy)
    case GST_MESSAGE_STATE_CHANGED:
    {
        GstState old_state, new_state, pending_state;
        gst_message_parse_state_changed(msg, &old_state, &new_state, &pending_state);
        // g_print("Pipeline state changed from %s to %s:\n", gst_element_state_get_name(old_state),
        //         gst_element_state_get_name(new_state));
        break;
    }
    default:
        break;
    }
    // return true to keep the bus callback alive
    return true;
}

/*

  • Usage: transform-app video10
  • /
    int main(int argc, char
    argv)
    {
    gst_init(NULL, NULL);
    // set the current plugin path to the current directory
    GstRegistry* registry;
    registry = gst_registry_get();
    auto pluginPath = std::filesystem::current_path().string();
    gst_registry_scan_path(registry, pluginPath.c_str());
    GMainLoop* loop = g_main_loop_new(NULL, FALSE);
    auto nvvidconv = gst_element_factory_make(“nvvidconv”, NULL);
    std::string pipeline_str{};
    if (nvvidconv != nullptr)
    {
    g_print(“Nvidia Video Converter (nvvidconv) plugin is installed. [Platform = TEGRA]\n”);
    pipeline_str = "videotestsrc ! video/x-raw, width=(int)1280,height=(int)720, framerate=(fraction)30/1, format=(string)NV12 "
    "! nvvidconv ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, framerate=(fraction)30/1, format=(string)NV12 "
    “! appsink name=app”;
    gst_object_unref(nvvidconv);
    }
    else
    {
    g_print(“Nvidia Video Converter (nvvidconv) plugin is not installed. [Platform = X86]\n”);
    pipeline_str = "videotestsrc ! video/x-raw, width=(int)1280,height=(int)720, framerate=(fraction)30/1, format=(string)NV12 "
    “! appsink name=app”;
    }
    auto pipeline = startPipeline(pipeline_str);
    g_main_loop_run(loop);
    return 0;
    }

// on_new_sample_from_sink
static GstFlowReturn on_new_sample_from_sink(GstElement* elt, void* data)
{
GstSample* sample;
VPIStatus err_vpi;
int imgFlags = VPI_BACKEND_CPU | VPI_BACKEND_CUDA;
int width, height;
g_signal_emit_by_name(elt, “pull-sample”, &sample);
if (sample)
{
GstBuffer* buffer = gst_sample_get_buffer(sample);
GstMapInfo map;

    if (!gst_buffer_map(buffer, &map, GST_MAP_READ)) {
        gst_buffer_unmap(buffer, &map);
        return GST_FLOW_ERROR;
    }
    std::cout << "got input: " << std::endl;
    NvBufSurface* surface = reinterpret_cast<NvBufSurface*>(map.data);
    // check surface
    assert(surface);
    assert(surface->memType == NvBufSurfaceMemType::NVBUF_MEM_CUDA_UNIFIED || surface->memType == NvBufSurfaceMemType::NVBUF_MEM_SURFACE_ARRAY);
    assert(surface->batchSize == 1);
    assert(surface->numFilled == 1); // all the asserts are ok

    VPIImage img_vpi = NULL;
    VPIImageData img_data = {}; 
    NvBufSurfaceMapParams buffer_params;
    int result = NvBufSurfaceGetMapParams(surface, 0, &buffer_params);
    assert(result == 0);
    assert(buffer_params.fd != 0);

    img_data.bufferType = VPI_IMAGE_BUFFER_NVBUFFER;
    img_data.buffer.fd = buffer_params.fd;

    err_vpi = vpiImageCreateWrapper(&img_data, nullptr, imgFlags, &img_vpi);
    assert(err_vpi == VPI_SUCCESS);
    std::cout << "img_data.bufferType = " << img_data.bufferType << std::endl; // Output: 5
    err_vpi = vpiImageGetSize(img_vpi, &width, &height);
    assert(err_vpi == VPI_SUCCESS);
    std::cout << "width = " << width << ", height = " << height << std::endl;
    VPIImageFormat img_fmt;
    err_vpi = vpiImageGetFormat(img_vpi, &img_fmt);
    assert(err_vpi == VPI_SUCCESS);
    std::cout << "Before preprocessing data type = " << vpiImageFormatGetDataType(img_fmt) << std::endl;
    std::cout << "Before preprocessing Color Model = " << vpiImageFormatGetColorModel(img_fmt) << std::endl;
    std::cout << "Before preprocessing Plane Count = " << vpiImageFormatGetPlaneCount(img_fmt) << std::endl;
    std::cout << "Before preprocessing Channel Count = " << vpiImageFormatGetChannelCount(img_fmt) << std::endl;

    VPIImageData outData;
    err_vpi = vpiImageLockData(img_vpi, VPI_LOCK_READ, VPI_IMAGE_BUFFER_CUDA_PITCH_LINEAR, &outData);
    std::cout << "outData.bufferType = " << outData.bufferType << std::endl; // Output 2
    assert(err_vpi == VPI_SUCCESS);
    std::cout << "Status in main, wrapper around received image: " << vpiStatusGetName(err_vpi) << std::endl; //Output: VPI_SUCCESS

    VPIImage img_vpi_cropped = NULL;
	VPIRectangleI Rect;
	Rect.x =100;
	Rect.y = 200;
	Rect.width = 100;
	Rect.height = 100;
	err_vpi = vpiImageCreateView(img_vpi, &Rect, 0, &img_vpi_cropped); // crop
	std::cout << "Crop status: " << vpiStatusGetName(err_vpi) << std::endl; // Output: VPI_ERROR_INVALID_OPERATION
	assert(err_vpi == VPI_SUCCESS); // failed

    /*cv::Mat forOutput;
    err_vpi = vpiImageDataExportOpenCVMat(outData, &forOutput);
    std::cout << "vpiImageDataExportOpenCVMat status: " << vpiStatusGetName(err_vpi) << std::endl;
    assert(err_vpi == VPI_SUCCESS);*/

    // do something with the buffer
    gst_buffer_unmap(buffer, &map);
    gst_sample_unref(sample);
}
return GST_FLOW_OK;

}

/// Parses a gst-launch style description, installs the bus watch, wires the
/// appsink "new-sample" callback and sets the pipeline to PLAYING.
/// @param pipeline_str pipeline description; must contain an appsink named "app"
/// @return the playing pipeline element (caller owns the reference); exits the
///         process on parse failure
/// BUGFIX: error strings contained typographic quotes (won't compile) and the
/// appsink reference from gst_bin_get_by_name was never released.
GstElement* startPipeline(const std::string& pipeline_str)
{
    GError* err = nullptr;
    auto pipeline = gst_parse_launch(pipeline_str.c_str(), &err);
    if (err != nullptr)
    {
        g_printerr("Failed to parse pipeline: %s\n", err->message);
        g_error_free(err);
        exit(1);
    }
    if (!pipeline)
    {
        g_printerr("Could not construct pipeline.\n");
        exit(1);
    }
    auto bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
    gst_bus_add_watch(bus, (GstBusFunc)bus_callback, nullptr);
    gst_object_unref(bus);

    // register callback for appsink
    auto appsink = gst_bin_get_by_name(GST_BIN(pipeline), "app");
    g_object_set(appsink, "emit-signals", TRUE, "sync", FALSE, NULL);
    g_signal_connect(appsink, "new-sample", G_CALLBACK(on_new_sample_from_sink), nullptr);
    gst_object_unref(appsink); // BUGFIX: gst_bin_get_by_name returns a new reference
    gst_element_set_state(pipeline, GST_STATE_PLAYING);
    return pipeline;
}