Struggling to convert a camera frame (NvBufSurface) to OpenCV Mat

Hello everyone,

I am trying to convert a camera frame (NvBufSurface) to an OpenCV Mat. I have read many similar questions in this forum, but sadly I have not found a way to fix my issue. I have an IMX477 camera connected to a Jetson Orin NX and want to read the camera frames into OpenCV without sacrificing much performance or adding latency (which I have read GStreamer can introduce).

I found out that the buffer I get from the camera is not contiguous: I have one filled buffer of type NvBufSurfaceMemType::NVBUF_MEM_SURFACE_ARRAY with color format NvBufSurfaceColorFormat::NVBUF_COLOR_FORMAT_NV12.
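
For reference, a quick sketch of how the layout can be inspected (surf being the NvBufSurface* returned by NvBufSurfaceFromFd; the helper name is my own):

#include <cstdio>
#include "NvBufSurface.h"

/* Dump the layout of the first surface in the batch. */
static void dumpSurfaceLayout(NvBufSurface *surf)
{
    const NvBufSurfaceParams &p = surf->surfaceList[0];
    std::printf("memType=%d colorFormat=%d layout=%d size=%ux%u pitch=%u planes=%u\n",
                (int)surf->memType, (int)p.colorFormat, (int)p.layout,
                p.width, p.height, p.pitch, p.planeParams.num_planes);
    for (uint32_t n = 0; n < p.planeParams.num_planes; n++)
        std::printf("  plane %u: %ux%u pitch=%u offset=%u psize=%u\n", n,
                    p.planeParams.width[n], p.planeParams.height[n],
                    p.planeParams.pitch[n], p.planeParams.offset[n],
                    p.planeParams.psize[n]);
}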

I started with example 10_argus_camera_recording, which I tried to modify to get an OpenCV Mat out:

#include "Error.h"
#include "Thread.h"
#include "nvmmapi/NvNativeBuffer.h"
#include <Argus/Argus.h>
#include <NvVideoEncoder.h>
#include "NvBufSurface.h"
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <thread>
#include <opencv2/opencv.hpp>
#include <chrono>

using namespace Argus;

/* Constant configuration */
static const int    MAX_ENCODER_FRAMES = 5;
static const int    DEFAULT_FPS        = 30;
static const int    Y_INDEX            = 0;
static const int    START_POS          = 32;
static const int    FONT_SIZE          = 64;
static const int    SHIFT_BITS         = 3;
static const int    array_n[8][8] = {
    { 1, 1, 0, 0, 0, 0, 1, 1 },
    { 1, 1, 1, 0, 0, 0, 1, 1 },
    { 1, 1, 1, 1, 0, 0, 1, 1 },
    { 1, 1, 1, 1, 1, 0, 1, 1 },
    { 1, 1, 0, 1, 1, 1, 1, 1 },
    { 1, 1, 0, 0, 1, 1, 1, 1 },
    { 1, 1, 0, 0, 0, 1, 1, 1 },
    { 1, 1, 0, 0, 0, 0, 1, 1 }
};

/* This value is tricky.
   Too small a value will impact the FPS */
static const int    NUM_BUFFERS        = 10;

/* Configurations which can be overridden by cmdline */
static int          CAPTURE_TIME = 5; // In seconds.
static uint32_t     CAMERA_INDEX = 0;
static Size2D<uint32_t> STREAM_SIZE (1920, 1080);
static std::string  OUTPUT_FILENAME ("output.h264");
static uint32_t     ENCODER_PIXFMT = V4L2_PIX_FMT_H264;
static bool         VERBOSE_ENABLE = true;
static bool         DO_CPU_PROCESS = false;

/* Debug print macros */
#define PRODUCER_PRINT(...) printf("PRODUCER: " __VA_ARGS__)
#define CONSUMER_PRINT(...) printf("CONSUMER: " __VA_ARGS__)
#define CHECK_ERROR(expr) \
    do { \
        if ((expr) < 0) { \
            abort(); \
            ORIGINATE_ERROR(#expr " failed"); \
        } \
    } while (0);

namespace ArgusSamples {

/*
   Helper class to map NvNativeBuffer to Argus::Buffer and vice versa.
   A reference to the DmaBuffer is saved as client data in each Argus::Buffer,
   and the DmaBuffer keeps a reference to the corresponding Argus::Buffer.
   This class also extends NvBuffer to act as a shared buffer between Argus
   and the V4L2 encoder.
*/
class DmaBuffer : public NvNativeBuffer, public NvBuffer
{
public:
    /* Always use this static method to create DmaBuffer */
    static DmaBuffer* create(const Argus::Size2D<uint32_t>& size,
                             NvBufSurfaceColorFormat colorFormat,
                             NvBufSurfaceLayout layout = NVBUF_LAYOUT_PITCH)
    {
        DmaBuffer* buffer = new DmaBuffer(size);
        if (!buffer)
            return NULL;

        NvBufSurf::NvCommonAllocateParams cParams;

        cParams.memtag = NvBufSurfaceTag_CAMERA;
        cParams.width = size.width();
        cParams.height = size.height();
        cParams.colorFormat = colorFormat;
        cParams.layout = layout;
        cParams.memType = NVBUF_MEM_SURFACE_ARRAY;

        if (NvBufSurf::NvAllocate(&cParams, 1, &buffer->m_fd))
        {
            delete buffer;
            return NULL;
        }

        /* save the DMABUF fd in NvBuffer structure */
        buffer->planes[0].fd = buffer->m_fd;
        /* bytesused must be non-zero for a valid buffer */
        buffer->planes[0].bytesused = 1;

        return buffer;
    }

    /* Helper function to convert an Argus Buffer to a DmaBuffer */
    static DmaBuffer* fromArgusBuffer(Buffer *buffer)
    {
        IBuffer* iBuffer = interface_cast<IBuffer>(buffer);
        const DmaBuffer *dmabuf = static_cast<const DmaBuffer*>(iBuffer->getClientData());

        return const_cast<DmaBuffer*>(dmabuf);
    }

    /* Return DMA buffer handle */
    int getFd() const { return m_fd; }

    /* Get and set reference to Argus buffer */
    void setArgusBuffer(Buffer *buffer) { m_buffer = buffer; }
    Buffer *getArgusBuffer() const { return m_buffer; }

private:
    DmaBuffer(const Argus::Size2D<uint32_t>& size)
        : NvNativeBuffer(size),
          NvBuffer(0, 0),
          m_buffer(NULL)
    {
    }

    Buffer *m_buffer;   /* Reference to Argus::Buffer */
};

void nv12ToMat(NvBufSurface *nvBuf, cv::Mat &outMat) {
    // Ensure the buffer is mapped
    if (NvBufSurfaceMap(nvBuf, 0, 0, NVBUF_MAP_READ) != 0) {
        std::cerr << "Failed to map NvBufSurface." << std::endl;
        return;
    }
    // Sync for CPU read
    if (NvBufSurfaceSyncForCpu(nvBuf, 0, 0) != 0) {
        std::cerr << "Failed to sync NvBufSurface for CPU." << std::endl;
        return;
    }

    // Pointer to Y data
    unsigned char *y_ptr = (unsigned char *)nvBuf->surfaceList[0].mappedAddr.addr[0];
    int y_pitch = nvBuf->surfaceList[0].pitch;

    // Pointer to UV data
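    // NOTE: NvBufSurfaceMap(nvBuf, 0, 0, ...) above maps plane 0 only, so for
    // NVBUF_MEM_SURFACE_ARRAY NV12 this addr[1] is still NULL here.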
    unsigned char *uv_ptr = (unsigned char *)nvBuf->surfaceList[0].mappedAddr.addr[1];
    int uv_pitch = nvBuf->surfaceList[0].pitch;

    int width = nvBuf->surfaceList[0].width;
    int height = nvBuf->surfaceList[0].height;


    // Create a Mat containing the Y plane
    cv::Mat y_mat(height, width, CV_8UC1, y_ptr, y_pitch);

    // Create a Mat containing the UV plane
    cv::Mat uv_mat(height / 2, width / 2, CV_8UC2, uv_ptr, uv_pitch);

    // Output Mat
    outMat.create(height, width, CV_8UC3);

    // Convert NV12 to BGR
    cv::cvtColorTwoPlane(y_mat, uv_mat, outMat, cv::COLOR_YUV2BGR_NV12);

    // Unmap the buffer
    NvBufSurfaceUnMap(nvBuf, 0, 0);
}

cv::Mat NvBufSurfaceToMat(NvBufSurface* surface) {
  // Map the buffer so that it can be accessed by CPU
  if (NvBufSurfaceMap (surface, 0, 0, NVBUF_MAP_READ) != 0){
    std::cout << "NvBufSurfaceMap did not work" << std::endl;
  }

  // Cache the mapped data for CPU access
  NvBufSurfaceSyncForCpu (surface, 0, 0);

  // Use openCV to remove padding and convert RGBA to BGR. Can be skipped if
  // algorithm can handle padded RGBA data.
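  // NB: cv::Mat(rows, cols, ...) -- this builds a 1920-row x 1080-column Mat
  // with the NV12 surface pitch as the step, which is likely what trips the
  // "_step >= minstep" assertion.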
  return cv::Mat (1920, 1080,
      CV_8UC4, surface->surfaceList[0].mappedAddr.addr[0],
      surface->surfaceList[0].pitch);
}

/**
 * Argus Producer thread:
 *   Opens the Argus camera driver, creates a BufferOutputStream to output
 *   frames, then performs repeating capture requests for CAPTURE_TIME
 *   seconds before closing the producer and Argus driver.
 */
static bool execute(){
    NvBufSurface *surf[NUM_BUFFERS] = {0};

     /* Create the CameraProvider object and get the core interface */
    UniqueObj<CameraProvider> cameraProvider = UniqueObj<CameraProvider>(CameraProvider::create());
    ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
    if (!iCameraProvider)
        ORIGINATE_ERROR("Failed to create CameraProvider");

    /* Get the camera devices */
    std::vector<CameraDevice*> cameraDevices;
    iCameraProvider->getCameraDevices(&cameraDevices);
    if (cameraDevices.size() == 0)
        ORIGINATE_ERROR("No cameras available");

    if (CAMERA_INDEX >= cameraDevices.size())
    {
        PRODUCER_PRINT("CAMERA_INDEX out of range. Fall back to 0\n");
        CAMERA_INDEX = 0;
    }

    /* Create the capture session using the first device and get the core interface */
    UniqueObj<CaptureSession> captureSession(
            iCameraProvider->createCaptureSession(cameraDevices[CAMERA_INDEX]));
    ICaptureSession *iCaptureSession = interface_cast<ICaptureSession>(captureSession);
    if (!iCaptureSession)
        ORIGINATE_ERROR("Failed to get ICaptureSession interface");

    /* Create the OutputStream */
    PRODUCER_PRINT("Creating output stream\n");
    UniqueObj<OutputStreamSettings> streamSettings(
        iCaptureSession->createOutputStreamSettings(STREAM_TYPE_BUFFER));
    IBufferOutputStreamSettings *iStreamSettings =
        interface_cast<IBufferOutputStreamSettings>(streamSettings);
    if (!iStreamSettings)
        ORIGINATE_ERROR("Failed to get IBufferOutputStreamSettings interface");

    /* Configure the OutputStream to use the EGLImage BufferType */
    iStreamSettings->setBufferType(BUFFER_TYPE_EGL_IMAGE);

    /* Create the OutputStream */
    UniqueObj<OutputStream> outputStream(iCaptureSession->createOutputStream(streamSettings.get()));
    IBufferOutputStream *iBufferOutputStream = interface_cast<IBufferOutputStream>(outputStream);

    /* Allocate native buffers */
    DmaBuffer* nativeBuffers[NUM_BUFFERS];

    for (uint32_t i = 0; i < NUM_BUFFERS; i++)
    {
        nativeBuffers[i] = DmaBuffer::create(STREAM_SIZE, NVBUF_COLOR_FORMAT_NV12,
                    DO_CPU_PROCESS ? NVBUF_LAYOUT_PITCH : NVBUF_LAYOUT_BLOCK_LINEAR);
        if (!nativeBuffers[i])
            ORIGINATE_ERROR("Failed to allocate NativeBuffer");
    }

    /* Create EGLImages from the native buffers */
    EGLImageKHR eglImages[NUM_BUFFERS];
    for (uint32_t i = 0; i < NUM_BUFFERS; i++)
    {
        int ret = 0;

        ret = NvBufSurfaceFromFd(nativeBuffers[i]->getFd(), (void**)(&surf[i]));
        if (ret)
            ORIGINATE_ERROR("%s: NvBufSurfaceFromFd failed\n", __func__);

        ret = NvBufSurfaceMapEglImage (surf[i], 0);
        if (ret)
            ORIGINATE_ERROR("%s: NvBufSurfaceMapEglImage failed\n", __func__);

        eglImages[i] = surf[i]->surfaceList[0].mappedAddr.eglImage;
        if (eglImages[i] == EGL_NO_IMAGE_KHR)
            ORIGINATE_ERROR("Failed to create EGLImage");
    }

    /* Create the BufferSettings object to configure Buffer creation */
    UniqueObj<BufferSettings> bufferSettings(iBufferOutputStream->createBufferSettings());
    IEGLImageBufferSettings *iBufferSettings =
        interface_cast<IEGLImageBufferSettings>(bufferSettings);
    if (!iBufferSettings)
        ORIGINATE_ERROR("Failed to create BufferSettings");

    /* Create the Buffers for each EGLImage (and release to
       stream for initial capture use) */
    UniqueObj<Buffer> buffers[NUM_BUFFERS];
    for (uint32_t i = 0; i < NUM_BUFFERS; i++)
    {
        iBufferSettings->setEGLImage(eglImages[i]);
        buffers[i].reset(iBufferOutputStream->createBuffer(bufferSettings.get()));
        IBuffer *iBuffer = interface_cast<IBuffer>(buffers[i]);

        /* Reference Argus::Buffer and DmaBuffer each other */
        iBuffer->setClientData(nativeBuffers[i]);
        nativeBuffers[i]->setArgusBuffer(buffers[i].get());

        if (!interface_cast<IEGLImageBuffer>(buffers[i]))
            ORIGINATE_ERROR("Failed to create Buffer");
        if (iBufferOutputStream->releaseBuffer(buffers[i].get()) != STATUS_OK)
            ORIGINATE_ERROR("Failed to release Buffer for capture use");
    }

    /* Create capture request and enable output stream */
    UniqueObj<Request> request(iCaptureSession->createRequest());
    IRequest *iRequest = interface_cast<IRequest>(request);
    if (!iRequest)
        ORIGINATE_ERROR("Failed to create Request");
    iRequest->enableOutputStream(outputStream.get());

    ISourceSettings *iSourceSettings = interface_cast<ISourceSettings>(iRequest->getSourceSettings());
    if (!iSourceSettings)
        ORIGINATE_ERROR("Failed to get ISourceSettings interface");
    iSourceSettings->setFrameDurationRange(Range<uint64_t>(1e9/DEFAULT_FPS));

    /* Submit capture requests */
    PRODUCER_PRINT("Starting repeat capture requests.\n");
    if (iCaptureSession->repeat(request.get()) != STATUS_OK)
        ORIGINATE_ERROR("Failed to start repeat capture request");

    PRODUCER_PRINT("Created repeat capture requests.\n");

    /* Capture and process frames */
    while (true) {  // TODO Change
        Argus::Status status = STATUS_OK;
        PRODUCER_PRINT("START of loop\n");
        Buffer* buffer = iBufferOutputStream->acquireBuffer(TIMEOUT_INFINITE, &status);
        if (status == STATUS_END_OF_STREAM) {
            break;
        }
        PRODUCER_PRINT("Acquired buffer\n");

        // Get the DmaBuffer and raw frame data
        DmaBuffer *dmabuf = DmaBuffer::fromArgusBuffer(buffer);
        int dmabuf_fd = dmabuf->getFd();
        NvBufSurface *surf;
        if (NvBufSurfaceFromFd(dmabuf_fd, (void**)(&surf))) {
            ORIGINATE_ERROR("Failed to get NvBufSurface from dmabuf_fd");
            break;
        }

        // NvBufSurfaceSyncForCpu(surf, 0, 0);

        // int width = surf->surfaceList[0].width;
        // int height = surf->surfaceList[0].height;
        // int pitch = surf->surfaceList[0].pitch; // Get the pitch
        // unsigned char* y_plane = (unsigned char*)surf->surfaceList[0].mappedAddr.addr;
        // unsigned char* uv_plane = (unsigned char*)surf->surfaceList[1].mappedAddr.addr;

        // std::cout << "Width: " << width << ", Height: " << height << std::endl;
        // std::cout << "Y Plane Address: " << (void*)y_plane << std::endl;

        // // Correct memory allocation for YUV420 (NV12)
        // cv::Mat yuv_mat(100*height, 100*width, CV_8UC1);
        // // Copy Y plane data with size check
        // // Calculate Y plane size with pitch
        // size_t y_plane_size = pitch * height; 
        // std::cout << "Copying Y plane data: " << y_plane_size << " bytes" << std::endl;

        // memcpy(yuv_mat.data, y_plane, width * height);


        // PRODUCER_PRINT("Memcopy\n");

        // // Copy UV plane data (consider subsampling)
        // for (int i = 0; i < height / 2; ++i) {
        //     memcpy(yuv_mat.data + width * height + i * width, 
        //         uv_plane + i * width, 
        //         width); 
        // }

        // cv::Mat bgr_mat;
        // cv::cvtColor(yuv_mat, bgr_mat, cv::COLOR_YUV2BGR_NV12);
        // cv::Mat bgr_mat;
        // nv12ToMat(surf, bgr_mat);

        cv::Mat bgr_mat = NvBufSurfaceToMat(surf);

        cv::imshow("Frame", bgr_mat);
        cv::waitKey(1);

        iBufferOutputStream->releaseBuffer(dmabuf->getArgusBuffer());
    }

    /* Stop the repeating request and wait for idle */
    iCaptureSession->stopRepeat();
    iBufferOutputStream->endOfStream();
    iCaptureSession->waitForIdle();

    /* Destroy the output stream to end the consumer thread */
    outputStream.reset();

    /* Destroy the EGLImages */
    for (uint32_t i = 0; i < NUM_BUFFERS; i++)
        NvBufSurfaceUnMapEglImage (surf[i], 0);

    /* Destroy the native buffers */
    for (uint32_t i = 0; i < NUM_BUFFERS; i++)
        delete nativeBuffers[i];

    PRODUCER_PRINT("Done -- exiting.\n");

    return true;
}

} // namespace ArgusSamples

int main(int argc, char *argv[])
{
    if (!ArgusSamples::execute())
        return EXIT_FAILURE;

    return EXIT_SUCCESS;
}

The issue with NvBufSurfaceToMat is:
terminate called after throwing an instance of ‘cv::Exception’
what(): OpenCV(4.5.4) /home/ubuntu/build_opencv/opencv/modules/core/src/matrix.cpp:438: error: (-215:Assertion failed) _step >= minstep in function ‘Mat’

And the issue with nv12ToMat is the following, because there is no second mapped plane from which I could read:
terminate called after throwing an instance of ‘cv::Exception’
what(): OpenCV(4.5.4) /home/ubuntu/build_opencv/opencv/modules/core/src/matrix.cpp:428: error: (-215:Assertion failed) total() == 0 || data != NULL in function ‘Mat’

I have also tried countless other methods and am struggling a lot. I keep thinking it cannot be that hard to get a cv::Mat out of libargus, but none of the solutions I read here have helped me.

Thank you very much for your help.

Hi,
You would need to allocate NvBufSurface in RGBA. Please refer to this patch:
How to create OpenCV cv::Mat from NvBuffer in Jetpack 5.1 - #8 by DaveYYY
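
The gist of that patch, as a minimal sketch (not the exact diff): allocate the stream buffers as pitch-linear RGBA so the mapped surface has a single plane, then wrap that plane in a cv::Mat with the pitch as the step. Here surf is the NvBufSurface* obtained from the DmaBuffer fd:

/* Allocate the DmaBuffers as pitch-linear RGBA instead of NV12. */
nativeBuffers[i] = DmaBuffer::create(STREAM_SIZE, NVBUF_COLOR_FORMAT_RGBA,
                                     NVBUF_LAYOUT_PITCH);

/* Per frame, after acquireBuffer(): map, sync, wrap, convert, unmap. */
NvBufSurfaceMap(surf, 0, 0, NVBUF_MAP_READ);
NvBufSurfaceSyncForCpu(surf, 0, 0);
cv::Mat rgba(surf->surfaceList[0].height, surf->surfaceList[0].width,
             CV_8UC4, surf->surfaceList[0].mappedAddr.addr[0],
             surf->surfaceList[0].pitch);      /* pitch as the Mat step */
cv::Mat bgr;
cv::cvtColor(rgba, bgr, cv::COLOR_RGBA2BGR);   /* deep copy, drops row padding */
NvBufSurfaceUnMap(surf, 0, 0);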

Thanks for the comment on converting the buffer. This looks straightforward, and I got it to stop crashing.

But the image I am creating contains only completely black pixels. This could be because it is the first frame of the stream, so I do not currently know whether it is really a bug, but I assume so.

The bigger issue is that inside the while loop, after the first frame is captured, I am stuck in Buffer *buffer = iBufferOutputStream->acquireBuffer(TIMEOUT_INFINITE, &status);.

I thought that the CaptureSession repeat function would keep capturing frames for as long as it runs? I did not fully understand the sample, so it is very possible that I misunderstood a few things. If there is no obvious issue in the way I am capturing images, a few questions: Do I even need the DmaBuffer class, or is it unnecessary for my use case? Do I want / need the EGL* images?

Thank you very much for the help.

#include "Error.h"
#include "Thread.h"
#include "nvmmapi/NvNativeBuffer.h"
#include <Argus/Argus.h>
#include <NvVideoEncoder.h>
#include "NvBufSurface.h"
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <opencv2/opencv.hpp>

using namespace Argus;

static const int NUM_BUFFERS = 10;  /* This value is tricky. Too small a value will impact the FPS */
static const int DEFAULT_FPS = 30;
static uint32_t CAMERA_INDEX = 0;
static Size2D<uint32_t> STREAM_SIZE(1920, 1080);
int i = 0; /* frame counter used for output filenames */
#define PRODUCER_PRINT(...) printf("PRODUCER: " __VA_ARGS__)

namespace ArgusSamples
{

    /*
       Helper class to map NvNativeBuffer to Argus::Buffer and vice versa.
       A reference to the DmaBuffer is saved as client data in each Argus::Buffer,
       and the DmaBuffer keeps a reference to the corresponding Argus::Buffer.
       This class also extends NvBuffer to act as a shared buffer between Argus
       and the V4L2 encoder.
    */
    class DmaBuffer : public NvNativeBuffer, public NvBuffer
    {
    public:
        /* Always use this static method to create DmaBuffer */
        static DmaBuffer *create(const Argus::Size2D<uint32_t> &size,
                                 NvBufSurfaceColorFormat colorFormat,
                                 NvBufSurfaceLayout layout = NVBUF_LAYOUT_PITCH)
        {
            DmaBuffer *buffer = new DmaBuffer(size);
            if (!buffer)
                return NULL;

            NvBufSurf::NvCommonAllocateParams cParams;

            cParams.width = size.width();
            cParams.height = size.height();
            cParams.colorFormat = colorFormat;
            cParams.layout = layout;
            cParams.memType = NVBUF_MEM_SURFACE_ARRAY;
            cParams.memtag = NvBufSurfaceTag_VIDEO_CONVERT;

            if (NvBufSurf::NvAllocate(&cParams, 1, &buffer->m_fd))
            {
                delete buffer;
                return NULL;
            }

            /* save the DMABUF fd in NvBuffer structure */
            buffer->planes[0].fd = buffer->m_fd;
            /* bytesused must be non-zero for a valid buffer */
            buffer->planes[0].bytesused = 1;

            return buffer;
        }

        /* Helper function to convert an Argus Buffer to a DmaBuffer */
        static DmaBuffer *fromArgusBuffer(Buffer *buffer)
        {
            IBuffer *iBuffer = interface_cast<IBuffer>(buffer);
            const DmaBuffer *dmabuf = static_cast<const DmaBuffer *>(iBuffer->getClientData());

            return const_cast<DmaBuffer *>(dmabuf);
        }

        /* Return DMA buffer handle */
        int getFd() const { return m_fd; }

        /* Get and set reference to Argus buffer */
        void setArgusBuffer(Buffer *buffer) { m_buffer = buffer; }
        Buffer *getArgusBuffer() const { return m_buffer; }

    private:
        DmaBuffer(const Argus::Size2D<uint32_t> &size)
            : NvNativeBuffer(size),
              NvBuffer(0, 0),
              m_buffer(NULL)
        {
        }

        Buffer *m_buffer; /* Reference to Argus::Buffer */
    };

    static bool execute()
    {
        NvBufSurface *surf[NUM_BUFFERS] = {0};

        /* Create the CameraProvider object and get the core interface */
        UniqueObj<CameraProvider> cameraProvider = UniqueObj<CameraProvider>(CameraProvider::create());
        ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
        if (!iCameraProvider)
            ORIGINATE_ERROR("Failed to create CameraProvider");

        /* Get the camera devices */
        std::vector<CameraDevice *> cameraDevices;
        iCameraProvider->getCameraDevices(&cameraDevices);
        if (cameraDevices.size() == 0)
            ORIGINATE_ERROR("No cameras available");

        if (CAMERA_INDEX >= cameraDevices.size())
        {
            PRODUCER_PRINT("CAMERA_INDEX out of range. Fall back to 0\n");
            CAMERA_INDEX = 0;
        }

        /* Create the capture session using the first device and get the core interface */
        UniqueObj<CaptureSession> captureSession(iCameraProvider->createCaptureSession(cameraDevices[CAMERA_INDEX]));
        ICaptureSession *iCaptureSession = interface_cast<ICaptureSession>(captureSession);
        if (!iCaptureSession)
            ORIGINATE_ERROR("Failed to get ICaptureSession interface");

        /* Create the OutputStream */
        PRODUCER_PRINT("Creating output stream\n");
        UniqueObj<OutputStreamSettings> streamSettings(
            iCaptureSession->createOutputStreamSettings(STREAM_TYPE_BUFFER));
        IBufferOutputStreamSettings *iStreamSettings =
            interface_cast<IBufferOutputStreamSettings>(streamSettings);
        if (!iStreamSettings)
            ORIGINATE_ERROR("Failed to get IBufferOutputStreamSettings interface");

        /* Configure the OutputStream to use the EGLImage BufferType */
        iStreamSettings->setBufferType(BUFFER_TYPE_EGL_IMAGE);

        /* Create the OutputStream */
        UniqueObj<OutputStream> outputStream(iCaptureSession->createOutputStream(streamSettings.get()));
        IBufferOutputStream *iBufferOutputStream = interface_cast<IBufferOutputStream>(outputStream);

        /* Allocate native buffers */
        DmaBuffer *nativeBuffers[NUM_BUFFERS];

        for (uint32_t i = 0; i < NUM_BUFFERS; i++)
        {
            nativeBuffers[i] = DmaBuffer::create(STREAM_SIZE, NVBUF_COLOR_FORMAT_RGBA, NVBUF_LAYOUT_PITCH);
            if (!nativeBuffers[i])
                ORIGINATE_ERROR("Failed to allocate NativeBuffer");
        }

        /* Create EGLImages from the native buffers */
        EGLImageKHR eglImages[NUM_BUFFERS];
        for (uint32_t i = 0; i < NUM_BUFFERS; i++)
        {
            int ret = 0;

            ret = NvBufSurfaceFromFd(nativeBuffers[i]->getFd(), (void **)(&surf[i]));
            if (ret)
                ORIGINATE_ERROR("%s: NvBufSurfaceFromFd failed\n", __func__);

            ret = NvBufSurfaceMapEglImage(surf[i], 0);
            if (ret)
                ORIGINATE_ERROR("%s: NvBufSurfaceMapEglImage failed\n", __func__);

            eglImages[i] = surf[i]->surfaceList[0].mappedAddr.eglImage;
            if (eglImages[i] == EGL_NO_IMAGE_KHR)
                ORIGINATE_ERROR("Failed to create EGLImage");
        }

        /* Create the BufferSettings object to configure Buffer creation */
        UniqueObj<BufferSettings> bufferSettings(iBufferOutputStream->createBufferSettings());
        IEGLImageBufferSettings *iBufferSettings =
            interface_cast<IEGLImageBufferSettings>(bufferSettings);
        if (!iBufferSettings)
            ORIGINATE_ERROR("Failed to create BufferSettings");

        /* Create the Buffers for each EGLImage (and release to
           stream for initial capture use) */
        UniqueObj<Buffer> buffers[NUM_BUFFERS];
        for (uint32_t i = 0; i < NUM_BUFFERS; i++)
        {
            iBufferSettings->setEGLImage(eglImages[i]);
            buffers[i].reset(iBufferOutputStream->createBuffer(bufferSettings.get()));
            IBuffer *iBuffer = interface_cast<IBuffer>(buffers[i]);

            /* Reference Argus::Buffer and DmaBuffer each other */
            iBuffer->setClientData(nativeBuffers[i]);
            nativeBuffers[i]->setArgusBuffer(buffers[i].get());

            if (!interface_cast<IEGLImageBuffer>(buffers[i]))
                ORIGINATE_ERROR("Failed to create Buffer");
            if (iBufferOutputStream->releaseBuffer(buffers[i].get()) != STATUS_OK)
                ORIGINATE_ERROR("Failed to release Buffer for capture use");
        }

        /* Create capture request and enable output stream */
        UniqueObj<Request> request(iCaptureSession->createRequest());
        IRequest *iRequest = interface_cast<IRequest>(request);
        if (!iRequest)
            ORIGINATE_ERROR("Failed to create Request");
        iRequest->enableOutputStream(outputStream.get());

        ISourceSettings *iSourceSettings = interface_cast<ISourceSettings>(iRequest->getSourceSettings());
        if (!iSourceSettings)
            ORIGINATE_ERROR("Failed to get ISourceSettings interface");
        iSourceSettings->setFrameDurationRange(Range<uint64_t>(1e9 / DEFAULT_FPS));

        /* Submit capture requests */
        PRODUCER_PRINT("Starting repeat capture requests.\n");
        if (iCaptureSession->repeat(request.get()) != STATUS_OK)
            ORIGINATE_ERROR("Failed to start repeat capture request");

        PRODUCER_PRINT("Created repeat capture requests.\n");

        /* Capture and process frames */
        while (true) // TODO Change
        { 
            Argus::Status status = STATUS_OK;
            PRODUCER_PRINT("START of loop\n");
            Buffer *buffer = iBufferOutputStream->acquireBuffer(TIMEOUT_INFINITE, &status);
            if (status == STATUS_END_OF_STREAM)
            {
                break;
            }
            PRODUCER_PRINT("Acquired buffer\n");

            // Get the DmaBuffer and raw frame data
            DmaBuffer *dmabuf = DmaBuffer::fromArgusBuffer(buffer);
            int dmabuf_fd = dmabuf->getFd();
            NvBufSurface *surf;
            if (NvBufSurfaceFromFd(dmabuf_fd, (void **)(&surf)))
            {
                ORIGINATE_ERROR("Failed to get NvBufSurface from dmabuf_fd");
                break;
            }

            // Ensure the buffer is mapped
            if (NvBufSurfaceMap(surf, -1, 0, NVBUF_MAP_READ) != 0)
            {
                std::cerr << "Failed to map NvBufSurface." << std::endl;
                return false;
            }
            // Sync for CPU read
            if (NvBufSurfaceSyncForCpu(surf, -1, 0) != 0)
            {
                std::cerr << "Failed to sync NvBufSurface for CPU." << std::endl;
                return false;
            }

            cv::Mat imgbuf = cv::Mat(1080, 1920, CV_8UC4, surf->surfaceList[0].mappedAddr.addr[0], surf->surfaceList[0].pitch);
            cv::Mat display_img;
            cvtColor(imgbuf, display_img, cv::COLOR_RGBA2BGR);
            cv::imwrite("test" + std::to_string(i++) + ".png", display_img);

            // Unmap the NvBufSurface
            if (NvBufSurfaceUnMap(surf, -1, 0) != 0)
            {
                std::cerr << "Failed to unmap NvBufSurface." << std::endl;
                return false;
            }

            iBufferOutputStream->releaseBuffer(dmabuf->getArgusBuffer());
        }

        /* Stop the repeating request and wait for idle */
        iCaptureSession->stopRepeat();
        iBufferOutputStream->endOfStream();
        iCaptureSession->waitForIdle();

        /* Destroy the output stream to end the consumer thread */
        outputStream.reset();

        /* Destroy the EGLImages */
        for (uint32_t i = 0; i < NUM_BUFFERS; i++)
            NvBufSurfaceUnMapEglImage(surf[i], 0);

        /* Destroy the native buffers */
        for (uint32_t i = 0; i < NUM_BUFFERS; i++)
            delete nativeBuffers[i];

        PRODUCER_PRINT("Done -- exiting.\n");
        return true;
    }

} // namespace ArgusSamples

int main(int argc, char *argv[])
{
    if (!ArgusSamples::execute())
        return EXIT_FAILURE;

    return EXIT_SUCCESS;
}

Hi,
The patch is for the 13 sample. Please first apply it to the 13 sample and check that you can successfully run that sample with your camera, so that you can do further development based on it.
