Encoder error: "Qing buffer error: Device or resource busy"

I’m trying to do image processing on a TX2 with NVCAMERA and VIC/ENCODER using the Multimedia API.

I wrote code that works fine on a TX1 with JetPack 24.2.1, but I get an error on JetPack 28.2.1 with both the TX1 and TX2.

The pipeline:
Nvcamera -> VIC -> Image processing -> Nvbuffer create && copy to nvbuffer -> pass the nvbuffer fd to VIC -> ENCODER -> H264

  1. convert init.
int stitch_vic_init()                                            
{
    std::string conv_name_stitch="conv_stitch";                  
    GUARD_PROCESS_OUT_BOOL(stitch_conv =  NvVideoConverter::createVideoConverter(conv_name_stitch.c_str()),"create video converter");
    GUARD_PROCESS_OUT_INT(stitch_conv->setFlipMethod((enum v4l2_flip_method) -1),"set flip method");
    GUARD_PROCESS_OUT_INT(stitch_conv->setInterpolationMethod((enum v4l2_interpolation_method) -1),"set interpolation method");
    GUARD_PROCESS_OUT_INT(stitch_conv->setTnrAlgorithm((enum v4l2_tnr_algorithm) -1),"set tnr algorithm");
    GUARD_PROCESS_OUT_INT(stitch_conv->setOutputPlaneFormat(V4L2_PIX_FMT_ABGR32, result_rec.width,
                result_rec.height, V4L2_NV_BUFFER_LAYOUT_BLOCKLINEAR),"set up VIC output plane format");
    GUARD_PROCESS_OUT_INT(stitch_conv->setCapturePlaneFormat(V4L2_PIX_FMT_YUV420M, result_rec.width,
                result_rec.height , V4L2_NV_BUFFER_LAYOUT_PITCH),"set up VIC capture plane format");                                                                                                                                          

    GUARD_PROCESS_OUT_INT(stitch_conv->output_plane.setupPlane(V4L2_MEMORY_DMABUF,
                V4L2_BUFFERS_NUM, false, false) ,"allocate VIC output plane");                                                                                                                                                                
       
    GUARD_PROCESS_OUT_INT(stitch_conv->capture_plane.setupPlane(V4L2_MEMORY_MMAP, 
                V4L2_BUFFERS_NUM, true, false),"allocate VIC capture plane");                                                                                                                                                                 

    GUARD_PROCESS_OUT_BOOL(stitch_buff = (nv_buffer *)malloc(V4L2_BUFFERS_NUM*sizeof(nv_buffer)),"stitch_buff malloc");                                                                                                                       

    conv_output_plane_stitching_buf_queue = new std::queue < nv_buffer * >;                                                                                                                                                                   

    // 
    GUARD_PROCESS_OUT_BOOL(stitch_buff = (nv_buffer *)malloc(V4L2_BUFFERS_NUM*sizeof(nv_buffer)),"stitch_buff malloc")                                                                                                                        


    sem_init (&g_stitch_sem, 0, 1);
    sem_init (&g_stitch_sem_post, 0, 0);


    GUARD_PROCESS_OUT_INT(stitch_conv->output_plane.setStreamStatus(true),"set conv output stream on");                                                                                                                                       

    GUARD_PROCESS_OUT_INT(stitch_conv->capture_plane.setStreamStatus(true),"set conv capture stream on");                                                                                                                                     

    GUARD_PROCESS_OUT_BOOL(stitch_conv->output_plane.setDQThreadCallback(ArgusSamples::stitch_conv_output_dqbuf_thread_callback),\
                        "set stitch_conv_output_dqbuf_thread_callback");                                                                                                                                                                      

    GUARD_PROCESS_OUT_BOOL(stitch_conv->capture_plane.setDQThreadCallback(ArgusSamples::stitch_conv_capture_dqbuf_thread_callback),\
                        "set stitch_conv_capture_dqbuf_thread_callback");
    GUARD_PROCESS_OUT_INT(stitch_conv->output_plane.startDQThread(&ctx),"start output_plane thread");
    GUARD_PROCESS_OUT_INT(stitch_conv->capture_plane.startDQThread(&ctx),"start capture_plane thread");                                                                                                                                       

    for (unsigned int index = 0;index < stitch_conv->output_plane.getNumBuffers(); index++)                                                                                                                                                   
    {                                                            

        NvBufferCreateParams input_params = {0};
        input_params.payloadType = NvBufferPayload_SurfArray;
        input_params.nvbuf_tag = NvBufferTag_NONE;
        int fd;
        input_params.width = result_rec.width;
        input_params.height = result_rec.height;
        input_params.layout = NvBufferLayout_Pitch;
        input_params.colorFormat = NvBufferColorFormat_ABGR32;

        if (NvBufferCreateEx(&fd, &input_params) < 0)
            ORIGINATE_ERROR("Failed to create NvBuffer.");

        int ret = -1;
        NvBufferParams parm;
        ret = NvBufferGetParams(fd, &parm);

        NvBufferParams params = {0};
        NvBufferGetParams(fd, &params);

        stitch_buff_pitch = params.pitch[0];


        ret = NvBufferMemMap(fd, 0, NvBufferMem_Read_Write, (void **)&stitch_buff[index].start);

        stitch_buff[index].dmabuff_fd = fd;
        stitch_buff[index].size = params.height[0] * params.pitch[0];;


        struct v4l2_buffer v4l2_buf;
        struct v4l2_plane planes[MAX_PLANES];
        memset(&v4l2_buf, 0, sizeof(v4l2_buf));
        memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
        v4l2_buf.index = index;
        v4l2_buf.m.planes = planes;
        GUARD_PROCESS_OUT_INT(stitch_conv->output_plane.qBuffer(v4l2_buf, NULL) ,"enqueue empty buffer into VIC output plane");
    }

    for (unsigned int index = 0;index < stitch_conv->capture_plane.getNumBuffers(); index++)
    {
        struct v4l2_buffer v4l2_buf;
        struct v4l2_plane planes[MAX_PLANES];
        memset(&v4l2_buf, 0, sizeof(v4l2_buf));
        memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
        v4l2_buf.index = index;
        v4l2_buf.m.planes = planes;
        GUARD_PROCESS_OUT_INT(stitch_conv->capture_plane.qBuffer(v4l2_buf, NULL) ,"enqueue empty buffer into VIC output plane");
    }

    return true;
}
  1. convert_call_back
bool stitch_conv_output_dqbuf_thread_callback(struct v4l2_buffer *v4l2_buf, NvBuffer * buffer,  NvBuffer * shared_buffer, void *arg)                                                                                                          
{
    nv_buffer  * cam_g_buff;   

    sem_wait(&g_stitch_sem_post);
    if(conv_output_plane_stitching_buf_queue->size()>0)
    {  
        cam_g_buff = conv_output_plane_stitching_buf_queue->front();      
        conv_output_plane_stitching_buf_queue->pop();
        if (cam_g_buff->dmabuff_fd == 0)
        {
            printf("-------cam_g_buff->dmabuff_fd == 0\n");
            return false;
        }
        else
        {
            // Enqueue vic output plane
            v4l2_buf->m.planes[0].m.fd      = (unsigned long)cam_g_buff->dmabuff_fd;
            v4l2_buf->m.planes[0].bytesused = cam_g_buff->size;
                                                                                                                             
        }

        if (stitch_conv->output_plane.qBuffer(*v4l2_buf, NULL) < 0)       
        {
 
            ERROR_RETURN("Failed to enqueue VIC output plane");
        }
    }   

    return true;

}
  1. encoder init and its callback
bool encoderCapturePlaneDqCallback(struct v4l2_buffer *v4l2_buf, NvBuffer * buffer,NvBuffer * shared_buffer,void *arg)
{
    // NULL v4l2_buf means the dequeue itself failed: abort and stop the thread.
    if (!v4l2_buf)
    {
        m_VideoEncoder->abort();
        THREAD_PRINT("Failed to dequeue buffer from encoder capture plane");
        // FIX: this return was missing; falling through dereferenced the
        // NULL v4l2_buf in qBuffer() below and crashed.
        return false;
    }
    // Return the capture buffer to the encoder so it can be refilled.
    if (m_VideoEncoder->capture_plane.qBuffer(*v4l2_buf, NULL) < 0)
    {
        std::cerr << "Allen: Error while Qing buffer at capture plane" << std::endl;
    }

    // GOT EOS from m_VideoEncoderoder. Stop dqthread.
    if (buffer->planes[0].bytesused == 0)
    {
        WARN("Got EOS, exiting...\n");
        return false;
    }
    return true;
}
  1. error log
=================== stitch_conv_output_dqbuf_thread_callback, 795 cam_g_buff->size: 4249600
stitch_conv_capture_dqbuf_thread_callback, 814, v4l2_buf->m.planes[0].bytesused: 1310720 
stitch_conv_capture_dqbuf_thread_callback, 820 ====================  buffer->planes[0].bytesused:1310720
===== MSENC blits (mode: 1) into tiled surfaces =====
allen ==== : converterCapturePlaneDqCallback, 506  cam_idx 0
 encoderCapturePlaneDqCallback, 625 ====================  byteused: 24 
 encoderCapturePlaneDqCallback, 649 ====================  byteused: 24 
 encoderCapturePlaneDqCallback, 655 ====================  byteused: 24 
encoderCapturePlaneDqCallback, 625 ====================  byteused: 278 
encoderCapturePlaneDqCallback, 649 ====================  byteused: 278 
encoderCapturePlaneDqCallback, 655 ==================== byteused: 278 

  =================== stitch_conv_output_dqbuf_thread_callback, 795 cam_g_buff->size: 4249600
stitch_conv_capture_dqbuf_thread_callback, 814, v4l2_buf->m.planes[0].bytesused: 1310720 
 stitch_conv_capture_dqbuf_thread_callback, 820 ====================  buffer->planes[0].bytesused:1310720
 encoderCapturePlaneDqCallback, 625 ==================== byteused: 111 
 encoderCapturePlaneDqCallback, 649 ==================== byteused: 111 
 encoderCapturePlaneDqCallback, 655 ==================== byteused: 111 

  =================== stitch_conv_output_dqbuf_thread_callback, 795 cam_g_buff->size: 4249600
stitch_conv_capture_dqbuf_thread_callback, 814, v4l2_buf->m.planes[0].bytesused: 1310720 
stitch_conv_capture_dqbuf_thread_callback, 820 ====================  buffer->planes[0].bytesused:1310720
 encoderCapturePlaneDqCallback, 625 ====================  byteused: 104 
 encoderCapturePlaneDqCallback, 649 ====================  byteused: 104 
 encoderCapturePlaneDqCallback, 655 ====================  byteused: 104 
=================== stitch_conv_output_dqbuf_thread_callback, 795 cam_g_buff->size: 4249600
stitch_conv_capture_dqbuf_thread_callback, 814, v4l2_buf->m.planes[0].bytesused: 1310720 
stitch_conv_capture_dqbuf_thread_callback, 820 ====================  buffer->planes[0].bytesused:1310720
 =================== stitch_conv_output_dqbuf_thread_callback, 795 cam_g_buff->size: 4249600
stitch_conv_capture_dqbuf_thread_callback, 814, v4l2_buf->m.planes[0].bytesused: 1310720 
stitch_conv_capture_dqbuf_thread_callback, 820 ====================  buffer->planes[0].bytesused:1310720
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy

Could anyone give some help on this.

Could anyone give some suggestion?

Hi Allen,
Please try NvBufferTransform:

/**
 * This method is used to transform one DMA buffer to another DMA buffer.
 * It can support transforms for copying, scaling, fliping, rotation and cropping.
 * @param[in] src_dmabuf_fd DMABUF FD of source buffer
 * @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
 * @param[in] transform_params transform parameters
 *
 * @return 0 for sucess, -1 for failure.
 */
int NvBufferTransform (int src_dmabuf_fd, int dst_dmabuf_fd, NvBufferTransformParams *transform_params);

Here is a sample with a V4L2 camera:
https://devtalk.nvidia.com/default/topic/1031967/jetson-tx2/tegra_multimedia_api-dq-buffer-from-encoder-output_plane-can-not-completed/post/5251268/#5251268

Hi,DaneLLL

Thanks for your suggestions. I’ll try.

Could you give some advice on how to put GPU memory into a VIC fd without a memory copy?
It’s important for us for real-time image processing.

Hi, DaneLLL

I tried with NvBufferTransform, but got the errors.

LSC: LSC surface is not based on full res!
Set governor to performance before enabling profiler
-------result_rec.width(1555)---result_rec.height(664)---
Failed to query video capabilities: Inappropriate ioctl for device
libv4l2_nvvidconv (0):(792) (INFO) : Allocating (4) OUTPUT PLANE BUFFERS Layout=1
libv4l2_nvvidconv (0):(808) (INFO) : Allocating (4) CAPTURE PLANE BUFFERS Layout=0
allen:  ====== stitch_vic_init, params.pitch[0] 6400 ,  result_rec.height: 664
stitch_vic_init, 1515
stitch_vic_init, 1526
nvbuf_utils: dmabuf_fd 0 mapped entry NOT found
Segmentation fault
// For each VIC output-plane slot: create a CPU-writable ABGR32 source buffer,
// create an NV12 destination buffer, blit source -> destination with
// NvBufferTransform, and prime the plane.
for (unsigned int index = 0;index < stitch_conv->output_plane.getNumBuffers(); index++)
    {
        int nv_fd = -1;   // pitch-linear ABGR32 source (CPU stitching writes here)
        int hw_fd = -1;   // NV12 destination whose fd is queued on the VIC

        NvBufferCreateParams input_params = {0};
        input_params.payloadType = NvBufferPayload_SurfArray;
        input_params.nvbuf_tag = NvBufferTag_NONE;
        input_params.width = result_rec.width;
        input_params.height = result_rec.height;
        input_params.layout = NvBufferLayout_Pitch;
        input_params.colorFormat = NvBufferColorFormat_ABGR32;

        if (NvBufferCreateEx(&nv_fd, &input_params) < 0)
            ORIGINATE_ERROR("Failed to create NvBuffer.");

        // FIX: params was previously queried twice and the result ignored.
        NvBufferParams params = {0};
        if (NvBufferGetParams(nv_fd, &params) != 0)
            ORIGINATE_ERROR("Failed to query NvBuffer params.");

        stitch_buff_pitch = params.pitch[0];

        printf("allen:  ====== %s, params.pitch[0] %d ,  result_rec.height: %d\n", __func__, params.pitch[0], result_rec.height);

        if (NvBufferMemMap(nv_fd, 0, NvBufferMem_Read_Write, (void **)&stitch_buff[index].start) != 0)
            ORIGINATE_ERROR("Failed to map NvBuffer.");

        NvBufferMemSyncForDevice(nv_fd, 0, (void **)&stitch_buff[index].start);

        // FIX: hw_fd was used as the NvBufferTransform destination while
        // UNINITIALIZED, which is what produced
        // "nvbuf_utils: dmabuf_fd 0 mapped entry NOT found" and the segfault.
        // A real destination buffer must be created first.
        NvBufferCreateParams dst_params = {0};
        dst_params.payloadType = NvBufferPayload_SurfArray;
        dst_params.nvbuf_tag = NvBufferTag_NONE;
        dst_params.width = result_rec.width;
        dst_params.height = result_rec.height;
        dst_params.layout = NvBufferLayout_Pitch;
        dst_params.colorFormat = NvBufferColorFormat_NV12;

        if (NvBufferCreateEx(&hw_fd, &dst_params) < 0)
            ORIGINATE_ERROR("Failed to create destination NvBuffer.");

        // Copy/convert the source buffer into the destination buffer.
        NvBufferTransformParams transParams;
        memset(&transParams, 0, sizeof(transParams));
        transParams.transform_flag = NVBUFFER_TRANSFORM_FILTER;
        transParams.transform_filter = NvBufferTransform_Filter_Smart;

        if (-1 == NvBufferTransform(nv_fd, hw_fd,
                    &transParams))
            ERROR_RETURN("Failed to convert the buffer");

        stitch_buff[index].dmabuff_fd = hw_fd;
        stitch_buff[index].size = params.height[0] * params.pitch[0];

        // Prime the DMABUF output plane with this slot.
        struct v4l2_buffer v4l2_buf;
        struct v4l2_plane planes[MAX_PLANES];
        memset(&v4l2_buf, 0, sizeof(v4l2_buf));
        memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
        v4l2_buf.index = index;
        v4l2_buf.m.planes = planes;
        GUARD_PROCESS_OUT_INT(stitch_conv->output_plane.qBuffer(v4l2_buf, NULL) ,"enqueue empty buffer into VIC output plane");
    }

Hi Allen

Nvcamera -> <b>first VIC</b> -> Image processing -> Nvbuffer create && copy to nvbuffer -> pass the nvbuffer fd to <b>second VIC</b> -> ENCODER -> H264

For NVCAMERA, do you mean Argus?

Does the segment fault happen in first VIC or second VIC?

What is the format conversion in the first VIC and second VIC? First does NV12->RGBA and second does RGBA->NV12?

Hi DaneLLL

Yes, argus API.

The second VIC. Just segment during NvBufferTransform().

The first VIC: NV12 -> RGBA
The second VIC: RGBA-> YUV420M (because encoder need this format to encode)

Hi Allen,
Here is a suggestion to your case:

// get RGBA buffer via createNvBuffer()
iNativeBuffer->createNvBuffer(RGBA)
// convert to NV12 via NVBufferTransform()
NvBufferTransform(RGBA->NV12)

Here is the patch to 10_camera_recording

diff --git a/multimedia_api/ll_samples/samples/10_camera_recording/main.cpp b/multimedia_api/ll_samples/samples/10_camera_recording/main.cpp
index 6f531b8..a57f193 100644
--- a/multimedia_api/ll_samples/samples/10_camera_recording/main.cpp
+++ b/multimedia_api/ll_samples/samples/10_camera_recording/main.cpp
@@ -218,7 +218,7 @@ bool ConsumerThread::threadExecute()
     while (!m_gotError)
     {
         NvBuffer *buffer;
-        int fd = -1;
+        int fd = -1, fd1 = -1;
 
         struct v4l2_buffer v4l2_buf;
         struct v4l2_plane planes[MAX_PLANES];
@@ -266,11 +266,26 @@ bool ConsumerThread::threadExecute()
             interface_cast<NV::IImageNativeBuffer>(iFrame->getImage());
         if (!iNativeBuffer)
             ORIGINATE_ERROR("IImageNativeBuffer not supported by Image.");
-        fd = iNativeBuffer->createNvBuffer(STREAM_SIZE,
-                                           NvBufferColorFormat_YUV420,
+        fd1 = iNativeBuffer->createNvBuffer(STREAM_SIZE,
+                                           NvBufferColorFormat_ABGR32,
                                            (DO_CPU_PROCESS)?NvBufferLayout_Pitch:NvBufferLayout_BlockLinear);
         if (VERBOSE_ENABLE)
-            CONSUMER_PRINT("Acquired Frame. %d\n", fd);
+            CONSUMER_PRINT("Acquired Frame. %d\n", fd1);
+
+        NvBufferCreateParams input_params = {0};
+        input_params.payloadType = NvBufferPayload_SurfArray;
+        input_params.width = STREAM_SIZE.width();
+        input_params.height = STREAM_SIZE.height();
+        input_params.layout = NvBufferLayout_Pitch;
+        input_params.colorFormat = NvBufferColorFormat_NV12;
+        input_params.nvbuf_tag = NvBufferTag_CAMERA;
+        NvBufferCreateEx(&fd, &input_params);
+
+        NvBufferTransformParams transParams = {0};
+        transParams.transform_flag = NVBUFFER_TRANSFORM_FILTER;
+        transParams.transform_filter = NvBufferTransform_Filter_Smart;
+        NvBufferTransform(fd1, fd, &transParams);
+        NvBufferDestroy(fd1);
 
         if (DO_CPU_PROCESS) {
             NvBufferParams par;

We have verified it with

$ ./camera_recording -d 5 -c

Hi, DaneLLL

I tried with your suggestion. NvBufferTransform can work. but I still got the error.

LSC: LSC surface is not based on full res!
Failed to query video capabilities: Inappropriate ioctl for device
libv4l2_nvvidconv (0):(792) (INFO) : Allocating (3) OUTPUT PLANE BUFFERS Layout=1
libv4l2_nvvidconv (0):(808) (INFO) : Allocating (3) CAPTURE PLANE BUFFERS Layout=0
SCF: Error InvalidState:  NonFatal ISO BW requested not set. Requested = 2148120071 Set = 4687500 (in src/services/power/PowerServiceCore.cpp, function setCameraBw(), line 653)
this thread -1760235792 is running in processor 0, stitch_thread
this thread -1760235792 is running in processor 3, stitch_thread
===== MSENC blits (mode: 1) into tiled surfaces =====
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy
[ERROR] (NvV4l2ElementPlane.cpp:254) <enc0> Output Plane:Error while Qing buffer: Device or resource busy

I have a few questions.

/**    
 * @defgroup V4L2Enc V4L2 Video Encoder
 *
 * @brief NVIDIA Tegra V4L2 Video Encoder Description and Extensions
 *
 * The video encoder device node is \c "/dev/nvhost-msenc".
 *
 * ### Supported Pixelformats
 * OUTPUT PLANE            | CAPTURE PLANE
 * :---------------------: | :--------------
 * V4L2_PIX_FMT_YUV420M    | V4L2_PIX_FMT_H264
 *           -             | V4L2_PIX_FMT_H265

For the encoder, only V4L2_PIX_FMT_YUV420M can be supported. so

// convert to NV12 via NVBufferTransform()
NvBufferTransform(RGBA->NV12)

is not what I wanted.

I need YUV420M to feed into the hardware encode.

The code I update as suggested.

std::string conv_name_stitch="conv_stitch";
        // VIC setup: ABGR32 (block-linear) in -> YUV420M (pitch-linear) out.
        GUARD_PROCESS_OUT_BOOL(stitch_conv = NvVideoConverter::createVideoConverter(conv_name_stitch.c_str()),"create video converter");
        // -1 selects the converter's default for each algorithm.
        GUARD_PROCESS_OUT_INT(stitch_conv->setFlipMethod((enum v4l2_flip_method) -1),"set flip method");
        GUARD_PROCESS_OUT_INT(stitch_conv->setInterpolationMethod((enum v4l2_interpolation_method) -1),"set interpolation method");
        GUARD_PROCESS_OUT_INT(stitch_conv->setTnrAlgorithm((enum v4l2_tnr_algorithm) -1),"set tnr algorithm");
        GUARD_PROCESS_OUT_INT(stitch_conv->setOutputPlaneFormat(V4L2_PIX_FMT_ABGR32, result_rec.width,
                    result_rec.height, V4L2_NV_BUFFER_LAYOUT_BLOCKLINEAR),"set up VIC output plane format");
        GUARD_PROCESS_OUT_INT(stitch_conv->setCapturePlaneFormat(V4L2_PIX_FMT_YUV420M, result_rec.width,
                    result_rec.height , V4L2_NV_BUFFER_LAYOUT_PITCH),"set up VIC capture plane format");

        GUARD_PROCESS_OUT_INT(stitch_conv->output_plane.setupPlane(V4L2_MEMORY_DMABUF,
                    V4L2_BUFFERS_NUM, false, false) ,"allocate VIC output plane");

        GUARD_PROCESS_OUT_INT(stitch_conv->capture_plane.setupPlane(V4L2_MEMORY_MMAP,
                    V4L2_BUFFERS_NUM, true, false),"allocate VIC capture plane");

        GUARD_PROCESS_OUT_BOOL(stitch_buff = (nv_buffer *)malloc(V4L2_BUFFERS_NUM*sizeof(nv_buffer)),"stitch_buff malloc");

        GUARD_PROCESS_OUT_INT(stitch_conv->output_plane.setStreamStatus(true),"set conv output stream on");
        GUARD_PROCESS_OUT_INT(stitch_conv->capture_plane.setStreamStatus(true),"set conv capture stream on");

        GUARD_PROCESS_OUT_BOOL(stitch_conv->output_plane.setDQThreadCallback(ArgusSamples::stitch_conv_output_dqbuf_thread_callback),
                "set stitch_conv_output_dqbuf_thread_callback");
        GUARD_PROCESS_OUT_BOOL(stitch_conv->capture_plane.setDQThreadCallback(ArgusSamples::stitch_conv_capture_dqbuf_thread_callback),
                "set stitch_conv_capture_dqbuf_thread_callback");
        GUARD_PROCESS_OUT_INT(stitch_conv->output_plane.startDQThread(&ctx),"start output_plane thread");
        GUARD_PROCESS_OUT_INT(stitch_conv->capture_plane.startDQThread(&ctx),"start capture_plane thread");

        // Per output-plane slot: ABGR32 source buffer (CPU-writable) and an
        // NV12 destination buffer; blit src -> dst, queue the dst fd.
        for (unsigned int index = 0;index < stitch_conv->output_plane.getNumBuffers(); index++)
        {
            int nv_fd = -1;   // ABGR32 source
            int hw_fd = -1;   // NV12 destination

            NvBufferCreateParams input_params = {0};
            input_params.payloadType = NvBufferPayload_SurfArray;
            input_params.nvbuf_tag = NvBufferTag_NONE;
            input_params.width = result_rec.width;
            input_params.height = result_rec.height;
            input_params.layout = NvBufferLayout_Pitch;
            input_params.colorFormat = NvBufferColorFormat_ABGR32;

            if (NvBufferCreateEx(&nv_fd, &input_params) < 0)
                ORIGINATE_ERROR("Failed to create NvBuffer.");

            NvBufferParams params = {0};
            if (NvBufferGetParams(nv_fd, &params) != 0)
                return -1;

            stitch_buff_pitch = params.pitch[0];

            printf("allen:  ====== %s, params.pitch[0] %d ,  result_rec.height: %d\n", __func__, params.pitch[0], result_rec.height);

            if (NvBufferMemMap(nv_fd, 0, NvBufferMem_Read_Write, (void **)&stitch_buff[index].start) != 0)
                ORIGINATE_ERROR("Failed to map NvBuffer.");

            NvBufferMemSyncForDevice(nv_fd, 0, (void **)&stitch_buff[index].start);

            NvBufferCreateParams dst_input_params = {0};
            dst_input_params.payloadType = NvBufferPayload_SurfArray;
            dst_input_params.nvbuf_tag = NvBufferTag_NONE;
            dst_input_params.width = result_rec.width;
            dst_input_params.height = result_rec.height;
            dst_input_params.layout = NvBufferLayout_Pitch;
            dst_input_params.colorFormat = NvBufferColorFormat_NV12;

            // FIX: this call previously passed &input_params (ABGR32) by
            // copy-paste, so dst_input_params was never used and the
            // destination buffer was allocated with the wrong color format.
            if (NvBufferCreateEx(&hw_fd, &dst_input_params) < 0)
                ORIGINATE_ERROR("Failed to create NvBuffer.");

            // Init the NvBufferTransformParams.
            NvBufferTransformParams transParams;
            memset(&transParams, 0, sizeof(transParams));
            transParams.transform_flag = NVBUFFER_TRANSFORM_FILTER;
            transParams.transform_filter = NvBufferTransform_Filter_Smart;

            if (-1 == NvBufferTransform(nv_fd, hw_fd,
                        &transParams))
                ERROR_RETURN("Failed to convert the buffer");

            stitch_buff[index].dmabuff_fd = hw_fd;
            stitch_buff[index].size = params.height[0] * params.pitch[0];

            struct v4l2_buffer v4l2_buf;
            struct v4l2_plane planes[MAX_PLANES];
            memset(&v4l2_buf, 0, sizeof(v4l2_buf));
            memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
            v4l2_buf.index = index;
            v4l2_buf.m.planes = planes;
            GUARD_PROCESS_OUT_INT(stitch_conv->output_plane.qBuffer(v4l2_buf, NULL) ,"enqueue empty buffer into VIC output plane");
        }
        // Queue all MMAP capture-plane buffers.
        for (unsigned int index = 0;index < stitch_conv->capture_plane.getNumBuffers(); index++)
        {
            struct v4l2_buffer v4l2_buf;
            struct v4l2_plane planes[MAX_PLANES];
            memset(&v4l2_buf, 0, sizeof(v4l2_buf));
            memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
            v4l2_buf.index = index;
            v4l2_buf.m.planes = planes;
            GUARD_PROCESS_OUT_INT(stitch_conv->capture_plane.qBuffer(v4l2_buf, NULL) ,"enqueue empty buffer into VIC output plane");
        }
        sem_init (&g_stitch_sem, 0, 1);
        sem_init (&g_stitch_sem_post, 0, 0);

        //init stitch 
        stitch_init(stitch_buff_pitch,result_rec.height);


        return true;
    }
  
                                                                                                                                                                                                                            1511,1-4      80%

Hi Allen,
NvBufferTransform(RGBA->I420) also working fine. You can give it a try. We’ve given a sample. Please compare to get where can be wrong in your code.

Hi DaneLLL.
I merge the sample in this thread :

Here is an sample with V4L2 camera:
<a target='_blank' rel='noopener noreferrer' href='https://devtalk.nvidia.com/default/topic/1031967/jetson-tx2/tegra_multimedia_api-dq-buffer-from-encoder-output_plane-can-not-completed/post/5251268/#5251268'>https://devtalk.nvidia.com/default/topic/1031967/jetson-tx2/tegra_multimedia_api-dq-buffer-from-encoder-output_plane-can-not-completed/post/5251268/#5251268</a>
This is my code:

struct v4l2_buffer enc_buf;
struct v4l2_plane enc_planes[MAX_PLANES];
NvBuffer *enc_buffer;
NvBuffer *enc_shared_buffer;

        memset(&enc_buf, 0, sizeof(enc_buf));
        memset(enc_planes, 0, sizeof(enc_planes));
        enc_buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
        enc_buf.m.planes = enc_planes;
        enc_buf.memory = V4L2_MEMORY_DMABUF;

#if 0
#else
if (1)
{
input_params.payloadType = NvBufferPayload_SurfArray;
input_params.width = result_rec.width;
input_params.height = result_rec.height;
input_params.layout = NvBufferLayout_Pitch;
input_params.colorFormat = NvBufferColorFormat_NV12;//V4L2_PIX_FMT_YUV420M;
input_params.nvbuf_tag = NvBufferTag_VIDEO_ENC;
printf("%s|%d, enc_index:%d, qued num:%d, num buf:%d\n", func, LINE, enc_index, m_VideoEncoder->output_plane.getNumQueuedBuffers(),
m_VideoEncoder->output_plane.getNumBuffers());
if(enc_index < V4L2_BUFFERS_NUM &&
m_VideoEncoder->output_plane.getNumQueuedBuffers() < m_VideoEncoder->output_plane.getNumBuffers())
{
enc_buf.index = enc_index;
if( -1 == NvBufferCreateEx(&enc_fd, &input_params))
{
printf("%s|%d, nvBufferCreateEx failed:enc_fd:%d\n", func, LINE, enc_fd);

        		}
        		enc_index = (++enc_index) % (V4L2_BUFFERS_NUM);
        		printf("%s|%d, create enc_fd : %d\n", __func__, __LINE__, enc_fd);
        	}
        	else
        	{
        		m_VideoEncoder->output_plane.dqBuffer(enc_buf, &enc_buffer, NULL, 10);
        		enc_fd = enc_buf.m.planes[0].m.fd;
        		
        	}

            trans_params.transform_flag = NVBUFFER_TRANSFORM_FILTER;
            trans_params.transform_filter = NvBufferTransform_Filter_Smart;
           
            if(-1 == NvBufferTransform(buffer->planes[0].fd, enc_fd, &trans_params))
            {
            	printf("%s|%d, trans from v4l2 dma fd ; %d to enc fd %d error\n", __func__ ,__LINE__, stitch_buff[enc_index].dmabuff_fd, enc_fd);
            }
            enc_buf.m.planes[0].m.fd = enc_fd;
            enc_buf.m.planes[0].bytesused = 1; // byteused must be non-zero  
            ret = m_VideoEncoder->output_plane.qBuffer(enc_buf, NULL);
            if(ret != 0){
                printf("m_VideoEncoder->output_plane qBuffer failed one \n");
            }


            
        }

#endif
}

And now the error " Output Plane:Error while Qing buffer: Device or resource busy " has disappeared. But in the function encoderCapturePlaneDqCallback, I print v4l2_buf and NvBuffer *buffer, and the bytesused value is not what I expected.

The input byteuseds length is :

stitch_conv_capture_dqbuf_thread_callback|876, v4l2_buf->m.planes[0].bytesused: 1310720 the address of m_VideoEncoder->output_plane:0x2445908
stitch_conv_capture_dqbuf_thread_callback|877, v4l2_buf->m.planes[0].m.fd: 0 
stitch_conv_capture_dqbuf_thread_callback|878, byteuseds:1310720, fd:1142

But in enc callback:

encoderCapturePlaneDqCallback|648, byteuseds:409, fd:1128

Thanks.

Hi jiamianshiye,
The encoded frame size is different frame by frame. We have verified the output stream with 00_video_decode. Please verify the validity of encoded stream with 00_video_decode.

Hi,DaneLLL.
I tested the demo 00_video_decode and, as you said, the encoded frame size differs frame by frame. But the encoded frames in my project are not right, and I am sure about it. In the encoder capture callback, I got bytesused 409 just one time, and all subsequent frames have bytesused 40.

I check API header file for function NvBufferTransform :

/**
 * This method is used to transform one DMA buffer to another DMA buffer.
 * It can support transforms for copying, scaling, flipping, rotation and cropping.
 * @param[in] src_dmabuf_fd DMABUF FD of source buffer
 * @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
 * @param[in] transform_params transform parameters
 *
 * @return 0 for success, -1 for failure.
 */
int NvBufferTransform (int src_dmabuf_fd, int dst_dmabuf_fd, NvBufferTransformParams *transform_params);

This method is used for conversion between two DMA buffers. In my process, I need to do the work [ABGR32->VIC->YUV420M]–ENC—[H264]. The parameters I set for the VIC are:

stitch_conv->output_plane.setupPlane(V4L2_MEMORY_DMABUF,V4L2_BUFFERS_NUM, false, false);
stitch_conv->capture_plane.setupPlane(V4L2_MEMORY_MMAP, V4L2_BUFFERS_NUM, true, false);

The capture plane is not a DMA buffer, so is it right to use NvBufferTransform here?

Hi jiamianshiye,
Not sure but I think your pipeline is like:

Argus -> createNvBuffer(RGBA) -> image processig -> NvBufferTransform(RGBA->NV12(or I420)) -> ENC

In this case you can use NvBUffer APIs and don’t need NvVideoConverter.

Hi DaneLLL.

Yes, just as you said, I capture from four cameras and stitch them into one ABGR picture. Next, I convert the ABGR format to NV12, and at last encode it to H264.

I removed the NvVideoConverter process and do the transform after image processing, but the encoded stream's bytesused is still not right.

I have two questions about NvBuffer. After image processing, I have raw BGR data. Should I create a buffer with NvBufferCreate(&fd…) and NvBufferMemMap(fd…start_ptr) and write the raw BGR data to start_ptr directly, or use the API Raw2NvBuffer() to copy the raw BGR data to dmabuf_fd?
After copying the data, do I need to call NvBufferMemSyncForDevice()?

Thanks.

Hi jiamianshiye,
I added the print to encoderCapturePlaneDqCallback()

if (!v4l2_buf)
    {
        thiz->abort();
        ORIGINATE_ERROR("Failed to dequeue buffer from encoder capture plane");
    }
<b>printf("encoded frame size %d \n", buffer->planes[0].bytesused);</b>
    thiz->m_outputFile->write((char *) buffer->planes[0].data,
                              buffer->planes[0].bytesused);

Verify with 10_camera_recording and 00_video_decode

nvidia@tegra-ubuntu:~/tegra_multimedia_api/samples/10_camera_recording$ export DISPLAY=:0
nvidia@tegra-ubuntu:~/tegra_multimedia_api/samples/10_camera_recording$ ./camera_recording
Set governor to performance before enabling profiler
OFParserGetVirtualDevice: virtual device driver node not found in proc device-tree
OFParserGetVirtualDevice: virtual device driver node not found in proc device-tree
LoadOverridesFile: looking for override file [/Calib/camera_override.isp] 1/16LoadOverridesFile: looking for override file [/data/nvcam/settings/camera_overrides.isp] 2/16LoadOverridesFile: looking for override file [/opt/nvidia/nvcam/settings/camera_overrides.isp] 3/16LoadOverridesFile: looking for override file [/var/nvidia/nvcam/settings/camera_overrides.isp] 4/16LoadOverridesFile: looking for override file [/data/nvcam/camera_overrides.isp] 5/16LoadOverridesFile: looking for override file [/data/nvcam/settings/e3326_front_P5V27C.isp] 6/16LoadOverridesFile: looking for override file [/opt/nvidia/nvcam/settings/e3326_front_P5V27C.isp] 7/16LoadOverridesFile: looking for override file [/var/nvidia/nvcam/settings/e3326_front_P5V27C.isp] 8/16---- imager: No override file found. ----
PRODUCER: Creating output stream
PRODUCER: Launching consumer thread
Failed to query video capabilities: Inappropriate ioctl for device
NvMMLiteOpen : Block : BlockType = 4
===== MSENC =====
NvMMLiteBlockCreate : Block : BlockType = 4
875967048
842091865
create video encoder return true
CONSUMER: Waiting until producer is connected...
PRODUCER: Starting repeat capture requests.
CONSUMER: Producer has connected; continuing.
SCF: Error InvalidState:  NonFatal ISO BW requested not set. Requested = 2147483647 Set = 4687500 (in src/services/power/PowerServiceCore.cpp, function setCameraBw(), line 653)
===== MSENC blits (mode: 1) into tiled surfaces =====
encoded frame size 22
encoded frame size 46224
encoded frame size 13279
encoded frame size 8382
encoded frame size 7996
encoded frame size 7780
encoded frame size 44112
encoded frame size 34187
encoded frame size 31455
encoded frame size 33915
encoded frame size 26718
encoded frame size 22275
encoded frame size 17839
encoded frame size 15060
encoded frame size 15889
encoded frame size 15529
encoded frame size 11973
encoded frame size 15175
encoded frame size 12474
encoded frame size 15263
encoded frame size 19665
encoded frame size 23817
encoded frame size 23171
encoded frame size 27186
encoded frame size 16445
encoded frame size 16370
encoded frame size 16344
encoded frame size 16620
encoded frame size 0
CONSUMER: Got EOS, exiting...
CONSUMER: Done.
PRODUCER: Done -- exiting.
************************************
Total Profiling Time = 0 sec
************************************
nvidia@tegra-ubuntu:~/tegra_multimedia_api/samples/10_camera_recording$ cd ../00_video_decode/
nvidia@tegra-ubuntu:~/tegra_multimedia_api/samples/00_video_decode$ ./video_decode H264 ../10_camera_recording/output.h264
Set governor to performance before enabling profiler
Failed to query video capabilities: Inappropriate ioctl for device
NvMMLiteOpen : Block : BlockType = 261
TVMR: NvMMLiteTVMRDecBlockOpen: 7647: NvMMLiteBlockOpen
NvMMLiteBlockCreate : Block : BlockType = 261
Starting decoder capture loop thread
Input file read complete
TVMR: NvMMLiteTVMRDecDoWork: 6531: NVMMLITE_TVMR: EOS detected
TVMR: cbBeginSequence: 1179: BeginSequence  640x480, bVPR = 0
TVMR: LowCorner Frequency = 0
TVMR: cbBeginSequence: 1529: DecodeBuffers = 17, pnvsi->eCodec = 4, codec = 0
TVMR: cbBeginSequence: 1600: Display Resolution : (640x480)
TVMR: cbBeginSequence: 1601: Display Aspect Ratio : (640x480)
TVMR: cbBeginSequence: 1669: ColorFormat : 5
TVMR: cbBeginSequence:1683 ColorSpace = NvColorSpace_YCbCr601
TVMR: cbBeginSequence: 1809: SurfaceLayout = 3
TVMR: cbBeginSequence: 1902: NumOfSurfaces = 24, InteraceStream = 0, InterlaceEnabled = 0, bSecure = 0, MVC = 0 Semiplanar = 1, bReinit = 1, BitDepthForSurface = 8 LumaBitDepth = 8, ChromaBitDepth = 8, ChromaFormat = 5
TVMR: cbBeginSequence: 1904: BeginSequence  ColorPrimaries = 2, TransferCharacteristics = 2, MatrixCoefficients = 2
Video Resolution: 640x480
[INFO] (NvEglRenderer.cpp:109) <renderer0> Setting Screen width 640 height 480
Query and set capture successful
TVMR: TVMRBufferProcessing: 5486: Processing of EOS
TVMR: TVMRBufferProcessing: 5563: Processing of EOS Done
Exiting decoder capture loop thread
TVMR: TVMRFrameStatusReporting: 6132: Closing TVMR Frame Status Thread -------------
TVMR: TVMRVPRFloorSizeSettingThread: 5942: Closing TVMRVPRFloorSizeSettingThread -------------
TVMR: TVMRFrameDelivery: 5982: Closing TVMR Frame Delivery Thread -------------
TVMR: NvMMLiteTVMRDecBlockClose: 7815: Done
App run was successful

We don’t support 24-byte BGR in NvBuffer. You need to make it 32-byte BGRx or RGBA.

Hi DaneLLL.
In my last reply I said I get raw RGB data; to be precise, I mean the raw data format is ABGR32, not RGB24 — sorry,
that made it confusing.

I have verified the demos 10_camera_recording and 00_video_decode; they run well, as you said.

In my case, after image processing, I copy the raw data to a pointer address obtained from NvBufferMemMap().

// V4L2 descriptor reused for every qBuffer/dqBuffer on the encoder output plane.
struct v4l2_buffer enc_buf;
    struct v4l2_plane enc_planes[MAX_PLANES];
    NvBuffer *enc_buffer;
    NvBuffer *enc_shared_buffer;

    memset(&enc_buf, 0, sizeof(enc_buf));
    memset(enc_planes, 0, sizeof(enc_planes));
    enc_buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
    enc_buf.m.planes = enc_planes;
    // Buffers are passed to the encoder by dmabuf fd, not mmapped memory.
    enc_buf.memory = V4L2_MEMORY_DMABUF; 
    // NvBufferCreateEx parameters shared by all stitch buffers allocated below.
    input_params.payloadType = NvBufferPayload_SurfArray;
    input_params.width = result_rec.width;
    input_params.height = result_rec.height;
    input_params.layout = NvBufferLayout_Pitch;

    stitch_trans_buff = (nv_buffer *)malloc(V4L2_BUFFERS_NUM*sizeof(nv_buffer));

    // Stitch (camera-side) buffers hold the CPU-written ABGR32 composite image.
    input_params.colorFormat = NvBufferColorFormat_ABGR32;
    input_params.nvbuf_tag = NvBufferTag_CAMERA;
    // NOTE(review): this `params` is shadowed by the declaration inside the loop
    // below — one of the two should be removed.
    NvBufferParams params = {0};       
	//prepare stitching buffer
    for (unsigned int index = 0; index < V4L2_BUFFERS_NUM; index++)
    {
	int fd =0;
        
	// Allocate one ABGR32 dmabuf per slot and remember its fd.
	GUARD_PROCESS_OUT_INT( NvBufferCreateEx(&fd, &input_params),"create NvBuffer");
        
	stitch_trans_buff[index].dmabuff_fd = fd;
        
        NvBufferParams params = {0};       
	GUARD_PROCESS_OUT_INT(NvBufferGetParams(fd, &params),"get NvBuffer parameters");
        // Pitch may be wider than width*4 due to hardware alignment.
        stitch_buff_pitch = params.pitch[0];
       
			
	// TODO add multi-planar support Currently it supports only YUV422 interlaced single-planar
	stitch_trans_buff[index].size = params.height[0] * params.pitch[0];
        // Map plane 0 so stitch_exec() can write pixels from the CPU.
        if(-1 == NvBufferMemMap(stitch_trans_buff[index].dmabuff_fd, 0, NvBufferMem_Read_Write,  (void **)&stitch_trans_buff[index].start))
        {
            printf("%s|%d, stitch mmap buffer failed!\n", __FILE__, __LINE__);
        }
    }

The function stitch_exec() converts four ABGR32 images into one ABGR32 image and stores it in the memory at stitch_trans_buff[stitch_idx].start.

//change for enc create
    // Reuse input_params, re-describing it for the encoder-side NV12 buffers.
    input_params.layout = NvBufferLayout_Pitch;
    input_params.colorFormat = NvBufferColorFormat_NV12;//V4L2_PIX_FMT_YUV420M;
    input_params.nvbuf_tag = NvBufferTag_VIDEO_ENC;  
    while(1)
    {
        // Stitch the four source images into one ABGR32 frame in the CPU-mapped buffer.
        stitch_exec(src_d,src_img_exchange_flag,(char *)stitch_trans_buff[stitch_idx].start, g_show_img_len ,stitch_buff_pitch,result_rec.width,result_rec.height);
    
        // Flush the CPU writes so the device (VIC) sees the stitched pixels.
        NvBufferMemSyncForDevice(stitch_trans_buff[stitch_idx].dmabuff_fd, 0, (void **)&stitch_trans_buff[stitch_idx].start);
        if(m_VideoEncoder->output_plane.getNumQueuedBuffers() < m_VideoEncoder->output_plane.getNumBuffers())
        {
            // Free slots remain: create a brand-new NV12 dmabuf for this frame.
            // NOTE(review): enc_buf.index is set from stitch_idx — confirm it stays
            // within the encoder output plane's buffer count.
            enc_buf.index = stitch_idx;
            if(-1 == NvBufferCreateEx(&enc_fd, &input_params))
            {
                printf("%s|%d, nvBufferCreateEx failed:enc_fd:%d\n", __func__, __LINE__, enc_fd);
                abort();
            }
            printf("%s|%d, create enc_fd : %d\n", __func__, __LINE__, enc_fd);
        }
        else
        {
            // All slots queued: dequeue a finished buffer and recycle its dmabuf fd.
            m_VideoEncoder->output_plane.dqBuffer(enc_buf, &enc_buffer, NULL, 10);
            enc_fd = enc_buf.m.planes[0].m.fd;
            printf("%s|%d, get video encoder output_plane dqBuffer enc_buf fd : %d, enc_buffer used:%d\n", __func__, __LINE__, enc_fd, enc_buffer->planes[0].bytesused);
        }

        // VIC blit: convert/scale the stitched ABGR32 surface into the NV12 buffer.
        trans_params.transform_flag = NVBUFFER_TRANSFORM_FILTER;
        trans_params.transform_filter = NvBufferTransform_Filter_Smart;
        if(-1 == NvBufferTransform(stitch_trans_buff[stitch_idx].dmabuff_fd, enc_fd, &trans_params))
        {
            printf("%s|%d, trans from stitch_buff dma_fd ; %d to enc fd %d error\n", __func__ ,__LINE__, stitch_trans_buff[stitch_idx].dmabuff_fd, enc_fd);
        }

        enc_buf.m.planes[0].m.fd = enc_fd;
        enc_buf.m.planes[0].bytesused = 1; // must be non-zero for a DMABUF output buffer
        ret = m_VideoEncoder->output_plane.qBuffer(enc_buf, NULL);
        if(ret != 0)
        {
            printf("m_VideoEncoder->output_plane qBuffer failed one \n");
        }
    }

I am not sure whether this usage of the method is right.

Thanks.

Hi jiamianshiye,
You should call these APIs like:

if (DO_CPU_PROCESS) {
            NvBufferParams par;
            NvBufferGetParams (fd, &par);
            void *ptr_y;
            uint8_t *ptr_cur;
            int i, j, a, c;
            // Map the Y plane for CPU writes and sync so device writes are visible to the CPU.
            NvBufferMemMap(fd, Y_INDEX, NvBufferMem_Write, &ptr_y);
            NvBufferMemSyncForCpu(fd, Y_INDEX, &ptr_y);
            ptr_cur = (uint8_t *)ptr_y + par.pitch[Y_INDEX]*START_POS + START_POS;

            // overwrite some pixels to put an 'N' on each Y plane
            // scan array_n to decide which pixel should be overwritten
            for (i=0; i < FONT_SIZE; i++) {
                for (j=0; j < FONT_SIZE; j++) {
                    a = i>>SHIFT_BITS;
                    c = j>>SHIFT_BITS;
                    if (array_n[a][c])
                        (*ptr_cur) = 0xff; // white color
                    ptr_cur++;
                }
                // Step to the next row using the plane pitch (may exceed visible width).
                ptr_cur = (uint8_t *)ptr_y + par.pitch[Y_INDEX]*(START_POS + i)  + START_POS;
            }
            // Flush CPU writes back to the device, then unmap the plane.
            NvBufferMemSyncForDevice (fd, Y_INDEX, &ptr_y);
            NvBufferMemUnMap(fd, Y_INDEX, &ptr_y);
        }

Hi DaneLLL.
I stored the XRGB32 data to the file stitchRGB.rgb and used the demo video_convert to transform this file into a YUV420M-format file, and then I used mplayer to play the YUV420M file; it plays well.

./video_convert ../stereo_4689_2688_1520/stitchRGB.rgb 3200 1280 XRGB32 6.yuv 3200 1280 YUV420M -im 4
mplayer -demuxer rawvideo -rawvideo w=3200:h=1280:format=i420 6.yuv -loop 0

Then I put the XRGB32 data into my project: I copy the XRGB32 raw data to the NvVideoConverter in output_plane_callback(), and then I get data from capture_dqbuf_callback(). But the log shows that the data length is not right. I set the resolution to 3200x1280 for both the output_plane and the capture_plane, converting from XRGB32 to YUV420M; if everything is good, I should get the converted data on the capture_plane. But in fact, the log shows the Y/U/V planes all do not match my resolution. The total bytesused should be 3200*1280*3/2 = 6144000, not 4325376 + 1179648*2. This data makes no sense, and it cannot be played by mplayer.

INFO: stitch_conv_capture_dqbuf_thread_callback(): (line:876) stitch capture buffer fd : 1130, bytes:4325376

INFO: stitch_conv_capture_dqbuf_thread_callback(): (line:892) pitch:3328, width : 3200, height:1280, data:0x7f680ad000, bytes:4325376

INFO: stitch_conv_capture_dqbuf_thread_callback(): (line:893) pitch:1792, width : 1600, height:640, data:0x7f6334b000, bytes:1179648

INFO: stitch_conv_capture_dqbuf_thread_callback(): (line:894) pitch:1792, width : 1600, height:640, data:0x7f6322b000, bytes:1179648

INFO: stitch_conv_capture_dqbuf_thread_callback(): (line:896) v4l2 index:1, bytes:4325376, fd:0

Thanks.

mplayer-stitch-yuv-3200x1280.jpg

My fault — the previous quote should be ignored. I have verified that the capture_plane data is right. Sorry.