TX2 NX: VI-to-DRM color has changed

I capture the 4k video on mipi0-3 rgb and send it to the drm.
I find that the color is changed.
Here is my test code

bool init_camera()
    struct v4l2_format fmt;

    /* Open camera device */
    cam_fd = open("/dev/video0", O_RDWR);
    if (cam_fd == -1)
        ERROR_RETURN("Failed to open camera device: %s (%d)", strerror(errno), errno);

    /* Set camera output format */
    memset(&fmt, 0, sizeof(fmt));
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = width;
    fmt.fmt.pix.height = height;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB32;
    fmt.fmt.pix.field = V4L2_FIELD_ANY;
    if (ioctl(cam_fd, VIDIOC_S_FMT, &fmt) < 0)
        ERROR_RETURN("Failed to set camera output format: %s (%d)", strerror(errno), errno);
    /* Get the real format in case the desired is not supported */
    memset(&fmt, 0, sizeof fmt);
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(cam_fd, VIDIOC_G_FMT, &fmt) < 0)
        ERROR_RETURN("Failed to get camera output format: %s (%d)", strerror(errno), errno);

    struct v4l2_streamparm streamparm;
    memset(&streamparm, 0x00, sizeof(struct v4l2_streamparm));
    streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    ioctl(cam_fd, VIDIOC_G_PARM, &streamparm);

    return true;

static bool request_camera_buff(int buffers_num)
    struct v4l2_requestbuffers rb;
    memset(&rb, 0, sizeof(rb));
    rb.count = buffers_num;
    rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    rb.memory = V4L2_MEMORY_DMABUF;
    if (ioctl(cam_fd, VIDIOC_REQBUFS, &rb) < 0)
        ERROR_RETURN("Failed to request v4l2 buffers: %s (%d)", strerror(errno), errno);
    if (rb.count != buffers_num)
        ERROR_RETURN("V4l2 buffer number is not as desired");

    for (unsigned int index = 0; index < buffers_num; index++)
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof buf);
        buf.index = index;
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_DMABUF;
        if (ioctl(cam_fd, VIDIOC_QUERYBUF, &buf) < 0)
            ERROR_RETURN("Failed to query buff: %s (%d)", strerror(errno), errno);

        /* TODO: add support for multi-planer
           Enqueue empty v4l2 buff into camera capture plane */
        buf.m.fd = cam_capture_dma_fd[index];
        if (ioctl(cam_fd, VIDIOC_QBUF, &buf) < 0)
            ERROR_RETURN("Failed to enqueue buffers: %s (%d)\n", strerror(errno), errno);

    return true;

static bool start_stream()
    enum v4l2_buf_type type;

    /* Start v4l2 streaming */
    if (ioctl(cam_fd, VIDIOC_STREAMON, &type) < 0)
        ERROR_RETURN("Failed to start streaming: %s (%d)", strerror(errno), errno);


    printf("Camera video streaming on ...\n");
    return true;

bool start_camera()
    NvBufferCreateParams input_params = {0};

    input_params.payloadType = NvBufferPayload_SurfArray;
    input_params.width = width;
    input_params.height = height;
    input_params.layout = NvBufferLayout_Pitch;
    input_params.colorFormat = NvBufferColorFormat_ARGB32;
    input_params.nvbuf_tag = NvBufferTag_CAMERA;

    /* Create buffer and provide it with camera */
    for (unsigned int index = 0; index < CAMERA_BUFFERS_NUM; index++)
        int fd;
        NvBufferParams params = {0};

        if (-1 == NvBufferCreateEx(&fd, &input_params))
            ERROR_RETURN("Failed to create NvBuffer");

        cam_capture_dma_fd[index] = fd;

        if (-1 == NvBufferGetParams(fd, &params))
            ERROR_RETURN("Failed to get NvBuffer parameters");
    if (!request_camera_buff(CAMERA_BUFFERS_NUM))
        ERROR_RETURN("Failed to set up camera buff");
    return true;

bool  init_drm()
    int ret = 0;
    int error = 0;
    uint32_t i;
    bool eos = false;
    struct drm_tegra_hdr_metadata_smpte_2086 metadata;

    drm_renderer = NvDrmRenderer::createDrmRenderer("renderer0",
            width, height, 0, 0, 0, 0, metadata, false);


    NvBufferCreateParams input_params = {0};
    input_params.payloadType = NvBufferPayload_SurfArray;
    input_params.width = width;
    input_params.height = height;
    input_params.layout = NvBufferLayout_Pitch;
    input_params.colorFormat = NvBufferColorFormat_NV12;
    input_params.nvbuf_tag = NvBufferTag_VIDEO_DEC;
    for (size_t i = 0; i < DRM_BUF_SIZE; i++)
        NvBufferCreateEx(&drm_buf_fd[i], &input_params);

    return true;

int main(int argc, char const *argv[])
    int ret = -1;

    if (!init_drm())
        LOG( "Could not init decoder");

    if (!init_camera())
        LOG( "Could not init camera");

    if (!start_camera())
        LOG( "Could not start camera");

    NvBufferTransformParams transParams = {0};
    transParams.transform_filter = NvBufferTransform_Filter_Smart;
    static int capture_cnt = 0;
    /* Decoder capture loop */
    while (1)
        fd_set fds;
		struct timeval tv;
        FD_ZERO (&fds);
		FD_SET(cam_fd, &fds);
		tv.tv_sec = 1;
		tv.tv_usec = 0;
        ret = select(cam_fd + 1, &fds, nullptr, nullptr, &tv);
        if (ret < 0)
        else if (ret==0)
            LOG("time out");

            struct v4l2_buffer v4l2_buf_cam;
            /* Dequeue a camera buff */
            memset(&v4l2_buf_cam, 0, sizeof(v4l2_buf_cam));
            v4l2_buf_cam.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            v4l2_buf_cam.memory = V4L2_MEMORY_DMABUF;
            if (ioctl(cam_fd, VIDIOC_DQBUF, &v4l2_buf_cam) < 0)
                ERROR_RETURN("Failed to dequeue camera buff: %s (%d)", strerror(errno), errno);

            struct v4l2_buffer v4l2_buf;
            struct v4l2_plane planes[MAX_PLANES];

            NvBufferRect src_rect, dest_rect;
            src_rect.top = 0;
            src_rect.left = 0;
            src_rect.width = 3840;
            src_rect.height = 2160;
            dest_rect.top = 0;
            dest_rect.left = 0;
            dest_rect.width = 3840;
            dest_rect.height = 2160;

            NvBufferTransformParams transform_params;
            memset(&transform_params, 0, sizeof(transform_params));
            /* Indicates which of the transform parameters are valid. */
            transform_params.transform_flag = NVBUFFER_TRANSFORM_FILTER;
            transform_params.transform_flip = NvBufferTransform_None;
            transform_params.transform_filter = NvBufferTransform_Filter_Nearest;
            transform_params.src_rect = src_rect;
            transform_params.dst_rect = dest_rect;
            int fd = -1;
            if (capture_cnt < DRM_BUF_SIZE)
                fd = drm_buf_fd[capture_cnt];
                fd = drm_renderer->dequeBuffer();
            ret = NvBufferTransform(cam_capture_dma_fd[capture_cnt % CAMERA_BUFFERS_NUM], fd, &transform_params);

            if (capture_cnt%60==0)
                LOG("%d %d\n",capture_cnt);

            if (ioctl(cam_fd, VIDIOC_QBUF, &v4l2_buf_cam))
                ERROR_RETURN("Failed to queue camera buffers: %s (%d)", strerror(errno), errno);
    return 0;

I capture a picture from the command v4l2-ctl -d /dev/video0 --set-fmt-video=width=3840,height=2160,pixelformat=RGB32 --set-ctrl bypass_mode=0 --stream-mmap --stream-count=1 --stream-to=test.rgb
The picture is fine.
test.rgb (31.6 MB)
You can play it; it is best to play it without scaling, so that each pixel maps 1:1 (3840*2160).

./ffplay -video_size 3840x2160 -pixel_format rgb32 test.rgb

If you are talking about the picture color is a little different from what you see on the monitor, then it is expected.

There is no guarantee that the output color range is the same as the original one. We have both limited-range RGB and full-range RGB on the Jetson display.

Also, some conversion is done in the display hardware when the buffer is rendered.

No, it’s not a problem of the range of RGB.
I have done a test: I drew a vertical red line on a white background and moved it one pixel at a time. I can see the line alternate in color (red, black, red, black…), but the line’s actual color never changes.
Here is the experiment video.

Please clarify whether this issue happens in the display pipeline or in the camera pipeline you are using.

NvBufferColorFormat_ARGB32 is B 8-bit G 8-bit R 8-bit A 8-bit. Please check if your source data is in the order.

If your source is in R 8-bit G 8-bit B 8-bit A 8-bit, please allocate NvBuffer in NvBufferColorFormat_ABGR32 and try again.

Hello, if I capture standard MIPI RGB888, There is no NvBufferColorFormat_RGB24 or RGB888
Here is my information

Video input : 0 (Camera 0: no power)
Format Video Capture:
	Width/Height      : 3840/2160
	Pixel Format      : 'AR24'
	Field             : None
	Bytes per Line    : 15360
	Size Image        : 33177600
	Colorspace        : sRGB
	Transfer Function : Default (maps to sRGB)
	YCbCr/HSV Encoding: Default (maps to ITU-R 601)
	Quantization      : Default (maps to Full Range)

You can check the website and you will know what does AR24 mean.


Could you dump your frame with AR24 format and attach it here?

I have uploaded the test.rgb on top
You can play.
./ffplay -video_size 3840x2160 -pixel_format rgb32 test.rgb

test.rgb looks to be B 8-bit G 8-bit R 8-bit A 8-bit. Could you call save_frame_to_file() in 12_camera_v4l2_cuda and check if you can get correct data in B 8-bit G 8-bit R 8-bit A 8-bit? By default NvBufferColorFormat_ARGB32 is not supported in the sample, so please customize the sample and check if captured data fits the result of executing v4l2-ctl command.