Hi Allen,
Here is a suggestion for your case:
// get RGBA buffer via createNvBuffer()
iNativeBuffer->createNvBuffer(RGBA)
// convert to NV12 via NVBufferTransform()
NvBufferTransform(RGBA->NV12)
Here is the patch to 10_camera_recording
diff --git a/multimedia_api/ll_samples/samples/10_camera_recording/main.cpp b/multimedia_api/ll_samples/samples/10_camera_recording/main.cpp
index 6f531b8..a57f193 100644
--- a/multimedia_api/ll_samples/samples/10_camera_recording/main.cpp
+++ b/multimedia_api/ll_samples/samples/10_camera_recording/main.cpp
@@ -218,7 +218,7 @@ bool ConsumerThread::threadExecute()
while (!m_gotError)
{
NvBuffer *buffer;
- int fd = -1;
+ int fd = -1, fd1 = -1;
struct v4l2_buffer v4l2_buf;
struct v4l2_plane planes[MAX_PLANES];
@@ -266,11 +266,26 @@ bool ConsumerThread::threadExecute()
interface_cast<NV::IImageNativeBuffer>(iFrame->getImage());
if (!iNativeBuffer)
ORIGINATE_ERROR("IImageNativeBuffer not supported by Image.");
- fd = iNativeBuffer->createNvBuffer(STREAM_SIZE,
- NvBufferColorFormat_YUV420,
+ fd1 = iNativeBuffer->createNvBuffer(STREAM_SIZE,
+ NvBufferColorFormat_ABGR32,
(DO_CPU_PROCESS)?NvBufferLayout_Pitch:NvBufferLayout_BlockLinear);
if (VERBOSE_ENABLE)
- CONSUMER_PRINT("Acquired Frame. %d\n", fd);
+ CONSUMER_PRINT("Acquired Frame. %d\n", fd1);
+
+ NvBufferCreateParams input_params = {0};
+ input_params.payloadType = NvBufferPayload_SurfArray;
+ input_params.width = STREAM_SIZE.width();
+ input_params.height = STREAM_SIZE.height();
+ input_params.layout = NvBufferLayout_Pitch;
+ input_params.colorFormat = NvBufferColorFormat_NV12;
+ input_params.nvbuf_tag = NvBufferTag_CAMERA;
+ NvBufferCreateEx(&fd, &input_params);
+
+ NvBufferTransformParams transParams = {0};
+ transParams.transform_flag = NVBUFFER_TRANSFORM_FILTER;
+ transParams.transform_filter = NvBufferTransform_Filter_Smart;
+ NvBufferTransform(fd1, fd, &transParams);
+ NvBufferDestroy(fd1);
if (DO_CPU_PROCESS) {
NvBufferParams par;
We have verified it with:
$ ./camera_recording -d 5 -c