I am trying to access Nvbuffer memory and convert it to opencv mat.
I am able to do the conversion but in the process of doing show the image is getting converted into a grey scale one with messed up dimensions. But if I do a ofstream file write with m_JpegEncoder I am getting proper image with right dimensions.
Here is piece of code:
while (true)
{
for (uint32_t i = 0; i < m_streams.size(); i++)
{
// Acquire a frame.
UniqueObj<Frame> frame(iFrameConsumers[i]->acquireFrame());
IFrame *iFrame = interface_cast<IFrame>(frame);
if (!iFrame)
break;
// Get the IImageNativeBuffer extension interface.
NV::IImageNativeBuffer *iNativeBuffer =
interface_cast<NV::IImageNativeBuffer>(iFrame->getImage());
if (!iNativeBuffer)
ORIGINATE_ERROR("IImageNativeBuffer not supported by Image.");
// If we don't already have a buffer, create one from this image.
// Otherwise, just blit to our buffer.
if (!m_dmabufs[i])
{
m_dmabufs[i] = iNativeBuffer->createNvBuffer(iEglOutputStreams[i]->getResolution(),
NvBufferColorFormat_YUV420,
NvBufferLayout_Pitch);
if (!m_dmabufs[i])
CONSUMER_PRINT("\tFailed to create NvBuffer\n");
}
else if (iNativeBuffer->copyToNvBuffer(m_dmabufs[i]) != STATUS_OK)
{
ORIGINATE_ERROR("Failed to copy frame to NvBuffer.");
}
}
//
if (m_streams.size() > 1)
{
// Composite multiple input to one frame
NvBufferComposite(m_dmabufs, m_compositedFrame, &m_compositeParam);
// [b]THIS WORKS BUT I AM GETTING A IMAGE THAT IS IN GRAYSCALE
// WITH DIMENSION MESSED UP[/b]
NvBufferParams params;
NvBufferGetParams(m_compositedFrame, ¶ms);
void *ptr_y;
uint8_t *ptr_cur;
int i, j, a, b;
NvBufferMemMap(m_compositedFrame, Y_INDEX, NvBufferMem_Write, &ptr_y);
NvBufferMemSyncForCpu(m_compositedFrame, Y_INDEX, &ptr_y);
ptr_cur = (uint8_t *)ptr_y + params.pitch[Y_INDEX]*START_POS + START_POS;
char *data_mem = (char*)ptr_cur;
cv::Mat imgbuf = cv::Mat(1920, 3840, CV_8UC3, data_mem, 3840);
std::cout<<"Img Buffer\n"<<imgbuf.dims<<"\n";
cv::imshow("img", imgbuf);
// <b>THIS WRITES IMAGE PERFECTLY</b>
unsigned char *buffer;
char* data;
std::ofstream *outputFile = new std::ofstream(filename.c_str());
struct Result cinfo;
unsigned long size = 1920*1920*2;
if (outputFile)
{
buffer = m_OutputBuffer;
cinfo = m_JpegEncoder->encodeFromFd(m_compositedFrame, JCS_YCbCr, &buffer, size);
outputFile->write((char *)cinfo.buf, cinfo.size);
std::string str(cinfo.buf, cinfo.buf + cinfo.size);
delete outputFile;
}
}
I have also tried mmap approch that did not worked either.
I know about NvEglRenderer and it is working perfectly, but I need the buffer in an opencv mat
This is mmap approch but yeilds the same GRAYSCALE IMAGE with dimension messed up
// NvBufferParams params0;
// NvBufferGetParams(m_compositedFrame, ¶ms0);
// int fsize0 = params0.pitch[0] * 1920;
// char *data_mem0 = (char*)mmap(0, 1920*1920*2, PROT_WRITE, MAP_SHARED, m_compositedFrame, params0.offset[0]);
// if (data_mem0 == MAP_FAILED)
// printf("mmap failed : %s\n", strerror(errno));
// cv::Mat imgbuf0 = cv::Mat(1920, 1920*2, CV_8UC3, data_mem0, 1920*2);
// cv::imshow("img0", imgbuf0);
I have also tried using converting char* buffer into a std::vector and then use that with cv::imdecode to get a opencv Mat but that did not worked either and I get a opencv expection.
If there is any way to do this please let me know.