I’d like to convert the image to an OpenCV cv::Mat object (BGR color space), but haven’t found the correct method for doing so. The following code snippet produces cv::Mat objects that, when presented by cv::imshow(), are fairly “scrambled”:
// NOTE(review): fragment as quoted in the question — it is incomplete here
// (the `if` is never closed; `iImage2D`, `Y`, `UV`, and `bgr` are declared
// outside this excerpt). Comments below are review observations, not
// verified facts.
if (iImage->getBufferCount() == 2) {
// Convert the YUV image to the RGB color space. Note that OpenCV Matrix objects store the color
// channels in the order of blue, green, and red (hence the "bgr" naming convention).
Argus::Size2D<uint32_t> ySize = iImage2D->getSize(Y);
Argus::Size2D<uint32_t> uvSize = iImage2D->getSize(UV);
// Wrap the mapped planes without copying; the last argument is the row
// stride in bytes. NOTE(review): if the output looks "scrambled", the usual
// suspects are (a) mapBuffer() returning block-linear rather than pitch-
// linear memory, or (b) a stride/element-size mismatch between the mapped
// buffer and what cv::Mat is told — confirm the buffer layout against the
// Argus IImage2D documentation before trusting these strides.
cv::Mat y(ySize.height(), ySize.width(), CV_8UC1, const_cast<void*>(iImage->mapBuffer(Y)), iImage2D->getStride(Y));
cv::Mat uv(uvSize.height(), uvSize.width(), CV_8UC2, const_cast<void*>(iImage->mapBuffer(UV)), iImage2D->getStride(UV));
cv::UMat yUMat = y.getUMat(cv::ACCESS_READ);
cv::UMat uvUMat = uv.getUMat(cv::ACCESS_READ);
// Two-plane NV12 (Y plane + interleaved UV plane) to BGR conversion.
// NOTE(review): COLOR_YUV2BGR_NV12 assumes NV12 (U before V); if the stream
// is actually NV21 the colors will be wrong — verify the producer's format.
cv::cvtColorTwoPlane(yUMat, uvUMat, bgr, cv::COLOR_YUV2BGR_NV12);
I’m using the original yuvJpeg sample’s producer-thread code to define the pixel format; my consumer thread looks like this:
bool ConsumerThread::threadExecute() {
const unsigned int plane = 0;
const std::string capturedVideoFrames = "Captured video frames";
const auto *iStream = Argus::interface_cast<Argus::IEGLOutputStream>(m_stream);
const auto *iEglOutputStream = Argus::interface_cast<Argus::IEGLOutputStream>(m_stream);
const Argus::Size2D streamResolution = iEglOutputStream->getResolution();
// Wait until the producer has connected to the stream.
threads::enqueueLoggerMessage(threadName, "Waiting until producer is connected...");
if (iStream->waitUntilConnected() != Argus::STATUS_OK)
ORIGINATE_ERROR("Stream failed to connect.");
threads::enqueueLoggerMessage(threadName, "Producer has connected; continuing.");
// Continue until this thread is terminated.
threads::addMetric(capturedVideoFrames);
auto *iFrameConsumer = Argus::interface_cast<EGLStream::IFrameConsumer>(m_consumer);
cv::Mat bgr;
while (true) {
// The absence of a frame indicates this thread has been terminated.
Argus::UniqueObj<EGLStream::Frame> frame(iFrameConsumer->acquireFrame());
if (!frame)
break;
// Keep track of how many video frames per second we are consuming.
threads::incrementMetric(capturedVideoFrames);
// Create an IFrame interface to gain access to the Image in the Frame.
auto *iFrame = Argus::interface_cast<EGLStream::IFrame>(frame);
if (!iFrame) {
ORIGINATE_ERROR("Failed to get IFrame interface.");
}
// Copy the image into an OpenCV Mat object. Note that OpenCV Matrix objects store the color
// channels in the order of blue, green, and red (hence the "bgr" naming convention).
auto *iNativeBuffer = Argus::interface_cast<EGLStream::NV::IImageNativeBuffer>(iFrame->getImage());
if (!iNativeBuffer) {
ORIGINATE_ERROR("IImageNativeBuffer not supported by Image.");
}
if (m_dmabuf == -1) {
m_dmabuf = iNativeBuffer->createNvBuffer(
streamResolution,
NvBufferColorFormat_ABGR32,
NvBufferLayout_Pitch);
if (m_dmabuf == -1) {
ORIGINATE_ERROR("\tFailed to create NvBuffer\n");
}
}
if (iNativeBuffer->copyToNvBuffer(m_dmabuf) != Argus::STATUS_OK) {
ORIGINATE_ERROR("Failed to copy frame to NvBuffer.");
}
void *pdata = nullptr;
NvBufferParams params;
NvBufferGetParams(m_dmabuf, ¶ms);
NvBufferMemMap(m_dmabuf, plane, NvBufferMem_Read, &pdata);
NvBufferMemSyncForCpu(m_dmabuf, plane, &pdata);
cv::Mat imgbuf = cv::Mat(streamResolution.height(), streamResolution.width(), CV_8UC4, pdata, params.pitch[0]);
cv::cvtColor(imgbuf, bgr, cv::COLOR_RGBA2BGR);
// Notify all listeners that a video frame is available.
for (auto & listener : ConsumerThread::listeners) {
cv::Mat clonedImage = bgr.clone();
listener(clonedImage);
}
}
threads::enqueueLoggerMessage(threadName, "Done.");
PROPAGATE_ERROR(requestShutdown());
return true;
}
Many thanks for the tips identified in this thread.