Does the CCM preserve white tones?

Hallo,

I have a question about the behavior of the CCM:
When I use a CCM of [0,0,0; 0,1,0; 0,0,1] I get a picture which looks like the red components are suppressed (as expected), but only in colored regions. In white or gray regions the color is preserved (not as expected).

My question is: why is the white still white after I have "removed" the red channel? Is the supplied CCM modified to preserve whites?

How do you apply the CCM? from argus API?

Yes I have used the libargus api to set the ccm.

JetPack version 4.6.2
L4T 32.7.2

It looks like the Color Correction Matrix is being confused with a Color Conversion Matrix.
This function performs color correction, not color conversion.

Then how will this matrix be applied?

My current understanding is: RGB_new = RGB_old * CCM and the RGB_new will be afterwards converted to YUV.
Is this correct?

https://www.imatest.com/docs/colormatrix/

So my basic understanding is correct.

However, there are still some edge cases where I don't understand what the CCM does:

For example if I use a CCM of :
0 0 0
0 1 0
0 0 1

I get the following picture, where the white patches are still white (which shouldn't be possible if the red channel is zero):

The generated picture also doesn't really change if I take out some blue:
CCM:
0 0 0
0 0.9 0
0 0 0.1

However if I reduce green a little bit more I suddenly get a mostly black image:
CCM:
0 0 0
0 0.8 0
0 0 0.1

And lastly, if I set the green value in between, I get some kind of checkerboard pattern in my image:
CCM:
0 0 0
0 0.83287 0
0 0 0.1


(You may have to download the image and open it in an app like GIMP to see it)

I also use a fixed white balance, fixed exposure times and gains and a gamma of 2.4 for these images.

These images sum up the part of the behavior of the CCM which I don't understand.

Is there any information on this behavior?

We're checking with the internal team to see if there are any suggestions.

@rbayr
Could you share the binary or source code for debugging.

Thanks

Here is a binary and the code to regenerate the described behavior with my camera:

CCMTest (435.3 KB)

#include <stdio.h>

#include <cstdlib>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include <Argus/Argus.h>
#include <EGLStream/EGLStream.h>
#include <EGLStream/NV/ImageNativeBuffer.h>
#include <NvJpegEncoder.h>

using namespace Argus;
using namespace EGLStream;

// --------------- Config: ---------------

BayerTuple<float> AWBGains( 2.13667f, 1.0f, 1.00099f, 1.59856f);
BayerTuple<float> OpticalBlack( 0.054814453125f, 0.059814453125f, 0.058837890625f, 0.053837890625f);

uint64_t ExposureTime = 13672861;
Range<float> GainRange(0.0f, 1.0f);


// ---------------------------------------
#define LOG_ERROR(_file, _func, _line, _str, ...) \
    do { \
        fprintf(stderr, "Error generated. %s, %s:%d ", _file, _func, _line); \
        fprintf(stderr, _str, ##__VA_ARGS__); \
        fprintf(stderr, "\n"); \
    } \
    while (0)

#define ORIGINATE_ERROR(_str, ...) \
    do { \
        LOG_ERROR(__FILE__, __FUNCTION__, __LINE__, (_str), ##__VA_ARGS__); \
        return false; \
    } while (0)

const char* getArgusStatusMessage(Argus::Status status){
	switch (status) {
		case STATUS_OK:
			return "OK";
			break;
		case STATUS_INVALID_PARAMS:
				return "Invalid Params";
				break;
		case STATUS_INVALID_SETTINGS:
				return "Invalid Setting";
				break;
		case STATUS_UNAVAILABLE:
				return "Unavailable";
				break;
		case STATUS_OUT_OF_MEMORY:
				return "Out of memory";
				break;
		case STATUS_UNIMPLEMENTED:
				return "Unimplemented";
				break;
		case STATUS_TIMEOUT:
				return "Timeout";
				break;
		case STATUS_CANCELLED:
					return "Cancelled";
					break;
		case STATUS_DISCONNECTED:
					return "Disconnected";
					break;
		case STATUS_END_OF_STREAM:
					return "End of stream";
					break;
		default:
			return "Unknown";
			break;
	}
}

bool captuereFrame(ICameraProvider* iCameraProvider, CameraDevice* cam,  std::string filename, std::vector<float> & ccm){
	Argus::Status a_status;

	UniqueObj<CaptureSession> captureSession;
	ICaptureSession* iCaptureSession;
	UniqueObj<Request> request;
	IRequest* iRequest;

	//---------- Create Capture Session ----------//
	std::cout << "Create Capture Session ..." << std::endl;

	captureSession.reset(iCameraProvider->createCaptureSession(cam, &a_status));
	if(a_status != STATUS_OK) ORIGINATE_ERROR("Can not create capture session: %s", getArgusStatusMessage(a_status));

	iCaptureSession = interface_cast<ICaptureSession>(captureSession);

	//---------- Create Capture Request ----------//
	std::cout << "Create Capture Request ..." << std::endl;

	request.reset(iCaptureSession->createRequest(CAPTURE_INTENT_MANUAL, &a_status));
	if(a_status != STATUS_OK) ORIGINATE_ERROR("Can not create capture request: %s", getArgusStatusMessage(a_status));

	iRequest = interface_cast<IRequest>(request);


	ISourceSettings* iSourceSettings(interface_cast<ISourceSettings>(iRequest->getSourceSettings()));

	ISensorMode* iSensorMode(interface_cast<ISensorMode>(iSourceSettings->getSensorMode()));
	Size2D<uint32_t> cameraResolution = iSensorMode->getResolution();

	std::cout << "Create Stream Settings ..." << std::endl;

	UniqueObj<OutputStreamSettings> streamSettings;
	streamSettings.reset(iCaptureSession->createOutputStreamSettings(STREAM_TYPE_EGL, &a_status));
	if(a_status != STATUS_OK){
		ORIGINATE_ERROR("Can not create egl output stream settings: %s", getArgusStatusMessage(a_status));
	}

	IOutputStreamSettings* iStreamSettings(interface_cast<IOutputStreamSettings>(streamSettings));
	iStreamSettings->setCameraDevice(cam);

	IEGLOutputStreamSettings* iEGLOutputStreamSettings(interface_cast<IEGLOutputStreamSettings>(streamSettings));
	iEGLOutputStreamSettings->setEGLDisplay(eglGetDisplay(EGL_DEFAULT_DISPLAY));
	iEGLOutputStreamSettings->setMode(EGL_STREAM_MODE_MAILBOX);
	iEGLOutputStreamSettings->setPixelFormat(PIXEL_FMT_YCbCr_420_888);
	iEGLOutputStreamSettings->setResolution(cameraResolution);

	UniqueObj<OutputStream> outputstream;
	std::cout << "Create Output Stream ..." << std::endl;

	outputstream.reset(iCaptureSession->createOutputStream(streamSettings.get(), &a_status));
	if(a_status != STATUS_OK){
		ORIGINATE_ERROR("Can not create buffer output stream: %s", getArgusStatusMessage(a_status));
	}

	//---------- Apply Settings ----------//

	IAutoControlSettings* iAutoControlSettings(interface_cast<IAutoControlSettings>(iRequest->getAutoControlSettings()));

	iAutoControlSettings->setAwbMode(AWB_MODE_MANUAL);
	iAutoControlSettings->setWbGains(AWBGains);
	iAutoControlSettings->setIspDigitalGainRange(Range<float>(1.0f, 1.0f));

	iAutoControlSettings->setColorCorrectionMatrixEnable(true);
	iAutoControlSettings->setColorCorrectionMatrix(ccm);

	iSourceSettings->setOpticalBlackEnable(true);
	iSourceSettings->setOpticalBlack(OpticalBlack);

	iSourceSettings->setExposureTimeRange(Range<uint64_t>(ExposureTime, ExposureTime));
	iSourceSettings->setGainRange(GainRange);



	//---------- Enable Outputstream ----------//
	std::cout << "Enable Output Stream ..." << std::endl;

	a_status = iRequest->enableOutputStream(outputstream.get());
	if (a_status != STATUS_OK){
		ORIGINATE_ERROR("Failed to enable output stream; %s", getArgusStatusMessage(a_status));
	}

	//---------- Create JPEG Encoder ----------//
	std::cout << "Create JEPG Encoder ..." << std::endl;

	uint32_t buffer_size = cameraResolution.area();

	unsigned char* m_OutputBuffer(new unsigned char[buffer_size]);
	if (!m_OutputBuffer) ORIGINATE_ERROR("Failed to create output buffer for JPEG encoder.");

	NvJPEGEncoder* m_JpegEncoder(NvJPEGEncoder::createJPEGEncoder("jpegenc0"));
	if (!m_JpegEncoder) ORIGINATE_ERROR("Failed to create JPEGEncoder.");


	//---------- Capture one frame ----------//


	int dma_fd = -1;

	std::cout << "Create Frame  Consumer ..." << std::endl;
	UniqueObj<FrameConsumer> frameConsumer = UniqueObj<FrameConsumer>(FrameConsumer::create(outputstream.get()));
	IFrameConsumer* iFrameConsumer(interface_cast<IFrameConsumer>(frameConsumer.get()));

	std::cout << "Start Capture ..." << std::endl;
	iCaptureSession->capture(request.get(), TIMEOUT_INFINITE);

	std::cout << "Acquire Frame ..." << std::endl;

	UniqueObj<Frame> frame(iFrameConsumer->acquireFrame(TIMEOUT_INFINITE, &a_status));
	IFrame* iFrame(interface_cast<IFrame>(frame));

	if(a_status == STATUS_TIMEOUT){
		ORIGINATE_ERROR("Timeout Buffer\n");
		return true;
	}
	if(a_status != STATUS_OK)
	{
		ORIGINATE_ERROR("Error at Buffer request, Status: %s.\n", getArgusStatusMessage(a_status));
	}

	NV::IImageNativeBuffer *iNativeBuffer = interface_cast<NV::IImageNativeBuffer>(iFrame->getImage());
	if (dma_fd == -1)
	{
		dma_fd = iNativeBuffer->createNvBuffer(cameraResolution,
												 NvBufferColorFormat_NV12,
												 NvBufferLayout_BlockLinear,
												 NV::ROTATION_0);

		if (dma_fd == -1){
			ORIGINATE_ERROR("\tFailed to create NvBuffer\n");
		}
	}
	else if (iNativeBuffer->copyToNvBuffer(dma_fd, NV::ROTATION_0) != STATUS_OK)
	{
		ORIGINATE_ERROR("Failed to copy frame to NvBuffer.");
	}

	std::cout << "Save JPEG ..." << std::endl;
	std::ofstream *outputFile = new std::ofstream(filename);

	if (outputFile)
	{
		unsigned long size = buffer_size;
		unsigned char *buffer = m_OutputBuffer;
		m_JpegEncoder->encodeFromFd(dma_fd, JCS_YCbCr, &buffer, size, 75);
		outputFile->write((char *)buffer, size);
		delete outputFile;

		std::cout << "Saved Capture to: " << filename << std::endl;
	}

	std::cout << "Cleanup ..." << std::endl;
	delete m_JpegEncoder;
	delete m_OutputBuffer;
	frameConsumer.reset();
	outputstream.reset();
	streamSettings.reset();
	request.reset();
	captureSession.reset();
}

int main(int argc, char *argv[])
{

	Argus::Status a_status;

	std::cout << "Create Camera Provider ..." << std::endl;
	UniqueObj<CameraProvider> cameraProvider;

	cameraProvider.reset(CameraProvider::create(&a_status));
	if(a_status != STATUS_OK){
		ORIGINATE_ERROR("Can not create camera Provider");
	}

	std::vector<CameraDevice*> cameraDevices;
	ICameraProvider *iCameraProvider(interface_cast<ICameraProvider>(cameraProvider));
	iCameraProvider->getCameraDevices(&cameraDevices);
	if(cameraDevices.size() <= 0){
		ORIGINATE_ERROR("No Camera found");
	}

	std::vector<float> ccm1({1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0});
	captuereFrame(iCameraProvider, cameraDevices[0], "test_100_010_001.jpg", ccm1);

	std::vector<float> ccm2({0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0});
	captuereFrame(iCameraProvider, cameraDevices[0], "test_000_010_001.jpg", ccm2);

	std::vector<float> ccm3({0.0, 0.0, 0.0, 0.0, 0.7, 0.0, 0.0, 0.0, 1.0});
	captuereFrame(iCameraProvider, cameraDevices[0], "test_000_0070_001.jpg", ccm3);

	std::vector<float> ccm4({0.0, 0.0, 0.0, 0.0, 0.6, 0.0, 0.0, 0.0, 1.0});
	captuereFrame(iCameraProvider, cameraDevices[0], "test_000_0060_001.jpg", ccm4);

	std::vector<float> ccm5({0.0, 0.0, 0.0, 0.0, 0.65809, 0.0, 0.0, 0.0, 1.0});
	captuereFrame(iCameraProvider, cameraDevices[0], "test_000_00658090_001.jpg", ccm5);


	cameraProvider.reset();


	 return EXIT_SUCCESS;
}

May I know the version.

cat /etc/nv_tegra_release

R32 (release), REVISION: 7.3, GCID: 31982016, BOARD: t186ref, EABI: aarch64, DATE: Tue Nov 22 17:32:54 UTC 2022

Jetpack version is: 4.6.2