How to save an image to a file?

Hi,
I have image data in GPU memory, in EGLframe format, and I want to encode it to a file, compress it quickly, and read it back to the GPU or CPU quickly. Is there a better way than OpenCV imwrite and imread? I have seen the demo in tegra_multimedia_api/samples/jpeg_encode, but it seems to take a V4L2 format; how can I construct that?

Thanks.

Hi ClancyLian,
The jpeg encoder supports YUV420 and NV12. If your format is different, it cannot help in this case.

Hi,

I can convert the image data to YUV420, but I don't know how to save it to a file quickly and read it back quickly.

Thanks.

Hi ClancyLian,
A possible solution for your case is to allocate an NvBuffer:

/**
 * Use this method to allocate HW buffer.
 * @param[out] dmabuf_fd Returns `dmabuf_fd` of hardware buffer.
 * @param[in] width Hardware buffer width, in bytes.
 * @param[in] height Hardware buffer height, in bytes.
 * @param[in] layout Layout of buffer.
 * @param[in] colorFormat The `colorFormat` of buffer.
 *
 * @returns 0 for success, -1 for failure
 */
int NvBufferCreate (int *dmabuf_fd, int width, int height,
    NvBufferLayout layout, NvBufferColorFormat colorFormat);

and get eglimage via

/**
* This method must be used for getting `EGLImage` from `dmabuf-fd`.
*
* @param[in] display `EGLDisplay` object used during the creation of `EGLImage`.
* @param[in] dmabuf_fd `DMABUF FD` of buffer from which `EGLImage` to be created.
*
* @returns `EGLImageKHR` for success, `NULL` for failure
*/
EGLImageKHR NvEGLImageFromFd (EGLDisplay display, int dmabuf_fd);

copy your data into this eglimage and do

/**
 * Encodes a JPEG image from a file descriptor (FD) of hardware
 * buffer memory.
 *
 * The application may allocate the memory for storing the JPEG
 * image. If the allocation is less than what is required, @c
 * libjpeg allocates more memory. The @a out_buf pointer and @a
 * out_buf_size are updated accordingly.
 *
 * Supports YUV420 and NV12 formats.
 *
 * @attention The application must free the @a out_buf memory.
 *
 * @param[in] fd Indicates the file descriptor (FD) of the hardware buffer.
 * @param[in] color_space Indicates the color_space to use for encoding.
 * @param[out] out_buf Specifies a pointer to the memory for the JPEG image.
 * @param[out] out_buf_size Specifies the size of the output buffer in bytes.
 * @return 0 for success, -1 otherwise.
 */
int encodeFromFd(int fd, J_COLOR_SPACE color_space,
                 unsigned char **out_buf, unsigned long &out_buf_size);

This should be an optimized path, although there is still an EGLImage copy.
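
Putting the three calls together, a rough sketch of the flow might look like the following (error handling trimmed; the EGL display setup via eglGetDisplay/eglInitialize, the 1920x1080 size, the output path, and the header names are assumptions, and the copy into the EGLImage is covered further down in this thread):

#include <fstream>
#include <EGL/egl.h>
#include "NvJpegEncoder.h"
#include "nvbuf_utils.h"

void encodeOneFrame()
{
    NvJPEGEncoder *jpegenc = NvJPEGEncoder::createJPEGEncoder("jpenenc");

    // Allocate the hardware buffer and wrap it in an EGLImage.
    int fd = -1;
    NvBufferCreate(&fd, 1920, 1080, NvBufferLayout_Pitch, NvBufferColorFormat_YUV420);

    EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    eglInitialize(display, NULL, NULL);
    EGLImageKHR eglImage = NvEGLImageFromFd(display, fd);

    // ... copy the YUV420 data into the hardware buffer here (see below) ...

    // Encode the hardware buffer to JPEG and write it out.
    unsigned long out_buf_size = 1920 * 1080 * 3 / 2;
    unsigned char *out_buf = new unsigned char[out_buf_size];
    if (jpegenc->encodeFromFd(fd, JCS_YCbCr, &out_buf, out_buf_size) == 0)
    {
        std::ofstream out("frame.jpg");
        out.write((char *)out_buf, out_buf_size);
    }

    delete[] out_buf;
    NvDestroyEGLImage(display, eglImage);
    NvBufferDestroy(fd);
    delete jpegenc;
}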

Other users may also share their experience.

Hi DaneLLL,

I have just tried it. The code looks like this:

NvJPEGEncoder *jpegenc = NvJPEGEncoder::createJPEGEncoder("jpenenc");
jpegenc->setCropRect(0, 0, 1920, 1080);

int fd;
if (-1 == NvBufferCreate(&fd, 1920, 1080, NvBufferLayout_Pitch, NvBufferColorFormat_YUV420)) {
    cout << "create nvbuffer failed" << endl;
}

EGLDisplay display = EGLDisplayAccessor::getInstance();
EGLImageKHR eglImage = NvEGLImageFromFd(display, fd);
if (eglImage == NULL) {
    cout << "create eglImage failed" << endl;
}

// TODO
// How to copy image data to eglImage?

unsigned long out_buf_size = 1920 * 1080 * 3 / 2;
unsigned char *out_buf = new unsigned char[out_buf_size];
int ret = jpegenc->encodeFromFd(fd, JCS_YCbCr, &out_buf, out_buf_size);
TEST_ERROR(ret < 0, "Error while encoding from buffer", cleanup);

std::string out_file_path = "tmp/1.jpg";
std::ofstream *out_file = new std::ofstream(out_file_path);
out_file->write((char *)out_buf, out_buf_size);

out_file->close();
delete out_file;
delete[] out_buf;

I don't know how to copy the data into the eglImage. Which function should I call?

Thanks.

I think you can copy through CUDA. The following is a code snippet.

CUgraphicsResource dstcuResource = 0;

cuResult = cuGraphicsEGLRegisterImage(&dstcuResource, dstEglImage, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
if (cuResult != CUDA_SUCCESS)
{
    ORIGINATE_ERROR("Unable to register CUDA resource (CUresult %s)",
        getCudaErrorString(cuResult));
}

CUeglFrame dst_cudaEGLFrame;
memset(&dst_cudaEGLFrame, 0, sizeof(dst_cudaEGLFrame));
cuResult = cuGraphicsResourceGetMappedEglFrame(&dst_cudaEGLFrame, dstcuResource, 0, 0);

//PROPAGATE_ERROR(printCUDAEGLFrame(dst_cudaEGLFrame));

if (cuResult != CUDA_SUCCESS)
{
    ORIGINATE_ERROR("Unable to get the DST CUDA EGL frame (CUresult %s).",
        getCudaErrorString(cuResult));
}

// Create SurfaceObject as Dst.
struct cudaResourceDesc dst_cudaResourceDesc[2];
for (int i = 0; i < 2; i++)
{
    memset(&dst_cudaResourceDesc[i], 0, sizeof(struct cudaResourceDesc));
    dst_cudaResourceDesc[i].resType = cudaResourceTypeArray;
    dst_cudaResourceDesc[i].res.array.array = (cudaArray_t)dst_cudaEGLFrame.frame.pArray[i];
}

cudaSurfaceObject_t dst_cudaSurfObj[2] = {0};
for (int i = 0; i < 2; i++)
    CUDA_CHECK(cudaCreateSurfaceObject(&dst_cudaSurfObj[i], &dst_cudaResourceDesc[i]));

_your_cuda_kernel_(src_cudaTexObj[i], dst_cudaSurfObj[i], width, height, channel);
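
For reference, the `_your_cuda_kernel_` placeholder above could be a simple per-plane copy kernel along these lines (the single-byte element type, kernel name, and launch configuration are assumptions for illustration):

// Hypothetical per-plane copy: read one byte from the source texture and
// write it to the destination surface backed by the EGLImage's cudaArray.
__global__ void copyPlane(cudaTextureObject_t src, cudaSurfaceObject_t dst,
                          int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height)
    {
        unsigned char v = tex2D<unsigned char>(src, x, y);
        surf2Dwrite(v, dst, x, y);   // x offset is in bytes; 1 byte per element here
    }
}

// Example launch for one plane:
// dim3 block(16, 16);
// dim3 grid((width + 15) / 16, (height + 15) / 16);
// copyPlane<<<grid, block>>>(src_cudaTexObj[i], dst_cudaSurfObj[i], width, height);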

Hi,

This is my test demo, but the output image is a green picture and I don't know why. Should NvBufferCreate use NvBufferLayout_BlockLinear or NvBufferLayout_Pitch? Or is there some other error in it?

NvJPEGEncoder *jpegenc = NvJPEGEncoder::createJPEGEncoder("jpenenc");
jpegenc->setCropRect(0, 0, 1920, 1080);

int fd;
if (-1 == NvBufferCreate(&fd, 1920, 1080, NvBufferLayout_BlockLinear, NvBufferColorFormat_YUV420)) {
    cout << "create nvbuffer failed" << endl;
}

EGLDisplay display = EGLDisplayAccessor::getInstance();
EGLImageKHR eglImage = NvEGLImageFromFd(display, fd);
if (eglImage == NULL) {
    cout << "create eglImage failed" << endl;
}

CUgraphicsResource resource;
CUresult status;
cudaFree(0);
status = cuGraphicsEGLRegisterImage(&resource, eglImage, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
if (status != CUDA_SUCCESS) {
    cout << "cuGraphicsEGLRegisterImage failed:" << status << endl;
}

CUeglFrame frame;
memset(&frame, 0, sizeof(frame));
status = cuGraphicsResourceGetMappedEglFrame(&frame, resource, 0, 0);
if (status != CUDA_SUCCESS) {
    cout << "cuGraphicsResourceGetMappedEglFrame failed:" << status << endl;
}

cudaMemcpy(frame.frame.pArray[0], YUV, 1920 * 1080, cudaMemcpyDeviceToDevice);
cudaMemcpy(frame.frame.pArray[1], YUV + 1920 * 1080, 1920 * 1080 / 4, cudaMemcpyDeviceToDevice);
cudaMemcpy(frame.frame.pArray[2], YUV + 1920 * 1080 * 5 / 4, 1920 * 1080 / 4, cudaMemcpyDeviceToDevice);

status = cuCtxSynchronize();

unsigned long out_buf_size = 1920 * 1080 * 3 / 2;
unsigned char *out_buf = new unsigned char[out_buf_size];
int ret = jpegenc->encodeFromFd(fd, JCS_YCbCr, &out_buf, out_buf_size);
//TEST_ERROR(ret < 0, "Error while encoding from buffer", cleanup);

std::string out_file_path = "1.jpg";
std::ofstream *out_file = new std::ofstream(out_file_path);
out_file->write((char *)out_buf, out_buf_size);

out_file->close();
delete out_file;
delete[] out_buf;

I'm not sure whether the eglFrame can be used with cudaMemcpy directly. My usage was to create a cudaResourceDesc and SurfaceObject and then do the copy.

Hi,

I have one question: should the YUV420 layout be a cudaArray or pitch-linear?

Thanks.

It should work by using a cudaArray.

You can dump the information (structure) in the EGLframe. This function is in the Argus samples. Please take a look.

bool printCUDAEGLFrame(const CUeglFrame &cudaEGLFrame)
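
In case it is useful, here is a rough sketch of such a dump, using the CUeglFrame fields from cudaEGL.h (the real Argus helper prints similar information):

#include <cstdio>
#include "cudaEGL.h"

// Print the layout information of a mapped CUeglFrame.
static void dumpCUDAEGLFrame(const CUeglFrame &f)
{
    printf("CUeglFrame: %ux%u, depth %u, pitch %u\n",
           f.width, f.height, f.depth, f.pitch);
    printf("  planeCount %u, numChannels %u\n", f.planeCount, f.numChannels);
    printf("  frameType %d (0 = cudaArray, 1 = pitch), eglColorFormat %d, cuFormat %d\n",
           (int)f.frameType, (int)f.eglColorFormat, (int)f.cuFormat);
}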

Hi,

Now I can encode the image to a file using the cudaMemcpyToArray() function instead of cudaMemcpy(); the code is like #7.

Now there is another problem: it can only encode the first image; from the second frame on it fails. I call NvBufferCreate only once and use the same fd and the same eglImage for all frames. Every frame I execute cuGraphicsResourceGetMappedEglFrame to get a CUeglFrame, use cudaMemcpyToArray to copy the data into the CUeglFrame, and then encode to a file, but only the first frame is right.

Hi ClancyLian,

The fd value should change, and the eglImage should be recreated from the fd again.

Hi WayneWWW,

How do I change the fd value? Should I call NvBufferCreate(&fd, 1920, 1080, NvBufferLayout_BlockLinear, NvBufferColorFormat_YUV420) every time?

Sorry that my comment was not right. Please ignore it.

Could you share your code now?

Hi ClancyLian,
Could you try to create and destroy the buffer every time?

/**
* This method must be used for destroying `hw_buffer`.
* @param[in] dmabuf_fd Specifies the `dmabuf_fd` `hw_buffer` to destroy.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferDestroy (int dmabuf_fd);
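
A minimal sketch of that per-frame create/use/destroy cycle, assuming the same 1920x1080 YUV420 setup as earlier in this thread:

int fd = -1;
if (NvBufferCreate(&fd, 1920, 1080,
                   NvBufferLayout_BlockLinear, NvBufferColorFormat_YUV420) == 0)
{
    EGLImageKHR eglImage = NvEGLImageFromFd(display, fd);

    // ... register with CUDA, copy the planes, jpegenc->encodeFromFd(fd, ...) ...

    NvDestroyEGLImage(display, eglImage);
    NvBufferDestroy(fd);
}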

Hi,

In the constructor, this executes only once to create an NvBuffer for the image; the fd is a member variable.

jpegenc = NvJPEGEncoder::createJPEGEncoder("jpenenc");
jpegenc->setCropRect(0, 0, FaceDetectionParams::cameraWidth, FaceDetectionParams::cameraHeight);

if (-1 == NvBufferCreate(&fd, FaceDetectionParams::cameraWidth, FaceDetectionParams::cameraHeight,
                         NvBufferLayout_BlockLinear, NvBufferColorFormat_YUV420)) {
    printf("Create nvbuffer failed.\n");
}

display = EGLDisplayAccessor::getInstance();
eglImage = NvEGLImageFromFd(display, fd);
if (eglImage == NULL) {
    printf("Create eglImage failed.\n");
}

if (cudaMalloc(&YUVI420, FaceDetectionParams::cameraHeight * FaceDetectionParams::cameraWidth * 3 / 2)
        != cudaSuccess) {
    printf("Malloc YUVI420's cuda buffer failed.\n");
    throw;
}

outBufSize = FaceDetectionParams::cameraHeight * FaceDetectionParams::cameraWidth * 3 / 2;
outBuf = new unsigned char[outBufSize];

And every frame is passed to this function:

// If the fd value does not change, the recreated eglImage will have the same value.
NvDestroyEGLImage(display, eglImage);
eglImage = NULL;
eglImage = NvEGLImageFromFd(display, fd);
if (eglImage == NULL) {
    printf("Create eglImage failed.\n");
}

time.reset();
CUgraphicsResource resource;
CUresult status;
//cudaFree(0);
status = cuGraphicsEGLRegisterImage(&resource, eglImage, CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD);
if (status != CUDA_SUCCESS) {
    printf("cuGraphicsEGLRegisterImage failed: %d.\n", status);
}
memset(&frame, 0, sizeof(frame));
status = cuGraphicsResourceGetMappedEglFrame(&frame, resource, 0, 0);
if (status != CUDA_SUCCESS) {
    printf("cuGraphicsResourceGetMappedEglFrame failed: %d.\n", status);
}
// Convert the RGBA image to YUV420 using NVX.
convertRGBA2YUVI420(fcp.imgData, YUVI420, FaceDetectionParams::cameraWidth, FaceDetectionParams::cameraHeight);
// Copy the YUV420 planes into the frame obtained from cuGraphicsResourceGetMappedEglFrame.
cudaMemcpyToArray((cudaArray_t)frame.frame.pArray[0], 0, 0, YUVI420,
        FaceDetectionParams::cameraWidth * FaceDetectionParams::cameraHeight, cudaMemcpyDeviceToDevice);
cudaMemcpyToArray((cudaArray_t)frame.frame.pArray[1], 0, 0,
        YUVI420 + FaceDetectionParams::cameraWidth * FaceDetectionParams::cameraHeight,
        FaceDetectionParams::cameraWidth * FaceDetectionParams::cameraHeight / 4, cudaMemcpyDeviceToDevice);
cudaMemcpyToArray((cudaArray_t)frame.frame.pArray[2], 0, 0,
        YUVI420 + FaceDetectionParams::cameraWidth * FaceDetectionParams::cameraHeight * 5 / 4,
        FaceDetectionParams::cameraWidth * FaceDetectionParams::cameraHeight / 4, cudaMemcpyDeviceToDevice);

cudaDeviceSynchronize();

//memset(outBuf, 0, sizeof(outBufSize));
int ret = jpegenc->encodeFromFd(fd, JCS_YCbCr, &outBuf, outBufSize);
if (ret == -1) {
    printf("jpeg encode failed.\n");
}

std::string outFilePath = "test/" + std::to_string(fcp.frameID) + "qq.jpg";
std::ofstream *outFile = new std::ofstream(outFilePath);
outFile->write((char *)outBuf, outBufSize);
delete outFile;

printf("test time info: %d ms.\n", time.elapsedMilliSeconds());

This is the code.

Hi,

I have tried creating and destroying every time using NvBufferDestroy and NvBufferCreate, but the result is the same as before.

Could you make sure that each time the input data is valid?

Hi,

I have seen the demo in tegra_multimedia_api/samples/09_camera_jpeg_capture; it seems to create the fd only once:

bool ConsumerThread::threadExecute()
{
    IStream *iStream = interface_cast<IStream>(m_stream);
    IFrameConsumer *iFrameConsumer = interface_cast<IFrameConsumer>(m_consumer);

    // Wait until the producer has connected to the stream.
    CONSUMER_PRINT("Waiting until producer is connected...\n");
    if (iStream->waitUntilConnected() != STATUS_OK)
        ORIGINATE_ERROR("Stream failed to connect.");
    CONSUMER_PRINT("Producer has connected; continuing.\n");

    while (true)
    {
        // Acquire a frame.
        UniqueObj<Frame> frame(iFrameConsumer->acquireFrame());
        IFrame *iFrame = interface_cast<IFrame>(frame);
        if (!iFrame)
            break;

        // Get the IImageNativeBuffer extension interface.
        NV::IImageNativeBuffer *iNativeBuffer =
            interface_cast<NV::IImageNativeBuffer>(iFrame->getImage());
        if (!iNativeBuffer)
            ORIGINATE_ERROR("IImageNativeBuffer not supported by Image.");

        // If we don't already have a buffer, create one from this image.
        // Otherwise, just blit to our buffer.
        if (m_dmabuf == -1)
        {
            m_dmabuf = iNativeBuffer->createNvBuffer(iStream->getResolution(),
                                                     NvBufferColorFormat_YUV420,
                                                     NvBufferLayout_BlockLinear);
            if (m_dmabuf == -1)
                CONSUMER_PRINT("\tFailed to create NvBuffer\n");
        }
        else if (iNativeBuffer->copyToNvBuffer(m_dmabuf) != STATUS_OK)
        {
            ORIGINATE_ERROR("Failed to copy frame to NvBuffer.");
        }

        // Process frame.
        processV4L2Fd(m_dmabuf, iFrame->getNumber());
    }

    CONSUMER_PRINT("Done.\n");

    requestShutdown();

    return true;
}

bool CaptureConsumerThread::processV4L2Fd(int32_t fd, uint64_t frameNumber)
{
    char filename[FILENAME_MAX];
    sprintf(filename, "output%03u.jpg", (unsigned) frameNumber);

    std::ofstream *outputFile = new std::ofstream(filename);
    if (outputFile)
    {
        unsigned long size = JPEG_BUFFER_SIZE;
        unsigned char *buffer = m_OutputBuffer;
        m_JpegEncoder->encodeFromFd(fd, JCS_YCbCr, &buffer, size);
        outputFile->write((char *)buffer, size);
        delete outputFile;
    }

    return true;
}