How do I encode the original image from NvBufSurface as JPG via NvJPEGEncoder?

Hello everyone. I got raw image data from NvBufSurface and I want to encode it as JPG with NvJPEGEncoder, but there is a problem with the color format that causes the encoded image to display incorrectly.
When encoding through NvJPEGEncoder, the format fed to the encoder is V4L2_PIX_FMT_YUV420M; it seems NvJPEGEncoder can only take V4L2_PIX_FMT_YUV420M or V4L2_PIX_FMT_NV12.
I tried changing the color format, but it didn't work out right. This problem has been bothering me for many days. Please help me. Thanks a lot.
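
For reference, the two formats differ only in memory layout: NV12 stores a full-resolution Y plane followed by one interleaved UV plane, while YUV420M (I420) keeps three separate Y, U and V planes. Below is a minimal CPU-side repacking sketch with OpenCV, assuming the frame was copied out of the NvBufSurface as tightly packed RGBA (pitch == width * 4); rgba_to_i420 is a hypothetical helper, not part of any sample:

#include <vector>
#include <opencv2/imgproc.hpp>

// Hypothetical helper: repack a packed RGBA frame into the planar I420 layout
// that V4L2_PIX_FMT_YUV420M expects (full Y plane, then U, then V).
static std::vector<unsigned char> rgba_to_i420(const unsigned char *src,
                                               int width, int height)
{
    cv::Mat rgba(height, width, CV_8UC4, const_cast<unsigned char *>(src));
    cv::Mat i420;
    // COLOR_RGBA2YUV_I420 emits the three planes contiguously as one CV_8UC1
    // Mat of (height * 3 / 2) rows: width*height + 2 * (width/2)*(height/2) bytes.
    cv::cvtColor(rgba, i420, cv::COLOR_RGBA2YUV_I420);
    return std::vector<unsigned char>(i420.data, i420.data + i420.total());
}

If the surface rows are padded (pitch > width * 4), each row has to be copied out individually first; feeding pitched or NV12 data to a consumer that expects packed I420 skews the planes, which produces exactly the kind of corrupted output described above.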

My code is as follows; I modified it based on the deepstream_nvdsanalytics_meta example.

extern "C" void save_frame_with_bbox(GstBuffer *buf)
{
    NvDsMetaList *l_frame = NULL;
    NvDsMetaList *l_obj = NULL;
    NvDsObjectMeta *obj_meta = NULL;

    // Get the original raw data; keep the buffer mapped while the surface is in use
    GstMapInfo in_map_info = GST_MAP_INFO_INIT;
    if (!gst_buffer_map(buf, &in_map_info, GST_MAP_READ))
    {
        g_print("Error: Failed to map gst buffer\n");
        return;
    }
    NvBufSurface *surface = (NvBufSurface *)in_map_info.data;

    uint32_t frame_data_size = 0;
    unsigned char *src_data = NULL;
    std::vector<cv::Rect> objsRect;

    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
    for (l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
        guint object_num = 0, person_num = 0;
        for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
            obj_meta = (NvDsObjectMeta *)(l_obj->data);
            if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
                objsRect.emplace_back(obj_meta->rect_params.left, obj_meta->rect_params.top,
                                      obj_meta->rect_params.width, obj_meta->rect_params.height);
                person_num++;
                object_num++;
            }
        }
        g_print("test: person_num=%u, object_num=%u, batchId=%u, srcId=%u\n",
                person_num, object_num, frame_meta->batch_id, frame_meta->source_id);
        if (person_num == 0) {
            char txt[32] = { 0 };
            snprintf(txt, sizeof(txt), "%s-%u", Key_Person_Detect, frame_meta->source_id);
            RedisHelper::instance()->setCommand(txt, "0");
            continue;
        }
        else {
            char txt[32] = { 0 };
            snprintf(txt, sizeof(txt), "%s-%u", Key_Person_Detect, frame_meta->source_id);
            RedisHelper::instance()->setCommand(txt, "1");
        }

        // Grow the host staging buffer if this frame is larger than any seen so far
        if (frame_data_size < surface->surfaceList[frame_meta->batch_id].dataSize) {
            if (src_data != NULL) {
                free(src_data);
                src_data = NULL;
            }
            src_data = (unsigned char *)malloc(surface->surfaceList[frame_meta->batch_id].dataSize);
            if (src_data == NULL) {
                g_print("Error: failed to malloc src_data\n");
                gst_buffer_unmap(buf, &in_map_info);
                return;
            }
            frame_data_size = surface->surfaceList[frame_meta->batch_id].dataSize;
        }
        else {
            memset(src_data, 0, frame_data_size);
        }

        // Copy the frame from device memory into the host buffer
        cudaMemcpy((void *)src_data,
                   (void *)surface->surfaceList[frame_meta->batch_id].dataPtr,
                   surface->surfaceList[frame_meta->batch_id].dataSize,
                   cudaMemcpyDeviceToHost);

        gint frame_width = (gint)surface->surfaceList[frame_meta->batch_id].width;
        gint frame_height = (gint)surface->surfaceList[frame_meta->batch_id].height;
        gint frame_step = surface->surfaceList[frame_meta->batch_id].pitch;
        gint color_format = surface->surfaceList[frame_meta->batch_id].colorFormat;

        JpegEncoder::instance()->encode(frame_meta->source_id, src_data, frame_data_size, frame_width, frame_height);

        cv::Mat frame;
        cv::Mat out_mat = cv::Mat(cv::Size(frame_width, frame_height), CV_8UC3);
        switch (color_format)
        {
        case NVBUF_COLOR_FORMAT_NV12:
            frame = cv::Mat(frame_height * 3 / 2, frame_width, CV_8UC1, src_data, frame_step);
            cv::cvtColor(frame, out_mat, cv::COLOR_YUV2BGR_NV12);
            std::cout << "test nv12!" << std::endl;
            break;
        case NVBUF_COLOR_FORMAT_RGBA:
            frame = cv::Mat(frame_height, frame_width, CV_8UC4, src_data, frame_step);
            cv::cvtColor(frame, out_mat, cv::COLOR_RGBA2BGR);
            break;
        default:
            break;
        }

        std::cout << "test objsRect.size = " << objsRect.size() << std::endl;
        for (uint i = 0; i < objsRect.size(); ++i) {
            cv::rectangle(out_mat, objsRect[i], cv::Scalar(0, 255, 0), 1);
            std::cout << "test draw rect " << i << std::endl;
        }
    }
    if (src_data != NULL) {
        free(src_data);
        src_data = NULL;
    }
    gst_buffer_unmap(buf, &in_map_info);
}
The relevant encode code is as follows; I made the changes based on the 05_jpeg_encode example in jetson_multimedia_api.
void JpegEncoder::encode(int srcId, unsigned char *src_data, uint data_len, int width, int height, int colorFmt)
{
    // Hardcoded for debugging
    width = 1920;
    height = 1080;
    colorFmt = V4L2_PIX_FMT_YUV420M; // also tried V4L2_PIX_FMT_NV12M
    std::map<int, TContext *>::iterator iter = this->ctx_map.find(srcId);
    if (iter == this->ctx_map.end())
    {
        TContext *ctx = new TContext();
        ctx->set_context(srcId, width, height, colorFmt);
        this->ctx_map.insert(std::make_pair(srcId, ctx));
    }
    else
    {
        iter->second->set_context(srcId, width, height, colorFmt);
    }

    g_print("encode: srcId=%d, dataLen=%u, width=%d, height=%d\n", srcId, data_len, width, height);
    // jpeg_encode_proc returns 0 on success and a negative value on failure
    if (this->jpeg_encode_proc(srcId, src_data, data_len) != 0)
    {
        g_print("jpeg_encode_proc run failed\n");
    }
    return;
}

int JpegEncoder::jpeg_encode_proc(int srcId, unsigned char *src_yuv_data, uint src_yuv_len)
{
    int ret = 0;
    int error = 0;

    TContext *ctx = this->get_context(srcId);
    if (!ctx) {
        return -1;
    }

    ctx->of_stream = new std::ofstream(ctx->outfile_path());
    TEST_ERROR(!ctx->of_stream->is_open(), "Could not open output file", cleanup);

    ctx->jpegenc = NvJPEGEncoder::createJPEGEncoder("jpenenc");
    TEST_ERROR(!ctx->jpegenc, "Could not create Jpeg Encoder", cleanup);

    // Case 2: Read a YUV420 image from a CPU buffer, convert it to a hardware
    // buffer (DMA buffer fd), encode it with encodeFromFd(), then write the
    // result to the file system.
    // Note: while converting to the hardware buffer, NvVideoConverter may
    // convert YUV420 to NV12, depending on ctx->in_pixfmt.
    ctx->convert = NvVideoConverter::createVideoConverter("conv");
    TEST_ERROR(!ctx->convert, "Could not create Video Converter", cleanup);

    /* Set conv output plane format (V4L2_PIX_FMT_YUV420M) */
    ret = ctx->convert->setOutputPlaneFormat(V4L2_PIX_FMT_YUV420M, ctx->in_width,
                                             ctx->in_height,
                                             V4L2_NV_BUFFER_LAYOUT_PITCH);
    TEST_ERROR(ret < 0, "Could not set output plane format for conv", cleanup);

    /* Set conv capture plane format, YUV420 or NV12 */
    ret = ctx->convert->setCapturePlaneFormat(ctx->in_pixfmt, ctx->in_width,
                                              ctx->in_height,
                                              V4L2_NV_BUFFER_LAYOUT_BLOCKLINEAR);
    TEST_ERROR(ret < 0, "Could not set capture plane format for conv", cleanup);

    /* REQBUF, EXPORT and MAP conv output plane buffers */
    ret = ctx->convert->output_plane.setupPlane(V4L2_MEMORY_MMAP, 1, true, false);
    TEST_ERROR(ret < 0, "Error while setting up output plane for conv", cleanup);

    /* REQBUF and EXPORT conv capture plane buffers. No need to MAP, since the
       buffer is shared with the next component and not read by the application. */
    ret = ctx->convert->capture_plane.setupPlane(V4L2_MEMORY_MMAP, 1, !ctx->use_fd, false);
    TEST_ERROR(ret < 0, "Error while setting up capture plane for conv", cleanup);

    /* conv output plane STREAMON */
    ret = ctx->convert->output_plane.setStreamStatus(true);
    TEST_ERROR(ret < 0, "Error in output plane streamon for conv", cleanup);

    /* conv capture plane STREAMON */
    ret = ctx->convert->capture_plane.setStreamStatus(true);
    TEST_ERROR(ret < 0, "Error in capture plane streamon for conv", cleanup);

    /* Register the dequeue-thread callback on the conv capture plane; the callback
       encodes the YUV420 or NV12 image to JPEG and writes it to the file system. */
    ctx->convert->capture_plane.setDQThreadCallback(conv_capture_dqbuf_thread_callback);

    // Start the thread that dequeues buffers on the conv capture plane
    ctx->convert->capture_plane.startDQThread(ctx);

    /* Enqueue all empty conv capture plane buffers; in this case getNumBuffers()
       is 1, so a single buffer is enqueued. */
    for (uint32_t i = 0; i < ctx->convert->capture_plane.getNumBuffers(); i++)
    {
        struct v4l2_buffer v4l2_buf;
        struct v4l2_plane planes[MAX_PLANES];

        memset(&v4l2_buf, 0, sizeof(v4l2_buf));
        memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));

        v4l2_buf.index = i;
        v4l2_buf.m.planes = planes;

        ret = ctx->convert->capture_plane.qBuffer(v4l2_buf, NULL);
        if (ret < 0)
        {
            std::cerr << "Error while queueing buffer at conv capture plane" << std::endl;
            abort(ctx);
            goto cleanup;
        }
    }

    /* Copy the YUV420 image into the conv output plane buffer and enqueue it so
       conv can start processing. */
    {
        struct v4l2_buffer v4l2_buf;
        struct v4l2_plane planes[MAX_PLANES];
        NvBuffer *buffer = ctx->convert->output_plane.getNthBuffer(0);

        memset(&v4l2_buf, 0, sizeof(v4l2_buf));
        memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));

        v4l2_buf.index = 0;
        v4l2_buf.m.planes = planes;

        // Walk through the source across all planes; this assumes src_yuv_data is
        // tightly packed planar YUV420 (Y, then U, then V). If the source is NV12
        // or pitch-padded, the planes end up misaligned and the JPEG is corrupted.
        unsigned char *srcdata = src_yuv_data;
        for (uint32_t i = 0; i < buffer->n_planes; i++)
        {
            NvBuffer::NvBufferPlane &plane = buffer->planes[i];
            plane.bytesused = 0;
            uint32_t bytes_per_row = plane.fmt.bytesperpixel * plane.fmt.width;
            unsigned char *dstdata = plane.data;

            for (uint32_t j = 0; j < plane.fmt.height; j++)
            {
                // Copy one packed source row into the (possibly wider) destination stride
                memcpy(dstdata, srcdata, bytes_per_row);
                srcdata += bytes_per_row;
                dstdata += plane.fmt.stride;
            }
            plane.bytesused = plane.fmt.stride * plane.fmt.height;
        }

        ret = ctx->convert->output_plane.qBuffer(v4l2_buf, NULL);
        if (ret < 0)
        {
            std::cerr << "Error while queueing buffer at conv output plane" << std::endl;
            abort(ctx);
            goto cleanup;
        }
    }

    /* Wait till all capture plane buffers on conv are dequeued */
    ctx->convert->capture_plane.waitForDQThread(2000);

cleanup:
    if (ctx->convert && ctx->convert->isInError())
    {
        std::cerr << "VideoConverter is in error" << std::endl;
        error = 1;
    }
    if (ctx->got_error) {
        error = 1;
    }
    if (ctx->of_stream) {
        delete ctx->of_stream;
    }
    /* Destructors do the rest of the cleanup: unmapping and deallocating buffers
       and calling v4l2_close on the fd */
    if (ctx->convert) {
        delete ctx->convert;
    }
    if (ctx->jpegenc) {
        delete ctx->jpegenc;
    }

    return -error;
}

static bool conv_capture_dqbuf_thread_callback(struct v4l2_buffer *v4l2_buf, NvBuffer *buffer, NvBuffer *shared_buffer, void *arg)
{
    TContext *ctx = (TContext *)arg;
    unsigned long out_buf_size = ctx->in_width * ctx->in_height * 3 / 2;
    unsigned char *out_buf = new unsigned char[out_buf_size];
    int iterator_num = ctx->perf ? PERF_LOOP : 1;
    int ret;

    if (!v4l2_buf)
    {
        std::cerr << "Failed to dequeue buffer from conv capture plane" << std::endl;
        abort(ctx);
        delete[] out_buf;
        return false;
    }

    if (v4l2_buf->m.planes[0].bytesused > 0)
    {
        for (int i = 0; i < iterator_num; ++i)
        {
            // encodeFromFd updates out_buf_size to the actual JPEG size
            ret = ctx->jpegenc->encodeFromFd(buffer->planes[0].fd, JCS_YCbCr, &out_buf,
                                             out_buf_size, ctx->quality);
            if (ret < 0)
            {
                std::cerr << "Error while encoding from fd" << std::endl;
                ctx->got_error = true;
                break;
            }
            else {
                std::cout << "encodeFromFd success!" << std::endl;
            }
        }
        if (ret >= 0)
        {
            ctx->net_send = false; // force the file-output path for debugging
            if (ctx->net_send)
            {
                // send to network
            }
            else
            {
                if (ctx->of_stream)
                {
                    ctx->of_stream->write((char *)out_buf, out_buf_size);
                }
            }
        }
    }
    delete[] out_buf;
    return false;
}

Setup information:
• Hardware Platform: Jetson NX
• DeepStream Version: 5.0
• JetPack Version: 4.4.1

Hi,
NvBufSurface is not supported by NvJPEGEncoder. Please check the explanation in
How can i get jpeg data from the plugin nvjpegenc? - #17 by DaneLLL
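
For context, a commonly suggested route on Jetson is to keep the frame in a hardware buffer and encode straight from its DMABUF fd, instead of copying it through the CPU. Below is a rough sketch under JetPack 4.4's nvbuf_utils API; it assumes the default Jetson DeepStream memory type, where surfaceList[batch_id].bufferDesc holds the frame's DMABUF fd. encode_surface_to_jpeg is a hypothetical helper, not part of DeepStream or jetson_multimedia_api, and error handling is abbreviated:

#include <nvbuf_utils.h>
#include "NvJpegEncoder.h"

// Sketch: blit the DeepStream frame into a YUV420 hardware buffer with VIC,
// then encode directly from the dmabuf fd.
static int encode_surface_to_jpeg(NvBufSurface *surface, unsigned int batch_id,
                                  NvJPEGEncoder *jpegenc,
                                  unsigned char **out_buf, unsigned long &out_size)
{
    NvBufferCreateParams create_params = {0};
    create_params.width = surface->surfaceList[batch_id].width;
    create_params.height = surface->surfaceList[batch_id].height;
    create_params.colorFormat = NvBufferColorFormat_YUV420;
    create_params.payloadType = NvBufferPayload_SurfArray;
    create_params.layout = NvBufferLayout_Pitch;
    create_params.nvbuf_tag = NvBufferTag_JPEG;

    int dst_fd = -1;
    if (NvBufferCreateEx(&dst_fd, &create_params) != 0)
        return -1;

    // Assumption: with the default Jetson memory type, bufferDesc holds the
    // DMABUF fd of the frame's hardware buffer.
    int src_fd = (int)surface->surfaceList[batch_id].bufferDesc;

    // Convert/copy the source frame (e.g. NV12 block-linear) into the
    // pitch-linear YUV420 destination buffer.
    NvBufferTransformParams transform_params = {0};
    transform_params.transform_flag = NVBUFFER_TRANSFORM_FILTER;
    transform_params.transform_filter = NvBufferTransform_Filter_Smart;

    int ret = NvBufferTransform(src_fd, dst_fd, &transform_params);
    if (ret == 0)
        ret = jpegenc->encodeFromFd(dst_fd, JCS_YCbCr, out_buf, out_size);

    NvBufferDestroy(dst_fd);
    return ret;
}

This sidesteps the cudaMemcpy, the NvVideoConverter pipeline, and the NV12-vs-YUV420M repacking above, because NvBufferTransform hands the encoder a buffer already in the layout it expects. Treat the exact fields and flags as assumptions to verify against the linked thread and the nvbuf_utils header of your JetPack release.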