Access frame pointer in deepstream-app

I’m not sure why one gives only a single channel and the other gives all three.
It depends on the input stream type.

@ChrisDing

In my tests the input stream type was the same for both locations of code where the data is captured. But the number of channels returned was different.

In either case, I have it working as needed and expected, even though my code is technically blocking, it’s light enough to not be an issue. I will be rewriting the data capture code into a separate plugin similar to the dsexample sample. Just right now I’m building a basic application mainly for proof of concept and an initial version. So as far as the retrieval of the frame data goes, I have what I need from your example code.

Thank you for your help.

I was able to do this through a fake neural network, which just copies input to output. Then I used the bbox parsing function to get the bytes of the image. DeepStream should have a programmer-friendly interface to grab pixels of the stream.

Could you try this code?

/* NOTE(review): probe-body fragment (not a complete function). Copies each
 * batched frame's raw pixels out of the NvBufSurface into a host buffer and
 * hands it, plus any segmentation meta found on the frame, to dump_img(). */
#ifdef DUMP_IMG
    GstBuffer *buf = (GstBuffer *) info->data;
    NvDsMetaList * l_frame = NULL;
    NvDsMetaList * l_user_meta = NULL;
    NvDsUserMeta *user_meta = NULL;
    NvDsInferSegmentationMeta* seg_meta_data = NULL;
    // Get original raw data
    GstMapInfo in_map_info;
    char* src_data = NULL;
    if (!gst_buffer_map (buf, &in_map_info, GST_MAP_READ)) {
        g_print ("Error: Failed to map gst buffer\n");
        // NOTE(review): unmapping a buffer whose map call failed is incorrect
        // per the GstBuffer docs; this unmap should be removed.
        gst_buffer_unmap (buf, &in_map_info);
        return GST_PAD_PROBE_OK;
    }
    // For NVMM memory the mapped data is an NvBufSurface handle, not pixels.
    NvBufSurface *surface = (NvBufSurface *)in_map_info.data;

    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

    for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
        /* Validate user meta */
        // NOTE(review): seg_meta_data is never reset inside the frame loop, so
        // a frame without segmentation meta silently reuses the previous
        // frame's pointer.
        for (l_user_meta = frame_meta->frame_user_meta_list; l_user_meta != NULL;
            l_user_meta = l_user_meta->next) {
            user_meta = (NvDsUserMeta *) (l_user_meta->data);
            if (user_meta && user_meta->base_meta.meta_type == NVDSINFER_SEGMENTATION_META) {
                seg_meta_data = (NvDsInferSegmentationMeta*)user_meta->user_meta_data;
            }
        }

        // Host staging buffer sized for this frame's full (padded) data.
        src_data = (char*) malloc(surface->surfaceList[frame_meta->batch_id].dataSize);
        if(src_data == NULL) {
            g_print("Error: failed to malloc src_data \n");
            continue;
        }
#ifdef PLATFORM_TEGRA
        // Tegra: surface memory is CPU-mappable. Copy plane by plane, row by
        // row, dropping the pitch padding so src_data ends up tightly packed.
        NvBufSurfaceMap (surface, -1, -1, NVBUF_MAP_READ);
        NvBufSurfacePlaneParams *pParams = &surface->surfaceList[frame_meta->batch_id].planeParams;
        unsigned int offset = 0;
        for(unsigned int num_planes=0; num_planes < pParams->num_planes; num_planes++){
            if(num_planes>0)
                offset += pParams->height[num_planes-1]*(pParams->bytesPerPix[num_planes-1]*pParams->width[num_planes-1]);
            for (unsigned int h = 0; h < pParams->height[num_planes]; h++) {
             memcpy((void *)(src_data+offset+h*pParams->bytesPerPix[num_planes]*pParams->width[num_planes]),
                    (void *)((char *)surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[num_planes]+h*pParams->pitch[num_planes]),
                    pParams->bytesPerPix[num_planes]*pParams->width[num_planes]
                    );
            }
        }
        // NOTE(review): for a CPU read one would expect NvBufSurfaceSyncForCpu
        // *before* the memcpy rather than SyncForDevice after it -- confirm
        // against the NvBufSurface API docs.
        NvBufSurfaceSyncForDevice (surface, -1, -1);
        NvBufSurfaceUnMap (surface, -1, -1);
#else
        // dGPU: the surface lives in device memory; copy it to the host.
        // NOTE(review): the cudaMemcpy return value is not checked.
        cudaMemcpy((void*)src_data,
                   (void*)surface->surfaceList[frame_meta->batch_id].dataPtr,
                   surface->surfaceList[frame_meta->batch_id].dataSize,
                   cudaMemcpyDeviceToHost);
#endif
        dump_img(src_data,
                 surface->surfaceList[frame_meta->batch_id].width,
                 surface->surfaceList[frame_meta->batch_id].height,
                 seg_meta_data, frame_meta->source_id, frame_meta->frame_num);

        // NULL guard is redundant (free(NULL) is a no-op) but kept as-is.
        if(src_data != NULL) {
            free(src_data);
            src_data = NULL;
        }
    }
    gst_buffer_unmap (buf, &in_map_info);
#endif
2 Likes

Hi @bcao,
I’m trying to save both the frame when initially captured by the camera, with a timestamp, as well as it with the superimposed bounding boxes…both within the deepstream-app. Should I use the code in #24 and if so, which source file should it reside in?
Many thanks

Hi paulvf269,

Please open a new topic for your issue. Thanks

hi bcao:
Hello, can you share the dump_img function so that I can see if the image is saved correctly, thank you

Hello,
May you share dump_img function, also I got the error that the NvBufSurface Data type is not defined

1 Like

hi @cbstryker , did you ever manage to figure this out? I’m running into the same issue as you — I also get a grayscale image. Can you share the solution with me? Thank you.

@Rabeb This should help you get you what you need:

/* NOTE(review): probe-body fragment (not a complete function). Converts the
 * batch's NVMM surface into a freshly created RGB pitch-linear surface in
 * CUDA unified memory, then maps it so the CPU can read the pixels through
 * dst_surface->surfaceList[0].mappedAddr.addr[0]. */
GstMapInfo in_map_info;
NvBufSurface *surface = NULL;

memset (&in_map_info, 0, sizeof (in_map_info));
if (!gst_buffer_map (buf, &in_map_info, GST_MAP_READ)) {
    g_print ("Error: Failed to map gst buffer\n");
    // NOTE(review): unmapping a buffer whose map call failed is incorrect
    // per the GstBuffer docs; this unmap should be removed.
    gst_buffer_unmap (buf, &in_map_info);
    return GST_PAD_PROBE_OK;
}
cudaError_t cuda_err;

NvBufSurfTransformRect src_rect, dst_rect;
surface = (NvBufSurface *) in_map_info.data;

int batch_size= surface->batchSize;

// Full-frame crop rectangles: transform the whole first surface unchanged.
src_rect.top   = 0;
src_rect.left  = 0;
src_rect.width = (guint) surface->surfaceList[0].width;
src_rect.height= (guint) surface->surfaceList[0].height;

dst_rect.top   = 0;
dst_rect.left  = 0;
dst_rect.width = (guint) surface->surfaceList[0].width;
dst_rect.height= (guint) surface->surfaceList[0].height;

NvBufSurfTransformParams nvbufsurface_params;
nvbufsurface_params.src_rect = &src_rect;
nvbufsurface_params.dst_rect = &dst_rect;
nvbufsurface_params.transform_flag =  NVBUFSURF_TRANSFORM_CROP_SRC | NVBUFSURF_TRANSFORM_CROP_DST;
nvbufsurface_params.transform_filter = NvBufSurfTransformInter_Default;

NvBufSurface *dst_surface = NULL;
NvBufSurfaceCreateParams nvbufsurface_create_params;

// An intermediate buffer for NV12/RGBA to BGR conversion  will be
// required. Can be skipped if custom algorithm can work directly on NV12/RGBA
nvbufsurface_create_params.gpuId  = surface->gpuId;
nvbufsurface_create_params.width  = (gint) surface->surfaceList[0].width;
nvbufsurface_create_params.height = (gint) surface->surfaceList[0].height;
nvbufsurface_create_params.size = 0;
// RGB output, pitch-linear layout, in CUDA unified memory so the CPU can
// read the result after mapping.
nvbufsurface_create_params.colorFormat = NVBUF_COLOR_FORMAT_RGB;
nvbufsurface_create_params.layout = NVBUF_LAYOUT_PITCH;
nvbufsurface_create_params.memType = NVBUF_MEM_CUDA_UNIFIED;

// NOTE(review): cuda_err is assigned here and below but never checked.
cuda_err = cudaSetDevice (surface->gpuId);

cudaStream_t cuda_stream;

// NOTE(review): this stream is created for every buffer and never destroyed
// (no cudaStreamDestroy in the fragment) -- a per-frame resource leak.
cuda_err=cudaStreamCreate (&cuda_stream);

// NOTE(review): dst_surface is likewise never released here; the caller must
// NvBufSurfaceUnMap + NvBufSurfaceDestroy it after consuming the pixels.
// create_result is also not checked.
int create_result = NvBufSurfaceCreate(&dst_surface,batch_size,&nvbufsurface_create_params);

NvBufSurfTransformConfigParams transform_config_params;
NvBufSurfTransform_Error err;

transform_config_params.compute_mode = NvBufSurfTransformCompute_Default;
transform_config_params.gpu_id = surface->gpuId;
transform_config_params.cuda_stream = cuda_stream;
err = NvBufSurfTransformSetSessionParams (&transform_config_params);

NvBufSurfaceMemSet (dst_surface, 0, 0, 0);
// Colour-convert/copy the source surface into the RGB destination surface.
err = NvBufSurfTransform (surface, dst_surface, &nvbufsurface_params);
if (err != NvBufSurfTransformError_Success) {
    g_print ("NvBufSurfTransform failed with error %d while converting buffer\n", err);
}
// Map for CPU access and sync so host reads see the transformed pixels.
// NOTE(review): gst_buffer_unmap (buf, &in_map_info) is never called in this
// fragment -- the caller must do it once the data has been consumed.
NvBufSurfaceMap (dst_surface, 0, 0, NVBUF_MAP_READ);
NvBufSurfaceSyncForCpu (dst_surface, 0, 0);

This is your pointer to the frame data:
dst_surface->surfaceList[0].mappedAddr.addr[0]

This is the size of the frame data:
dst_surface->surfaceList[0].dataSize

1 Like

thank you @cbstryker !!!

@bcao
@kayccc
@cshah
@cbstryker
@stiv.yakovenko
@neophyte1
Any updates please ?

@mahmoud.serour an update on what? All the questions in this thread have been answered.

May you share dump_img function, also I got the error that the NvBufSurface Data type is not defined

@mahmoud.serour I don’t know anything about that. My issue was with getting a grayscale image. I’ve posted the code that retrieves the correct image data, and none of it uses a “dump_img” function.

At this point you’re better off starting a new topic with explicit details about your specific issue.

Hey @mahmoud.serour,
Sorry if this is too late for you, I’m just posting this here for anyone who might want to use @bcao 's solution. In reply #24 there are two links below the code.
The second one includes a post with a link where dump_img is included. It’s just a function that converts the image using OpenCV and dumps/stores it in a file (it’s in C++, so if you want this in C I suggest you follow @cbstryker 's helpful solution). I’m copying the full code snippet here:
#ifdef DUMP_JPG
/* BGR colour triplets used to paint segmentation class ids: three bytes
 * (B, G, R) per class, 22 entries (66 bytes). overlayColor() indexes this as
 * class2BGR + (class_id + 3) * 3, so ids from -3 through 18 are valid.
 * Fixed: the array brackets were missing ("class2BGR = {...}" does not
 * declare an array and fails to compile). */
static unsigned char class2BGR[] = {
    0, 0, 0, 0, 0, 128, 0, 128, 0,
    0, 128, 128, 128, 0, 0, 128, 0, 128,
    128, 128, 0, 128, 128, 128, 0, 0, 64,
    0, 0, 192, 0, 128, 64, 0, 128, 192,
    128, 0, 64, 128, 0, 192, 128, 128, 64,
    128, 128, 192, 0, 64, 0, 0, 64, 128,
    0, 192, 0, 0, 192, 128, 128, 64, 0,
    192, 192, 0
};

/* Builds a BGRA overlay image that colour-codes a segmentation class map.
 *
 * mask:   height*width per-pixel class ids; ids are offset by +3 before the
 *         class2BGR lookup, so id -3 selects the first colour.
 * height: rows of the class map.
 * width:  columns of the class map.
 * Returns a CV_8UC4 cv::Mat owning its own pixel data (empty Mat on OOM).
 *
 * NOTE(review): class2BGR holds 22 colours (66 bytes), so class ids above 18
 * index past the table -- confirm against the model's class count.
 *
 * Fixes vs. the original: buffer_A was leaked (only B/G/R were freed), the
 * per-pixel g_print made the function unusably slow, and the malloc results
 * were never checked.
 */
static cv::Mat overlayColor(int* mask, int height, int width) {
    size_t n = (size_t)height * (size_t)width;
    unsigned char* buffer_B = (unsigned char*)malloc(n);
    unsigned char* buffer_G = (unsigned char*)malloc(n);
    unsigned char* buffer_R = (unsigned char*)malloc(n);
    unsigned char* buffer_A = (unsigned char*)malloc(n);

    if (buffer_B == NULL || buffer_G == NULL || buffer_R == NULL || buffer_A == NULL) {
        g_print("Error: failed to malloc overlay buffers\n");
        free(buffer_B);
        free(buffer_G);
        free(buffer_R);
        free(buffer_A);
        return cv::Mat();
    }

    for (size_t pix_id = 0; pix_id < n; pix_id++) {
        const unsigned char* color = class2BGR + ((mask[pix_id] + 3) * 3);
        buffer_B[pix_id] = color[0];
        buffer_G[pix_id] = color[1];
        buffer_R[pix_id] = color[2];
        buffer_A[pix_id] = 255;   // fully opaque
    }

    // cv::merge copies the channel data into `merged`, so the temporary
    // channel buffers can safely be freed afterwards.
    std::vector<cv::Mat> channels;
    channels.push_back(cv::Mat(height, width, CV_8UC1, buffer_B));
    channels.push_back(cv::Mat(height, width, CV_8UC1, buffer_G));
    channels.push_back(cv::Mat(height, width, CV_8UC1, buffer_R));
    channels.push_back(cv::Mat(height, width, CV_8UC1, buffer_A));
    cv::Mat merged;
    cv::merge(channels, merged);

    free(buffer_B);
    free(buffer_G);
    free(buffer_R);
    free(buffer_A);   // fixed: previously leaked

    return merged;
}

static void dump_jpg(char* src_data, int src_width, int src_height, NvDsInferSegmentationMeta* seg_meta, int source_id, int frame_number) {
char file_name[128];

cv::Mat map_mat = overlayColor((int*)seg_meta->class_map, seg_meta->height, seg_meta->width);
sprintf(file_name, "dump_map_stream%2d_frame%03d.jpg", source_id, frame_number);
cv::Mat map_resized;
cv::resize(map_mat, map_resized, cv::Size(src_width, src_height));
cv::imwrite(file_name, map_resized);

// NV12 source data
cv::Mat src_mat(src_height *3/2, src_width, CV_8UC1, (void*)src_data);
cv::Mat src_mat_BGRA;
cv::cvtColor(src_mat, src_mat_BGRA, CV_YUV2BGRA_NV12);
sprintf(file_name, "dump_orig_stream%2d_frame%03d.jpg", source_id, frame_number);
cv::imwrite(file_name, src_mat_BGRA);

cv::Mat dst_mat;

float alpha = 0.5;
float beta = 1.0 - alpha;
cv::addWeighted(src_mat_BGRA , alpha, map_resized, beta, 0.0, dst_mat);
sprintf(file_name, "dump_merged_stream%2d_%03d.jpg", source_id, frame_number);
cv::imwrite(file_name, dst_mat);
}
#endif

/* tiler_src_pad_buffer_probe will extract metadata received on the
 * segmentation src pad */
/* Buffer probe for the tiler src pad. When DUMP_JPG is defined, copies each
 * batched frame's raw data from the NvBufSurface to the host and writes it
 * (plus any segmentation overlay) to JPG files via dump_jpg().
 *
 * Always returns GST_PAD_PROBE_OK so the pipeline keeps flowing.
 *
 * Fixes vs. the original: no gst_buffer_unmap on a buffer whose map failed,
 * and seg_meta_data is reset per frame so a frame without segmentation meta
 * no longer reuses the previous frame's pointer.
 */
static GstPadProbeReturn
tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
gpointer u_data)
{
#ifdef DUMP_JPG
GstBuffer *buf = (GstBuffer *) info->data;
NvDsMetaList * l_frame = NULL;
NvDsMetaList * l_user_meta = NULL;
NvDsUserMeta *user_meta = NULL;
// Get original raw data
GstMapInfo in_map_info;
char* src_data = NULL;
if (!gst_buffer_map (buf, &in_map_info, GST_MAP_READ)) {
    /* Fixed: do not unmap a buffer whose map call failed. */
    g_print ("Error: Failed to map gst buffer\n");
    return GST_PAD_PROBE_OK;
}
/* For NVMM memory the mapped data is an NvBufSurface handle, not pixels. */
NvBufSurface *surface = (NvBufSurface *)in_map_info.data;

NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
  l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
    /* Fixed: reset per frame so a frame without segmentation meta does not
     * inherit the previous frame's pointer. */
    NvDsInferSegmentationMeta* seg_meta_data = NULL;
    /* Validate user meta */
    for (l_user_meta = frame_meta->frame_user_meta_list; l_user_meta != NULL;
        l_user_meta = l_user_meta->next) {
        user_meta = (NvDsUserMeta *) (l_user_meta->data);
        if (user_meta && user_meta->base_meta.meta_type == NVDSINFER_SEGMENTATION_META) {
            seg_meta_data = (NvDsInferSegmentationMeta*)user_meta->user_meta_data;
        }
    }

    /* Host staging buffer sized for this frame's full (padded) data. */
    src_data = (char*) malloc(surface->surfaceList[frame_meta->batch_id].dataSize);
    if(src_data == NULL) {
        g_print("Error: failed to malloc src_data \n");
        continue;
    }
    /* dGPU path: copy the frame from device memory to the host. */
    cudaMemcpy((void*)src_data,
               (void*)surface->surfaceList[frame_meta->batch_id].dataPtr,
               surface->surfaceList[frame_meta->batch_id].dataSize,
               cudaMemcpyDeviceToHost);
    dump_jpg(src_data,
             surface->surfaceList[frame_meta->batch_id].width,
             surface->surfaceList[frame_meta->batch_id].height,
             seg_meta_data, frame_meta->source_id, frame_meta->frame_num);

    free(src_data);   /* free(NULL) is a no-op, so no guard needed */
    src_data = NULL;
}
gst_buffer_unmap (buf, &in_map_info);
#endif
return GST_PAD_PROBE_OK;
}
1 Like

thanks for the code @cbstryker . I’m having trouble storing the image in a file. How do I deal with this after getting the pointer and size? Do I use OpenCV or is there another way in C?
Thanks for your help.