Extract image data from NvDsFrameMeta in Deepstream

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU) RTX4080
• DeepStream Version 6.4
Trying to extract image data from NvDsFrameMeta and encode to base64 char array so that I can send image data in mqtt.
I have the following code in deepstream_app.c.
The code is implemented in write_kitti_output.

The error is

deepstream_app.c:464:43: error: ‘NvDsFrameMeta’ {aka ‘struct _NvDsFrameMeta’} has no member named ‘input_buffer’
  464 |             GstBuffer *buffer = frame_meta->input_buffer;

How can I have buffer from frame_meta?

// Function to encode a byte array to base64
char *base64_encode(const unsigned char *src, size_t len) {
    static const char base64chars[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    char *out, *pos;
    const unsigned char *end, *in;
    size_t olen;

    olen = 4 * ((len + 2) / 3); // 3-byte blocks to 4-byte

    if (olen < len) {
        return NULL; // integer overflow
    }

    out = (char *)malloc(olen);
    if (out == NULL) {
        return NULL;
    }

    end = src + len;
    in = src;
    pos = out;
    while (end - in >= 3) {
        *pos++ = base64chars[in[0] >> 2];
        *pos++ = base64chars[((in[0] & 0x03) << 4) | (in[1] >> 4)];
        *pos++ = base64chars[((in[1] & 0x0f) << 2) | (in[2] >> 6)];
        *pos++ = base64chars[in[2] & 0x3f];
        in += 3;
    }

    if (end - in) {
        *pos++ = base64chars[in[0] >> 2];
        if (end - in == 1) {
            *pos++ = base64chars[(in[0] & 0x03) << 4];
            *pos++ = '=';
        } else {
            *pos++ = base64chars[((in[0] & 0x03) << 4) |
                                 (in[1] >> 4)];
            *pos++ = base64chars[(in[1] & 0x0f) << 2];
        }
        *pos++ = '=';
    }

    return out;
}



static void
write_kitti_output (AppCtx * appCtx, NvDsBatchMeta * batch_meta, NvBufSurface *ip_surf )
{
   for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
	NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
	guint stream_id = frame_meta->pad_index; 
	for (NvDsMetaList * l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
	      NvDsObjectMeta *obj = (NvDsObjectMeta *) l_obj->data;
	      int left = (int)obj->rect_params.left;
	      int top = (int)obj->rect_params.top;
	      int right = (int)(left + obj->rect_params.width);
	      int bottom = (int)(top + obj->rect_params.height);
	      float confidence = obj->confidence;
	      //printf ("%s 0.0 0 0.0 %f %f %f %f 0.0 0.0 0.0 0.0 0.0 0.0 0.0 %f\n", obj->obj_label, left, top, right, bottom, confidence);
	      if(strcmp(obj->obj_label,"person")==0)
	      {    
		       
	      }
	 } 
        //Image data management
        GstBuffer *buffer = frame_meta->input_buffer;
        if (!buffer) {
           continue;
        } 
        guint buffer_size = gst_buffer_get_size(buffer);
        g_print("Buffer size: %u\n", buffer_size); 
        //Map the buffer to CUDA memory
        NvBufSurface *surface = NULL;
        if (!gst_nvbuffer_surface_map (buffer, &surface)) {
            g_printerr("Failed to map GstBuffer to NvBufSurface\n");
            continue;
        }
        // Copy the CUDA memory to CPU-accessible memory
        void *cpu_buffer = NULL;
        size_t cpu_size = surface->surfaceList[0].dataSize;
        cudaError_t cuda_err = cudaMallocHost(&cpu_buffer, cpu_size);
        if (cuda_err != cudaSuccess) {
            g_printerr("Failed to allocate CPU memory: %s\n", cudaGetErrorString(cuda_err));
            gst_buffer_unmap(buffer, surface);
            continue;
        }
        cudaMemcpy(cpu_buffer, surface->surfaceList[0].mappedAddr.addr[0], cpu_size, cudaMemcpyDeviceToHost);
        // Now you have the image data in cpu_buffer
        // You can perform further processing on it as needed
        char *base64Image = base64_encode(cpu_buffer, strlen((char *)cpu_buffer));
        if (base64Image == NULL) {
             fprintf(stderr, "Failed to encode image data to base64\n");
             return -1;
        }            
        // Free the CUDA memory
        cudaFreeHost(cpu_buffer);
        // Unmap the buffer
        gst_buffer_unmap(buffer, surface);             
     } 
      
     number_of_frames = 0; 
  }
    
}

There is no “input_buffer” in struct NvDsFrameMeta, you may check the definition in “sources/includes/nvdsmeta.h”.

You may refer to the code in topic " How can I extract images from surface objects if there is more than 1 stream src - Intelligent Video Analytics / DeepStream SDK - NVIDIA Developer Forums" to access the frame data.

The sample is implemented in gstds_example plugin.
I need to implement in deepstream_app.c.
So what was done is

static void
write_kitti_output (AppCtx * appCtx, NvDsBatchMeta * batch_meta, NvBufSurface *ip_surf )
{
  
    for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {   
	NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
	if(frame_meta->frame_num % 2 == 0){
	    guint stream_id = frame_meta->pad_index; 
	    pthread_mutex_lock(&num_people_ptr_lock); 
	    for (int j = 0; j < num_people_ptr[stream_id].num_rois; j++) 
	       num_people_ptr[stream_id].rois[j].numpeople = 0; 
	    pthread_mutex_unlock(&num_people_ptr_lock);  
	    for (NvDsMetaList * l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
	      NvDsObjectMeta *obj = (NvDsObjectMeta *) l_obj->data;
	      int left = (int)obj->rect_params.left;
	      int top = (int)obj->rect_params.top;
	      int right = (int)(left + obj->rect_params.width);
	      int bottom = (int)(top + obj->rect_params.height);
	      float confidence = obj->confidence;
	      //printf ("%s 0.0 0 0.0 %f %f %f %f 0.0 0.0 0.0 0.0 0.0 0.0 0.0 %f\n", obj->obj_label, left, top, right, bottom, confidence);
	      if(strcmp(obj->obj_label,"person")==0)
	      {    
	      }
	      }	    
            
            
      // Copy the CUDA memory to CPU-accessible memory
      void *cpu_buffer = NULL;
      size_t cpu_size = ip_surf->surfaceList[frame_meta->batch_id].planeParams.height[0]*ip_surf->surfaceList[frame_meta->batch_id].planeParams.width[0]*4;
      cudaError_t cuda_err = cudaMallocHost(&cpu_buffer, cpu_size);
      if (cuda_err != cudaSuccess) {
                g_printerr("Failed to allocate CPU memory: %s\n", cudaGetErrorString(cuda_err));
                continue;
      }
      cudaMemcpy(cpu_buffer, ip_surf->surfaceList[frame_meta->batch_id].mappedAddr.addr[0], cpu_size, cudaMemcpyDeviceToHost);
      // Now you have the image data in cpu_buffer
      // You can perform further processing on it as needed
      char *base64Image = base64_encode(cpu_buffer, strlen((char *)cpu_buffer));
      if (base64Image == NULL) {
                 fprintf(stderr, "Failed to encode image data to base64\n");
                 return -1;
      }            
      memcpy(num_people_ptr[stream_id].base64Image_, base64Image, sizeof(base64Image));   
      g_print("size %d\n",  sizeof(base64Image));   
      // Free the CUDA memory
      cudaFreeHost(cpu_buffer);
       
  }
  number_of_frames = number_of_frames+1; 
}

But when check sizeof(base64Image)), it is 8. How can I get image data to cpu?

Please refer to our open source code deepstream-test4, it can support the feature.

There is no update from you for a period, assuming this is not an issue anymore. Hence we are closing this topic. If need further support, please open a new one. Thanks