How to save the detected face as a .jpg or .png

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU): Jetson
• DeepStream Version: 6.0
• JetPack Version (valid for Jetson only): 4.6.1
• TensorRT Version: 8.2
• NVIDIA GPU Driver Version (valid for GPU only):
• Issue Type (questions, new requirements, bugs): question
• How to reproduce the issue? (This is for bugs. Include which sample app is used, the configuration file contents, the command line used, and other details for reproducing.)
• Requirement details (This is for new requirements. Include the module name - for which plugin or for which sample application - and the function description.)

Hi community,
I used deepstream_tao/faciallandmarks as a basis and then added ArcFace for face recognition.

I have tried to save every detected face, with its corresponding label, as a .jpg file. The label is correct, but the image that gets saved is a green screen.
Can anybody help me?

```
static GstPadProbeReturn
sgie_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer u_data) {
std::vector<float> *dynamicListOfVectors = static_cast<std::vector<float> *>(u_data);
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(GST_BUFFER(info->data));

GstMapInfo in_map_info;
memset (&in_map_info, 0, sizeof (in_map_info));

// Map the buffer contents and get the pointer to NvBufSurface.
if (!gst_buffer_map (GST_BUFFER (info->data), &in_map_info, GST_MAP_READ)) {
    g_printerr ("Failed to map GstBuffer\n");
    return GST_PAD_PROBE_PASS;
}
NvBufSurface *in_surf = (NvBufSurface *) in_map_info.data;



for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;

    for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
        NvDsObjectMeta *obj_meta = (NvDsObjectMeta *)l_obj->data;

        for (NvDsMetaList *l_user = obj_meta->obj_user_meta_list; l_user != NULL; l_user = l_user->next) {
            NvDsUserMeta *user_meta = (NvDsUserMeta *)l_user->data;
            if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
                continue;

            NvDsInferTensorMeta *metaj = (NvDsInferTensorMeta *)user_meta->user_meta_data;
            if (metaj->unique_id == 2) {
                NvDsInferLayerInfo *info = &metaj->output_layers_info[0];
                info->buffer = metaj->out_buf_ptrs_host[0];

                float* arcface_output = (float*)info->buffer;
                // auto add = static_cast<float *>(*(metaj->out_buf_ptrs_host));
                // for (int i=0; i < 512; i++){
                // std::cout << *(add+i) << std::endl;
                // }
                int feature_length = 512;
                if (info->inferDims.d[0] == feature_length) {
                    std::vector<float> arc_out(arcface_output, arcface_output + feature_length);
                    float norm = std::sqrt(std::inner_product(arc_out.begin(), arc_out.end(), arc_out.begin(), 0.0f));
                    std::transform(arc_out.begin(), arc_out.end(), arc_out.begin(), [norm](float val) { return val / norm; });

                    float maxSimilarity = -2.0f;
                    int maxIndex = -1;
                    
                    // Loop over dynamicListOfVectors in chunks of 512
                    for (int i = 0; i < dynamicListOfVectors->size() / feature_length; i++) {
                        std::vector<float> gallery_embedding(
                            dynamicListOfVectors->begin() + i * feature_length,
                            dynamicListOfVectors->begin() + (i + 1) * feature_length);
                        std::cout << "Size of arc_out : " << arc_out.size() << std::endl;
                        std::cout << "Size of gallery_embedding in step " << i << ": " << gallery_embedding.size() << std::endl;
                        float norm = std::sqrt(std::inner_product(gallery_embedding.begin(), gallery_embedding.end(), gallery_embedding.begin(), 0.0f));
                        std::transform(gallery_embedding.begin(), gallery_embedding.end(), gallery_embedding.begin(), [norm](float val) { return val / norm; });

                        float similarity = computeCosineSimilarity(arc_out, gallery_embedding);
                        std::cout << "Similarity with vector " << i << ": " << similarity << std::endl;

                        if (similarity > maxSimilarity) {
                            maxSimilarity = similarity;
                            maxIndex = i;
                        }
                    }

                    std::string labelStr;
                    if (maxSimilarity > 0.18) {
                        labelStr = "index_" + std::to_string(maxIndex);
                    } else {
                        labelStr = "unknown";
                    }
                    // Convert NV12 to BGR
                    cv::Mat nv12Img(in_surf->surfaceList[0].height * 3 / 2, in_surf->surfaceList[0].width, CV_8UC1, in_surf->surfaceList[0].dataPtr);
                    cv::Mat bgrImg;
                    cv::cvtColor(nv12Img, bgrImg, cv::COLOR_YUV2BGR_NV12);

                    // Validate and crop to the detected face
                    NvOSD_RectParams *rect_params = &(obj_meta->rect_params);
                    if (rect_params->left >= 0 && rect_params->top >= 0 &&
                        rect_params->width > 0 && rect_params->height > 0 &&
                        rect_params->left + rect_params->width <= bgrImg.cols &&
                        rect_params->top + rect_params->height <= bgrImg.rows) {
                        cv::Rect rect(rect_params->left, rect_params->top, rect_params->width, rect_params->height);
                        cv::Mat croppedImg = bgrImg(rect);

                        // Save the face image with a dynamic filename based on labelStr
                        std::string filename = labelStr + ".jpg";
                        if (!cv::imwrite(filename, croppedImg)) {
                            std::cerr << "Failed to save image: " << filename << std::endl;
                        }
                    } else {
                        std::cerr << "Invalid coordinates for cropping." << std::endl;
                    }
                    // save_face_image(src_surf, obj_meta, labelStr);



                    NvDsClassifierMeta *classifier_meta = nvds_acquire_classifier_meta_from_pool(batch_meta);
                    classifier_meta->unique_component_id = 2;

                    NvDsLabelInfo *label_info = nvds_acquire_label_info_meta_from_pool(batch_meta);
                    label_info->result_class_id = -1;
                    strcpy(label_info->result_label, labelStr.c_str());

                    gchar *temp = obj_meta->text_params.display_text;
                    obj_meta->text_params.display_text = g_strconcat(temp, " ", label_info->result_label, nullptr);
                    g_free(temp);

                    nvds_add_label_info_meta_to_classifier(classifier_meta, label_info);
                    nvds_add_classifier_meta_to_object(obj_meta, classifier_meta);
                } else {
                    g_print("Error: Unexpected tensor dimensions for ArcFace output!\n");
                }
            }
        }
    }
}
gst_buffer_unmap(GST_BUFFER(info->data), &in_map_info);
return GST_PAD_PROBE_OK;

}

```
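
Note: computeCosineSimilarity() is referenced above but not included in the snippet. A minimal sketch of such a helper, assuming two equal-length float vectors, could look like the following; since both embeddings are already L2-normalized in the code above, it effectively reduces to a dot product:

```
#include <cmath>
#include <numeric>
#include <vector>

// Sketch only, not part of the original snippet: cosine similarity of two
// equal-length float vectors. The general form divides by both norms; with
// pre-normalized inputs the denominator is ~1.
static float computeCosineSimilarity (const std::vector<float> &a, const std::vector<float> &b)
{
    float dot   = std::inner_product (a.begin (), a.end (), b.begin (), 0.0f);
    float normA = std::sqrt (std::inner_product (a.begin (), a.end (), a.begin (), 0.0f));
    float normB = std::sqrt (std::inner_product (b.begin (), b.end (), b.begin (), 0.0f));
    return dot / (normA * normB);
}
```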

You can refer to our demo code for saving the .jpg image: sources\apps\sample_apps\deepstream-image-meta-test\deepstream_image_meta_test.c

Can you kindly mention the function name, or the part of deepstream_image_meta_test.c, that actually saves the desired frame as a .jpg file?

```
static GstPadProbeReturn
pgie_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer ctx)
{
...
    if(frame_count <= 10) {
      NvDsObjEncUsrArgs frameData = { 0 };
      /* Preset */
      frameData.isFrame = 1;
      /* To be set by user */
      frameData.saveImg = save_img;
      frameData.attachUsrMeta = attach_user_meta;
      /* Set if Image scaling Required */
      frameData.scaleImg = FALSE;
      frameData.scaledWidth = 0;
      frameData.scaledHeight = 0;
      /* Quality */
      frameData.quality = 80;
      /* Main Function Call */
      nvds_obj_enc_process (ctx, &frameData, ip_surf, NULL, frame_meta);
    }
...
}
```

You can meet your needs by setting the parameters of frameData.
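
For saving just the detected face (the object crop) instead of the whole frame, the same sample also shows an object-level call. Below is a minimal sketch, assuming the nvds_obj_encode API used by the sample (nvds_obj_enc_process() taking an obj_meta argument, nvds_obj_enc_finish()); the probe name and the field values are illustrative, not taken from the sample verbatim:

```
#include <gst/gst.h>
#include "gstnvdsmeta.h"
#include "nvbufsurface.h"
#include "nvds_obj_encode.h"

/* Sketch: encode every detected object (e.g. a face) of a batch as JPEG.
 * Assumes ctx was created with nvds_obj_enc_create_context() in main() and
 * passed as the probe's user data. */
static GstPadProbeReturn
pgie_src_pad_buffer_probe_obj (GstPad * pad, GstPadProbeInfo * info, gpointer ctx)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  GstMapInfo map_info;

  if (!gst_buffer_map (buf, &map_info, GST_MAP_READ))
    return GST_PAD_PROBE_OK;

  NvBufSurface *ip_surf = (NvBufSurface *) map_info.data;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame; l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    for (NvDsMetaList * l_obj = frame_meta->obj_meta_list; l_obj; l_obj = l_obj->next) {
      NvDsObjectMeta *obj_meta = (NvDsObjectMeta *) l_obj->data;

      NvDsObjEncUsrArgs objData = { 0 };
      objData.saveImg = TRUE;        /* write the JPEG to disk directly */
      objData.attachUsrMeta = TRUE;  /* also attach the encoded bytes as user meta */
      objData.scaleImg = FALSE;
      objData.scaledWidth = 0;
      objData.scaledHeight = 0;
      objData.quality = 80;
      /* isFrame stays 0: encode the object crop, not the full frame.
       * Passing obj_meta makes the encoder crop to the bounding box. */
      nvds_obj_enc_process ((NvDsObjEncCtxHandle) ctx, &objData, ip_surf, obj_meta, frame_meta);
    }
  }
  /* Flush all pending encode jobs for this batch. */
  nvds_obj_enc_finish ((NvDsObjEncCtxHandle) ctx);

  gst_buffer_unmap (buf, &map_info);
  return GST_PAD_PROBE_OK;
}
```

In the sample, the context handle is created with nvds_obj_enc_create_context () in main () and destroyed with nvds_obj_enc_destroy_context () after the pipeline stops; note that newer DeepStream releases add a GPU id argument to the create call.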


Hi,
Thanks, the issue is resolved for me.
For future reference, if anybody wants to save a .jpg file of a detected object, I found an easy solution by taking inspiration from "sources\apps\sample_apps\deepstream-image-meta-test\deepstream_image_meta_test.c":
First: modify pgie_src_pad_buffer_probe as above.
Second: modify osd_sink_pad_buffer_probe like the code in deepstream_image_meta_test.c (see the sketch after this list).
Third: check the headers and the main function for the appropriate includes or initialization, if needed.
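
For the second step, the relevant part of osd_sink_pad_buffer_probe in deepstream_image_meta_test.c walks each object's user meta, finds the NVDS_CROP_IMAGE_META entry that nvds_obj_enc_process attaches when attachUsrMeta is set, and writes the encoded JPEG bytes to a file. A minimal sketch, assuming the NvDsObjEncOutParams layout from nvds_obj_encode.h; the helper name and the filename pattern are illustrative:

```
#include <stdio.h>
#include "gstnvdsmeta.h"
#include "nvds_obj_encode.h"

/* Sketch: inside osd_sink_pad_buffer_probe, once you have an obj_meta,
 * dump any attached encoded JPEG to disk. */
static void
dump_encoded_object (NvDsObjectMeta * obj_meta, guint frame_num, guint obj_num)
{
  for (NvDsMetaList * l_user = obj_meta->obj_user_meta_list; l_user; l_user = l_user->next) {
    NvDsUserMeta *user_meta = (NvDsUserMeta *) l_user->data;
    if (user_meta->base_meta.meta_type != NVDS_CROP_IMAGE_META)
      continue;

    NvDsObjEncOutParams *enc_jpeg = (NvDsObjEncOutParams *) user_meta->user_meta_data;

    gchar filename[64];
    g_snprintf (filename, sizeof (filename), "face_%u_%u.jpg", frame_num, obj_num);

    FILE *fp = fopen (filename, "wb");
    if (fp) {
      fwrite (enc_jpeg->outBuffer, sizeof (guint8), enc_jpeg->outLen, fp);
      fclose (fp);
    }
  }
}
```

This only produces output when attachUsrMeta was set to TRUE in the upstream probe; with saveImg alone, the encoder already writes the files itself.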
