Please provide complete information as applicable to your setup.
• Hardware Platform: Jetson AGX Orin
• DeepStream Version: 7.0
• JetPack Version: 6.0 (both 6.0+b106 and 6.0+b87 are installed), L4T 36.3.0
• TensorRT Version: 8.6.2
I am working on the following kind of pipeline:
face detection (generates bboxes and keypoints) → face recognition (generates a 512-value embedding) → face swap (generates the swapped face image)
My face swap model needs two inputs: one is the image and the other is the 1x512-dimensional embedding from the face recognition model, so I am using two custom preprocess libs, one for each input layer. Everything works fine with static embeddings that I added manually,
but when I try to access the embeddings from the recognition model inside the preprocess lib for the embedding input layer, I am not able to get them.
I tried both ways:
1. Attaching the raw tensor data as meta using output-tensor-meta=1
2. Using obj_meta->classifier_meta_list
but it shows NULL in both cases and I am not able to understand why.
Please help me with this.
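For reference, this is roughly the access pattern I expect to work for attempt 1 inside CustomTensorPreparation. It is only a minimal sketch: it assumes output-tensor-meta=1 on the recognition SGIE so that gst-nvinfer attaches NvDsInferTensorMeta to the object's user meta list, and the recog_gie_unique_id argument is just a placeholder for the gie-unique-id of my recognition model.

#include "nvdsmeta.h"
#include "gstnvdsinfer.h"

// Sketch: look for the recognition model's raw output tensor attached as
// NVDSINFER_TENSOR_OUTPUT_META on the object's user meta list and return a
// pointer to the 512 FP32 embedding values.
static float *find_embedding(NvDsObjectMeta *obj_meta, guint recog_gie_unique_id)
{
  for (NvDsMetaList *l_user = obj_meta->obj_user_meta_list; l_user != nullptr;
       l_user = l_user->next) {
    NvDsUserMeta *user_meta = (NvDsUserMeta *) l_user->data;
    if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
      continue;

    NvDsInferTensorMeta *tmeta = (NvDsInferTensorMeta *) user_meta->user_meta_data;
    if (tmeta->unique_id != recog_gie_unique_id)
      continue;

    // Expecting a single 1x512 FP32 output layer from the recognition model.
    NvDsInferLayerInfo &layer = tmeta->output_layers_info[0];
    g_print("found tensor meta: layer=%s elems=%u\n",
        layer.layerName, layer.inferDims.numElements);
    return (float *) tmeta->out_buf_ptrs_host[0];
  }
  return nullptr; // this is what I always end up with inside the preprocess lib
}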
These are the files I am using and the lib code that I modified:
main_app_config.txt (4.1 KB)
recog_config.txt (479 Bytes)
scrfd_config.txt (878 Bytes)
secondary_preprocess_nonimg.txt (2.5 KB)
secondary_preprocess.txt (2.5 KB)
NvDsPreProcessStatus CustomTensorPreparation(
    CustomCtx *ctx, NvDsPreProcessBatch *batch, NvDsPreProcessCustomBuf *&buf,
    CustomTensorParams &tensorParam, NvDsPreProcessAcquirer *acquirer)
{
  // Start with NOT_READY; only the stream sync at the end decides success.
  NvDsPreProcessStatus status = NVDSPREPROCESS_TENSOR_NOT_READY;

  // Acquire a buffer from the tensor pool.
  buf = acquirer->acquire();
  if (!buf) {
    std::cerr << "Error: Failed to acquire buffer from tensor pool." << std::endl;
    return status;
  }

  // Walk the batch meta attached to the input GstBuffer.
  GstBuffer *inbuf = batch->inbuf;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(inbuf);

  // Iterate over frames in the batch.
  for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != nullptr;
       l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = reinterpret_cast<NvDsFrameMeta *>(l_frame->data);

    // Iterate over objects in the frame.
    for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj != nullptr;
         l_obj = l_obj->next) {
      NvDsObjectMeta *obj_meta = (NvDsObjectMeta *) l_obj->data;
      if (!obj_meta) continue;

      // Both of these print, i.e. both lists come back NULL here.
      if (!obj_meta->classifier_meta_list) { std::cout << "no recog meta" << std::endl; } // continue;
      if (!obj_meta->obj_user_meta_list)   { std::cout << "no user meta"  << std::endl; } // continue;

      // Access classifier metadata attached by the recognition SGIE.
      for (NvDsMetaList *l_classifier = obj_meta->classifier_meta_list;
           l_classifier != nullptr; l_classifier = l_classifier->next) {
        NvDsClassifierMeta *classifier_meta = (NvDsClassifierMeta *) l_classifier->data;
        // if (classifier_meta->unique_component_id != 3) { std::cout << "no recog meta"; } // continue;

        // Access the label information.
        for (NvDsMetaList *l_label = classifier_meta->label_info_list;
             l_label != nullptr; l_label = l_label->next) {
          NvDsLabelInfo *label_info = (NvDsLabelInfo *) l_label->data;

          // The label is stored either in pResult_label (dynamically allocated)
          // or in the fixed-size result_label array.
          const gchar *label = label_info->pResult_label ?
              label_info->pResult_label : label_info->result_label;

          g_print("Label: %s, Confidence: %f, labelid: %d\n",
              label, label_info->result_prob, label_info->label_id);
        }
      }
    }
  }

  // Synchronize the CUDA stream used by the tensor preparation.
  status = ctx->tensor_impl->syncStream();
  if (status != NVDSPREPROCESS_SUCCESS) {
    std::cerr << "Custom Lib: Cuda Stream Synchronization failed" << std::endl;
    acquirer->release(buf);
  }

  return status;
}
NvDsPreProcessStatus
CustomTransformation(NvBufSurface *in_surf, NvBufSurface *out_surf,
    CustomTransformParams &params)
{
  return NVDSPREPROCESS_SUCCESS;
}
However, I checked that the raw tensor output of the recognition model is accessible in the gie_processing_done_buf_prob function of deepstream-app.
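The same pattern as in the sketch above does find the tensor meta there; roughly:

// Inside gie_processing_done_buf_prob, while iterating frames -> objects:
for (NvDsMetaList *l_user = obj_meta->obj_user_meta_list; l_user != nullptr;
     l_user = l_user->next) {
  NvDsUserMeta *user_meta = (NvDsUserMeta *) l_user->data;
  if (user_meta->base_meta.meta_type == NVDSINFER_TENSOR_OUTPUT_META) {
    NvDsInferTensorMeta *tmeta = (NvDsInferTensorMeta *) user_meta->user_meta_data;
    g_print("probe: tensor meta from gie-unique-id=%u, output layers=%u\n",
        tmeta->unique_id, tmeta->num_output_layers);
  }
}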