Please provide complete information as applicable to your setup.
**• Hardware Platform (Jetson / GPU)**: Jetson Xavier AGX
**• DeepStream Version**: 5.1.0
**• JetPack Version (valid for Jetson only)**: 4.5-b129
**• TensorRT Version**: 7.1.3-1
**• Issue Type (questions, new requirements, bugs)**: questions
I trained a Maskrcnn model with my own data on TLT by following this link:
The model has been deployed with deepstream-app successfully by following the example deepstream_app_source1_mrcnn.txt and config_infer_primary_mrcnn.txt.
My requirement is to get an output video or image sequence with the inference masks drawn on it, but without the bounding boxes and other information. I also need to change the color and opacity of the masks to something like solid white. So I referred to deepstream_python_apps/apps/deepstream-ssd-parser at master · NVIDIA-AI-IOT/deepstream_python_apps to register a pgie src-pad probe function to get the tensor meta and transfer it to numpy. But I don't know how to save the results as images as required. Could you please give me some support?
Here’s my pgie_src_pad_buffer_probe:
def pgie_src_pad_buffer_probe(pad, info, u_data):
    """Probe on the pgie source pad: pull raw inference tensors into numpy.

    Walks every frame in the batch, then every user meta attached to the
    frame, and for each NVDSINFER_TENSOR_OUTPUT_META copies each output
    layer's buffer into a numpy float32 array (frame_outputs).

    Args:
        pad: the GstPad the probe is attached to (unused here).
        info: Gst.PadProbeInfo carrying the GstBuffer.
        u_data: opaque user data passed at probe registration (unused).

    Returns:
        Gst.PadProbeReturn.OK so the buffer continues downstream.
    """
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    # Parser parameters; currently unused in this probe but kept so the
    # commented-out custom-parse path below can be re-enabled unchanged.
    detection_params = DetectionParam(CLASS_NB, ACCURACY_ALL_CLASS)
    box_size_param = BoxSizeParam(IMAGE_HEIGHT, IMAGE_WIDTH,
                                  MIN_BOX_WIDTH, MIN_BOX_HEIGHT)
    nms_param = NmsParam(TOP_K, IOU_THRESHOLD)
    label_names = get_label_names_from_file("mrcnn_labels.txt")

    while l_frame is not None:
        try:
            # Casting keeps ownership of the underlying memory in the C
            # code, so the Python garbage collector leaves it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        l_user = frame_meta.frame_user_meta_list
        frame_number = frame_meta.frame_num

        while l_user is not None:
            try:
                user_meta = pyds.NvDsUserMeta.cast(l_user.data)
            except StopIteration:
                break

            if (user_meta.base_meta.meta_type
                    != pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META):
                # BUG FIX: the original `continue`d here WITHOUT advancing
                # l_user, which loops forever as soon as any non-tensor
                # user meta is attached to the frame. Advance first.
                try:
                    l_user = l_user.next
                except StopIteration:
                    break
                continue

            tensor_meta = pyds.NvDsInferTensorMeta.cast(user_meta.user_meta_data)

            # INFO: [Implicit Engine Info]: layers num: 3
            #   0 INPUT  kFLOAT Input                             3x832x1344
            #   1 OUTPUT kFLOAT generate_detections               100x6
            #   2 OUTPUT kFLOAT mask_head/mask_fcn_logits/BiasAdd 100x14x28x28
            frame_outputs = []  # BUG FIX: was `frame_outputs = ` — the `[]` was eaten by markdown
            for i in range(tensor_meta.num_output_layers):
                layer = pyds.get_nvds_LayerInfo(tensor_meta, i)
                # Reinterpret the raw device-side buffer as float32 and copy
                # it out, so the array survives after the meta is released.
                ptr = ctypes.cast(pyds.get_ptr(layer.buffer),
                                  ctypes.POINTER(ctypes.c_float))
                v = np.array(
                    np.ctypeslib.as_array(ptr, shape=(layer.dims.numElements,)),
                    copy=True,
                )
                frame_outputs.append(v)

            # # Boxes in the tensor meta are in network resolution, found in
            # # tensor_meta.network_info. Use it to scale boxes to the input
            # # frame resolution before adding object meta:
            # layers_info = []
            # for i in range(tensor_meta.num_output_layers):
            #     layers_info.append(pyds.get_nvds_LayerInfo(tensor_meta, i))
            # frame_object_list = nvds_infer_parse_custom_tf_ssd(
            #     layers_info, detection_params, box_size_param, nms_param
            # )
            # for frame_object in frame_object_list:
            #     add_obj_meta_to_frame(frame_object, batch_meta,
            #                           frame_meta, label_names)

            try:
                l_user = l_user.next
            except StopIteration:
                break

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK