Converting Mask R-CNN to TensorRT

Did you follow this:

Modify the conv2d_transpose conversion function in the UFF converter, for example in
/usr/local/lib/python3.5/dist-packages/uff/converters/tensorflow/converter_functions.py
or
/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter_functions.py, so that the call reads:

uff_graph.conv_transpose(
    inputs[0], inputs[2], inputs[1],
    strides, padding,
    dilation=None, number_groups=number_groups,
    left_format=lhs_fmt, right_format=rhs_fmt,
    name=name, fields=fields
    )
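
If uff is installed somewhere else, a quick way to locate converter_functions.py (assuming the uff package is importable in the same Python environment) is:

import os
import uff

# converter_functions.py lives under converters/tensorflow/ inside the
# installed uff package
print(os.path.join(os.path.dirname(uff.__file__),
                   "converters", "tensorflow", "converter_functions.py"))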

Yes, I did, and that is how I was able to successfully convert the .h5 to .uff.

Here is how I convert my .h5 to .uff:

from keras.models import model_from_json, Model
from keras import backend as K
from keras.layers import Input, Lambda
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from mrcnn.model import *
import mrcnn.model as modellib
from mrcnn.config import Config
import sys
import os
import argparse
import uff

ROOT_DIR = os.path.abspath("./")
LOG_DIR = os.path.join(ROOT_DIR, "logs")

def parse_command_line_arguments(args=None):
    parser = argparse.ArgumentParser(prog='keras_to_trt', description='Convert trained keras .hdf5 model to trt .uff')

    parser.add_argument(
        '-w',
        '--weights',
        type=str,
        default=None,
        required=True,
        help="The checkpoint weights file of keras model."
    )

    parser.add_argument(
        '-o',
        '--output_file',
        type=str,
        default=None,
        required=True,
        help="The path to output .uff file."
    )

    parser.add_argument(
        '-l',
        '--list-nodes',
        action='store_true',
        help="show list of nodes contained in converted pb"
    )

    parser.add_argument(
        '-p',
        '--preprocessor',
        type=str,
        default=None,
        help="Path to the preprocessing script that converts TF nodes to TRT plugins."
    )

    return parser.parse_args(args)

class CocoConfig(Config):

    NAME = 'nucleus'

    GPU_COUNT = 1

    # Adjust depending on your GPU memory
    IMAGES_PER_GPU = 4  # batch size

    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # Background + nucleus

    # Number of training and validation steps per epoch
    STEPS_PER_EPOCH = 1000
    VALIDATION_STEPS = 50

    # Don't exclude based on confidence. Since we have two classes,
    # 0.5 is the minimum anyway as it picks between nucleus and BG.
    DETECTION_MIN_CONFIDENCE = 0

    # Backbone network architecture
    # Supported values are: resnet50, resnet101
    BACKBONE = 'resnet50'

    # Input image resizing
    # Random crops of size 512x512
    IMAGE_RESIZE_MODE = 'crop'
    IMAGE_MIN_DIM = 256  # 512
    IMAGE_MAX_DIM = 256  # 512
    #IMAGE_MIN_SCALE = 2.0

    # Length of square anchor side in pixels
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)

    # ROIs kept after non-maximum suppression (training and inference)
    POST_NMS_ROIS_TRAINING = 1000
    POST_NMS_ROIS_INFERENCE = 20000 # 2000

    # Non-max suppression threshold to filter RPN proposals.
    # You can increase this during training to generate more proposals.
    RPN_NMS_THRESHOLD = 0.9

    # How many anchors per image to use for RPN training
    RPN_TRAIN_ANCHORS_PER_IMAGE = 64

    # Image mean (RGB)
    #MEAN_PIXEL = np.array([43.53, 39.56, 48.22])
    #MEAN_PIXEL = np.array([-0.65858824, -0.68972549, -0.62180392])
    MEAN_PIXEL = np.array([188.58, 154.34, 182.38])

    # If enabled, resizes instance masks to a smaller size to reduce
    # memory load. Recommended when using high-resolution images.
    USE_MINI_MASK = True
    MINI_MASK_SHAPE = (56, 56)  # (height, width) of the mini-mask

    # Number of ROIs per image to feed to classifier/mask heads
    # The Mask RCNN paper uses 512 but often the RPN doesn't generate
    # enough positive proposals to fill this and keep a positive:negative
    # ratio of 1:3. You can increase the number of proposals by adjusting
    # the RPN NMS threshold.
    TRAIN_ROIS_PER_IMAGE = 128

    # Maximum number of ground truth instances to use in one image
    MAX_GT_INSTANCES = 200

    # Max number of final detections per image
    DETECTION_MAX_INSTANCES = 4000 # 400

class InferenceConfig(CocoConfig):
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

    # Don't resize images for inference
    IMAGE_RESIZE_MODE = "pad64"
    # Non-max suppression threshold to filter RPN proposals.
    # You can increase this during training to generate more proposals.
    RPN_NMS_THRESHOLD = 0.7

def main(args=None):

    K.set_image_data_format('channels_first')
    K.set_learning_phase(0)

    args = parse_command_line_arguments(args)

    model_weights_path = args.weights
    output_file_path = args.output_file
    list_nodes = args.list_nodes

    config = InferenceConfig()
    config.display()

    model = modellib.MaskRCNN(mode="inference", model_dir=LOG_DIR, config=config).keras_model

    model.load_weights(model_weights_path, by_name=True)

    model_A = Model(inputs=model.input, outputs=model.get_layer('mrcnn_mask').output)
    model_A.summary()

    output_nodes = ['mrcnn_detection', "mrcnn_mask/Sigmoid"]
    convert_model(model_A, output_file_path, output_nodes, preprocessor=args.preprocessor,
                  text=True, list_nodes=list_nodes)

def convert_model(inference_model, output_path, output_nodes=[], preprocessor=None, text=False,
                  list_nodes=False):
    # convert the keras model to pb
    orig_output_node_names = [node.op.name for node in inference_model.outputs]
    print("The output names of tensorflow graph nodes: {}".format(str(orig_output_node_names)))

    sess = K.get_session()

    constant_graph = graph_util.convert_variables_to_constants(
        sess,
        sess.graph.as_graph_def(),
        orig_output_node_names)

    temp_pb_path = "./nucleus_temp.pb"
    graph_io.write_graph(constant_graph, os.path.dirname(temp_pb_path), os.path.basename(temp_pb_path),
                         as_text=False)

    predefined_output_nodes = output_nodes
    if predefined_output_nodes != []:
        trt_output_nodes = predefined_output_nodes
    else:
        trt_output_nodes = orig_output_node_names

    # convert .pb to .uff
    uff.from_tensorflow_frozen_model(
        temp_pb_path,
        output_nodes=trt_output_nodes,
        preprocessor=preprocessor,
        text=text,
        list_nodes=list_nodes,
        output_filename=output_path,
        debug_mode=False
    )
    
    #os.remove(temp_pb_path)

if __name__ == "__main__":
    main()

Configuration for ResNet50 (configResnet50.py):

import graphsurgeon as gs
import tensorflow as tf

fpn_p5upsampled = gs.create_plugin_node("fpn_p5upsampled", op="ResizeNearest_TRT", dtype=tf.float32, scale=2.0)
fpn_p4upsampled = gs.create_plugin_node("fpn_p4upsampled", op="ResizeNearest_TRT", dtype=tf.float32, scale=2.0)
fpn_p3upsampled = gs.create_plugin_node("fpn_p3upsampled", op="ResizeNearest_TRT", dtype=tf.float32, scale=2.0)

roi = gs.create_plugin_node("ROI", op="ProposalLayer_TRT", prenms_topk=1024, keep_topk=1000, iou_threshold=0.7)
roi_align_classifier = gs.create_plugin_node("roi_align_classifier", op="PyramidROIAlign_TRT", pooled_size=7)
mrcnn_detection = gs.create_plugin_node("mrcnn_detection", op="DetectionLayer_TRT", num_classes=81, keep_topk=100, score_threshold=0.7, iou_threshold=0.3)
roi_align_mask = gs.create_plugin_node("roi_align_mask_trt", op="PyramidROIAlign_TRT", pooled_size=14)
mrcnn_detection_bboxes = gs.create_plugin_node("mrcnn_detection_bboxes", op="SpecialSlice_TRT")

namespace_plugin_map = {
    "fpn_p5upsampled": fpn_p5upsampled,
    "fpn_p4upsampled": fpn_p4upsampled,
    "fpn_p3upsampled": fpn_p3upsampled,
    "roi_align_classifier": roi_align_classifier,
    "mrcnn_detection": mrcnn_detection,
    "ROI": roi,
    "roi_align_mask": roi_align_mask,
    "lambda_1": mrcnn_detection_bboxes,
}

timedistributed_remove_list = [
        "mrcnn_class_conv1/Reshape/shape", "mrcnn_class_conv1/Reshape", "mrcnn_class_conv1/Reshape_1/shape", "mrcnn_class_conv1/Reshape_1",
        "mrcnn_class_bn1/Reshape/shape", "mrcnn_class_bn1/Reshape", "mrcnn_class_bn1/Reshape_5/shape", "mrcnn_class_bn1/Reshape_5",
        "mrcnn_class_conv2/Reshape/shape", "mrcnn_class_conv2/Reshape", "mrcnn_class_conv2/Reshape_1/shape", "mrcnn_class_conv2/Reshape_1",
        "mrcnn_class_bn2/Reshape/shape", "mrcnn_class_bn2/Reshape", "mrcnn_class_bn2/Reshape_5/shape", "mrcnn_class_bn2/Reshape_5",
        "mrcnn_class_logits/Reshape/shape", "mrcnn_class_logits/Reshape","mrcnn_class_logits/Reshape_1/shape", "mrcnn_class_logits/Reshape_1",
        "mrcnn_class/Reshape/shape", "mrcnn_class/Reshape","mrcnn_class/Reshape_1/shape", "mrcnn_class/Reshape_1",
        "mrcnn_bbox_fc/Reshape/shape", "mrcnn_bbox_fc/Reshape","mrcnn_bbox_fc/Reshape_1/shape", "mrcnn_bbox_fc/Reshape_1",

        "mrcnn_mask_conv1/Reshape/shape", "mrcnn_mask_conv1/Reshape", "mrcnn_mask_conv1/Reshape_1/shape", "mrcnn_mask_conv1/Reshape_1",
        "mrcnn_mask_bn1/Reshape/shape", "mrcnn_mask_bn1/Reshape", "mrcnn_mask_bn1/Reshape_5/shape", "mrcnn_mask_bn1/Reshape_5",
        "mrcnn_mask_conv2/Reshape/shape", "mrcnn_mask_conv2/Reshape", "mrcnn_mask_conv2/Reshape_1/shape", "mrcnn_mask_conv2/Reshape_1",
        "mrcnn_mask_bn2/Reshape/shape", "mrcnn_mask_bn2/Reshape", "mrcnn_mask_bn2/Reshape_5/shape", "mrcnn_mask_bn2/Reshape_5",
        "mrcnn_mask_conv3/Reshape/shape", "mrcnn_mask_conv3/Reshape", "mrcnn_mask_conv3/Reshape_1/shape", "mrcnn_mask_conv3/Reshape_1",
        "mrcnn_mask_bn3/Reshape/shape", "mrcnn_mask_bn3/Reshape", "mrcnn_mask_bn3/Reshape_5/shape", "mrcnn_mask_bn3/Reshape_5",
        "mrcnn_mask_conv4/Reshape/shape", "mrcnn_mask_conv4/Reshape", "mrcnn_mask_conv4/Reshape_1/shape", "mrcnn_mask_conv4/Reshape_1",
        "mrcnn_mask_bn4/Reshape/shape", "mrcnn_mask_bn4/Reshape", "mrcnn_mask_bn4/Reshape_5/shape", "mrcnn_mask_bn4/Reshape_5",
        "mrcnn_mask_deconv/Reshape/shape", "mrcnn_mask_deconv/Reshape", "mrcnn_mask_deconv/Reshape_1/shape", "mrcnn_mask_deconv/Reshape_1",
        "mrcnn_mask/Reshape/shape", "mrcnn_mask/Reshape", "mrcnn_mask/Reshape_1/shape", "mrcnn_mask/Reshape_1",
        ]

timedistributed_connect_pairs = [
        ("mrcnn_mask_deconv/Relu", "mrcnn_mask/convolution"), # mrcnn_mask_deconv -> mrcnn_mask
        ("activation_40/Relu", "mrcnn_mask_deconv/conv2d_transpose"), #active74 -> mrcnn_mask_deconv
        ("mrcnn_mask_bn4/batchnorm/add_1","activation_40/Relu"),  # mrcnn_mask_bn4 -> active74
        ("mrcnn_mask_conv4/BiasAdd", "mrcnn_mask_bn4/batchnorm/mul_1"), #mrcnn_mask_conv4 -> mrcnn_mask_bn4
        ("activation_39/Relu", "mrcnn_mask_conv4/convolution"), #active73 -> mrcnn_mask_conv4
        ("mrcnn_mask_bn3/batchnorm/add_1","activation_39/Relu"), #mrcnn_mask_bn3 -> active73
        ("mrcnn_mask_conv3/BiasAdd", "mrcnn_mask_bn3/batchnorm/mul_1"), #mrcnn_mask_conv3 -> mrcnn_mask_bn3
        ("activation_38/Relu", "mrcnn_mask_conv3/convolution"), #active72 -> mrcnn_mask_conv3
        ("mrcnn_mask_bn2/batchnorm/add_1","activation_38/Relu"), #mrcnn_mask_bn2 -> active72
        ("mrcnn_mask_conv2/BiasAdd", "mrcnn_mask_bn2/batchnorm/mul_1"), #mrcnn_mask_conv2 -> mrcnn_mask_bn2
        ("activation_37/Relu", "mrcnn_mask_conv2/convolution"), #active71 -> mrcnn_mask_conv2
        ("mrcnn_mask_bn1/batchnorm/add_1","activation_37/Relu"), #mrcnn_mask_bn1 -> active71
        ("mrcnn_mask_conv1/BiasAdd", "mrcnn_mask_bn1/batchnorm/mul_1"), #mrcnn_mask_conv1 -> mrcnn_mask_bn1
        ("roi_align_mask_trt", "mrcnn_mask_conv1/convolution"), #roi_align_mask -> mrcnn_mask_conv1
        ("mrcnn_class_bn2/batchnorm/add_1","activation_35/Relu"), # mrcnn_class_bn2 -> active 69
        ("mrcnn_class_conv2/BiasAdd", "mrcnn_class_bn2/batchnorm/mul_1"), # mrcnn_class_conv2 -> mrcnn_class_bn2
        ("activation_37/Relu", "mrcnn_class_conv2/convolution"), # active 68 -> mrcnn_class_conv2
        ("mrcnn_class_bn1/batchnorm/add_1","activation_37/Relu"), # mrcnn_class_bn1 -> active 68
        ("mrcnn_class_conv1/BiasAdd", "mrcnn_class_bn1/batchnorm/mul_1"), # mrcnn_class_conv1 -> mrcnn_class_bn1
        ("roi_align_classifier", "mrcnn_class_conv1/convolution"), # roi_align_classifier -> mrcnn_class_conv1
        ]

dense_compatible_patch =["pool_squeeze/Squeeze", "pool_squeeze/Squeeze_1", #No need to squeeze the dimensions for TRT Dense Layer
        "mrcnn_bbox/Shape", "mrcnn_bbox/strided_slice/stack", # mrcnn_bbox(Reshape): No need to reshape, cause we can process it as 1-D array in detectionlayer's kernel
        "mrcnn_bbox/strided_slice/stack_1", "mrcnn_bbox/strided_slice/stack_2",
        "mrcnn_bbox/strided_slice", "mrcnn_bbox/Reshape/shape/1",
        "mrcnn_bbox/Reshape/shape/2", "mrcnn_bbox/Reshape/shape/3",
        "mrcnn_bbox/Reshape/shape", "mrcnn_bbox/Reshape"]

dense_compatible_connect_pairs = [
        ("activation_35/Relu","mrcnn_bbox_fc/MatMul"), #activation_35 -> mrcnn_bbox_fc
        ("activation_35/Relu", "mrcnn_class_logits/MatMul"), #activation_35 -> mrcnn_class_logits
        ("mrcnn_class_logits/BiasAdd", "mrcnn_class/Softmax"), #mrcnn_class_logits -> mrcnn_class
        ("mrcnn_class/Softmax", "mrcnn_detection"), #mrcnn_class -> mrcnn_detection
        ("mrcnn_bbox_fc/BiasAdd", "mrcnn_detection"), #mrcnn_bbox_fc -> mrcnn_detection
        ]

def connect(dynamic_graph, connections_list):

    for node_a_name, node_b_name in connections_list:
        if node_a_name not in dynamic_graph.node_map[node_b_name].input:
            dynamic_graph.node_map[node_b_name].input.insert(0, node_a_name)

def preprocess(dynamic_graph):
    # Now create a new graph by collapsing namespaces
    dynamic_graph.collapse_namespaces(namespace_plugin_map, unique_inputs=True)
    dynamic_graph.remove(timedistributed_remove_list)
    dynamic_graph.remove(dense_compatible_patch)
    dynamic_graph.remove(['input_anchors', 'input_image_meta'])

    connect(dynamic_graph, timedistributed_connect_pairs)
    connect(dynamic_graph, dense_compatible_connect_pairs)
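
As a side note, the preprocessor can be exercised by hand on the frozen graph before running the full UFF conversion; a minimal sketch, assuming the temporary ./nucleus_temp.pb written by convert_model() above and the graphsurgeon DynamicGraph API used elsewhere in this config:

import graphsurgeon as gs

# Load the frozen TensorFlow graph written by convert_model()
dynamic_graph = gs.DynamicGraph("./nucleus_temp.pb")

# Apply the same graph surgery the UFF converter would run via -p
preprocess(dynamic_graph)

# The collapsed plugin nodes should now appear in the node map
print("ROI" in dynamic_graph.node_map)
print(len(dynamic_graph.as_graph_def().node), "nodes after surgery")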

and then:
python mrcnn_to_trt_single_nucleus.py -w mask_rcnn_nucleus_0080.h5 -o mask_rcnn_nucleus_0080.uff -p configResnet50.py

The .h5 was trained using a ResNet50 backbone initialized from pretrained ImageNet weights.

Another error occurs when I use the .uff to create an engine in Python.

Here is the code:

import tensorrt as trt

G_LOGGER = trt.Logger(trt.Logger.INFO)
trt.init_libnvinfer_plugins(G_LOGGER, '')
config_path = './configResnet50.py'
model_file = './uff/mask_rcnn_nucleus_0080.uff'

output_nodes = ['mrcnn_detection', "mrcnn_mask/Sigmoid"]

trt_output_nodes = output_nodes

INPUT_NODE = "input_image"
INPUT_SIZE = [3, 256, 256]

with trt.Builder(G_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
    parser.register_input(INPUT_NODE, INPUT_SIZE)
    parser.register_output(output_nodes[0])
    parser.register_output(output_nodes[1])
    parser.parse(model_file, network)
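
The snippet stops at parser.parse(); for completeness, a minimal sketch of the remaining steps inside the same with block, assuming the pre-7.x TensorRT Python API (workspace size, batch size, and the .engine output path are illustrative, not from the original post):

    builder.max_batch_size = 1
    builder.max_workspace_size = 2 << 30  # 2 GiB; adjust to the GPU

    # Build the engine and serialize it so it can be deserialized at runtime
    engine = builder.build_cuda_engine(network)
    if engine is None:
        raise RuntimeError("Engine build failed; check parser/plugin errors above")

    with open("./mask_rcnn_nucleus_0080.engine", "wb") as f:
        f.write(engine.serialize())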

Another error occurs when I use the .uff to create an engine in Python.
What's the error?

I used your

  1. .h5 model: mask_rcnn_nucleus_0080.h5 - Google Drive
  2. mrcnn_to_trt_single_resnet50.py from #22
  3. config.py from #22

and changed the following (not strictly necessary, since mrcnn_to_trt_single_resnet50.py already overrides it in "class CocoConfig(Config):"):

diff --git a/mrcnn/config.py b/mrcnn/config.py
old mode 100644
new mode 100755
index 6f91170..3e6020b
--- a/mrcnn/config.py
+++ b/mrcnn/config.py
@@ -52,7 +52,7 @@ class Config(object):
     # You can also provide a callable that should have the signature
     # of model.resnet_graph. If you do so, you need to supply a callable
     # to COMPUTE_BACKBONE_SHAPE as well
-    BACKBONE = "resnet101"
+    BACKBONE = "resnet50"

I can generate the UFF model:
$ ls -la /home/cding/tmp/mask_rcnn_nucleus_0080.uff
-rw-rw-r-- 1 cding cding 178893115 Dec 25 18:49 /home/cding/tmp/mask_rcnn_nucleus_0080.uff

Log:

$ python ./mrcnn_to_trt_single_resnet50.py -w /home/cding/segmentation/maskRCNN_model_conver/mask_rcnn_nucleus_0080.h5 -o /home/cding/tmp/mask_rcnn_nucleus_0080.uff -p ./config.py        
Using TensorFlow backend.

Configurations:
BACKBONE                       resnet50
BACKBONE_STRIDES               [4, 8, 16, 32, 64]
BATCH_SIZE                     1
BBOX_STD_DEV                   [0.1 0.1 0.2 0.2]
COMPUTE_BACKBONE_SHAPE         None
DETECTION_MAX_INSTANCES        4000
DETECTION_MIN_CONFIDENCE       0
DETECTION_NMS_THRESHOLD        0.3
FPN_CLASSIF_FC_LAYERS_SIZE     1024
GPU_COUNT                      1
GRADIENT_CLIP_NORM             5.0
IMAGES_PER_GPU                 1
IMAGE_CHANNEL_COUNT            3
IMAGE_MAX_DIM                  256
IMAGE_META_SIZE                14
IMAGE_MIN_DIM                  256
IMAGE_MIN_SCALE                0
IMAGE_RESIZE_MODE              pad64
IMAGE_SHAPE                    [256 256   3]
LEARNING_MOMENTUM              0.9
LEARNING_RATE                  0.001
LOSS_WEIGHTS                   {'mrcnn_mask_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'rpn_class_loss': 1.0}
MASK_POOL_SIZE                 14
MASK_SHAPE                     [28, 28]
MAX_GT_INSTANCES               200
MEAN_PIXEL                     [188.58 154.34 182.38]
MINI_MASK_SHAPE                (56, 56)
NAME                           nucleus
NUM_CLASSES                    2
POOL_SIZE                      7
POST_NMS_ROIS_INFERENCE        20000
POST_NMS_ROIS_TRAINING         1000
PRE_NMS_LIMIT                  6000
ROI_POSITIVE_RATIO             0.33
RPN_ANCHOR_RATIOS              [0.5, 1, 2]
RPN_ANCHOR_SCALES              (8, 16, 32, 64, 128)
RPN_ANCHOR_STRIDE              1
RPN_BBOX_STD_DEV               [0.1 0.1 0.2 0.2]
RPN_NMS_THRESHOLD              0.7
RPN_TRAIN_ANCHORS_PER_IMAGE    64
STEPS_PER_EPOCH                1000
TOP_DOWN_PYRAMID_SIZE          256
TRAIN_BN                       False
TRAIN_ROIS_PER_IMAGE           128
USE_MINI_MASK                  True
USE_RPN_ROIS                   True
VALIDATION_STEPS               50
WEIGHT_DECAY                   0.0001


WARNING:tensorflow:From /home/cding/.virtualenvs/virtual-py2/local/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
2019-12-25 18:49:12.413967: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 AVX512F FMA
2019-12-25 18:49:12.583316: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x55777154c850 executing computations on platform CUDA. Devices:
2019-12-25 18:49:12.583388: I tensorflow/compiler/xla/service/service.cc:158]   StreamExecutor device (0): Tesla P4, Compute Capability 6.1
2019-12-25 18:49:12.606954: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 1700000000 Hz
2019-12-25 18:49:12.607904: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x5577719c17f0 executing computations on platform Host. Devices:
2019-12-25 18:49:12.607964: I tensorflow/compiler/xla/service/service.cc:158]   StreamExecutor device (0): <undefined>, <undefined>
2019-12-25 18:49:12.608319: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties: 
name: Tesla P4 major: 6 minor: 1 memoryClockRate(GHz): 1.1135
pciBusID: 0000:ae:00.0
totalMemory: 7.93GiB freeMemory: 7.78GiB
2019-12-25 18:49:12.608364: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0
2019-12-25 18:49:12.611050: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-12-25 18:49:12.611087: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990]      0 
2019-12-25 18:49:12.611118: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0:   N 
2019-12-25 18:49:12.611341: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 7569 MB memory) -> physical GPU (device: 0, name: Tesla P4, pci bus id: 0000:ae:00.0, compute capability: 6.1)
WARNING:tensorflow:From /home/cding/segmentation/Mask_RCNN/mrcnn/model.py:783: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_image (InputLayer)        (None, 3, 1024, 1024 0                                            
__________________________________________________________________________________________________
zero_padding2d_1 (ZeroPadding2D (None, 3, 1030, 1030 0           input_image[0][0]                
__________________________________________________________________________________________________
conv1 (Conv2D)                  (None, 64, 512, 512) 9472        zero_padding2d_1[0][0]           
__________________________________________________________________________________________________
bn_conv1 (BatchNorm)            (None, 64, 512, 512) 256         conv1[0][0]                      
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 64, 512, 512) 0           bn_conv1[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 64, 256, 256) 0           activation_1[0][0]               
__________________________________________________________________________________________________
res2a_branch2a (Conv2D)         (None, 64, 256, 256) 4160        max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
bn2a_branch2a (BatchNorm)       (None, 64, 256, 256) 256         res2a_branch2a[0][0]             
__________________________________________________________________________________________________
activation_2 (Activation)       (None, 64, 256, 256) 0           bn2a_branch2a[0][0]              
__________________________________________________________________________________________________
res2a_branch2b (Conv2D)         (None, 64, 256, 256) 36928       activation_2[0][0]               
__________________________________________________________________________________________________
bn2a_branch2b (BatchNorm)       (None, 64, 256, 256) 256         res2a_branch2b[0][0]             
__________________________________________________________________________________________________
activation_3 (Activation)       (None, 64, 256, 256) 0           bn2a_branch2b[0][0]              
__________________________________________________________________________________________________
res2a_branch2c (Conv2D)         (None, 256, 256, 256 16640       activation_3[0][0]               
__________________________________________________________________________________________________
res2a_branch1 (Conv2D)          (None, 256, 256, 256 16640       max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
bn2a_branch2c (BatchNorm)       (None, 256, 256, 256 1024        res2a_branch2c[0][0]             
__________________________________________________________________________________________________
bn2a_branch1 (BatchNorm)        (None, 256, 256, 256 1024        res2a_branch1[0][0]              
__________________________________________________________________________________________________
add_1 (Add)                     (None, 256, 256, 256 0           bn2a_branch2c[0][0]              
                                                                 bn2a_branch1[0][0]               
__________________________________________________________________________________________________
res2a_out (Activation)          (None, 256, 256, 256 0           add_1[0][0]                      
__________________________________________________________________________________________________
res2b_branch2a (Conv2D)         (None, 64, 256, 256) 16448       res2a_out[0][0]                  
__________________________________________________________________________________________________
bn2b_branch2a (BatchNorm)       (None, 64, 256, 256) 256         res2b_branch2a[0][0]             
__________________________________________________________________________________________________
activation_4 (Activation)       (None, 64, 256, 256) 0           bn2b_branch2a[0][0]              
__________________________________________________________________________________________________
res2b_branch2b (Conv2D)         (None, 64, 256, 256) 36928       activation_4[0][0]               
__________________________________________________________________________________________________
bn2b_branch2b (BatchNorm)       (None, 64, 256, 256) 256         res2b_branch2b[0][0]             
__________________________________________________________________________________________________
activation_5 (Activation)       (None, 64, 256, 256) 0           bn2b_branch2b[0][0]              
__________________________________________________________________________________________________
res2b_branch2c (Conv2D)         (None, 256, 256, 256 16640       activation_5[0][0]               
__________________________________________________________________________________________________
bn2b_branch2c (BatchNorm)       (None, 256, 256, 256 1024        res2b_branch2c[0][0]             
__________________________________________________________________________________________________
add_2 (Add)                     (None, 256, 256, 256 0           bn2b_branch2c[0][0]              
                                                                 res2a_out[0][0]                  
__________________________________________________________________________________________________
res2b_out (Activation)          (None, 256, 256, 256 0           add_2[0][0]                      
__________________________________________________________________________________________________
res2c_branch2a (Conv2D)         (None, 64, 256, 256) 16448       res2b_out[0][0]                  
__________________________________________________________________________________________________
bn2c_branch2a (BatchNorm)       (None, 64, 256, 256) 256         res2c_branch2a[0][0]             
__________________________________________________________________________________________________
activation_6 (Activation)       (None, 64, 256, 256) 0           bn2c_branch2a[0][0]              
__________________________________________________________________________________________________
res2c_branch2b (Conv2D)         (None, 64, 256, 256) 36928       activation_6[0][0]               
__________________________________________________________________________________________________
bn2c_branch2b (BatchNorm)       (None, 64, 256, 256) 256         res2c_branch2b[0][0]             
__________________________________________________________________________________________________
activation_7 (Activation)       (None, 64, 256, 256) 0           bn2c_branch2b[0][0]              
__________________________________________________________________________________________________
res2c_branch2c (Conv2D)         (None, 256, 256, 256 16640       activation_7[0][0]               
__________________________________________________________________________________________________
bn2c_branch2c (BatchNorm)       (None, 256, 256, 256 1024        res2c_branch2c[0][0]             
__________________________________________________________________________________________________
add_3 (Add)                     (None, 256, 256, 256 0           bn2c_branch2c[0][0]              
                                                                 res2b_out[0][0]                  
__________________________________________________________________________________________________
res2c_out (Activation)          (None, 256, 256, 256 0           add_3[0][0]                      
__________________________________________________________________________________________________
res3a_branch2a (Conv2D)         (None, 128, 128, 128 32896       res2c_out[0][0]                  
__________________________________________________________________________________________________
bn3a_branch2a (BatchNorm)       (None, 128, 128, 128 512         res3a_branch2a[0][0]             
__________________________________________________________________________________________________
activation_8 (Activation)       (None, 128, 128, 128 0           bn3a_branch2a[0][0]              
__________________________________________________________________________________________________
res3a_branch2b (Conv2D)         (None, 128, 128, 128 147584      activation_8[0][0]               
__________________________________________________________________________________________________
bn3a_branch2b (BatchNorm)       (None, 128, 128, 128 512         res3a_branch2b[0][0]             
__________________________________________________________________________________________________
activation_9 (Activation)       (None, 128, 128, 128 0           bn3a_branch2b[0][0]              
__________________________________________________________________________________________________
res3a_branch2c (Conv2D)         (None, 512, 128, 128 66048       activation_9[0][0]               
__________________________________________________________________________________________________
res3a_branch1 (Conv2D)          (None, 512, 128, 128 131584      res2c_out[0][0]                  
__________________________________________________________________________________________________
bn3a_branch2c (BatchNorm)       (None, 512, 128, 128 2048        res3a_branch2c[0][0]             
__________________________________________________________________________________________________
bn3a_branch1 (BatchNorm)        (None, 512, 128, 128 2048        res3a_branch1[0][0]              
__________________________________________________________________________________________________
add_4 (Add)                     (None, 512, 128, 128 0           bn3a_branch2c[0][0]              
                                                                 bn3a_branch1[0][0]               
__________________________________________________________________________________________________
res3a_out (Activation)          (None, 512, 128, 128 0           add_4[0][0]                      
__________________________________________________________________________________________________
res3b_branch2a (Conv2D)         (None, 128, 128, 128 65664       res3a_out[0][0]                  
__________________________________________________________________________________________________
bn3b_branch2a (BatchNorm)       (None, 128, 128, 128 512         res3b_branch2a[0][0]             
__________________________________________________________________________________________________
activation_10 (Activation)      (None, 128, 128, 128 0           bn3b_branch2a[0][0]              
__________________________________________________________________________________________________
res3b_branch2b (Conv2D)         (None, 128, 128, 128 147584      activation_10[0][0]              
__________________________________________________________________________________________________
bn3b_branch2b (BatchNorm)       (None, 128, 128, 128 512         res3b_branch2b[0][0]             
__________________________________________________________________________________________________
activation_11 (Activation)      (None, 128, 128, 128 0           bn3b_branch2b[0][0]              
__________________________________________________________________________________________________
res3b_branch2c (Conv2D)         (None, 512, 128, 128 66048       activation_11[0][0]              
__________________________________________________________________________________________________
bn3b_branch2c (BatchNorm)       (None, 512, 128, 128 2048        res3b_branch2c[0][0]             
__________________________________________________________________________________________________
add_5 (Add)                     (None, 512, 128, 128 0           bn3b_branch2c[0][0]              
                                                                 res3a_out[0][0]                  
__________________________________________________________________________________________________
res3b_out (Activation)          (None, 512, 128, 128 0           add_5[0][0]                      
__________________________________________________________________________________________________
res3c_branch2a (Conv2D)         (None, 128, 128, 128 65664       res3b_out[0][0]                  
__________________________________________________________________________________________________
bn3c_branch2a (BatchNorm)       (None, 128, 128, 128 512         res3c_branch2a[0][0]             
__________________________________________________________________________________________________
activation_12 (Activation)      (None, 128, 128, 128 0           bn3c_branch2a[0][0]              
__________________________________________________________________________________________________
res3c_branch2b (Conv2D)         (None, 128, 128, 128 147584      activation_12[0][0]              
__________________________________________________________________________________________________
bn3c_branch2b (BatchNorm)       (None, 128, 128, 128 512         res3c_branch2b[0][0]             
__________________________________________________________________________________________________
activation_13 (Activation)      (None, 128, 128, 128 0           bn3c_branch2b[0][0]              
__________________________________________________________________________________________________
res3c_branch2c (Conv2D)         (None, 512, 128, 128 66048       activation_13[0][0]              
__________________________________________________________________________________________________
bn3c_branch2c (BatchNorm)       (None, 512, 128, 128 2048        res3c_branch2c[0][0]             
__________________________________________________________________________________________________
add_6 (Add)                     (None, 512, 128, 128 0           bn3c_branch2c[0][0]              
                                                                 res3b_out[0][0]                  
__________________________________________________________________________________________________
res3c_out (Activation)          (None, 512, 128, 128 0           add_6[0][0]                      
__________________________________________________________________________________________________
res3d_branch2a (Conv2D)         (None, 128, 128, 128 65664       res3c_out[0][0]                  
__________________________________________________________________________________________________
bn3d_branch2a (BatchNorm)       (None, 128, 128, 128 512         res3d_branch2a[0][0]             
__________________________________________________________________________________________________
activation_14 (Activation)      (None, 128, 128, 128 0           bn3d_branch2a[0][0]              
__________________________________________________________________________________________________
res3d_branch2b (Conv2D)         (None, 128, 128, 128 147584      activation_14[0][0]              
__________________________________________________________________________________________________
bn3d_branch2b (BatchNorm)       (None, 128, 128, 128 512         res3d_branch2b[0][0]             
__________________________________________________________________________________________________
activation_15 (Activation)      (None, 128, 128, 128 0           bn3d_branch2b[0][0]              
__________________________________________________________________________________________________
res3d_branch2c (Conv2D)         (None, 512, 128, 128 66048       activation_15[0][0]              
__________________________________________________________________________________________________
bn3d_branch2c (BatchNorm)       (None, 512, 128, 128 2048        res3d_branch2c[0][0]             
__________________________________________________________________________________________________
add_7 (Add)                     (None, 512, 128, 128 0           bn3d_branch2c[0][0]              
                                                                 res3c_out[0][0]                  
__________________________________________________________________________________________________
res3d_out (Activation)          (None, 512, 128, 128 0           add_7[0][0]                      
__________________________________________________________________________________________________
res4a_branch2a (Conv2D)         (None, 256, 64, 64)  131328      res3d_out[0][0]                  
__________________________________________________________________________________________________
bn4a_branch2a (BatchNorm)       (None, 256, 64, 64)  1024        res4a_branch2a[0][0]             
__________________________________________________________________________________________________
activation_16 (Activation)      (None, 256, 64, 64)  0           bn4a_branch2a[0][0]              
__________________________________________________________________________________________________
res4a_branch2b (Conv2D)         (None, 256, 64, 64)  590080      activation_16[0][0]              
__________________________________________________________________________________________________
bn4a_branch2b (BatchNorm)       (None, 256, 64, 64)  1024        res4a_branch2b[0][0]             
__________________________________________________________________________________________________
activation_17 (Activation)      (None, 256, 64, 64)  0           bn4a_branch2b[0][0]              
__________________________________________________________________________________________________
res4a_branch2c (Conv2D)         (None, 1024, 64, 64) 263168      activation_17[0][0]              
__________________________________________________________________________________________________
res4a_branch1 (Conv2D)          (None, 1024, 64, 64) 525312      res3d_out[0][0]                  
__________________________________________________________________________________________________
bn4a_branch2c (BatchNorm)       (None, 1024, 64, 64) 4096        res4a_branch2c[0][0]             
__________________________________________________________________________________________________
bn4a_branch1 (BatchNorm)        (None, 1024, 64, 64) 4096        res4a_branch1[0][0]              
__________________________________________________________________________________________________
add_8 (Add)                     (None, 1024, 64, 64) 0           bn4a_branch2c[0][0]              
                                                                 bn4a_branch1[0][0]               
__________________________________________________________________________________________________
res4a_out (Activation)          (None, 1024, 64, 64) 0           add_8[0][0]                      
__________________________________________________________________________________________________
res4b_branch2a (Conv2D)         (None, 256, 64, 64)  262400      res4a_out[0][0]                  
__________________________________________________________________________________________________
bn4b_branch2a (BatchNorm)       (None, 256, 64, 64)  1024        res4b_branch2a[0][0]             
__________________________________________________________________________________________________
activation_18 (Activation)      (None, 256, 64, 64)  0           bn4b_branch2a[0][0]              
__________________________________________________________________________________________________
res4b_branch2b (Conv2D)         (None, 256, 64, 64)  590080      activation_18[0][0]              
__________________________________________________________________________________________________
bn4b_branch2b (BatchNorm)       (None, 256, 64, 64)  1024        res4b_branch2b[0][0]             
__________________________________________________________________________________________________
activation_19 (Activation)      (None, 256, 64, 64)  0           bn4b_branch2b[0][0]              
__________________________________________________________________________________________________
res4b_branch2c (Conv2D)         (None, 1024, 64, 64) 263168      activation_19[0][0]              
__________________________________________________________________________________________________
bn4b_branch2c (BatchNorm)       (None, 1024, 64, 64) 4096        res4b_branch2c[0][0]             
__________________________________________________________________________________________________
add_9 (Add)                     (None, 1024, 64, 64) 0           bn4b_branch2c[0][0]              
                                                                 res4a_out[0][0]                  
__________________________________________________________________________________________________
res4b_out (Activation)          (None, 1024, 64, 64) 0           add_9[0][0]                      
__________________________________________________________________________________________________
res4c_branch2a (Conv2D)         (None, 256, 64, 64)  262400      res4b_out[0][0]                  
__________________________________________________________________________________________________
bn4c_branch2a (BatchNorm)       (None, 256, 64, 64)  1024        res4c_branch2a[0][0]             
__________________________________________________________________________________________________
activation_20 (Activation)      (None, 256, 64, 64)  0           bn4c_branch2a[0][0]              
__________________________________________________________________________________________________
res4c_branch2b (Conv2D)         (None, 256, 64, 64)  590080      activation_20[0][0]              
__________________________________________________________________________________________________
bn4c_branch2b (BatchNorm)       (None, 256, 64, 64)  1024        res4c_branch2b[0][0]             
__________________________________________________________________________________________________
activation_21 (Activation)      (None, 256, 64, 64)  0           bn4c_branch2b[0][0]              
__________________________________________________________________________________________________
res4c_branch2c (Conv2D)         (None, 1024, 64, 64) 263168      activation_21[0][0]              
__________________________________________________________________________________________________
bn4c_branch2c (BatchNorm)       (None, 1024, 64, 64) 4096        res4c_branch2c[0][0]             
__________________________________________________________________________________________________
add_10 (Add)                    (None, 1024, 64, 64) 0           bn4c_branch2c[0][0]              
                                                                 res4b_out[0][0]                  
__________________________________________________________________________________________________
res4c_out (Activation)          (None, 1024, 64, 64) 0           add_10[0][0]                     
__________________________________________________________________________________________________
res4d_branch2a (Conv2D)         (None, 256, 64, 64)  262400      res4c_out[0][0]                  
__________________________________________________________________________________________________
bn4d_branch2a (BatchNorm)       (None, 256, 64, 64)  1024        res4d_branch2a[0][0]             
__________________________________________________________________________________________________
activation_22 (Activation)      (None, 256, 64, 64)  0           bn4d_branch2a[0][0]              
__________________________________________________________________________________________________
res4d_branch2b (Conv2D)         (None, 256, 64, 64)  590080      activation_22[0][0]              
__________________________________________________________________________________________________
bn4d_branch2b (BatchNorm)       (None, 256, 64, 64)  1024        res4d_branch2b[0][0]             
__________________________________________________________________________________________________
activation_23 (Activation)      (None, 256, 64, 64)  0           bn4d_branch2b[0][0]              
__________________________________________________________________________________________________
res4d_branch2c (Conv2D)         (None, 1024, 64, 64) 263168      activation_23[0][0]              
__________________________________________________________________________________________________
bn4d_branch2c (BatchNorm)       (None, 1024, 64, 64) 4096        res4d_branch2c[0][0]             
__________________________________________________________________________________________________
add_11 (Add)                    (None, 1024, 64, 64) 0           bn4d_branch2c[0][0]              
                                                                 res4c_out[0][0]                  
__________________________________________________________________________________________________
res4d_out (Activation)          (None, 1024, 64, 64) 0           add_11[0][0]                     
__________________________________________________________________________________________________
res4e_branch2a (Conv2D)         (None, 256, 64, 64)  262400      res4d_out[0][0]                  
__________________________________________________________________________________________________
bn4e_branch2a (BatchNorm)       (None, 256, 64, 64)  1024        res4e_branch2a[0][0]             
__________________________________________________________________________________________________
activation_24 (Activation)      (None, 256, 64, 64)  0           bn4e_branch2a[0][0]              
__________________________________________________________________________________________________
res4e_branch2b (Conv2D)         (None, 256, 64, 64)  590080      activation_24[0][0]              
__________________________________________________________________________________________________
bn4e_branch2b (BatchNorm)       (None, 256, 64, 64)  1024        res4e_branch2b[0][0]             
__________________________________________________________________________________________________
activation_25 (Activation)      (None, 256, 64, 64)  0           bn4e_branch2b[0][0]              
__________________________________________________________________________________________________
res4e_branch2c (Conv2D)         (None, 1024, 64, 64) 263168      activation_25[0][0]              
__________________________________________________________________________________________________
bn4e_branch2c (BatchNorm)       (None, 1024, 64, 64) 4096        res4e_branch2c[0][0]             
__________________________________________________________________________________________________
add_12 (Add)                    (None, 1024, 64, 64) 0           bn4e_branch2c[0][0]              
                                                                 res4d_out[0][0]                  
__________________________________________________________________________________________________
res4e_out (Activation)          (None, 1024, 64, 64) 0           add_12[0][0]                     
__________________________________________________________________________________________________
res4f_branch2a (Conv2D)         (None, 256, 64, 64)  262400      res4e_out[0][0]                  
__________________________________________________________________________________________________
bn4f_branch2a (BatchNorm)       (None, 256, 64, 64)  1024        res4f_branch2a[0][0]             
__________________________________________________________________________________________________
activation_26 (Activation)      (None, 256, 64, 64)  0           bn4f_branch2a[0][0]              
__________________________________________________________________________________________________
res4f_branch2b (Conv2D)         (None, 256, 64, 64)  590080      activation_26[0][0]              
__________________________________________________________________________________________________
bn4f_branch2b (BatchNorm)       (None, 256, 64, 64)  1024        res4f_branch2b[0][0]             
__________________________________________________________________________________________________
activation_27 (Activation)      (None, 256, 64, 64)  0           bn4f_branch2b[0][0]              
__________________________________________________________________________________________________
res4f_branch2c (Conv2D)         (None, 1024, 64, 64) 263168      activation_27[0][0]              
__________________________________________________________________________________________________
bn4f_branch2c (BatchNorm)       (None, 1024, 64, 64) 4096        res4f_branch2c[0][0]             
__________________________________________________________________________________________________
add_13 (Add)                    (None, 1024, 64, 64) 0           bn4f_branch2c[0][0]              
                                                                 res4e_out[0][0]                  
__________________________________________________________________________________________________
res4f_out (Activation)          (None, 1024, 64, 64) 0           add_13[0][0]                     
__________________________________________________________________________________________________
res5a_branch2a (Conv2D)         (None, 512, 32, 32)  524800      res4f_out[0][0]                  
__________________________________________________________________________________________________
bn5a_branch2a (BatchNorm)       (None, 512, 32, 32)  2048        res5a_branch2a[0][0]             
__________________________________________________________________________________________________
activation_28 (Activation)      (None, 512, 32, 32)  0           bn5a_branch2a[0][0]              
__________________________________________________________________________________________________
res5a_branch2b (Conv2D)         (None, 512, 32, 32)  2359808     activation_28[0][0]              
__________________________________________________________________________________________________
bn5a_branch2b (BatchNorm)       (None, 512, 32, 32)  2048        res5a_branch2b[0][0]             
__________________________________________________________________________________________________
activation_29 (Activation)      (None, 512, 32, 32)  0           bn5a_branch2b[0][0]              
__________________________________________________________________________________________________
res5a_branch2c (Conv2D)         (None, 2048, 32, 32) 1050624     activation_29[0][0]              
__________________________________________________________________________________________________
res5a_branch1 (Conv2D)          (None, 2048, 32, 32) 2099200     res4f_out[0][0]                  
__________________________________________________________________________________________________
bn5a_branch2c (BatchNorm)       (None, 2048, 32, 32) 8192        res5a_branch2c[0][0]             
__________________________________________________________________________________________________
bn5a_branch1 (BatchNorm)        (None, 2048, 32, 32) 8192        res5a_branch1[0][0]              
__________________________________________________________________________________________________
add_14 (Add)                    (None, 2048, 32, 32) 0           bn5a_branch2c[0][0]              
                                                                 bn5a_branch1[0][0]               
__________________________________________________________________________________________________
res5a_out (Activation)          (None, 2048, 32, 32) 0           add_14[0][0]                     
__________________________________________________________________________________________________
res5b_branch2a (Conv2D)         (None, 512, 32, 32)  1049088     res5a_out[0][0]                  
__________________________________________________________________________________________________
bn5b_branch2a (BatchNorm)       (None, 512, 32, 32)  2048        res5b_branch2a[0][0]             
__________________________________________________________________________________________________
activation_30 (Activation)      (None, 512, 32, 32)  0           bn5b_branch2a[0][0]              
__________________________________________________________________________________________________
res5b_branch2b (Conv2D)         (None, 512, 32, 32)  2359808     activation_30[0][0]              
__________________________________________________________________________________________________
bn5b_branch2b (BatchNorm)       (None, 512, 32, 32)  2048        res5b_branch2b[0][0]             
__________________________________________________________________________________________________
activation_31 (Activation)      (None, 512, 32, 32)  0           bn5b_branch2b[0][0]              
__________________________________________________________________________________________________
res5b_branch2c (Conv2D)         (None, 2048, 32, 32) 1050624     activation_31[0][0]              
__________________________________________________________________________________________________
bn5b_branch2c (BatchNorm)       (None, 2048, 32, 32) 8192        res5b_branch2c[0][0]             
__________________________________________________________________________________________________
add_15 (Add)                    (None, 2048, 32, 32) 0           bn5b_branch2c[0][0]              
                                                                 res5a_out[0][0]                  
__________________________________________________________________________________________________
res5b_out (Activation)          (None, 2048, 32, 32) 0           add_15[0][0]                     
__________________________________________________________________________________________________
res5c_branch2a (Conv2D)         (None, 512, 32, 32)  1049088     res5b_out[0][0]                  
__________________________________________________________________________________________________
bn5c_branch2a (BatchNorm)       (None, 512, 32, 32)  2048        res5c_branch2a[0][0]             
__________________________________________________________________________________________________
activation_32 (Activation)      (None, 512, 32, 32)  0           bn5c_branch2a[0][0]              
__________________________________________________________________________________________________
res5c_branch2b (Conv2D)         (None, 512, 32, 32)  2359808     activation_32[0][0]              
__________________________________________________________________________________________________
bn5c_branch2b (BatchNorm)       (None, 512, 32, 32)  2048        res5c_branch2b[0][0]             
__________________________________________________________________________________________________
activation_33 (Activation)      (None, 512, 32, 32)  0           bn5c_branch2b[0][0]              
__________________________________________________________________________________________________
res5c_branch2c (Conv2D)         (None, 2048, 32, 32) 1050624     activation_33[0][0]              
__________________________________________________________________________________________________
bn5c_branch2c (BatchNorm)       (None, 2048, 32, 32) 8192        res5c_branch2c[0][0]             
__________________________________________________________________________________________________
add_16 (Add)                    (None, 2048, 32, 32) 0           bn5c_branch2c[0][0]              
                                                                 res5b_out[0][0]                  
__________________________________________________________________________________________________
res5c_out (Activation)          (None, 2048, 32, 32) 0           add_16[0][0]                     
__________________________________________________________________________________________________
fpn_c5p5 (Conv2D)               (None, 256, 32, 32)  524544      res5c_out[0][0]                  
__________________________________________________________________________________________________
fpn_p5upsampled (UpSampling2D)  (None, 256, 64, 64)  0           fpn_c5p5[0][0]                   
__________________________________________________________________________________________________
fpn_c4p4 (Conv2D)               (None, 256, 64, 64)  262400      res4f_out[0][0]                  
__________________________________________________________________________________________________
fpn_p4add (Add)                 (None, 256, 64, 64)  0           fpn_p5upsampled[0][0]            
                                                                 fpn_c4p4[0][0]                   
__________________________________________________________________________________________________
fpn_p4upsampled (UpSampling2D)  (None, 256, 128, 128 0           fpn_p4add[0][0]                  
__________________________________________________________________________________________________
fpn_c3p3 (Conv2D)               (None, 256, 128, 128 131328      res3d_out[0][0]                  
__________________________________________________________________________________________________
fpn_p3add (Add)                 (None, 256, 128, 128 0           fpn_p4upsampled[0][0]            
                                                                 fpn_c3p3[0][0]                   
__________________________________________________________________________________________________
fpn_p3upsampled (UpSampling2D)  (None, 256, 256, 256 0           fpn_p3add[0][0]                  
__________________________________________________________________________________________________
fpn_c2p2 (Conv2D)               (None, 256, 256, 256 65792       res2c_out[0][0]                  
__________________________________________________________________________________________________
fpn_p2add (Add)                 (None, 256, 256, 256 0           fpn_p3upsampled[0][0]            
                                                                 fpn_c2p2[0][0]                   
__________________________________________________________________________________________________
fpn_p5 (Conv2D)                 (None, 256, 32, 32)  590080      fpn_c5p5[0][0]                   
__________________________________________________________________________________________________
fpn_p2 (Conv2D)                 (None, 256, 256, 256 590080      fpn_p2add[0][0]                  
__________________________________________________________________________________________________
fpn_p3 (Conv2D)                 (None, 256, 128, 128 590080      fpn_p3add[0][0]                  
__________________________________________________________________________________________________
fpn_p4 (Conv2D)                 (None, 256, 64, 64)  590080      fpn_p4add[0][0]                  
__________________________________________________________________________________________________
fpn_p6 (MaxPooling2D)           (None, 256, 16, 16)  0           fpn_p5[0][0]                     
__________________________________________________________________________________________________
rpn_model (Model)               multiple             1189394     fpn_p2[0][0]                     
                                                                 fpn_p3[0][0]                     
                                                                 fpn_p4[0][0]                     
                                                                 fpn_p5[0][0]                     
                                                                 fpn_p6[0][0]                     
__________________________________________________________________________________________________
rpn_class (Concatenate)         (None, 261888, 2)    0           rpn_model[1][1]                  
                                                                 rpn_model[2][1]                  
                                                                 rpn_model[3][1]                  
                                                                 rpn_model[4][1]                  
                                                                 rpn_model[5][1]                  
__________________________________________________________________________________________________
rpn_bbox (Concatenate)          (None, 261888, 4)    0           rpn_model[1][2]                  
                                                                 rpn_model[2][2]                  
                                                                 rpn_model[3][2]                  
                                                                 rpn_model[4][2]                  
                                                                 rpn_model[5][2]                  
__________________________________________________________________________________________________
input_anchors (InputLayer)      (None, None, 4)      0                                            
__________________________________________________________________________________________________
ROI (ProposalLayer)             (None, 20000, 4)     0           rpn_class[0][0]                  
                                                                 rpn_bbox[0][0]                   
                                                                 input_anchors[0][0]              
__________________________________________________________________________________________________
input_image_meta (InputLayer)   (None, 14)           0                                            
__________________________________________________________________________________________________
roi_align_classifier (PyramidRO (None, 20000, 256, 7 0           ROI[0][0]                        
                                                                 input_image_meta[0][0]           
                                                                 fpn_p2[0][0]                     
                                                                 fpn_p3[0][0]                     
                                                                 fpn_p4[0][0]                     
                                                                 fpn_p5[0][0]                     
__________________________________________________________________________________________________
mrcnn_class_conv1 (TimeDistribu (None, 20000, 1024,  12846080    roi_align_classifier[0][0]       
__________________________________________________________________________________________________
mrcnn_class_bn1 (TimeDistribute (None, 20000, 1024,  4096        mrcnn_class_conv1[0][0]          
__________________________________________________________________________________________________
activation_34 (Activation)      (None, 20000, 1024,  0           mrcnn_class_bn1[0][0]            
__________________________________________________________________________________________________
mrcnn_class_conv2 (TimeDistribu (None, 20000, 1024,  1049600     activation_34[0][0]              
__________________________________________________________________________________________________
mrcnn_class_bn2 (TimeDistribute (None, 20000, 1024,  4096        mrcnn_class_conv2[0][0]          
__________________________________________________________________________________________________
activation_35 (Activation)      (None, 20000, 1024,  0           mrcnn_class_bn2[0][0]            
__________________________________________________________________________________________________
pool_squeeze (Lambda)           (None, 20000, 1024)  0           activation_35[0][0]              
__________________________________________________________________________________________________
mrcnn_class_logits (TimeDistrib (None, 20000, 2)     2050        pool_squeeze[0][0]               
__________________________________________________________________________________________________
mrcnn_bbox_fc (TimeDistributed) (None, 20000, 8)     8200        pool_squeeze[0][0]               
__________________________________________________________________________________________________
mrcnn_class (TimeDistributed)   (None, 20000, 2)     0           mrcnn_class_logits[0][0]         
__________________________________________________________________________________________________
mrcnn_bbox (Reshape)            (None, 20000, 2, 4)  0           mrcnn_bbox_fc[0][0]              
__________________________________________________________________________________________________
mrcnn_detection (DetectionLayer (None, 4000, 6)      0           ROI[0][0]                        
                                                                 mrcnn_class[0][0]                
                                                                 mrcnn_bbox[0][0]                 
                                                                 input_image_meta[0][0]           
__________________________________________________________________________________________________
lambda_1 (Lambda)               (None, 4000, 4)      0           mrcnn_detection[0][0]            
__________________________________________________________________________________________________
roi_align_mask (PyramidROIAlign (None, 4000, 256, 14 0           lambda_1[0][0]                   
                                                                 input_image_meta[0][0]           
                                                                 fpn_p2[0][0]                     
                                                                 fpn_p3[0][0]                     
                                                                 fpn_p4[0][0]                     
                                                                 fpn_p5[0][0]                     
__________________________________________________________________________________________________
mrcnn_mask_conv1 (TimeDistribut (None, 4000, 256, 14 590080      roi_align_mask[0][0]             
__________________________________________________________________________________________________
mrcnn_mask_bn1 (TimeDistributed (None, 4000, 256, 14 1024        mrcnn_mask_conv1[0][0]           
__________________________________________________________________________________________________
activation_37 (Activation)      (None, 4000, 256, 14 0           mrcnn_mask_bn1[0][0]             
__________________________________________________________________________________________________
mrcnn_mask_conv2 (TimeDistribut (None, 4000, 256, 14 590080      activation_37[0][0]              
__________________________________________________________________________________________________
mrcnn_mask_bn2 (TimeDistributed (None, 4000, 256, 14 1024        mrcnn_mask_conv2[0][0]           
__________________________________________________________________________________________________
activation_38 (Activation)      (None, 4000, 256, 14 0           mrcnn_mask_bn2[0][0]             
__________________________________________________________________________________________________
mrcnn_mask_conv3 (TimeDistribut (None, 4000, 256, 14 590080      activation_38[0][0]              
__________________________________________________________________________________________________
mrcnn_mask_bn3 (TimeDistributed (None, 4000, 256, 14 1024        mrcnn_mask_conv3[0][0]           
__________________________________________________________________________________________________
activation_39 (Activation)      (None, 4000, 256, 14 0           mrcnn_mask_bn3[0][0]             
__________________________________________________________________________________________________
mrcnn_mask_conv4 (TimeDistribut (None, 4000, 256, 14 590080      activation_39[0][0]              
__________________________________________________________________________________________________
mrcnn_mask_bn4 (TimeDistributed (None, 4000, 256, 14 1024        mrcnn_mask_conv4[0][0]           
__________________________________________________________________________________________________
activation_40 (Activation)      (None, 4000, 256, 14 0           mrcnn_mask_bn4[0][0]             
__________________________________________________________________________________________________
mrcnn_mask_deconv (TimeDistribu (None, 4000, 256, 28 262400      activation_40[0][0]              
__________________________________________________________________________________________________
mrcnn_mask (TimeDistributed)    (None, 4000, 2, 28,  514         mrcnn_mask_deconv[0][0]          
==================================================================================================
Total params: 44,662,942
Trainable params: 44,603,678
Non-trainable params: 59,264
__________________________________________________________________________________________________
The output names of tensorflow graph nodes: [u'mrcnn_mask/Reshape_1']
WARNING:tensorflow:From ./mrcnn_to_trt_single_resnet50.py:173: convert_variables_to_constants (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.compat.v1.graph_util.convert_variables_to_constants
WARNING:tensorflow:From /home/cding/.virtualenvs/virtual-py2/local/lib/python2.7/site-packages/tensorflow/python/framework/graph_util_impl.py:245: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.compat.v1.graph_util.extract_sub_graph
WARNING:tensorflow:From /home/cding/.virtualenvs/virtual-py2/local/lib/python2.7/site-packages/uff/converters/tensorflow/conversion_helpers.py:185: __init__ (from tensorflow.python.platform.gfile) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.gfile.GFile.
UFF Version 0.5.5
=== Automatically deduced input nodes ===
[name: "input_image"
op: "Placeholder"
attr {
  key: "dtype"
  value {
    type: DT_FLOAT
  }
}
attr {
  key: "shape"
  value {
    shape {
      dim {
        size: -1
      }
      dim {
        size: 3
      }
      dim {
        size: 1024
      }
      dim {
        size: 1024
      }
    }
  }
}
]
=========================================

Using output node mrcnn_detection
Using output node mrcnn_mask/Sigmoid
Converting to UFF graph
Warning: No conversion function registered for layer: PyramidROIAlign_TRT yet.
Converting roi_align_mask_trt as custom op: PyramidROIAlign_TRT
Warning: No conversion function registered for layer: ResizeNearest_TRT yet.
Converting fpn_p5upsampled as custom op: ResizeNearest_TRT
Warning: No conversion function registered for layer: ResizeNearest_TRT yet.
Converting fpn_p4upsampled as custom op: ResizeNearest_TRT
Warning: No conversion function registered for layer: ResizeNearest_TRT yet.
Converting fpn_p3upsampled as custom op: ResizeNearest_TRT
Warning: No conversion function registered for layer: SpecialSlice_TRT yet.
Converting mrcnn_detection_bboxes as custom op: SpecialSlice_TRT
Warning: No conversion function registered for layer: DetectionLayer_TRT yet.
Converting mrcnn_detection as custom op: DetectionLayer_TRT
Warning: No conversion function registered for layer: ProposalLayer_TRT yet.
Converting ROI as custom op: ProposalLayer_TRT
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: No conversion function registered for layer: PyramidROIAlign_TRT yet.
Converting roi_align_classifier as custom op: PyramidROIAlign_TRT
No. nodes: 1751
UFF Output written to /home/cding/tmp/mask_rcnn_nucleus_0080.uff
UFF Text Output written to /home/cding/tmp/mask_rcnn_nucleus_0080.pbtxt

I will check the TRT engine-building code later.

Did you replace libnvinfer_plugin.so with the one generated from https://github.com/NVIDIA/TensorRT/? Several plugins are needed when the engine is built.
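One way to double-check that the rebuilt libnvinfer_plugin.so is the one actually being picked up is to list the registered plugin creators from the TensorRT Python bindings. A minimal sketch (assuming the Python bindings are installed; the plugin names to look for are the ones from the conversion log above):

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

# Registers the plugins shipped in whichever libnvinfer_plugin.so gets loaded.
trt.init_libnvinfer_plugins(TRT_LOGGER, "")

for creator in trt.get_plugin_registry().plugin_creator_list:
    print(creator.name, creator.plugin_version)

# With the rebuilt OSS library in place, the list should include, among others:
# ProposalLayer_TRT, PyramidROIAlign_TRT, DetectionLayer_TRT,
# ResizeNearest_TRT and SpecialSlice_TRT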

I checked my TRT engine-building code and got an error in the UffParser:

[TensorRT] ERROR: UffParser: Unsupported number of graph 0

/* for now the UffParser only support 1 graph in a MetaGraph
     * we might consider support several graphs in the future */
    if (meta.graphs_size() != 1)
        RETURN_AND_LOG_ERROR(false, "Unsupported number of graph " + to_string(meta.graphs_size()));

This may be a Python version problem.

Please change your config.py as follows (you can also refer to https://github.com/NVIDIA/TensorRT/issues/240). With the resnet50 backbone the classifier-head activations are activation_34/activation_35 rather than activation_37/activation_38, as the model summary above shows:

--- a/samples/opensource/sampleUffMaskRCNN/converted_res50/config.py
+++ b/samples/opensource/sampleUffMaskRCNN/converted_res50/config.py
@@ -68,8 +68,8 @@ timedistributed_connect_pairs = [
         ("roi_align_mask_trt", "mrcnn_mask_conv1/convolution"), #roi_align_mask -> mrcnn_mask_conv1
         ("mrcnn_class_bn2/batchnorm/add_1","activation_35/Relu"), # mrcnn_class_bn2 -> active 69
         ("mrcnn_class_conv2/BiasAdd", "mrcnn_class_bn2/batchnorm/mul_1"), # mrcnn_class_conv2 -> mrcnn_class_bn2
-        ("activation_37/Relu", "mrcnn_class_conv2/convolution"), # active 68 -> mrcnn_class_conv2
-        ("mrcnn_class_bn1/batchnorm/add_1","activation_37/Relu"), # mrcnn_class_bn1 -> active 68
+        ("activation_34/Relu", "mrcnn_class_conv2/convolution"), # active 68 -> mrcnn_class_conv2
+        ("mrcnn_class_bn1/batchnorm/add_1","activation_34/Relu"), # mrcnn_class_bn1 -> active 68
         ("mrcnn_class_conv1/BiasAdd", "mrcnn_class_bn1/batchnorm/mul_1"), # mrcnn_class_conv1 -> mrcnn_class_bn1
         ("roi_align_classifier", "mrcnn_class_conv1/convolution"), # roi_align_classifier -> mrcnn_class_conv1
         ]

Then sampleUffMaskRCNN from https://github.com/NVIDIA/TensorRT/tree/master/samples/opensource/sampleUffMaskRCNN will work without problems, including the UFF parsing and the TensorRT engine build.
You need to modify mrcnn_config.h in “samples/opensource/sampleUffMaskRCNN/mrcnn_config.h”
and in the proposal layer plugin (https://github.com/NVIDIA/TensorRT/blob/master/plugin/proposalLayerPlugin/mrcnn_config.h), then rebuild a new libnvinfer_plugin.so:

diff --git a/mrcnn_config.h b/mrcnn_config.h
index aee9394..0ae70e5 100644
--- a/mrcnn_config.h
+++ b/mrcnn_config.h
@@ -23,7 +23,7 @@ using namespace nvinfer1;
 
 namespace MaskRCNNConfig
 {
-static const nvinfer1::DimsCHW IMAGE_SHAPE{3, 1024, 1024};
+static const nvinfer1::DimsCHW IMAGE_SHAPE{3, 256, 256};
 
 // Pooled ROIs
 static const int POOL_SIZE = 7;
@@ -57,7 +57,7 @@ static const int FPN_CLASSIF_FC_LAYERS_SIZE = 1024;
 static const int TOP_DOWN_PYRAMID_SIZE = 256;
 
 // Number of classification classes (including background)
-static const int NUM_CLASSES = 1 + 80; // COCO has 80 classes
+static const int NUM_CLASSES = 1 + 1; // COCO has 80 classes
 
 // Length of square anchor side in pixels
 static const std::vector<float> RPN_ANCHOR_SCALES = {32, 64, 128, 256, 512};
@@ -94,7 +94,7 @@ static const std::vector<std::string> CLASS_NAMES = {
     "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush",
 };
 
-static const std::string MODEL_NAME = "mrcnn_nchw.uff";
+static const std::string MODEL_NAME = "mask_rcnn_nucleus_0080.uff";
 static const std::string MODEL_INPUT = "input_image";
 static const DimsCHW MODEL_INPUT_SHAPE = IMAGE_SHAPE;
 static const std::vector<std::string> MODEL_OUTPUTS = {"mrcnn_detection", "mrcnn_mask/Sigmoid"};

Thank you so much for your patience and clear step-by-step instructions.

I modified my config.py to be the same as #27 and then regenerated the .uff file.

Following #27, I also modified the proposal layer plugin config (https://github.com/NVIDIA/TensorRT/blob/master/plugin/proposalLayerPlugin/mrcnn_config.h)
and “samples/opensource/sampleUffMaskRCNN/mrcnn_config.h”, changing the following three fields:

  1. IMAGE_SHAPE
  2. NUM_CLASSES
  3. MODEL_NAME

and then rebuilt libnvinfer_plugin.so and the sample_uff_maskRCNN binary.

When I run
…/build/out/sample_uff_maskRCNN -d data/
I get the following log:

&&&& RUNNING TensorRT.sample_maskrcnn # ../build/out/sample_uff_maskRCNN -d data/
[11/26/2019-16:02:21] [I] Building and running a GPU inference engine for Mask RCNN
[11/26/2019-16:02:24] [E] [TRT] mrcnn_mask_conv2/convolution: kernel weights has count 589824 but 2359296 was expected
[11/26/2019-16:02:24] [E] [TRT] mrcnn_mask_conv2/convolution: count of 589824 weights in kernel, but kernel dimensions (3,3) with 1024 input channels, 256 output channels and 1 groups were specified. Expected Weights count is 1024 * 3*3 * 256 / 1 = 2359296
[11/26/2019-16:02:24] [E] [TRT] UffParser: Parser error: mrcnn_mask_conv2/BiasAdd: The input to the Scale Layer is required to have a minimum of 3 dimensions.
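As a sanity check, the counts in that error message can be reproduced by hand: the weights stored in the .uff correspond to a 256-channel input (256·3·3·256 = 589,824), while the parser derives 1024 input channels from the graph wiring (1024·3·3·256 = 2,359,296), so the graph and the trained weights disagree about the input channels of mrcnn_mask_conv2. A tiny arithmetic sketch:

# Reproducing the counts from the parser error (pure arithmetic, no TensorRT needed).
kernel, out_ch = 3, 256
actual   = 256 * kernel * kernel * out_ch    # weights in the .uff: 256 input channels
expected = 1024 * kernel * kernel * out_ch   # what the parser derived from the graph
print(actual, expected)                      # 589824 2359296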

Did I miss something?
Again, I appreciate your help. Merry Christmas!

I think the change below does not work; the network input size is still 1024x1024. Let me check how to change the input size.

IMAGE_MIN_DIM = 256  # 512
IMAGE_MAX_DIM = 256  # 512

Did you try your h5 model with Mask_RCNN/samples/demo.ipynb? I remember I did not add the above change to CocoConfig and ran your h5 model with demo.ipynb successfully.
Please note: don’t apply the patch “0001-Update-the-Mask_RCNN-model-from-NHWC-to-NCHW.patch” when you run demo.ipynb.

With your h5 model, I can convert it to uff and run sampleUffMaskRCNN (1024x1024 input size) successfully. (Refer to https://github.com/NVIDIA/TensorRT/issues/240)

My h5 model can be converted to .uff successfully by following #22 and #27. But for the next step, which is to run sampleUffMaskRCNN (rebuilt with the #27 changes, i.e. modifying “samples/opensource/sampleUffMaskRCNN/mrcnn_config.h” and the proposal layer plugin’s https://github.com/NVIDIA/TensorRT/blob/master/plugin/proposalLayerPlugin/mrcnn_config.h, then rebuilding a new libnvinfer_plugin.so),

I get the error shown in #28.

Would you mind sharing the .uff you converted from my h5 model, so I can check whether I am producing a proper .uff?

My h5 model uses pre-trained ImageNet weights.

Again thank you for your time!

Summary:
How do I deploy a Mask R-CNN model (https://github.com/matterport/Mask_RCNN) with a resnet50 backbone and a changed number of classes in the h5 model?

tensorflow-gpu: 1.9.0
Keras: 2.1.3
cuda-9.0
cudnn 7.0
python 2.7
H5 model: mask_rcnn_nucleus_0080.h5 (shared via Google Drive), referred to below as “mask_rcnn_coco_restnet50.h5”

1. Check by samples/demo.ipynb
a. Code change

diff --git a/mrcnn/config.py b/mrcnn/config.py
--- a/mrcnn/config.py
+++ b/mrcnn/config.py
@@ -52,7 +52,7 @@ class Config(object):
     # You can also provide a callable that should have the signature
     # of model.resnet_graph. If you do so, you need to supply a callable
     # to COMPUTE_BACKBONE_SHAPE as well
-    BACKBONE = "resnet101"
+    BACKBONE = "resnet50"
 
     # Only useful if you supply a callable to BACKBONE. Should compute
     # the shape of each layer of the FPN Pyramid.

diff --git a/samples/coco_config/coco_config.py b/samples/coco_config/coco_config.py
--- a/samples/coco_config/coco_config.py
+++ b/samples/coco_config/coco_config.py
@@ -49,7 +49,7 @@ from mrcnn.config import Config
 from mrcnn import model as modellib, utils
 
 # Path to trained weights file
-COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
+COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco_restnet50.h5")
 
 # Directory to save logs and model checkpoints, if not provided
 # through the command line argument --logs
@@ -77,5 +77,5 @@ class CocoConfig(Config):
     # GPU_COUNT = 8
 
     # Number of classes (including background)
-    NUM_CLASSES = 1 + 80  # COCO has 80 classes
+    NUM_CLASSES = 1 + 1  # COCO has 1 classes

b. Note: Don’t apply the patch “0001-Update-the-Mask_RCNN-model-from-NHWC-to-NCHW.patch”

2. Convert H5 to uff
a. Refer to https://github.com/NVIDIA/TensorRT/tree/master/samples/opensource/sampleUffMaskRCNN
b. Use a different config.py:

# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import graphsurgeon as gs
import tensorflow as tf

fpn_p5upsampled = gs.create_plugin_node("fpn_p5upsampled", op="ResizeNearest_TRT", dtype=tf.float32, scale=2.0)
fpn_p4upsampled = gs.create_plugin_node("fpn_p4upsampled", op="ResizeNearest_TRT", dtype=tf.float32, scale=2.0)
fpn_p3upsampled = gs.create_plugin_node("fpn_p3upsampled", op="ResizeNearest_TRT", dtype=tf.float32, scale=2.0)

roi = gs.create_plugin_node("ROI", op="ProposalLayer_TRT", prenms_topk=1024, keep_topk=1000, iou_threshold=0.7)
roi_align_classifier = gs.create_plugin_node("roi_align_classifier", op="PyramidROIAlign_TRT", pooled_size=7)
mrcnn_detection = gs.create_plugin_node("mrcnn_detection", op="DetectionLayer_TRT", num_classes=81, keep_topk=100, score_threshold=0.7, iou_threshold=0.3)
roi_align_mask = gs.create_plugin_node("roi_align_mask_trt", op="PyramidROIAlign_TRT", pooled_size=14)
mrcnn_detection_bboxes = gs.create_plugin_node("mrcnn_detection_bboxes", op="SpecialSlice_TRT")

namespace_plugin_map = {
    "fpn_p5upsampled": fpn_p5upsampled,
    "fpn_p4upsampled": fpn_p4upsampled,
    "fpn_p3upsampled": fpn_p3upsampled,
    "roi_align_classifier": roi_align_classifier,
    "mrcnn_detection": mrcnn_detection,
    "ROI": roi,
    "roi_align_mask": roi_align_mask,
    "lambda_1": mrcnn_detection_bboxes,
}

timedistributed_remove_list = [
        "mrcnn_class_conv1/Reshape/shape", "mrcnn_class_conv1/Reshape", "mrcnn_class_conv1/Reshape_1/shape", "mrcnn_class_conv1/Reshape_1",
        "mrcnn_class_bn1/Reshape/shape", "mrcnn_class_bn1/Reshape", "mrcnn_class_bn1/Reshape_5/shape", "mrcnn_class_bn1/Reshape_5",
        "mrcnn_class_conv2/Reshape/shape", "mrcnn_class_conv2/Reshape", "mrcnn_class_conv2/Reshape_1/shape", "mrcnn_class_conv2/Reshape_1",
        "mrcnn_class_bn2/Reshape/shape", "mrcnn_class_bn2/Reshape", "mrcnn_class_bn2/Reshape_5/shape", "mrcnn_class_bn2/Reshape_5",
        "mrcnn_class_logits/Reshape/shape", "mrcnn_class_logits/Reshape","mrcnn_class_logits/Reshape_1/shape", "mrcnn_class_logits/Reshape_1",
        "mrcnn_class/Reshape/shape", "mrcnn_class/Reshape","mrcnn_class/Reshape_1/shape", "mrcnn_class/Reshape_1",
        "mrcnn_bbox_fc/Reshape/shape", "mrcnn_bbox_fc/Reshape","mrcnn_bbox_fc/Reshape_1/shape", "mrcnn_bbox_fc/Reshape_1",

        "mrcnn_mask_conv1/Reshape/shape", "mrcnn_mask_conv1/Reshape", "mrcnn_mask_conv1/Reshape_1/shape", "mrcnn_mask_conv1/Reshape_1",
        "mrcnn_mask_bn1/Reshape/shape", "mrcnn_mask_bn1/Reshape", "mrcnn_mask_bn1/Reshape_5/shape", "mrcnn_mask_bn1/Reshape_5",
        "mrcnn_mask_conv2/Reshape/shape", "mrcnn_mask_conv2/Reshape", "mrcnn_mask_conv2/Reshape_1/shape", "mrcnn_mask_conv2/Reshape_1",
        "mrcnn_mask_bn2/Reshape/shape", "mrcnn_mask_bn2/Reshape", "mrcnn_mask_bn2/Reshape_5/shape", "mrcnn_mask_bn2/Reshape_5",
        "mrcnn_mask_conv3/Reshape/shape", "mrcnn_mask_conv3/Reshape", "mrcnn_mask_conv3/Reshape_1/shape", "mrcnn_mask_conv3/Reshape_1",
        "mrcnn_mask_bn3/Reshape/shape", "mrcnn_mask_bn3/Reshape", "mrcnn_mask_bn3/Reshape_5/shape", "mrcnn_mask_bn3/Reshape_5",
        "mrcnn_mask_conv4/Reshape/shape", "mrcnn_mask_conv4/Reshape", "mrcnn_mask_conv4/Reshape_1/shape", "mrcnn_mask_conv4/Reshape_1",
        "mrcnn_mask_bn4/Reshape/shape", "mrcnn_mask_bn4/Reshape", "mrcnn_mask_bn4/Reshape_5/shape", "mrcnn_mask_bn4/Reshape_5",
        "mrcnn_mask_deconv/Reshape/shape", "mrcnn_mask_deconv/Reshape", "mrcnn_mask_deconv/Reshape_1/shape", "mrcnn_mask_deconv/Reshape_1",
        "mrcnn_mask/Reshape/shape", "mrcnn_mask/Reshape", "mrcnn_mask/Reshape_1/shape", "mrcnn_mask/Reshape_1",
        ]

timedistributed_connect_pairs = [
        ("mrcnn_mask_deconv/Relu", "mrcnn_mask/convolution"), # mrcnn_mask_deconv -> mrcnn_mask
        ("activation_40/Relu", "mrcnn_mask_deconv/conv2d_transpose"), #active74 -> mrcnn_mask_deconv
        ("mrcnn_mask_bn4/batchnorm/add_1","activation_40/Relu"),  # mrcnn_mask_bn4 -> active74
        ("mrcnn_mask_conv4/BiasAdd", "mrcnn_mask_bn4/batchnorm/mul_1"), #mrcnn_mask_conv4 -> mrcnn_mask_bn4
        ("activation_39/Relu", "mrcnn_mask_conv4/convolution"), #active73 -> mrcnn_mask_conv4
        ("mrcnn_mask_bn3/batchnorm/add_1","activation_39/Relu"), #mrcnn_mask_bn3 -> active73
        ("mrcnn_mask_conv3/BiasAdd", "mrcnn_mask_bn3/batchnorm/mul_1"), #mrcnn_mask_conv3 -> mrcnn_mask_bn3
        ("activation_38/Relu", "mrcnn_mask_conv3/convolution"), #active72 -> mrcnn_mask_conv3
        ("mrcnn_mask_bn2/batchnorm/add_1","activation_38/Relu"), #mrcnn_mask_bn2 -> active72
        ("mrcnn_mask_conv2/BiasAdd", "mrcnn_mask_bn2/batchnorm/mul_1"), #mrcnn_mask_conv2 -> mrcnn_mask_bn2
        ("activation_37/Relu", "mrcnn_mask_conv2/convolution"), #active71 -> mrcnn_mask_conv2
        ("mrcnn_mask_bn1/batchnorm/add_1","activation_37/Relu"), #mrcnn_mask_bn1 -> active71
        ("mrcnn_mask_conv1/BiasAdd", "mrcnn_mask_bn1/batchnorm/mul_1"), #mrcnn_mask_conv1 -> mrcnn_mask_bn1
        ("roi_align_mask_trt", "mrcnn_mask_conv1/convolution"), #roi_align_mask -> mrcnn_mask_conv1

("mrcnn_class_bn2/batchnorm/add_1","activation_35/Relu"), # mrcnn_class_bn2 -> active 69
        ("mrcnn_class_conv2/BiasAdd", "mrcnn_class_bn2/batchnorm/mul_1"), # mrcnn_class_conv2 -> mrcnn_class_bn2
        ("activation_34/Relu", "mrcnn_class_conv2/convolution"), # active 68 -> mrcnn_class_conv2
        ("mrcnn_class_bn1/batchnorm/add_1","activation_34/Relu"), # mrcnn_class_bn1 -> active 68
        ("mrcnn_class_conv1/BiasAdd", "mrcnn_class_bn1/batchnorm/mul_1"), # mrcnn_class_conv1 -> mrcnn_class_bn1
        ("roi_align_classifier", "mrcnn_class_conv1/convolution"), # roi_align_classifier -> mrcnn_class_conv1
        ]

dense_compatible_patch =["pool_squeeze/Squeeze", "pool_squeeze/Squeeze_1", #No need to squeeze the dimensions for TRT Dense Layer
        "mrcnn_bbox/Shape", "mrcnn_bbox/strided_slice/stack", # mrcnn_bbox(Reshape): No need to reshape, cause we can process it as 1-D array in detectionlayer's kernel
        "mrcnn_bbox/strided_slice/stack_1", "mrcnn_bbox/strided_slice/stack_2",
        "mrcnn_bbox/strided_slice", "mrcnn_bbox/Reshape/shape/1",
        "mrcnn_bbox/Reshape/shape/2", "mrcnn_bbox/Reshape/shape/3",
        "mrcnn_bbox/Reshape/shape", "mrcnn_bbox/Reshape"]

dense_compatible_connect_pairs = [
        ("activation_35/Relu","mrcnn_bbox_fc/MatMul"), #activation_69 -> mrcnn_bbox_fc
        ("activation_35/Relu", "mrcnn_class_logits/MatMul"), #activation_69 -> mrcnn_class_logits
        ("mrcnn_class_logits/BiasAdd", "mrcnn_class/Softmax"), #mrcnn_class_logits -> mrcnn_class
        ("mrcnn_class/Softmax", "mrcnn_detection"), #mrcnn_class -> mrcnn_detection
        ("mrcnn_bbox_fc/BiasAdd", "mrcnn_detection"), #mrcnn_bbox_fc -> mrcnn_detection
        ]

def connect(dynamic_graph, connections_list):

    for node_a_name, node_b_name in connections_list:
        if node_a_name not in dynamic_graph.node_map[node_b_name].input:
            dynamic_graph.node_map[node_b_name].input.insert(0, node_a_name)

def preprocess(dynamic_graph):
    # Now create a new graph by collapsing namespaces
    dynamic_graph.collapse_namespaces(namespace_plugin_map, unique_inputs=True)
    dynamic_graph.remove(timedistributed_remove_list)
    dynamic_graph.remove(dense_compatible_patch)
    dynamic_graph.remove(['input_anchors', 'input_image_meta'])

    connect(dynamic_graph, timedistributed_connect_pairs)
    connect(dynamic_graph, dense_compatible_connect_pairs)
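For context, this config.py is the file passed to the converter via the -p flag; as far as I understand, uff.from_tensorflow_frozen_model() imports it and calls preprocess() on the GraphSurgeon DynamicGraph before doing the actual conversion. A minimal sketch of that call (paths are placeholders; the real invocation is in mrcnn_to_trt_single.py below):

import uff

uff.from_tensorflow_frozen_model(
    "temp.pb",                                               # frozen graph written by the script below
    output_nodes=["mrcnn_detection", "mrcnn_mask/Sigmoid"],
    preprocessor="./config.py",                              # the converter runs config.preprocess(dynamic_graph)
    output_filename="mask_rcnn_nucleus_0080.uff",
    text=True,
)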

Also use a different mrcnn_to_trt_single.py:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from keras.models import model_from_json, Model
from keras import backend as K
from keras.layers import Input, Lambda
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from mrcnn.model import *
import mrcnn.model as modellib
from mrcnn.config import Config
import sys
import os
ROOT_DIR = os.path.abspath("./")
LOG_DIR = os.path.join(ROOT_DIR, "logs")
import argparse
import os
import uff

def parse_command_line_arguments(args=None):
    parser = argparse.ArgumentParser(prog='keras_to_trt', description='Convert trained keras .hdf5 model to trt .uff')

    parser.add_argument(
        '-w',
        '--weights',
        type=str,
        default=None,
        required=True,
        help="The checkpoint weights file of keras model."
    )

    parser.add_argument(
        '-o',
        '--output_file',
        type=str,
        default=None,
        required=True,
        help="The path to output .uff file."
    )

    parser.add_argument(
        '-l',
        '--list-nodes',
        action='store_true',
        help="show list of nodes contained in converted pb"
    )

    parser.add_argument(
        '-p',
        '--preprocessor',
        type=str,
        default=False,
        help="The preprocess function for converting tf node to trt plugin"
    )

    return parser.parse_args(args)

class CocoConfig(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    # Give the configuration a recognizable name
    NAME = "coco"

    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2

    # Uncomment to train on 8 GPUs (default is 1)
    # GPU_COUNT = 8

    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # COCO has 1 classes
    
    BACKBONE = 'resnet50'

class InferenceConfig(CocoConfig):
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    BACKBONE = 'resnet50' # added ResNet50

def main(args=None):

    K.set_image_data_format('channels_first')
    K.set_learning_phase(0)

    args = parse_command_line_arguments(args)

    model_weights_path = args.weights
    output_file_path = args.output_file
    list_nodes = args.list_nodes

    config = InferenceConfig()
    config.display()

    model = modellib.MaskRCNN(mode="inference", model_dir=LOG_DIR, config=config).keras_model

    model.load_weights(model_weights_path, by_name=True)

    model_A = Model(inputs=model.input, outputs=model.get_layer('mrcnn_mask').output)
    model_A.summary()

    output_nodes = ['mrcnn_detection', "mrcnn_mask/Sigmoid"]
    convert_model(model_A, output_file_path, output_nodes, preprocessor=args.preprocessor,
                  text=True, list_nodes=list_nodes)

def convert_model(inference_model, output_path, output_nodes=[], preprocessor=None, text=False,
                  list_nodes=False):
    # convert the keras model to pb
    orig_output_node_names = [node.op.name for node in inference_model.outputs]
    print("The output names of tensorflow graph nodes: {}".format(str(orig_output_node_names)))

    sess = K.get_session()

    constant_graph = graph_util.convert_variables_to_constants(
        sess,
        sess.graph.as_graph_def(),
        orig_output_node_names)

    temp_pb_path = "~/temp.pb"
    graph_io.write_graph(constant_graph, os.path.dirname(temp_pb_path), os.path.basename(temp_pb_path),
                         as_text=False)

    predefined_output_nodes = output_nodes
    if predefined_output_nodes != []:
        trt_output_nodes = predefined_output_nodes
    else:
        trt_output_nodes = orig_output_node_names

    # convert .pb to .uff
    uff.from_tensorflow_frozen_model(
        temp_pb_path,
        output_nodes=trt_output_nodes,
        preprocessor=preprocessor,
        text=text,
        list_nodes=list_nodes,
        output_filename=output_path,
        debug_mode = False
    )

    os.remove(temp_pb_path)

if __name__ == "__main__":
    main()

c. $ python mrcnn_to_trt_single.py -w /path/to/data/mask_rcnn_coco_restnet50.h5 -o /path/to/data/mrcnn_nchw_resnet50.uff -p ./config.py

3. Run by https://github.com/NVIDIA/TensorRT/tree/master/samples/opensource/sampleUffMaskRCNN
a. Code change

--- a/mrcnn_config.h
+++ b/mrcnn_config.h
@@ -57,7 +57,7 @@ static const int FPN_CLASSIF_FC_LAYERS_SIZE = 1024;
 static const int TOP_DOWN_PYRAMID_SIZE = 256;
 
 // Number of classification classes (including background)
-static const int NUM_CLASSES = 1 + 80; // COCO has 80 classes
+static const int NUM_CLASSES = 1 + 1; // COCO has 1 classes
 
 // Length of square anchor side in pixels
 static const std::vector<float> RPN_ANCHOR_SCALES = {32, 64, 128, 256, 512};
@@ -94,7 +94,7 @@ static const std::vector<std::string> CLASS_NAMES = {
     "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush",
 };
 
-static const std::string MODEL_NAME = "mrcnn_nchw.uff";
+static const std::string MODEL_NAME = "mrcnn_nchw_resnet50.uff";

b. Run the sample

4. Deepstream deploy
https://github.com/NVIDIA-AI-IOT/deepstream_4.x_apps
Users can implement the mask OSD by referring to the dsexample plugin.

I can successfully run steps 1 and 2 but still get an error at step 3. What TensorRT version are you using?

You can follow https://github.com/NVIDIA/TensorRT and first run the default https://github.com/NVIDIA/TensorRT/tree/master/samples/opensource/sampleUffMaskRCNN.
My branch version is release/5.1.

Thank you so much for your patience. I have one more question: I should keep TensorRT and TensorRT OSS at the same version, right? If that is the case, it seems TensorRT OSS release/5.1 (while using TensorRT 5.1.5.0 GA for CentOS/RedHat 7) does not have sampleUffMaskRCNN yet. Did I misunderstand something?

Again thank you for the help!

By the way, I have successfully run https://github.com/NVIDIA/TensorRT/tree/master/samples/opensource/sampleUffMaskRCNN on Ubuntu 18.04 with TensorRT/OSS 6.0, and I get the expected results described in that sample’s introduction.

Could all the issues be coming from the OS difference? I’m just naively guessing.

So excited to see you talking about sampleUffMaskRCNN; I am doing the same job. I used my own dataset to train Mask R-CNN and got the h5 file. I have finished everything, but the TensorRT inference result is different from the Keras result. I compared the RPN outputs: NHWC-format Keras and TensorRT differ, but NCHW-format Keras matches TensorRT. So the accuracy is not as good in TensorRT; only part of the objects show up. Any suggestions? @ChrisDing @tymiao1220
TensorRT 6
keras=2.1.3
tensorflow=1.12.0

Hi 991185721,

Please open a new topic for your issue. Thanks

Hi ChrisDing,

I am finally able to create ‘myown.engine’; thanks for all the guidance.
May I ask one more question?
When converting to .uff, I set DETECTION_MAX_INSTANCES to 4000 to detect more instances (the same value used to train the model):

./mrcnn2uff.py

# (imports and parse_command_line_arguments() are the same as in mrcnn_to_trt_single.py above)
from mrcnn.config import Config
class CocoConfig(Config):
    NAME = 'nucleus'
    DETECTION_MAX_INSTANCES = 4000
class InferenceConfig(CocoConfig):
    IMAGE_RESIZE_MODE = "pad64"
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
def main(args=None):
    K.set_image_data_format('channels_first')
    K.set_learning_phase(0)

    args = parse_command_line_arguments(args)

    model_weights_path = args.weights
    output_file_path = args.output_file
    list_nodes = args.list_nodes

    config = InferenceConfig()
    config.display()

    model = modellib.MaskRCNN(mode="inference", model_dir=LOG_DIR, config=config).keras_model

    model.load_weights(model_weights_path, by_name=True)


    model_A = Model(inputs=model.input, outputs=model.get_layer('mrcnn_mask').output)
    model_A.summary()

    output_nodes = ['mrcnn_detection', "mrcnn_mask/Sigmoid"]
    convert_model(model_A, output_file_path, output_nodes, preprocessor=args.preprocessor,
                  text=True, list_nodes=list_nodes) 
def convert_model(inference_model, output_path, output_nodes=[], preprocessor=None, text=False,
                  list_nodes=False):
    orig_output_node_names = [node.op.name for node in inference_model.outputs]
    print("The output names of tensorflow graph nodes: {}".format(str(orig_output_node_names)))

    sess = K.get_session()

    constant_graph = graph_util.convert_variables_to_constants(
        sess,
        sess.graph.as_graph_def(),
        orig_output_node_names)
    temp_pb_path = 'nucleus_temp.pb'
    graph_io.write_graph(constant_graph, os.path.dirname(temp_pb_path), os.path.basename(temp_pb_path),
                         as_text=False)
    predefined_output_nodes = output_nodes
    if predefined_output_nodes != []:
        trt_output_nodes = predefined_output_nodes
    else:
        trt_output_nodes = orig_output_node_names

    # convert .pb to .uff
    uff.from_tensorflow_frozen_model(
        temp_pb_path,
        output_nodes=trt_output_nodes,
        preprocessor=preprocessor,
        text=text,
        list_nodes=list_nodes,
        output_filename=output_path,
        debug_mode = False
    )
    os.remove(temp_pb_path)

But when I build the engine and check the shapes of the bindings,

for binding in engine:
    print(engine.get_binding_shape(binding))

the mrcnn_detection binding shape is still (100, 6) instead of the expected (4000, 6),
and the mrcnn_mask/Sigmoid shape is likewise still (100, 2, 28, 28) instead of the expected (4000, 2, 28, 28).

Is there something else I need to change?

Never mind, I forgot to change keep_topk in the config file.
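For anyone hitting the same thing: the relevant field is presumably the keep_topk of the DetectionLayer_TRT node in config.py, which still had the sample’s default of 100; that default is where the (100, 6) binding shape came from. A sketch of the change (all other arguments left as in the config.py above):

import graphsurgeon as gs

# keep_topk should match DETECTION_MAX_INSTANCES (4000 here) instead of the sample's 100.
mrcnn_detection = gs.create_plugin_node(
    "mrcnn_detection", op="DetectionLayer_TRT",
    num_classes=81, keep_topk=4000,          # keep_topk: 100 -> 4000
    score_threshold=0.7, iou_threshold=0.3)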

Hi @tymiao1220,

Have you managed to run inference with TensorRT? I would like to have some feedback, as I’m stuck on this problem: converting Mask R-CNN (any of its many implementations) to a TensorRT engine.

Thank you a lot.

Is the UFF-to-engine method still recommended? If yes, can you please guide me on how to get from a UFF file to a TensorRT engine? Thanks 😊
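For reference, here is a minimal Python sketch of the UFF-to-engine step, roughly what sampleUffMaskRCNN does in C++. It assumes the TensorRT 6/7 Python bindings plus the rebuilt libnvinfer_plugin.so containing the MaskRCNN plugins; the input/output names and the 3x1024x1024 shape come from mrcnn_config.h above. Treat it as an outline, not a drop-in script:

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt.init_libnvinfer_plugins(TRT_LOGGER, "")   # make ProposalLayer_TRT etc. visible to the parser

def build_engine(uff_path, engine_path):
    with trt.Builder(TRT_LOGGER) as builder, \
         builder.create_network() as network, \
         trt.UffParser() as parser:
        # Names and shape as in mrcnn_config.h (MODEL_INPUT / MODEL_OUTPUTS / IMAGE_SHAPE).
        parser.register_input("input_image", (3, 1024, 1024))
        parser.register_output("mrcnn_detection")
        parser.register_output("mrcnn_mask/Sigmoid")
        if not parser.parse(uff_path, network):
            raise RuntimeError("UFF parsing failed")

        builder.max_batch_size = 1
        builder.max_workspace_size = 1 << 30   # 1 GiB; adjust for your GPU
        engine = builder.build_cuda_engine(network)
        with open(engine_path, "wb") as f:
            f.write(engine.serialize())
        return engine

# build_engine("mrcnn_nchw_resnet50.uff", "mrcnn_nchw_resnet50.engine")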