Error parsing the detection_out layer

Hello,
I have a question: will TensorRT delete the Concat layers in my Caffe model or not?
When I call createSSDDetectionOutputPlugin to parse my prototxt file, it always fails with the following error:

sample_face_detection: nvPluginsLegacy.cpp:1068: virtual nvinfer1::Dims nvinfer1::plugin::ConcatLegacy::getOutputDimensions(int, const nvinfer1::Dims*, int): Assertion `nbInputDims >= 1' failed.
Aborted (core dumped)
I suspect that the bottom layers feeding this layer disappear after TensorRT's optimization, since they are all of type Concat.
Is my suspicion correct? If not, what can I do to solve this problem?

This is my definition of the detection_out layer:
layer {
  name: "detection_out"
  type: "DetectionOutput"
  bottom: "mbox_loc"      <u><b>-- type:Concat</u></b>
  bottom: "mbox_conf"     <u><b>-- type:Concat</u></b>
  bottom: "mbox_priorbox" <u><b>-- type:Concat</u></b>
  top: "detection_out"
  include {
    phase: TEST
  }
  detection_output_param {
    num_classes: 2
    share_location: true
    background_label_id: 0
    nms_param {
      nms_threshold: 0.45
      top_k: 100
    }
    code_type: CENTER_SIZE
    keep_top_k: 100
    confidence_threshold: 0.5
  }
}
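
For reference, this is how I map those detection_output_param values onto the createSSDDetectionOutputPlugin call (a sketch of my calling code; the field names come from DetectionOutputParameters in TensorRT 5's NvInferPlugin.h):

#include "NvInferPlugin.h"

// Sketch: fill DetectionOutputParameters from the detection_output_param
// block above (TensorRT 5.x legacy plugin API).
nvinfer1::plugin::DetectionOutputParameters params{};
params.shareLocation           = true;   // share_location: true
params.varianceEncodedInTarget = false;
params.backgroundLabelId       = 0;      // background_label_id: 0
params.numClasses              = 2;      // num_classes: 2
params.topK                    = 100;    // nms_param.top_k: 100
params.keepTopK                = 100;    // keep_top_k: 100
params.confidenceThreshold     = 0.5f;   // confidence_threshold: 0.5
params.nmsThreshold            = 0.45f;  // nms_threshold: 0.45
params.codeType = nvinfer1::plugin::CodeTypeSSD::CENTER_SIZE;
// Input order of the three bottoms: mbox_loc, mbox_conf, mbox_priorbox.
params.inputOrder[0] = 0;
params.inputOrder[1] = 1;
params.inputOrder[2] = 2;
params.confSigmoid  = false;
params.isNormalized = true;

nvinfer1::plugin::INvPlugin* detectionOut =
    nvinfer1::plugin::createSSDDetectionOutputPlugin(params);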

Hello,

To help us debug, can you share a small repro containing the source and model/protobuf that demonstrates the error you are seeing?

Regards,
NVIDIA Enterprise Support

The error happens in the Concat plugin layer, and at that point the network has not yet gone through graph optimization, so TensorRT has not removed anything.
TensorRT 5.0 supports Concat along any axis (except the batch axis), so you no longer need to register Concat as a plugin layer; just leave it as-is (a sketch of the matching plugin-factory logic follows the layer below):

layer {
  name: "mbox_loc"
  type: "Concat"
  bottom: "conv4_3_norm_mbox_loc_flat"
  bottom: "fc7_mbox_loc_flat"
  bottom: "conv6_2_mbox_loc_flat"
  bottom: "conv7_2_mbox_loc_flat"
  bottom: "conv8_2_mbox_loc_flat"
  bottom: "conv9_2_mbox_loc_flat"
  top: "mbox_loc"
  concat_param {
    axis: 1
  }
}
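
In practice that means your plugin factory should return false for the Concat layers, so the parser builds TensorRT's native Concat, and claim only the genuinely unsupported layers (your PriorBox "IPlugin" layers and detection_out). A rough sketch, assuming the layer names from your prototxt:

#include "NvCaffeParser.h"
#include "NvInfer.h"
#include <cstring>

// Rough sketch: claim only PriorBox and DetectionOutput; every Concat
// (e.g. "mbox_loc") must return false so TRT 5 uses its native Concat.
class PluginFactory : public nvcaffeparser1::IPluginFactoryExt
{
public:
    bool isPlugin(const char* name) override { return isPluginExt(name); }

    bool isPluginExt(const char* name) override
    {
        return std::strstr(name, "priorbox") != nullptr
            || std::strcmp(name, "detection_out") == 0;
    }

    nvinfer1::IPlugin* createPlugin(const char* name,
                                    const nvinfer1::Weights* /*weights*/,
                                    int /*nbWeights*/) override
    {
        // Actual PriorBox/DetectionOutput creation elided; sampleSSD shows
        // a complete factory using the nvinfer1::plugin creator functions.
        return nullptr; // placeholder in this sketch
    }
};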

Hello,
I created the Concat layers as you suggested, but the problem above remains unsolved. The error log shows that my Concat layer's output dimensions are zero. What can I do to solve it?

#0  0x0000007fafa1a4d8 in __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:51
#1  0x0000007fafa1b8b4 in __GI_abort () at abort.c:79
#2  0x0000007fafa13b44 in __assert_fail_base (fmt=0x7fafb0f0c0 "%s%s%s:%u: %s%sAssertion `%s' failed.\n%n", assertion=assertion@entry=0x7fafe49a00 "nbInputDims >= 1", file=file@entry=0x7fafe49148 "nvPluginsLegacy.cpp", line=line@entry=1068, function=function@entry=0x7fafe487a0 "virtual nvinfer1::Dims nvinfer1::plugin::ConcatLegacy::getOutputDimensions(int, const nvinfer1::Dims*, int)") at assert.c:92
#3  0x0000007fafa13bc4 in __GI___assert_fail (assertion=0x7fafe49a00 "nbInputDims >= 1", file=0x7fafe49148 "nvPluginsLegacy.cpp", line=1068, function=0x7fafe487a0 "virtual nvinfer1::Dims nvinfer1::plugin::ConcatLegacy::getOutputDimensions(int, const nvinfer1::Dims*, int)") at assert.c:101
#4  0x0000007fafe27f30 in nvinfer1::plugin::PermuteLegacy::serialize(void*) ()
    at /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.5
#5  0x0000007fb057e954 in nvinfer1::Network::addPluginExt(nvinfer1::ITensor* const*, int, nvinfer1::IPluginExt&) () at /usr/lib/aarch64-linux-gnu/libnvinfer.so.5
#6  0x0000007fb00b9924 in  () at /usr/lib/aarch64-linux-gnu/libnvparsers.so.5
#7  0x00000055555634b4 in caffeToGIEModel(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&, unsigned int, nvcaffeparser1::IPluginFactoryExt*, nvinfer1::IHostMemory*&) ()
#8  0x0000005555568578 in main ()

This is my prototxt file:

name: "FaceBoxes"
input: "data"
input_shape {
  dim: 1
  dim: 3
  dim: 1024
  dim: 1024
}

#conv1
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  convolution_param {
    num_output: 24
    pad: 0
    kernel_size: 7
    stride: 4
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv1/bn"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv1/neg"
  type: "Power"
  bottom: "conv1"
  top: "conv1/neg"
  power_param {
    power: 1
    scale: -1.0
    shift: 0
  }
}

layer {
  name: "conv1/concat"
  type: "Concat"
  bottom: "conv1"
  bottom: "conv1/neg"
  top: "conv1_CR"
}

layer {
  name: "conv1/scale"
  type: "Scale"
  bottom: "conv1_CR"
  top: "conv1_CR"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv1/relu"
  type: "ReLU"
  bottom: "conv1_CR"
  top: "conv1_CR"
}

layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1_CR"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
#conv2
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_size: 5
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv2/bn"
  type: "BatchNorm"
  bottom: "conv2"
  top: "conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv2/neg"
  type: "Power"
  bottom: "conv2"
  top: "conv2/neg"
  power_param {
    power: 1
    scale: -1.0
    shift: 0
  }
}

layer {
  name: "conv2/concat"
  type: "Concat"
  bottom: "conv2"
  bottom: "conv2/neg"
  top: "conv2_CR"
}

layer {
  name: "conv2/scale"
  type: "Scale"
  bottom: "conv2_CR"
  top: "conv2_CR"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv2/relu"
  type: "ReLU"
  bottom: "conv2_CR"
  top: "conv2_CR"
}

layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2_CR"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
#Inception1
layer {
  name: "conv3/incep0/conv"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv3/incep0/bn"
  type: "BatchNorm"
  bottom: "conv3/incep0/conv"
  top: "conv3/incep0/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep0/bn_scale"
  type: "Scale"
  bottom: "conv3/incep0/conv"
  top: "conv3/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep0/relu"
  type: "ReLU"
  bottom: "conv3/incep0/conv"
  top: "conv3/incep0/conv"
}

layer {
  name: "conv3/incep1/conv1"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv3/incep1/bn1"
  type: "BatchNorm"
  bottom: "conv3/incep1/conv1"
  top: "conv3/incep1/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep1/bn_scale1"
  type: "Scale"
  bottom: "conv3/incep1/conv1"
  top: "conv3/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep1/relu1"
  type: "ReLU"
  bottom: "conv3/incep1/conv1"
  top: "conv3/incep1/conv1"
}

layer {
  name: "conv3/incep1/conv2"
  type: "Convolution"
  bottom: "conv3/incep1/conv1"
  top: "conv3/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv3/incep1/bn2"
  type: "BatchNorm"
  bottom: "conv3/incep1/conv2"
  top: "conv3/incep1/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep1/bn_scale2"
  type: "Scale"
  bottom: "conv3/incep1/conv2"
  top: "conv3/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep1/relu2"
  type: "ReLU"
  bottom: "conv3/incep1/conv2"
  top: "conv3/incep1/conv2"
}

layer {
  name: "conv3/incep2/conv1"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv3/incep2/bn1"
  type: "BatchNorm"
  bottom: "conv3/incep2/conv1"
  top: "conv3/incep2/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep2/bn_scale1"
  type: "Scale"
  bottom: "conv3/incep2/conv1"
  top: "conv3/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep2/relu1"
  type: "ReLU"
  bottom: "conv3/incep2/conv1"
  top: "conv3/incep2/conv1"
}

layer {
  name: "conv3/incep2/conv2"
  type: "Convolution"
  bottom: "conv3/incep2/conv1"
  top: "conv3/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv3/incep2/bn2"
  type: "BatchNorm"
  bottom: "conv3/incep2/conv2"
  top: "conv3/incep2/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep2/bn_scale2"
  type: "Scale"
  bottom: "conv3/incep2/conv2"
  top: "conv3/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep2/relu2"
  type: "ReLU"
  bottom: "conv3/incep2/conv2"
  top: "conv3/incep2/conv2"
}

layer {
  name: "conv3/incep2/conv3"
  type: "Convolution"
  bottom: "conv3/incep2/conv2"
  top: "conv3/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv3/incep2/bn3"
  type: "BatchNorm"
  bottom: "conv3/incep2/conv3"
  top: "conv3/incep2/conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep2/bn_scale3"
  type: "Scale"
  bottom: "conv3/incep2/conv3"
  top: "conv3/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep2/relu3"
  type: "ReLU"
  bottom: "conv3/incep2/conv3"
  top: "conv3/incep2/conv3"
}

layer {
  name: "conv3/incep3/pool"
  type: "Pooling"
  bottom: "pool2"
  top: "conv3/incep3/pool"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 1
    pad: 1
  }
}

layer {
  name: "conv3/incep3/conv"
  type: "Convolution"
  bottom: "conv3/incep3/pool"
  top: "conv3/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv3/incep3/bn"
  type: "BatchNorm"
  bottom: "conv3/incep3/conv"
  top: "conv3/incep3/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep3/bn_scale"
  type: "Scale"
  bottom: "conv3/incep3/conv"
  top: "conv3/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep3/relu"
  type: "ReLU"
  bottom: "conv3/incep3/conv"
  top: "conv3/incep3/conv"
}

layer {
  name: "conv3/incep"
  type: "Concat"
  bottom: "conv3/incep0/conv"
  bottom: "conv3/incep1/conv2"
  bottom: "conv3/incep2/conv3"
  bottom: "conv3/incep3/conv"
  top: "conv3/incep"
}
#Inception2
layer {
  name: "conv4/incep0/conv"
  type: "Convolution"
  bottom: "conv3/incep"
  top: "conv4/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv4/incep0/bn"
  type: "BatchNorm"
  bottom: "conv4/incep0/conv"
  top: "conv4/incep0/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep0/bn_scale"
  type: "Scale"
  bottom: "conv4/incep0/conv"
  top: "conv4/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep0/relu"
  type: "ReLU"
  bottom: "conv4/incep0/conv"
  top: "conv4/incep0/conv"
}

layer {
  name: "conv4/incep1/conv1"
  type: "Convolution"
  bottom: "conv3/incep"
  top: "conv4/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv4/incep1/bn1"
  type: "BatchNorm"
  bottom: "conv4/incep1/conv1"
  top: "conv4/incep1/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep1/bn_scale1"
  type: "Scale"
  bottom: "conv4/incep1/conv1"
  top: "conv4/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep1/relu1"
  type: "ReLU"
  bottom: "conv4/incep1/conv1"
  top: "conv4/incep1/conv1"
}

layer {
  name: "conv4/incep1/conv2"
  type: "Convolution"
  bottom: "conv4/incep1/conv1"
  top: "conv4/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv4/incep1/bn2"
  type: "BatchNorm"
  bottom: "conv4/incep1/conv2"
  top: "conv4/incep1/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep1/bn_scale2"
  type: "Scale"
  bottom: "conv4/incep1/conv2"
  top: "conv4/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep1/relu2"
  type: "ReLU"
  bottom: "conv4/incep1/conv2"
  top: "conv4/incep1/conv2"
}

layer {
  name: "conv4/incep2/conv1"
  type: "Convolution"
  bottom: "conv3/incep"
  top: "conv4/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv4/incep2/bn1"
  type: "BatchNorm"
  bottom: "conv4/incep2/conv1"
  top: "conv4/incep2/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep2/bn_scale1"
  type: "Scale"
  bottom: "conv4/incep2/conv1"
  top: "conv4/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep2/relu1"
  type: "ReLU"
  bottom: "conv4/incep2/conv1"
  top: "conv4/incep2/conv1"
}

layer {
  name: "conv4/incep2/conv2"
  type: "Convolution"
  bottom: "conv4/incep2/conv1"
  top: "conv4/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv4/incep2/bn2"
  type: "BatchNorm"
  bottom: "conv4/incep2/conv2"
  top: "conv4/incep2/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep2/bn_scale2"
  type: "Scale"
  bottom: "conv4/incep2/conv2"
  top: "conv4/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep2/relu2"
  type: "ReLU"
  bottom: "conv4/incep2/conv2"
  top: "conv4/incep2/conv2"
}

layer {
  name: "conv4/incep2/conv3"
  type: "Convolution"
  bottom: "conv4/incep2/conv2"
  top: "conv4/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv4/incep2/bn3"
  type: "BatchNorm"
  bottom: "conv4/incep2/conv3"
  top: "conv4/incep2/conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep2/bn_scale3"
  type: "Scale"
  bottom: "conv4/incep2/conv3"
  top: "conv4/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep2/relu3"
  type: "ReLU"
  bottom: "conv4/incep2/conv3"
  top: "conv4/incep2/conv3"
}

layer {
  name: "conv4/incep3/pool"
  type: "Pooling"
  bottom: "conv3/incep"
  top: "conv4/incep3/pool"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 1
    pad: 1
  }
}

layer {
  name: "conv4/incep3/conv"
  type: "Convolution"
  bottom: "conv4/incep3/pool"
  top: "conv4/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv4/incep3/bn"
  type: "BatchNorm"
  bottom: "conv4/incep3/conv"
  top: "conv4/incep3/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep3/bn_scale"
  type: "Scale"
  bottom: "conv4/incep3/conv"
  top: "conv4/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep3/relu"
  type: "ReLU"
  bottom: "conv4/incep3/conv"
  top: "conv4/incep3/conv"
}

layer {
  name: "conv4/incep"
  type: "Concat"
  bottom: "conv4/incep0/conv"
  bottom: "conv4/incep1/conv2"
  bottom: "conv4/incep2/conv3"
  bottom: "conv4/incep3/conv"
  top: "conv4/incep"
}

layer {
  name: "conv5/incep0/conv"
  type: "Convolution"
  bottom: "conv4/incep"
  top: "conv5/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv5/incep0/bn"
  type: "BatchNorm"
  bottom: "conv5/incep0/conv"
  top: "conv5/incep0/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep0/bn_scale"
  type: "Scale"
  bottom: "conv5/incep0/conv"
  top: "conv5/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep0/relu"
  type: "ReLU"
  bottom: "conv5/incep0/conv"
  top: "conv5/incep0/conv"
}

layer {
  name: "conv5/incep1/conv1"
  type: "Convolution"
  bottom: "conv4/incep"
  top: "conv5/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv5/incep1/bn1"
  type: "BatchNorm"
  bottom: "conv5/incep1/conv1"
  top: "conv5/incep1/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep1/bn_scale1"
  type: "Scale"
  bottom: "conv5/incep1/conv1"
  top: "conv5/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep1/relu1"
  type: "ReLU"
  bottom: "conv5/incep1/conv1"
  top: "conv5/incep1/conv1"
}

layer {
  name: "conv5/incep1/conv2"
  type: "Convolution"
  bottom: "conv5/incep1/conv1"
  top: "conv5/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv5/incep1/bn2"
  type: "BatchNorm"
  bottom: "conv5/incep1/conv2"
  top: "conv5/incep1/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep1/bn_scale2"
  type: "Scale"
  bottom: "conv5/incep1/conv2"
  top: "conv5/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep1/relu2"
  type: "ReLU"
  bottom: "conv5/incep1/conv2"
  top: "conv5/incep1/conv2"
}

layer {
  name: "conv5/incep2/conv1"
  type: "Convolution"
  bottom: "conv4/incep"
  top: "conv5/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv5/incep2/bn1"
  type: "BatchNorm"
  bottom: "conv5/incep2/conv1"
  top: "conv5/incep2/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep2/bn_scale1"
  type: "Scale"
  bottom: "conv5/incep2/conv1"
  top: "conv5/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep2/relu1"
  type: "ReLU"
  bottom: "conv5/incep2/conv1"
  top: "conv5/incep2/conv1"
}

layer {
  name: "conv5/incep2/conv2"
  type: "Convolution"
  bottom: "conv5/incep2/conv1"
  top: "conv5/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv5/incep2/bn2"
  type: "BatchNorm"
  bottom: "conv5/incep2/conv2"
  top: "conv5/incep2/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep2/bn_scale2"
  type: "Scale"
  bottom: "conv5/incep2/conv2"
  top: "conv5/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep2/relu2"
  type: "ReLU"
  bottom: "conv5/incep2/conv2"
  top: "conv5/incep2/conv2"
}

layer {
  name: "conv5/incep2/conv3"
  type: "Convolution"
  bottom: "conv5/incep2/conv2"
  top: "conv5/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv5/incep2/bn3"
  type: "BatchNorm"
  bottom: "conv5/incep2/conv3"
  top: "conv5/incep2/conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep2/bn_scale3"
  type: "Scale"
  bottom: "conv5/incep2/conv3"
  top: "conv5/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep2/relu3"
  type: "ReLU"
  bottom: "conv5/incep2/conv3"
  top: "conv5/incep2/conv3"
}

layer {
  name: "conv5/incep3/pool"
  type: "Pooling"
  bottom: "conv4/incep"
  top: "conv5/incep3/pool"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 1
    pad: 1
  }
}

layer {
  name: "conv5/incep3/conv"
  type: "Convolution"
  bottom: "conv5/incep3/pool"
  top: "conv5/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv5/incep3/bn"
  type: "BatchNorm"
  bottom: "conv5/incep3/conv"
  top: "conv5/incep3/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep3/bn_scale"
  type: "Scale"
  bottom: "conv5/incep3/conv"
  top: "conv5/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep3/relu"
  type: "ReLU"
  bottom: "conv5/incep3/conv"
  top: "conv5/incep3/conv"
}

layer {
  name: "conv5/incep"
  type: "Concat"
  bottom: "conv5/incep0/conv"
  bottom: "conv5/incep1/conv2"
  bottom: "conv5/incep2/conv3"
  bottom: "conv5/incep3/conv"
  top: "conv5/incep"
}

layer {
  name: "Inception3/conv/loc1"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/loc1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/loc1/perm"
  type: "Permute"
  bottom: "Inception3/conv/loc1"
  top: "Inception3/conv/loc1/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/loc1/flat"
  type: "Reshape"
  bottom: "Inception3/conv/loc1/perm"
  top: "Inception3/conv/loc1/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
} 

layer {
  name: "Inception3/conv/conf1"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/conf1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/conf1/perm"
  type: "Permute"
  bottom: "Inception3/conv/conf1"
  top: "Inception3/conv/conf1/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/conf1/flat"
  type: "Reshape"
  bottom: "Inception3/conv/conf1/perm"
  top: "Inception3/conv/conf1/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "Inception3/conv/priorbox1"
  type: "IPlugin"
  bottom: "conv5/incep"
  bottom: "data"
  top: "Inception3/conv/priorbox1"
  prior_box_param {
    min_size: 32
    aspect_ratio: 1
    flip: true
    clip: true
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
  }
}

layer {
  name: "Inception3/conv/loc2"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/loc2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/loc2/perm"
  type: "Permute"
  bottom: "Inception3/conv/loc2"
  top: "Inception3/conv/loc2/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/loc2/flat"
  type: "Reshape"
  bottom: "Inception3/conv/loc2/perm"
  top: "Inception3/conv/loc2/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
} 

layer {
  name: "Inception3/conv/conf2"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/conf2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/conf2/perm"
  type: "Permute"
  bottom: "Inception3/conv/conf2"
  top: "Inception3/conv/conf2/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/conf2/flat"
  type: "Reshape"
  bottom: "Inception3/conv/conf2/perm"
  top: "Inception3/conv/conf2/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "Inception3/conv/priorbox2"
  type: "IPlugin"
  bottom: "conv5/incep"
  bottom: "data"
  top: "Inception3/conv/priorbox2"
  prior_box_param {
    min_size: 64
    aspect_ratio: 1
    flip: true
    clip: true
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
  }
}

layer {
  name: "Inception3/conv/loc3"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/loc3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/loc3/perm"
  type: "Permute"
  bottom: "Inception3/conv/loc3"
  top: "Inception3/conv/loc3/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/loc3/flat"
  type: "Reshape"
  bottom: "Inception3/conv/loc3/perm"
  top: "Inception3/conv/loc3/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
} 

layer {
  name: "Inception3/conv/conf3"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/conf3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/conf3/perm"
  type: "Permute"
  bottom: "Inception3/conv/conf3"
  top: "Inception3/conv/conf3/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/conf3/flat"
  type: "Reshape"
  bottom: "Inception3/conv/conf3/perm"
  top: "Inception3/conv/conf3/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "Inception3/conv/priorbox3"
  type: "IPlugin"
  bottom: "conv5/incep"
  bottom: "data"
  top: "Inception3/conv/priorbox3"
  prior_box_param {
    min_size: 128
    aspect_ratio: 1
    flip: true
    clip: true
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
  }
}

layer {
  name: "conv6_1"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "conv6_1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 128
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv6/bn1"
  type: "BatchNorm"
  bottom: "conv6_1"
  top: "conv6_1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv6/bn_scale1"
  type: "Scale"
  bottom: "conv6_1"
  top: "conv6_1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv6/relu1"
  type: "ReLU"
  bottom: "conv6_1"
  top: "conv6_1"
}

layer {
  name: "conv6_2"
  type: "Convolution"
  bottom: "conv6_1"
  top: "conv6_2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 256
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 2
  }
}

layer {
  name: "conv6/bn2"
  type: "BatchNorm"
  bottom: "conv6_2"
  top: "conv6_2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv6/bn_scale2"
  type: "Scale"
  bottom: "conv6_2"
  top: "conv6_2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv6/relu2"
  type: "ReLU"
  bottom: "conv6_2"
  top: "conv6_2"
}

layer {
  name: "conv6/loc"
  type: "Convolution"
  bottom: "conv6_2"
  top: "conv6/loc"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv6/loc/perm"
  type: "Permute"
  bottom: "conv6/loc"
  top: "conv6/loc/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "conv6/loc/perm/flat"
  type: "Reshape"
  bottom: "conv6/loc/perm"
  top: "conv6/loc/perm/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "conv6/conf"
  type: "Convolution"
  bottom: "conv6_2"
  top: "conv6/conf"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv6/conf/perm"
  type: "Permute"
  bottom: "conv6/conf"
  top: "conv6/conf/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "conv6/conf/perm/flat"
  type: "Reshape"
  bottom: "conv6/conf/perm"
  top: "conv6/conf/perm/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "conv6/priorbox"
  type: "IPlugin"
  bottom: "conv6_2"
  bottom: "data"
  top: "conv6/priorbox"
  prior_box_param {
    min_size: 256
    aspect_ratio: 1
    flip: true
    clip: true
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
  }
}

layer {
  name: "conv7_1"
  type: "Convolution"
  bottom: "conv6_2"
  top: "conv7_1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 128
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv7/bn1"
  type: "BatchNorm"
  bottom: "conv7_1"
  top: "conv7_1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv7/bn_scale1"
  type: "Scale"
  bottom: "conv7_1"
  top: "conv7_1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv7/relu1"
  type: "ReLU"
  bottom: "conv7_1"
  top: "conv7_1"
}

layer {
  name: "conv7_2"
  type: "Convolution"
  bottom: "conv7_1"
  top: "conv7_2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 256
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 2
  }
}

layer {
  name: "conv7/bn2"
  type: "BatchNorm"
  bottom: "conv7_2"
  top: "conv7_2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv7/bn_scale2"
  type: "Scale"
  bottom: "conv7_2"
  top: "conv7_2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv7/relu2"
  type: "ReLU"
  bottom: "conv7_2"
  top: "conv7_2"
}

layer {
  name: "conv7/loc"
  type: "Convolution"
  bottom: "conv7_2"
  top: "conv7/loc"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv7/loc/perm"
  type: "Permute"
  bottom: "conv7/loc"
  top: "conv7/loc/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "conv7/loc/perm/flat"
  type: "Reshape"
  bottom: "conv7/loc/perm"
  top: "conv7/loc/perm/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "conv7/conf"
  type: "Convolution"
  bottom: "conv7_2"
  top: "conv7/conf"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv7/conf/perm"
  type: "Permute"
  bottom: "conv7/conf"
  top: "conv7/conf/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "conv7/conf/perm/flat"
  type: "Reshape"
  bottom: "conv7/conf/perm"
  top: "conv7/conf/perm/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "conv7/priorbox"
  type: "IPlugin"
  bottom: "conv7_2"
  bottom: "data"
  top: "conv7/priorbox"
  prior_box_param {
    min_size: 512
    aspect_ratio: 1
    flip: true
    clip: true
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
  }
}

layer {
  name: "mbox_loc"
  type: "Concat"
  bottom: "Inception3/conv/loc1/flat"
  bottom: "Inception3/conv/loc2/flat"
  bottom: "Inception3/conv/loc3/flat"
  bottom: "conv6/loc/perm/flat"
  bottom: "conv7/loc/perm/flat"
  top: "mbox_loc"
  concat_param {
    axis: 1
  }
}

layer {
  name: "mbox_conf"
  type: "Concat"
  bottom: "Inception3/conv/conf1/flat"
  bottom: "Inception3/conv/conf2/flat"
  bottom: "Inception3/conv/conf3/flat"
  bottom: "conv6/conf/perm/flat"
  bottom: "conv7/conf/perm/flat"
  top: "mbox_conf"
  concat_param {
    axis: 1
  }
}

layer {
  name: "mbox_priorbox"
  type: "Concat"
  bottom: "Inception3/conv/priorbox1"
  bottom: "Inception3/conv/priorbox2"
  bottom: "Inception3/conv/priorbox3"
  bottom: "conv6/priorbox"
  bottom: "conv7/priorbox"
  top: "mbox_priorbox"
  concat_param {
    axis: 2
  }
}

layer {
  name: "detection_out"
  type: "DetectionOutput"
  bottom: "mbox_loc"
  bottom: "mbox_conf"
  bottom: "mbox_priorbox"
  top: "detection_out"
  include {
    phase: TEST
  }
  detection_output_param {
    num_classes: 2
    share_location: true
    background_label_id: 0
    nms_param {
      nms_threshold: 0.45
      top_k: 100
    }
    code_type: CENTER_SIZE
    keep_top_k: 100
    confidence_threshold: 0.5
  }
}

Are you able to run our sampleSSD successfully?
If yes, you can refer to it to see how it handles the layers in SSD that TensorRT does not support natively; a sketch of that flow is below.
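
For reference, the overall build flow with the Caffe parser and a plugin factory looks roughly like this (a minimal sketch modeled on sampleSSD; the function name, batch size, and workspace size are illustrative, and error handling is elided):

#include "NvInfer.h"
#include "NvCaffeParser.h"

// Minimal sketch: parse the prototxt/caffemodel with a plugin factory
// and serialize the engine (TensorRT 5.x API).
nvinfer1::IHostMemory* buildEngine(const char* deploy, const char* model,
                                   nvcaffeparser1::IPluginFactoryExt* factory,
                                   nvinfer1::ILogger& logger)
{
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(logger);
    nvinfer1::INetworkDefinition* network = builder->createNetwork();

    nvcaffeparser1::ICaffeParser* parser = nvcaffeparser1::createCaffeParser();
    parser->setPluginFactoryExt(factory);

    const nvcaffeparser1::IBlobNameToTensor* blobs =
        parser->parse(deploy, model, *network, nvinfer1::DataType::kFLOAT);
    network->markOutput(*blobs->find("detection_out"));

    builder->setMaxBatchSize(1);
    builder->setMaxWorkspaceSize(16 << 20);
    nvinfer1::ICudaEngine* engine = builder->buildCudaEngine(*network);

    nvinfer1::IHostMemory* serialized = engine->serialize();
    engine->destroy();
    network->destroy();
    parser->destroy();
    builder->destroy();
    return serialized;
}
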
I also modified your prototxt and got it working with TRT 5.0 GA:

/usr/src/tensorrt/bin/trtexec --deploy=FaceBoxes.prototxt --output=detection_out
name: "FaceBoxes"
input: "data"
input_shape {
  dim: 1
  dim: 3
  dim: 1024
  dim: 1024
}

#conv1
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  convolution_param {
    num_output: 24
    pad: 0
    kernel_size: 7
    stride: 4
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv1/bn"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv1/neg"
  type: "Power"
  bottom: "conv1"
  top: "conv1/neg"
  power_param {
    power: 1
    scale: -1.0
    shift: 0
  }
}

layer {
  name: "conv1/concat"
  type: "Concat"
  bottom: "conv1"
  bottom: "conv1/neg"
  top: "conv1_CR"
}

layer {
  name: "conv1/scale"
  type: "Scale"
  bottom: "conv1_CR"
  top: "conv1_CR"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv1/relu"
  type: "ReLU"
  bottom: "conv1_CR"
  top: "conv1_CR"
}

layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1_CR"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
#conv2
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_size: 5
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv2/bn"
  type: "BatchNorm"
  bottom: "conv2"
  top: "conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv2/neg"
  type: "Power"
  bottom: "conv2"
  top: "conv2/neg"
  power_param {
    power: 1
    scale: -1.0
    shift: 0
  }
}

layer {
  name: "conv2/concat"
  type: "Concat"
  bottom: "conv2"
  bottom: "conv2/neg"
  top: "conv2_CR"
}

layer {
  name: "conv2/scale"
  type: "Scale"
  bottom: "conv2_CR"
  top: "conv2_CR"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv2/relu"
  type: "ReLU"
  bottom: "conv2_CR"
  top: "conv2_CR"
}

layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2_CR"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
#Inception1
layer {
  name: "conv3/incep0/conv"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv3/incep0/bn"
  type: "BatchNorm"
  bottom: "conv3/incep0/conv"
  top: "conv3/incep0/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep0/bn_scale"
  type: "Scale"
  bottom: "conv3/incep0/conv"
  top: "conv3/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep0/relu"
  type: "ReLU"
  bottom: "conv3/incep0/conv"
  top: "conv3/incep0/conv"
}

layer {
  name: "conv3/incep1/conv1"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv3/incep1/bn1"
  type: "BatchNorm"
  bottom: "conv3/incep1/conv1"
  top: "conv3/incep1/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep1/bn_scale1"
  type: "Scale"
  bottom: "conv3/incep1/conv1"
  top: "conv3/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep1/relu1"
  type: "ReLU"
  bottom: "conv3/incep1/conv1"
  top: "conv3/incep1/conv1"
}

layer {
  name: "conv3/incep1/conv2"
  type: "Convolution"
  bottom: "conv3/incep1/conv1"
  top: "conv3/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv3/incep1/bn2"
  type: "BatchNorm"
  bottom: "conv3/incep1/conv2"
  top: "conv3/incep1/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep1/bn_scale2"
  type: "Scale"
  bottom: "conv3/incep1/conv2"
  top: "conv3/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep1/relu2"
  type: "ReLU"
  bottom: "conv3/incep1/conv2"
  top: "conv3/incep1/conv2"
}

layer {
  name: "conv3/incep2/conv1"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv3/incep2/bn1"
  type: "BatchNorm"
  bottom: "conv3/incep2/conv1"
  top: "conv3/incep2/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep2/bn_scale1"
  type: "Scale"
  bottom: "conv3/incep2/conv1"
  top: "conv3/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep2/relu1"
  type: "ReLU"
  bottom: "conv3/incep2/conv1"
  top: "conv3/incep2/conv1"
}

layer {
  name: "conv3/incep2/conv2"
  type: "Convolution"
  bottom: "conv3/incep2/conv1"
  top: "conv3/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv3/incep2/bn2"
  type: "BatchNorm"
  bottom: "conv3/incep2/conv2"
  top: "conv3/incep2/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep2/bn_scale2"
  type: "Scale"
  bottom: "conv3/incep2/conv2"
  top: "conv3/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep2/relu2"
  type: "ReLU"
  bottom: "conv3/incep2/conv2"
  top: "conv3/incep2/conv2"
}

layer {
  name: "conv3/incep2/conv3"
  type: "Convolution"
  bottom: "conv3/incep2/conv2"
  top: "conv3/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv3/incep2/bn3"
  type: "BatchNorm"
  bottom: "conv3/incep2/conv3"
  top: "conv3/incep2/conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep2/bn_scale3"
  type: "Scale"
  bottom: "conv3/incep2/conv3"
  top: "conv3/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep2/relu3"
  type: "ReLU"
  bottom: "conv3/incep2/conv3"
  top: "conv3/incep2/conv3"
}

layer {
  name: "conv3/incep3/pool"
  type: "Pooling"
  bottom: "pool2"
  top: "conv3/incep3/pool"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 1
    pad: 1
  }
}

layer {
  name: "conv3/incep3/conv"
  type: "Convolution"
  bottom: "conv3/incep3/pool"
  top: "conv3/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv3/incep3/bn"
  type: "BatchNorm"
  bottom: "conv3/incep3/conv"
  top: "conv3/incep3/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv3/incep3/bn_scale"
  type: "Scale"
  bottom: "conv3/incep3/conv"
  top: "conv3/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv3/incep3/relu"
  type: "ReLU"
  bottom: "conv3/incep3/conv"
  top: "conv3/incep3/conv"
}

layer {
  name: "conv3/incep"
  type: "Concat"
  bottom: "conv3/incep0/conv"
  bottom: "conv3/incep1/conv2"
  bottom: "conv3/incep2/conv3"
  bottom: "conv3/incep3/conv"
  top: "conv3/incep"
}
#Inception2
layer {
  name: "conv4/incep0/conv"
  type: "Convolution"
  bottom: "conv3/incep"
  top: "conv4/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv4/incep0/bn"
  type: "BatchNorm"
  bottom: "conv4/incep0/conv"
  top: "conv4/incep0/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep0/bn_scale"
  type: "Scale"
  bottom: "conv4/incep0/conv"
  top: "conv4/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep0/relu"
  type: "ReLU"
  bottom: "conv4/incep0/conv"
  top: "conv4/incep0/conv"
}

layer {
  name: "conv4/incep1/conv1"
  type: "Convolution"
  bottom: "conv3/incep"
  top: "conv4/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv4/incep1/bn1"
  type: "BatchNorm"
  bottom: "conv4/incep1/conv1"
  top: "conv4/incep1/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep1/bn_scale1"
  type: "Scale"
  bottom: "conv4/incep1/conv1"
  top: "conv4/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep1/relu1"
  type: "ReLU"
  bottom: "conv4/incep1/conv1"
  top: "conv4/incep1/conv1"
}

layer {
  name: "conv4/incep1/conv2"
  type: "Convolution"
  bottom: "conv4/incep1/conv1"
  top: "conv4/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv4/incep1/bn2"
  type: "BatchNorm"
  bottom: "conv4/incep1/conv2"
  top: "conv4/incep1/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep1/bn_scale2"
  type: "Scale"
  bottom: "conv4/incep1/conv2"
  top: "conv4/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep1/relu2"
  type: "ReLU"
  bottom: "conv4/incep1/conv2"
  top: "conv4/incep1/conv2"
}

layer {
  name: "conv4/incep2/conv1"
  type: "Convolution"
  bottom: "conv3/incep"
  top: "conv4/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv4/incep2/bn1"
  type: "BatchNorm"
  bottom: "conv4/incep2/conv1"
  top: "conv4/incep2/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep2/bn_scale1"
  type: "Scale"
  bottom: "conv4/incep2/conv1"
  top: "conv4/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep2/relu1"
  type: "ReLU"
  bottom: "conv4/incep2/conv1"
  top: "conv4/incep2/conv1"
}

layer {
  name: "conv4/incep2/conv2"
  type: "Convolution"
  bottom: "conv4/incep2/conv1"
  top: "conv4/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv4/incep2/bn2"
  type: "BatchNorm"
  bottom: "conv4/incep2/conv2"
  top: "conv4/incep2/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep2/bn_scale2"
  type: "Scale"
  bottom: "conv4/incep2/conv2"
  top: "conv4/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep2/relu2"
  type: "ReLU"
  bottom: "conv4/incep2/conv2"
  top: "conv4/incep2/conv2"
}

layer {
  name: "conv4/incep2/conv3"
  type: "Convolution"
  bottom: "conv4/incep2/conv2"
  top: "conv4/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv4/incep2/bn3"
  type: "BatchNorm"
  bottom: "conv4/incep2/conv3"
  top: "conv4/incep2/conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep2/bn_scale3"
  type: "Scale"
  bottom: "conv4/incep2/conv3"
  top: "conv4/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep2/relu3"
  type: "ReLU"
  bottom: "conv4/incep2/conv3"
  top: "conv4/incep2/conv3"
}

layer {
  name: "conv4/incep3/pool"
  type: "Pooling"
  bottom: "conv3/incep"
  top: "conv4/incep3/pool"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 1
    pad: 1
  }
}

layer {
  name: "conv4/incep3/conv"
  type: "Convolution"
  bottom: "conv4/incep3/pool"
  top: "conv4/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv4/incep3/bn"
  type: "BatchNorm"
  bottom: "conv4/incep3/conv"
  top: "conv4/incep3/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv4/incep3/bn_scale"
  type: "Scale"
  bottom: "conv4/incep3/conv"
  top: "conv4/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv4/incep3/relu"
  type: "ReLU"
  bottom: "conv4/incep3/conv"
  top: "conv4/incep3/conv"
}

layer {
  name: "conv4/incep"
  type: "Concat"
  bottom: "conv4/incep0/conv"
  bottom: "conv4/incep1/conv2"
  bottom: "conv4/incep2/conv3"
  bottom: "conv4/incep3/conv"
  top: "conv4/incep"
}

layer {
  name: "conv5/incep0/conv"
  type: "Convolution"
  bottom: "conv4/incep"
  top: "conv5/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv5/incep0/bn"
  type: "BatchNorm"
  bottom: "conv5/incep0/conv"
  top: "conv5/incep0/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep0/bn_scale"
  type: "Scale"
  bottom: "conv5/incep0/conv"
  top: "conv5/incep0/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep0/relu"
  type: "ReLU"
  bottom: "conv5/incep0/conv"
  top: "conv5/incep0/conv"
}

layer {
  name: "conv5/incep1/conv1"
  type: "Convolution"
  bottom: "conv4/incep"
  top: "conv5/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv5/incep1/bn1"
  type: "BatchNorm"
  bottom: "conv5/incep1/conv1"
  top: "conv5/incep1/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep1/bn_scale1"
  type: "Scale"
  bottom: "conv5/incep1/conv1"
  top: "conv5/incep1/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep1/relu1"
  type: "ReLU"
  bottom: "conv5/incep1/conv1"
  top: "conv5/incep1/conv1"
}

layer {
  name: "conv5/incep1/conv2"
  type: "Convolution"
  bottom: "conv5/incep1/conv1"
  top: "conv5/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv5/incep1/bn2"
  type: "BatchNorm"
  bottom: "conv5/incep1/conv2"
  top: "conv5/incep1/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep1/bn_scale2"
  type: "Scale"
  bottom: "conv5/incep1/conv2"
  top: "conv5/incep1/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep1/relu2"
  type: "ReLU"
  bottom: "conv5/incep1/conv2"
  top: "conv5/incep1/conv2"
}

layer {
  name: "conv5/incep2/conv1"
  type: "Convolution"
  bottom: "conv4/incep"
  top: "conv5/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 24
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv5/incep2/bn1"
  type: "BatchNorm"
  bottom: "conv5/incep2/conv1"
  top: "conv5/incep2/conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep2/bn_scale1"
  type: "Scale"
  bottom: "conv5/incep2/conv1"
  top: "conv5/incep2/conv1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep2/relu1"
  type: "ReLU"
  bottom: "conv5/incep2/conv1"
  top: "conv5/incep2/conv1"
}

layer {
  name: "conv5/incep2/conv2"
  type: "Convolution"
  bottom: "conv5/incep2/conv1"
  top: "conv5/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv5/incep2/bn2"
  type: "BatchNorm"
  bottom: "conv5/incep2/conv2"
  top: "conv5/incep2/conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep2/bn_scale2"
  type: "Scale"
  bottom: "conv5/incep2/conv2"
  top: "conv5/incep2/conv2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep2/relu2"
  type: "ReLU"
  bottom: "conv5/incep2/conv2"
  top: "conv5/incep2/conv2"
}

layer {
  name: "conv5/incep2/conv3"
  type: "Convolution"
  bottom: "conv5/incep2/conv2"
  top: "conv5/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 1
  }
}

layer {
  name: "conv5/incep2/bn3"
  type: "BatchNorm"
  bottom: "conv5/incep2/conv3"
  top: "conv5/incep2/conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep2/bn_scale3"
  type: "Scale"
  bottom: "conv5/incep2/conv3"
  top: "conv5/incep2/conv3"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep2/relu3"
  type: "ReLU"
  bottom: "conv5/incep2/conv3"
  top: "conv5/incep2/conv3"
}

layer {
  name: "conv5/incep3/pool"
  type: "Pooling"
  bottom: "conv4/incep"
  top: "conv5/incep3/pool"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 1
    pad: 1
  }
}

layer {
  name: "conv5/incep3/conv"
  type: "Convolution"
  bottom: "conv5/incep3/pool"
  top: "conv5/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv5/incep3/bn"
  type: "BatchNorm"
  bottom: "conv5/incep3/conv"
  top: "conv5/incep3/conv"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv5/incep3/bn_scale"
  type: "Scale"
  bottom: "conv5/incep3/conv"
  top: "conv5/incep3/conv"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv5/incep3/relu"
  type: "ReLU"
  bottom: "conv5/incep3/conv"
  top: "conv5/incep3/conv"
}

layer {
  name: "conv5/incep"
  type: "Concat"
  bottom: "conv5/incep0/conv"
  bottom: "conv5/incep1/conv2"
  bottom: "conv5/incep2/conv3"
  bottom: "conv5/incep3/conv"
  top: "conv5/incep"
}

layer {
  name: "Inception3/conv/loc1"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/loc1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/loc1/perm"
  type: "Permute"
  bottom: "Inception3/conv/loc1"
  top: "Inception3/conv/loc1/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/loc1/flat"
  type: "Reshape"
  bottom: "Inception3/conv/loc1/perm"
  top: "Inception3/conv/loc1/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
} 

layer {
  name: "Inception3/conv/conf1"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/conf1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/conf1/perm"
  type: "Permute"
  bottom: "Inception3/conv/conf1"
  top: "Inception3/conv/conf1/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/conf1/flat"
  type: "Reshape"
  bottom: "Inception3/conv/conf1/perm"
  top: "Inception3/conv/conf1/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "Inception3/conv/priorbox1"
#type: "IPlugin"
  type: "PriorBox"
  bottom: "conv5/incep"
  bottom: "data"
  top: "Inception3/conv/priorbox1"
  prior_box_param {
    min_size: 32
    aspect_ratio: 1
    flip: true
    clip: true
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
  }
}

layer {
  name: "Inception3/conv/loc2"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/loc2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/loc2/perm"
  type: "Permute"
  bottom: "Inception3/conv/loc2"
  top: "Inception3/conv/loc2/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/loc2/flat"
  type: "Reshape"
  bottom: "Inception3/conv/loc2/perm"
  top: "Inception3/conv/loc2/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
} 

layer {
  name: "Inception3/conv/conf2"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/conf2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/conf2/perm"
  type: "Permute"
  bottom: "Inception3/conv/conf2"
  top: "Inception3/conv/conf2/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/conf2/flat"
  type: "Reshape"
  bottom: "Inception3/conv/conf2/perm"
  top: "Inception3/conv/conf2/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "Inception3/conv/priorbox2"
#type: "IPlugin"
  type: "PriorBox"
  bottom: "conv5/incep"
  bottom: "data"
  top: "Inception3/conv/priorbox2"
  prior_box_param {
    min_size: 64
    aspect_ratio: 1
    flip: true
    clip: true
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
  }
}

layer {
  name: "Inception3/conv/loc3"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/loc3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/loc3/perm"
  type: "Permute"
  bottom: "Inception3/conv/loc3"
  top: "Inception3/conv/loc3/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/loc3/flat"
  type: "Reshape"
  bottom: "Inception3/conv/loc3/perm"
  top: "Inception3/conv/loc3/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
} 

layer {
  name: "Inception3/conv/conf3"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "Inception3/conv/conf3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "Inception3/conv/conf3/perm"
  type: "Permute"
  bottom: "Inception3/conv/conf3"
  top: "Inception3/conv/conf3/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "Inception3/conv/conf3/flat"
  type: "Reshape"
  bottom: "Inception3/conv/conf3/perm"
  top: "Inception3/conv/conf3/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "Inception3/conv/priorbox3"
#type: "IPlugin"
  type: "PriorBox"
  bottom: "conv5/incep"
  bottom: "data"
  top: "Inception3/conv/priorbox3"
  prior_box_param {
    min_size: 128
    aspect_ratio: 1
    flip: true
    clip: true
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
  }
}

layer {
  name: "conv6_1"
  type: "Convolution"
  bottom: "conv5/incep"
  top: "conv6_1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 128
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv6/bn1"
  type: "BatchNorm"
  bottom: "conv6_1"
  top: "conv6_1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv6/bn_scale1"
  type: "Scale"
  bottom: "conv6_1"
  top: "conv6_1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv6/relu1"
  type: "ReLU"
  bottom: "conv6_1"
  top: "conv6_1"
}

layer {
  name: "conv6_2"
  type: "Convolution"
  bottom: "conv6_1"
  top: "conv6_2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 256
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 2
  }
}

layer {
  name: "conv6/bn2"
  type: "BatchNorm"
  bottom: "conv6_2"
  top: "conv6_2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv6/bn_scale2"
  type: "Scale"
  bottom: "conv6_2"
  top: "conv6_2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv6/relu2"
  type: "ReLU"
  bottom: "conv6_2"
  top: "conv6_2"
}

layer {
  name: "conv6/loc"
  type: "Convolution"
  bottom: "conv6_2"
  top: "conv6/loc"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv6/loc/perm"
  type: "Permute"
  bottom: "conv6/loc"
  top: "conv6/loc/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "conv6/loc/perm/flat"
  type: "Reshape"
  bottom: "conv6/loc/perm"
  top: "conv6/loc/perm/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "conv6/conf"
  type: "Convolution"
  bottom: "conv6_2"
  top: "conv6/conf"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv6/conf/perm"
  type: "Permute"
  bottom: "conv6/conf"
  top: "conv6/conf/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "conv6/conf/perm/flat"
  type: "Reshape"
  bottom: "conv6/conf/perm"
  top: "conv6/conf/perm/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "conv6/priorbox"
#type: "IPlugin"
  type: "PriorBox"
  bottom: "conv6_2"
  bottom: "data"
  top: "conv6/priorbox"
  prior_box_param {
    min_size: 256
    aspect_ratio: 1
    flip: true
    clip: true
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
  }
}

layer {
  name: "conv7_1"
  type: "Convolution"
  bottom: "conv6_2"
  top: "conv7_1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 128
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 0
    kernel_size: 1
    stride: 1
  }
}

layer {
  name: "conv7/bn1"
  type: "BatchNorm"
  bottom: "conv7_1"
  top: "conv7_1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv7/bn_scale1"
  type: "Scale"
  bottom: "conv7_1"
  top: "conv7_1"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv7/relu1"
  type: "ReLU"
  bottom: "conv7_1"
  top: "conv7_1"
}

layer {
  name: "conv7_2"
  type: "Convolution"
  bottom: "conv7_1"
  top: "conv7_2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 256
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
    pad: 1
    kernel_size: 3
    stride: 2
  }
}

layer {
  name: "conv7/bn2"
  type: "BatchNorm"
  bottom: "conv7_2"
  top: "conv7_2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false
  }
}

layer {
  name: "conv7/bn_scale2"
  type: "Scale"
  bottom: "conv7_2"
  top: "conv7_2"
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  param {
    lr_mult: 1.0
    decay_mult: 0
  }
  scale_param {
    bias_term: true
  }
}

layer {
  name: "conv7/relu2"
  type: "ReLU"
  bottom: "conv7_2"
  top: "conv7_2"
}

layer {
  name: "conv7/loc"
  type: "Convolution"
  bottom: "conv7_2"
  top: "conv7/loc"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv7/loc/perm"
  type: "Permute"
  bottom: "conv7/loc"
  top: "conv7/loc/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "conv7/loc/perm/flat"
  type: "Reshape"
  bottom: "conv7/loc/perm"
  top: "conv7/loc/perm/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "conv7/conf"
  type: "Convolution"
  bottom: "conv7_2"
  top: "conv7/conf"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "conv7/conf/perm"
  type: "Permute"
  bottom: "conv7/conf"
  top: "conv7/conf/perm"
  permute_param {
    order: 0
    order: 2
    order: 3
    order: 1
  }
}

layer {
  name: "conv7/conf/perm/flat"
  type: "Reshape"
  bottom: "conv7/conf/perm"
  top: "conv7/conf/perm/flat"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
}

layer {
  name: "conv7/priorbox"
#type: "IPlugin"
  type: "PriorBox"
  bottom: "conv7_2"
  bottom: "data"
  top: "conv7/priorbox"
  prior_box_param {
    min_size: 512
    aspect_ratio: 1
    flip: true
    clip: true
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
  }
}

layer {
  name: "mbox_loc"
  type: "Concat"
  bottom: "Inception3/conv/loc1/flat"
  bottom: "Inception3/conv/loc2/flat"
  bottom: "Inception3/conv/loc3/flat"
  bottom: "conv6/loc/perm/flat"
  bottom: "conv7/loc/perm/flat"
  top: "mbox_loc"
  concat_param {
    axis: 1
  }
}

layer {
  name: "mbox_conf"
  type: "Concat"
  bottom: "Inception3/conv/conf1/flat"
  bottom: "Inception3/conv/conf2/flat"
  bottom: "Inception3/conv/conf3/flat"
  bottom: "conv6/conf/perm/flat"
  bottom: "conv7/conf/perm/flat"
  top: "mbox_conf"
  concat_param {
    axis: 1
  }
}

layer {
  name: "mbox_priorbox"
  type: "Concat"
  bottom: "Inception3/conv/priorbox1"
  bottom: "Inception3/conv/priorbox2"
  bottom: "Inception3/conv/priorbox3"
  bottom: "conv6/priorbox"
  bottom: "conv7/priorbox"
  top: "mbox_priorbox"
  concat_param {
    axis: 2
  }
}

layer {
  name: "detection_out"
  type: "DetectionOutput"
  bottom: "mbox_loc"
  bottom: "mbox_conf"
  bottom: "mbox_priorbox"
  top: "detection_out"
  top: "keep_count"
  include {
    phase: TEST
  }
  detection_output_param {
    num_classes: 2
    share_location: true
    background_label_id: 0
    nms_param {
      nms_threshold: 0.45
      top_k: 100
    }
    code_type: CENTER_SIZE
    keep_top_k: 100
    confidence_threshold: 0.5
  }
}

Hello,
“trtexec --deploy=FaceBoxes.prototxt --output=detection_out” already works, but when parsing the model using the TensorRT caffe parser, the problem remains.

sample_face_detection: nvPluginsLegacy.cpp:1068: virtual nvinfer1::Dims nvinfer1::plugin::ConcatLegacy::getOutputDimensions(int, const nvinfer1::Dims*, int): Assertion `nbInputDims >= 1' failed.
Aborted (core dumped)

Now the priorbox layers and the detection_out layer have all been replaced by custom layers; we have implemented the IPluginExt class and IPluginFactory class as follows. Maybe they have a lot of problems. Please take a look at them and tell me how to solve this problem.

//priorbox layer plugin
class PriorBoxPlugin : public nvinfer1::IPluginExt
{
public:

	PriorBoxPlugin(HiPrioxBoxParameters params)
	{
		mPriorBoxParamters = params;		
	}

	PriorBoxPlugin(const void*data ,size_t length)
	{
		const char* d = static_cast<const char*>(data), *a = d;
		read(d,mDataType);
		read(d,num_priors_);
		read(d,mPriorBoxParamters);
		read(d,mLayerWidth);
		read(d,mLayerHeight);
		read(d,mImgWidth);
		read(d,mImgHeight);
		
		assert(d == a+length);
	}
	~PriorBoxPlugin(){}

	int getNbOutputs() const override
	{
		return 1;
	}

	Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims)override
	{
		assert(index == 0 && nbInputDims == 2 && inputs[0].nbDims==3  );
		//dim=layer_height * layer_width * num_priors_ * 4;
		top_data_size = inputs[0].d[0] * inputs[0].d[1] *num_priors_ *4;
		
		return Dims3(1, 2, top_data_size);
	}

	bool supportsFormat(DataType type, PluginFormat format) const override
	{ 
		return (type == DataType::kFLOAT || type == DataType::kHALF) && format == PluginFormat::kNCHW; 
	}

	void configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) override
	{
		assert((type == DataType::kFLOAT || type == DataType::kHALF) && format == PluginFormat::kNCHW);
		assert(nbInputs == 2);
		mDataType = type;		
		num_priors_ = mPriorBoxParamters.numAspectRatios * mPriorBoxParamters.numMinSize;
		mLayerWidth = inputDims[0].d[1];
		mLayerHeight = inputDims[0].d[2];
		if (mPriorBoxParamters.imgH == 0 || mPriorBoxParamters.imgW == 0)
		{
			mImgWidth = inputDims[1].d[1];
			mImgHeight =  inputDims[1].d[2];
		}
		else
		{
			mImgWidth = mPriorBoxParamters.imgW;
			mImgHeight =  mPriorBoxParamters.imgH;

		}
	}

	int initialize() override
	{
		return 0;
	}

	virtual void terminate() override { ; }

	virtual size_t getWorkspaceSize(int batchSize) const override 
	{ 
		return top_data_size * batchSize; 
	}

	virtual int enqueue(int batchSize, const void*const *inputs, void** outputs, void*, cudaStream_t stream) override
	{
		float step_h,step_w;
		if (mPriorBoxParamters.stepH == 0 ||mPriorBoxParamters.stepW == 0)
		{
			step_w = mImgWidth/mLayerWidth;
			step_h = mImgHeight/mLayerHeight;
		}
		else
		{
			step_w = mPriorBoxParamters.stepW;
			step_h = mPriorBoxParamters.stepH;
		}

		int dim = mLayerWidth * mLayerHeight * num_priors_ * 4;
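		// note: dim is not a compile-time constant, so top_data below (and
		// top_data1 later) are variable-length arrays, a compiler extension,
		// and a VLA cannot take an "= {0.}" initializer as written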
		float top_data[dim] = {0.};	
		int idx = 0;
		for (int h = 0;h < mLayerHeight;++h)
		{
			for(int w=0;w<mLayerWidth;++w)
			{
				float center_x = (w + mPriorBoxParamters.offset) * step_w;
   		        float center_y = (h + mPriorBoxParamters.offset) * step_h;
   		        float box_width, box_height;
				for (int s = 0;s<mPriorBoxParamters.numMinSize;++s)
				{
					int min_size_ = mPriorBoxParamters.minSize[s];
					// first prior: aspect_ratio = 1, size = min_size
			        box_width = box_height = min_size_;
			        // xmin
			        top_data[idx++] = (center_x - box_width / 2.) / mImgWidth;
			        // ymin
			        top_data[idx++] = (center_y - box_height / 2.) / mImgHeight;
			        // xmax
			        top_data[idx++] = (center_x + box_width / 2.) / mImgWidth;
			        // ymax
			        top_data[idx++] = (center_y + box_height / 2.) / mImgHeight;

			        if (mPriorBoxParamters.numMaxSize > 0) 
					{
			          int max_size_ = mPriorBoxParamters.maxSize[s];
			          // second prior: aspect_ratio = 1, size = sqrt(min_size * max_size)
			          box_width = box_height = sqrt(min_size_ * max_size_);
			          // xmin
			          top_data[idx++] = (center_x - box_width / 2.) / mImgWidth;
			          // ymin
			          top_data[idx++] = (center_y - box_height / 2.) / mImgHeight;
			          // xmax
			          top_data[idx++] = (center_x + box_width / 2.) / mImgWidth;
			          // ymax
			          top_data[idx++] = (center_y + box_height / 2.) / mImgHeight;
			        }
					// rest of priors
			        for (int r = 0; r < mPriorBoxParamters.numAspectRatios; ++r) 
					{
			          float ar = mPriorBoxParamters.aspectRatios[r];
			          if (fabs(ar - 1.) < 1e-6)
					  {
			            continue;
			          }
			          box_width = min_size_ * sqrt(ar);
			          box_height = min_size_ / sqrt(ar);
			          // xmin
			          top_data[idx++] = (center_x - box_width / 2.) / mImgWidth;
			          // ymin
			          top_data[idx++] = (center_y - box_height / 2.) / mImgHeight;
			          // xmax
			          top_data[idx++] = (center_x + box_width / 2.) / mImgWidth;
			          // ymax
			          top_data[idx++] = (center_y + box_height / 2.) / mImgHeight;
			        }
								
				}

			}
		}
		// clip the priors' coordinates such that they are within [0, 1]
		// (Caffe's PriorBox clips all dim values, not just the first num_priors_)
		if (mPriorBoxParamters.clip) 
		{
		    for (int d = 0; d < dim; ++d)
			{
		      top_data[d] = std::min(std::max((float)top_data[d], 0.0f), 1.0f);
		    }
		}
		
		float top_data1[dim] = {0};
		// set the variance.
		int count =0;
		for (int h = 0; h < mLayerHeight; ++h) {
			for (int w = 0; w < mLayerWidth; ++w) {
				for (int i = 0; i < num_priors_; ++i) {
					for (int j = 0; j < 4; ++j) {
						top_data1[count] = mPriorBoxParamters.variance[j];
						++count;
					}
				}
			}
 		}
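		// note: top_data and top_data1 are host stack arrays; assigning their
		// addresses to outputs[] below does not copy the results into the
		// device buffers TensorRT provides, and since getNbOutputs() returns 1,
		// outputs[1] is not a valid output slot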
		outputs[0] = top_data;
		outputs[1] = top_data1;
		return 0;
	}

	virtual size_t getSerializationSize() override
	{
		// must match the byte count written by serialize(); each member is
		// written with its own sizeof, so no scaling by type2size() is needed
		size_t size = sizeof(mDataType) + sizeof(num_priors_) + sizeof(mPriorBoxParamters) + sizeof(mLayerWidth) + sizeof(mLayerHeight) + sizeof(mImgWidth) + sizeof(mImgHeight);
		return size;
	}

	virtual void serialize(void *buffer)override
	{
		char* d = static_cast<char*>(buffer), *a = d;
		write(d,mDataType);
		write(d,num_priors_);
		write(d,mPriorBoxParamters);
		write(d,mLayerWidth);
		write(d,mLayerHeight);
		write(d,mImgWidth);
		write(d,mImgHeight);
		assert(d == a + getSerializationSize());
	}
	

	
	int getTensorRTVersion()const override
	{
		return 0;
	}
	IPluginExt *clone()const override
	{
		return new PriorBoxPlugin(mPriorBoxParamters);
	}
	void destroy() { delete this; }
    
	 const char* getPluginType() const override
    {
        return "FCPlugin_TRT";
    }

    const char* getPluginVersion() const override
    {
        return "001";
    }
	
private:

	size_t type2size(DataType type) { return type == DataType::kFLOAT ? sizeof(float) : sizeof(__half); }

    template<typename T> void write(char*& buffer, const T& val)
    {
        *reinterpret_cast<T*>(buffer) = val;
        buffer += sizeof(T);
    }

    template<typename T> void read(const char*& buffer, T& val)
    {
        val = *reinterpret_cast<const T*>(buffer);
        buffer += sizeof(T);
    }


	HiPrioxBoxParameters mPriorBoxParamters;
	int num_priors_,top_data_size,mImgHeight,mImgWidth,mLayerWidth,mLayerHeight;
	DataType mDataType{DataType::kFLOAT};
};



class PluginFactory : public nvinfer1::IPluginFactory,public nvcaffeparser1::IPluginFactoryExt
{
public:
	virtual nvinfer1::IPlugin* createPlugin(const char* layerName, const nvinfer1::Weights* weights, int nbWeights) override;
	
	nvinfer1::IPlugin* createPlugin(const char* layerName,const void* seriaData,size_t seriaLength)override;
    // caffe parser plugin implementation
	bool isPlugin(const char* name) override { return isPluginExt(name); }
	
    bool isPluginExt(const char* name) override ;
    
    void destroyPlugin();

    void (*pluginDeleter)( nvinfer1::plugin::INvPlugin*) {[]( nvinfer1::plugin::INvPlugin* ptr) {ptr->destroy();}};
	//priorbox layer
    std::unique_ptr< PriorBoxPlugin> Inception3_conv_priorbox1_layer{nullptr};
 	std::unique_ptr< PriorBoxPlugin> Inception3_conv_priorbox2_layer{nullptr};
 	std::unique_ptr< PriorBoxPlugin> Inception3_conv_priorbox3_layer{nullptr};
	std::unique_ptr< PriorBoxPlugin> conv6_priorbox_layer{nullptr};
	std::unique_ptr< PriorBoxPlugin> conv7_priorbox_layer{nullptr};
	//detection output layer
	std::unique_ptr< nvinfer1::plugin::INvPlugin, decltype(pluginDeleter)> mDetection_out{nullptr, pluginDeleter};
};

//pluginfactory
/****************************************************/
nvinfer1::IPlugin* PluginFactory::createPlugin(const char* layerName, const nvinfer1::Weights* weights, int nbWeights)
{
	assert(PluginFactory::isPluginExt(layerName));
	std::cout << "==========createPlugin=========layerName:" << layerName << std::endl;
	if(!strcmp(layerName,"Inception3/conv/priorbox1"))
	{
		assert(Inception3_conv_priorbox1_layer.get() == nullptr);
		HiPrioxBoxParameters params;
		// static so the pointers stored in params stay valid after this function returns
		static float minSize[1] = {32};
		static float aspectRatios[1] = {1};
		params.minSize = minSize;
		params.maxSize = nullptr;
		params.aspectRatios = aspectRatios;
		params.numMinSize = 1;
		params.numMaxSize = 0;
		params.numAspectRatios = 1;
		params.flip = true;
		params.clip = true;
		params.variance[0] = 0.1;
		params.variance[1] = 0.1;
		params.variance[2] = 0.2;
		params.variance[3] = 0.2;
		params.imgH = 0;
		params.imgW = 0;
		params.stepH = 0;
		params.stepW = 0;
		params.offset = 0.5;
		Inception3_conv_priorbox1_layer = std::unique_ptr<PriorBoxPlugin>(new PriorBoxPlugin(params));
		
		std::cout << "==========createPlugin succ=========layerName:" << layerName << std::endl;
		
		return (nvinfer1::IPlugin*)Inception3_conv_priorbox1_layer.get();			
	}

	else if (!strcmp(layerName,"Inception3/conv/priorbox2"))
	{
		assert(Inception3_conv_priorbox2_layer.get() == nullptr);
		HiPrioxBoxParameters params;
		// static so the pointers stored in params stay valid after this function returns
		static float minSize[1] = {64};
		static float aspectRatios[1] = {1};
		params.minSize = minSize;
		params.maxSize = nullptr;
		params.aspectRatios = aspectRatios;
		params.numMinSize = 1;
		params.numMaxSize = 0;
		params.numAspectRatios = 1;
		params.flip = true;
		params.clip = true;
		params.variance[0] = 0.1;
		params.variance[1] = 0.1;
		params.variance[2] = 0.2;
		params.variance[3] = 0.2;
		params.imgH = 0;
		params.imgW = 0;
		params.stepH = 0;
		params.stepW = 0;
		params.offset = 0.5;

		Inception3_conv_priorbox2_layer = std::unique_ptr<PriorBoxPlugin>(new PriorBoxPlugin(params));
		return (nvinfer1::IPlugin*)Inception3_conv_priorbox2_layer.get();	
	}

	else if (!strcmp(layerName,"Inception3/conv/priorbox3"))
	{
		assert(Inception3_conv_priorbox3_layer.get() == nullptr);
		HiPrioxBoxParameters params;
		// static so the pointers stored in params stay valid after this function returns
		static float minSize[1] = {128};
		static float aspectRatios[1] = {1};
		params.minSize = minSize;
		params.maxSize = nullptr;
		params.aspectRatios = aspectRatios;
		params.numMinSize = 1;
		params.numMaxSize = 0;
		params.numAspectRatios = 1;
		params.flip = true;
		params.clip = true;
		params.variance[0] = 0.1;
		params.variance[1] = 0.1;
		params.variance[2] = 0.2;
		params.variance[3] = 0.2;
		params.imgH = 0;
		params.imgW = 0;
		params.stepH = 0;
		params.stepW = 0;
		params.offset = 0.5;

		Inception3_conv_priorbox3_layer = std::unique_ptr<PriorBoxPlugin>(new PriorBoxPlugin(params));
		return  (nvinfer1::IPlugin*)Inception3_conv_priorbox3_layer.get();	
	}

	else if (!strcmp(layerName,"conv6/priorbox"))
	{
		assert(conv6_priorbox_layer.get() == nullptr);
		HiPrioxBoxParameters params;
		// static so the pointers stored in params stay valid after this function returns
		static float minSize[1] = {256};
		static float aspectRatios[1] = {1};
		params.minSize = minSize;
		params.maxSize = nullptr;
		params.aspectRatios = aspectRatios;
		params.numMinSize = 1;
		params.numMaxSize = 0;
		params.numAspectRatios = 1;
		params.flip = true;
		params.clip = true;
		params.variance[0] = 0.1;
		params.variance[1] = 0.1;
		params.variance[2] = 0.2;
		params.variance[3] = 0.2;
		params.imgH = 0;
		params.imgW = 0;
		params.stepH = 0;
		params.stepW = 0;
		params.offset = 0.5;

		conv6_priorbox_layer = std::unique_ptr<PriorBoxPlugin>(new PriorBoxPlugin(params));
		return  (nvinfer1::IPlugin*)conv6_priorbox_layer.get();	
	}

	else if (!strcmp(layerName,"conv7/priorbox"))
	{
		assert(conv7_priorbox_layer.get() == nullptr);
		HiPrioxBoxParameters params;
		// static so the pointers stored in params stay valid after this function returns
		static float minSize[1] = {512};
		static float aspectRatios[1] = {1};
		params.minSize = minSize;
		params.maxSize = nullptr;
		params.aspectRatios = aspectRatios;
		params.numMinSize = 1;
		params.numMaxSize = 0;
		params.numAspectRatios = 1;
		params.flip = true;
		params.clip = true;
		params.variance[0] = 0.1;
		params.variance[1] = 0.1;
		params.variance[2] = 0.2;
		params.variance[3] = 0.2;
		params.imgH = 0;
		params.imgW = 0;
		params.stepH = 0;
		params.stepW = 0;
		params.offset = 0.5;

		conv7_priorbox_layer = std::unique_ptr<PriorBoxPlugin>(new PriorBoxPlugin(params));
		return  (nvinfer1::IPlugin*)conv7_priorbox_layer.get();	
	}

	else if (!strcmp(layerName,"detection_out"))
	{
		assert(mDetection_out.get() == nullptr);
		plugin::DetectionOutputParameters params;
		params.shareLocation = true;
		params.varianceEncodedInTarget = true;
		params.backgroundLabelId = 0;
		params.numClasses = 2;
		params.topK = 100;
		params.keepTopK = 100;
		params.confidenceThreshold = 0.5;
		params.nmsThreshold = 0.45;
		params.codeType = CodeTypeSSD::CENTER_SIZE;
		params.inputOrder[0] = 0;
		params.inputOrder[1] = 1;
		params.inputOrder[2] = 2;
		params.confSigmoid = true;
		params.isNormalized = true;

		mDetection_out = std::unique_ptr<nvinfer1::plugin::INvPlugin, decltype(pluginDeleter)>(plugin::createSSDDetectionOutputPlugin(params),pluginDeleter);

		return mDetection_out.get();
	}

	else
	{
		std::cout << "this layer is not found :" << layerName << std::endl;
		assert(0);
		return nullptr;
	}

}

nvinfer1::IPlugin* PluginFactory::createPlugin(const char* layerName,const void* seriaData,size_t serialLength)
{
	assert(PluginFactory::isPluginExt(layerName));
	if(!strcmp(layerName,"Inception3/conv/priorbox1"))
	{
		assert(Inception3_conv_priorbox1_layer.get() == nullptr);
		Inception3_conv_priorbox1_layer = std::unique_ptr<PriorBoxPlugin>(new PriorBoxPlugin(seriaData,serialLength));

		return (nvinfer1::IPlugin*)Inception3_conv_priorbox1_layer.get();
	}
	else if(!strcmp(layerName,"Inception3/conv/priorbox2"))
	{
		assert(Inception3_conv_priorbox2_layer.get() == nullptr);
		Inception3_conv_priorbox2_layer = std::unique_ptr<PriorBoxPlugin>(new PriorBoxPlugin(seriaData,serialLength));

		return (nvinfer1::IPlugin*)Inception3_conv_priorbox2_layer.get();
	}
	else if(!strcmp(layerName,"Inception3/conv/priorbox3"))
	{
		assert(Inception3_conv_priorbox3_layer.get() == nullptr);
		Inception3_conv_priorbox3_layer = std::unique_ptr<PriorBoxPlugin>(new PriorBoxPlugin(seriaData,serialLength));

		return (nvinfer1::IPlugin*)Inception3_conv_priorbox3_layer.get();
	}
	else if(!strcmp(layerName,"conv6/priorbox"))
	{
		assert(conv6_priorbox_layer.get() == nullptr);
		conv6_priorbox_layer = std::unique_ptr<PriorBoxPlugin>(new PriorBoxPlugin(seriaData,serialLength));

		return (nvinfer1::IPlugin*)conv6_priorbox_layer.get();
	}
	else if(!strcmp(layerName,"conv7/priorbox"))
	{
		assert(conv7_priorbox_layer.get() == nullptr);
		conv7_priorbox_layer = std::unique_ptr<PriorBoxPlugin>(new PriorBoxPlugin(seriaData,serialLength));

		return (nvinfer1::IPlugin*)conv7_priorbox_layer.get();
	}
	else if(!strcmp(layerName,"detection_out"))
	{
		assert(mDetection_out.get() == nullptr);
		mDetection_out = std::unique_ptr<nvinfer1::plugin::INvPlugin, decltype(pluginDeleter)>(plugin::createSSDDetectionOutputPlugin(seriaData,serialLength),pluginDeleter);

		return mDetection_out.get();
	}
	else
	{
		std::cout << "the layer is not exit:" << layerName << std::endl;
		assert(0);
		return nullptr;
	}
}

bool PluginFactory::isPluginExt(const char* name)
{
	return (!strcmp(name,"Inception3/conv/priorbox1")
			||!strcmp(name,"Inception3/conv/priorbox2")
			||!strcmp(name,"Inception3/conv/priorbox3")
			||!strcmp(name,"conv6/priorbox")
			||!strcmp(name,"conv7/priorbox")
			||!strcmp(name,"detection_out"));
}

void PluginFactory::destroyPlugin()
{
	std::cout << "==========destroyPlugin=========" << std::endl;
	// use reset() rather than release() so the owned plugin objects are actually freed
	Inception3_conv_priorbox1_layer.reset();
	Inception3_conv_priorbox2_layer.reset();
	Inception3_conv_priorbox3_layer.reset();
	conv6_priorbox_layer.reset();
	conv7_priorbox_layer.reset();
	mDetection_out.reset();
}
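
For reference, this is roughly how a factory like the one above is handed to the caffe parser (a sketch following the samplePlugin pattern; the model file name is a placeholder, and the caffeToGIEModel from the stack traces in this thread wraps essentially these steps):

// Sketch: wiring the PluginFactory into the TensorRT 5 caffe parser.
#include "NvInfer.h"
#include "NvCaffeParser.h"

void parseWithFactory(nvinfer1::IBuilder* builder, PluginFactory& pluginFactory)
{
	nvinfer1::INetworkDefinition* network = builder->createNetwork();
	nvcaffeparser1::ICaffeParser* parser = nvcaffeparser1::createCaffeParser();
	// the parser consults isPluginExt()/createPlugin() for every layer type
	// it cannot handle natively (here: the PriorBox and DetectionOutput layers)
	parser->setPluginFactoryExt(&pluginFactory);
	const nvcaffeparser1::IBlobNameToTensor* blobNameToTensor =
		parser->parse("FaceBoxes.prototxt",   // deploy file from this thread
		              "FaceBoxes.caffemodel", // placeholder model file
		              *network, nvinfer1::DataType::kFLOAT);
	// ... mark "detection_out" and "keep_count" as network outputs, build the
	// engine, then destroy the parser and the network ...
	(void)blobNameToTensor;
}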

By the way, we aren't able to run your sampleSSD successfully!

It seems the README.md within sampleSSD doesn't document the steps well; can you follow the one below and try again?

# NVIDIA TensorRT Sample "sampleSSD"

This example is based on the following paper, SSD: Single Shot MultiBox 
Detector (https://arxiv.org/abs/1512.02325). The SSD network performs the 
task of object detection and localization in a single forward pass of the 
network. This network is trained on the VGG network using the PASCAL VOC 2007+2012 
datasets. This sample uses the dataset from here: https://github.com/weiliu89/caffe/tree/ssd

## How to get caffe model

* Download models_VGGNet_VOC0712_SSD_300x300.tar.gz using 
the link provided by the author of SSD: https://drive.google.com/file/d/0BzKzrI_SkD1_WVVTSmQxU0dVRzA/view
* Extract the contents. tar xvf models_VGGNet_VOC0712_SSD_300x300.tar.gz
* MD5 hash commands:
  md5sum models_VGGNet_VOC0712_SSD_300x300.tar.gz
  Expected MD5 hash:
  9a795fc161fff2e8f3aed07f4d488faf  models_VGGNet_VOC0712_SSD_300x300.tar.gz
* Edit deploy.prototxt and change all the “Flatten” layers to “Reshape” operations, with the following parameters:
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 1
      dim: 1
    }
  }
* Update the detection_out layer to add the keep_count output as expected by the TensorRT DetectionOutput Plugin. 
  top: "keep_count"
* Rename the updated deploy.prototxt to ssd.prototxt and move to data directory
  mv ssd.prototxt <TensorRT_Install_Directory>/data/ssd
* Move the caffemodel file to the data directory. 
  mv VGG_VOC0712_SSD_300x300_iter_120000.caffemodel <TensorRT_Install_Directory>/data/ssd

## TensorRT Plugin layers in SSD

SSD has 3 plugin layers. They are Normalize, PriorBox and DetectionOutput. 
You can check ssd.prototxt and modify the plugin layer parameters similar 
to other caffe layers. The details about each layer and its parameters are 
shown below in caffe.proto format.

~~~~
message LayerParameter {
  optional DetectionOutputParameter detection_output_param = 881;
  optional NormalizeParameter norm_param = 882;
  optional PriorBoxParameter prior_box_param = 883;
}

// Message that stores parameters used by Normalize layer
message NormalizeParameter {
  optional bool across_spatial = 1 [default = true];
  // Initial value of scale. Default is 1.0
  optional FillerParameter scale_filler = 2;
  // Whether or not scale parameters are shared across channels.
  optional bool channel_shared = 3 [default = true];
  // Epsilon for not dividing by zero while normalizing variance
  optional float eps = 4 [default = 1e-10];
}

// Message that stores parameters used by PriorBoxLayer
message PriorBoxParameter {
  // Encode/decode type.
  enum CodeType {
    CORNER = 1;
    CENTER_SIZE = 2;
    CORNER_SIZE = 3;
  }
  // Minimum box size (in pixels). Required!
  repeated float min_size = 1;
  // Maximum box size (in pixels). Required!
  repeated float max_size = 2;
  // Various aspect ratios. Duplicate ratios will be ignored.
  // If none is provided, we use default ratio 1.
  repeated float aspect_ratio = 3;
  // If true, will flip each aspect ratio.
  // For example, if there is aspect ratio "r",
  // we will generate aspect ratio "1.0/r" as well.
  optional bool flip = 4 [default = true];
  // If true, will clip the prior so that it is within [0, 1]
  optional bool clip = 5 [default = false];
  // Variance for adjusting the prior bboxes.
  repeated float variance = 6;
  // By default, we calculate img_height, img_width, step_x, step_y based on
  // bottom[0] (feat) and bottom[1] (img). Unless these values are explicitly
  // provided.
  // Explicitly provide the img_size.
  optional uint32 img_size = 7;
  // Either img_size or img_h/img_w should be specified; not both.
  optional uint32 img_h = 8;
  optional uint32 img_w = 9;

  // Explicitly provide the step size.
  optional float step = 10;
  // Either step or step_h/step_w should be specified; not both.
  optional float step_h = 11;
  optional float step_w = 12;

  // Offset to the top left corner of each cell.
  optional float offset = 13 [default = 0.5];
}

message NonMaximumSuppressionParameter {
  // Threshold to be used in NMS.
  optional float nms_threshold = 1 [default = 0.3];
  // Maximum number of results to be kept.
  optional int32 top_k = 2;
  // Parameter for adaptive NMS.
  optional float eta = 3 [default = 1.0];
}

// Message that stores parameters used by DetectionOutputLayer
message DetectionOutputParameter {
  // Number of classes to be predicted. Required!
  optional uint32 num_classes = 1;
  // If true, bounding box are shared among different classes.
  optional bool share_location = 2 [default = true];
  // Background label id. If there is no background class,
  // set it as -1.
  optional int32 background_label_id = 3 [default = 0];
  // Parameters used for NMS.
  optional NonMaximumSuppressionParameter nms_param = 4;

  // Type of coding method for bbox.
  optional PriorBoxParameter.CodeType code_type = 5 [default = CORNER];
  // If true, variance is encoded in target; otherwise we need to adjust the
  // predicted offset accordingly.
  optional bool variance_encoded_in_target = 6 [default = false];
  // Number of total bboxes to be kept per image after nms step.
  // -1 means keeping all bboxes after nms step.
  optional int32 keep_top_k = 7 [default = -1];
  // Only consider detections whose confidences are larger than a threshold.
  // If not provided, consider all boxes.
  optional float confidence_threshold = 8;
  // If true, visualize the detection results.
  optional bool visualize = 9 [default = false];
  // The threshold used to visualize the detection results.
  optional float visualize_threshold = 10;
}

~~~~

## Generate INT8 calibration batches

* Install Pillow
    - For Python2 run, `python2 -m pip install Pillow`
    - For Python3 run, `python3 -m pip install Pillow`
* Run `prepareINT8CalibrationBatches.sh` to generate INT8 batches. It selects 500 
random JPEG images from the PASCAL VOC dataset and converts them to PPM images. These 500 
PPM images are used to generate INT8 calibration batches. 
* Please keep the batch files in the <TensorRT_Install_Directory>/data/ssd/batches 
directory.
* If you want to use a different dataset to generate INT8 batches, please use 
batchPrepare.py and place the batch files in the <TensorRT_Install_Directory>/data/ssd/batches directory.

## Usage

This sample can be run as:

    ./sample_ssd [-h] [--mode FP32/FP16/INT8]

Hello,
First of all, since the priorbox layers of our caffe network use some parameters that are not defined in the PriorBoxParameters struct, we created the IPluginExt class and IPluginFactory class to implement them ourselves rather than use the API provided by INvPlugin (createSSDPriorBoxPlugin). However, when using the caffe parser to parse the network, the plugin layers we created resulted in the following problem:

sample_face_detection: nvPluginsLegacy.cpp:1068: virtual nvinfer1::Dims nvinfer1::plugin::ConcatLegacy::getOutputDimensions(int, const nvinfer1::Dims*, int):
Assertion `nbInputDims >= 1' failed.
Aborted (core dumped)
Owing to our lack of experience, we don't know whether the functions we created in the IPluginExt and IPluginFactory classes have problems or not. The code and network have been attached in the previous reply; I hope you can give me some detailed guidance, thank you!


Hello,

Your error happened in the legacy concat IPlugin, not in PriorBox.

sample_face_detection: nvPluginsLegacy.cpp:1068: virtual nvinfer1::Dims nvinfer1::plugin::ConcatLegacy::getOutputDimensions(int, const nvinfer1::Dims*, int): Assertion `nbInputDims >= 1' failed.
  1. You should NOT use the legacy concat IPlugin, as TRT supports concat natively on different axes since TRT 5.0.
  2. The assertion means the concat plugin received fewer inputs than required, so it's not a valid case. You should check whether the concat layer contains enough bottom layers, for example:
layer {
  name: "mbox_priorbox"
  type: "Concat"
  bottom: "conv4_3_norm_mbox_priorbox"
  bottom: "fc7_mbox_priorbox"
  bottom: "conv6_2_mbox_priorbox"
  bottom: "conv7_2_mbox_priorbox"
  bottom: "conv8_2_mbox_priorbox"
  bottom: "conv9_2_mbox_priorbox"
  top: "mbox_priorbox"
  concat_param {
    axis: 2
  }
}

Hello, thanks for your reply.

  When I parse the caffe model, it always shows an error like this: all concat input tensors must have the same dimensions except on the concatenation axis. According to the prototxt file, the input tensors are all the priorbox layers created by myself, and I am sure that their output dimensions are (1,2,86016), (1,2,1024), and (1,2,512), which I thought met the requirement in the error log; however, it doesn't work.
   It is worth noting that when I change the output dimensions to exactly the same values, including the dimension on the concatenation axis, it parses successfully!
   How can this be? Does TRT 5.0 only support exactly the same dimensions, without allowing different values on the concatenation axis? If so, what can I do to solve it?
layer {
  name: "mbox_priorbox"
  type: "Concat"
  bottom: "inception3_priorbox"   <b>#--priorbox layer,dims(1,2,86016)</b>
  bottom: "conv3_2_priorbox"     <b> #--priorbox layer,dims(1,2,1024)</b>
  bottom: "conv4_2_priorbox"      <b>#--priorbox layer,dims(1,2,512)</b>
  top: "mbox_priorbox"
  concat_param {
    axis: 2
  }
}

Hi,

I got your point.
As the error message indicates, we can't concat inputs whose dims differ in the non-axis dims,
so the suggestions would be:

  1. change the dim order to make the non-axis dims the same:
--priorbox layer,dims(1,2,86016)   ==> dims(2,86016,1)
--priorbox layer,dims(1,2,1024)    ==> dims(2,1024,1)
--priorbox layer,dims(1,2,512)     ==> dims(2,512,1)

or 2. adjust the layer parameter ‘axis’ to 3
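
For option 1, a minimal sketch of how the custom PriorBoxPlugin's getOutputDimensions shown earlier could report the reordered shape (assuming the enqueue side writes its data in the matching layout):

// Hypothetical variant of PriorBoxPlugin::getOutputDimensions: report
// (2, N, 1) instead of (1, 2, N), so that when mbox_priorbox concatenates
// along the second dimension, the non-axis dims (2 and 1) match across
// all of the priorbox inputs.
Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) override
{
	assert(index == 0 && nbInputDims == 2 && inputs[0].nbDims == 3);
	top_data_size = inputs[0].d[0] * inputs[0].d[1] * num_priors_ * 4;
	return Dims3(2, top_data_size, 1); // was Dims3(1, 2, top_data_size)
}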

Hello,
the problem raised above has been solved, but a new problem arose when the cuda engine was created. From the error log, I don't know what "formats" means or what formats it should get.
The error log is:

1_sample_face_detection: ../builder/cudnnBuilder2.cpp:834: virtual std::vector<nvinfer1::query::RequirementsCombination> 
 nvinfer1::builder::EngineTacticSupply::getSupportedFormats(const nvinfer1::builder::Node&,
 const nvinfer1::query::Ports<nvinfer1::query::AbstractTensor>&): Assertion `!formats.empty()' failed.

Thread 1 "1_sample_face_d" received signal SIGABRT, Aborted.
__GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:51
51	../sysdeps/unix/sysv/linux/raise.c: No such file or directory.

Calling up the stack with the gdb command, the log shows the following:

(gdb) where
#0  0x0000007fae5674d8 in __GI_raise (sig=sig@entry=6)
    at ../sysdeps/unix/sysv/linux/raise.c:51
#1  0x0000007fae5688b4 in __GI_abort () at abort.c:79
#2  0x0000007fae560b44 in __assert_fail_base (fmt=0x7fae65c0c0 "%s%s%s:%u: %s%sAssertion `%s' failed.\n%n", assertion=assertion@entry=0x7faf48d800 "!formats.empty()", file=file@entry=0x7faf48d7e0 "../builder/cudnnBuilder2.cpp", line=line@entry=834, function=function@entry=0x7faf48c0d0 "virtual std::vector<nvinfer1::query::RequirementsCombination> nvinfer1::builder::EngineTacticSupply::getSupportedFormats(const nvinfer1::builder::Node&, const nvinfer1::query::Ports<nvinfer1::query::A"...) at assert.c:92
#3  0x0000007fae560bc4 in __GI___assert_fail (assertion=0x7faf48d800 "!formats.empty()", file=0x7faf48d7e0 "../builder/cudnnBuilder2.cpp", line=834, function=0x7faf48c0d0 "virtual std::vector<nvinfer1::query::RequirementsCombination> nvinfer1::builder::EngineTacticSupply::getSupportedFormats(const nvinfer1::builder::Node&, const nvinfer1::query::Ports<nvinfer1::query::A"...) at assert.c:101
#4  0x0000007faf146664 in  () at /usr/lib/aarch64-linux-gnu/libnvinfer.so.5
#5  0x0000007faf173168 in nvinfer1::builder::<b>chooseFormatsAndTactics</b>(nvinfer1::builder::Graph&, nvinfer1::builder::TacticSupply&, std::unordered_map<std::string, std::vector<float, std::allocator<float> >, std::hash<std::string>, std::equal_to<std::string>, std::allocator<std::pair<std::string const, std::vector<float, std::allocator<float> > > > >*, bool) ()
    at /usr/lib/aarch64-linux-gnu/libnvinfer.so.5
#6  0x0000007faf14c040 in nvinfer1::builder::<b>makeEngineFromGraph</b>(nvinfer1::CudaEngineBuildConfig const&, nvinfer1::rt::HardwareContext const&, nvinfer1::builder::Graph&, std::unordered_map<std::string, std::vector<float, std::allocator<float> >, std::hash<std::string>, std::equal_to<std::string>, std::allocator<std::pair<std::string const, std::vector<float, std::allocator<float> > > > >*, int) () at /usr/lib/aarch64-linux-gnu/libnvinfer.so.5
#7  0x0000007faf14facc in nvinfer1::builder::<b>buildEngine</b>(nvinfer1::CudaEngineBuildConfig&, nvinfer1::rt::HardwareContext const&, nvinfer1::Network const&) ()
    at /usr/lib/aarch64-linux-gnu/libnvinfer.so.5
#8  0x0000007faf1ba2ec in nvinfer1::builder::Builder::<b>buildCudaEngine</b>(nvinfer1::INetworkDefinition&) () at /usr/lib/aarch64-linux-gnu/libnvinfer.so.5
#9  0x000000555555c3b8 in <b>caffeToGIEModel</b>(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&, unsigned int, nvcaffeparser1::IPluginFactoryExt*, nvinfer1::IHostMemory*&) ()
#10 0x0000005555562334 in <b>main</b> ()

What can I do to solve this problem?
Thank you for your time!!

How did you implement the following API? You can refer to the following sample:

@samplePlugin/samplePlugin.cpp
    bool supportsFormat(DataType type, PluginFormat format) const override 
    { 
         return (type == DataType::kFLOAT || type == DataType::kHALF) && format == PluginFormat::kNCHW; 
    }

Hello,
I have already implemented the API, exactly as you said above.
By the way, when is this API called, and should the return value of this function normally be 1? I ask because I found it is sometimes 0.

Yes, it will be invoked several times and will return false sometimes, but it should be true at least once.
The purpose of this process is to figure out which combinations of data type and format your plugin can support.
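
If it helps to see that probing, a hypothetical debugging variant of the same override (assuming <iostream> and the TensorRT plugin headers are already included; the logging is purely illustrative, not part of the required API) would print every combination the builder tries:

bool supportsFormat(DataType type, PluginFormat format) const override
{
    // Same acceptance rule as the sample, with each builder query logged
    // so the probed (type, format) pairs become visible.
    bool ok = (type == DataType::kFLOAT || type == DataType::kHALF)
              && format == PluginFormat::kNCHW;
    std::cout << "supportsFormat(type=" << static_cast<int>(type)
              << ", format=" << static_cast<int>(format)
              << ") -> " << (ok ? "true" : "false") << std::endl;
    return ok;
}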

Is it working now with this API?

Hello,
Sorry for not explaining clearly. I meant that this API was already implemented before your suggestion, and it doesn’t work at all.

Okay, it seems this is not related to the plugin layer from what you described (sometimes it will return true). So we will need to figure out which kind of layer TRT can’t find a supported format for.
We can’t get the layer info from the limited message. Can we reproduce this issue locally, so that we can gdb it and figure out what’s wrong here?

Hello,
This problem doesn’t happen again when I use FP16 mode to parse the network, although I don’t know why. Now I want to ask about the parameters of the API enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream).

In this function, has the outputs pointer already been allocated memory before entering? If so, how is the size of that memory defined? If not, how should I assign the results computed in the priorbox plugin layer to the outputs pointer?

When I execute the following function, implemented by myself, it always shows Segmentation fault (core dumped).
This is part of my implementation; please ignore the details of the assignment of top_data. The problem occurs in the bold part.

int PriorBoxPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream)
{
	float step_h,step_w;
	if (mPriorBoxParamters.stepH == 0 ||mPriorBoxParamters.stepW == 0)
	{
		step_w = mImgWidth/mLayerWidth;
		step_h = mImgHeight/mLayerHeight;
	}
	else
	{
		step_w = mPriorBoxParamters.stepW;
		step_h = mPriorBoxParamters.stepH;
	}

	int dim = mLayerWidth * mLayerHeight *mPriorBoxParamters.numPriors * 4;
	float top_data[2][dim];	
	int idx = 0;
	// set the data.
	int count =0;
	for (int h = 0; h < mLayerHeight; ++h) {
		for (int w = 0; w < mLayerWidth; ++w) {
			for (int i = 0; i < mPriorBoxParamters.numPriors; ++i) {
				for (int j = 0; j < 4; ++j) {
					top_data[0][count] = 1.0f;
					++count;
				}
			}
		}
	}
	

	// set the variance.
	count = 0;	// reset the counter for the variance plane
	for (int h = 0; h < mLayerHeight; ++h) {
		for (int w = 0; w < mLayerWidth; ++w) {
			for (int i = 0; i < mPriorBoxParamters.numPriors; ++i) {
				for (int j = 0; j < 4; ++j) {
					top_data[1][count] = 2.0f;
					++count;
				}
			}
		}
	}
	std::cout << "enqueue::memcpy start" <<std::endl;
	//outputs = (void**)malloc(2*sizeof(float*));
	<b>for(int j=0;j<2;j++)
	{
		outputs[j] = (void*)malloc(dim*sizeof(float));
	}
	assert((dim*sizeof(float))==(sizeof(top_data[0])));

	for(int i=0;i<2;i++)
	{
		memcpy(outputs[i],( const void *)top_data[i],sizeof(top_data[i]));
	}
</b>


	std::cout << "enqueue::end" <<std::endl;
	return 0;
}

Hi,

The output memory has been allocated by TRT internally, and the size is the volume of your output dimensions, so you just need to compute based on the input buffer and place the result in the output buffer.
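
For illustration, here is a minimal sketch of the corrected copy step, assuming the plugin declares a single output whose volume is 2*dim (so both planes live in outputs[0]) and that top_data holds the host-side results computed earlier. Note that the buffers TRT passes to enqueue are device pointers, so the copy must be a host-to-device cudaMemcpyAsync rather than a plain memcpy:

int PriorBoxPlugin::enqueue(int batchSize, const void* const* inputs,
                            void** outputs, void* workspace, cudaStream_t stream)
{
    // ... fill top_data[0] (priors) and top_data[1] (variances) as before ...

    // outputs[0] was allocated by TRT with the volume returned from
    // getOutputDimensions(); do not malloc or free it here.
    float* out = static_cast<float*>(outputs[0]);
    cudaMemcpyAsync(out,       top_data[0], dim * sizeof(float),
                    cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(out + dim, top_data[1], dim * sizeof(float),
                    cudaMemcpyHostToDevice, stream);
    return 0;
}

In a real plugin the prior data would typically be computed once and kept in device memory rather than rebuilt on the stack at every enqueue, but the sketch keeps the structure of the original code.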

Hi, J-Penny
In your priorbox implementation code:

mLayerWidth = inputDims[0].d[1];
mLayerHeight = inputDims[0].d[2];
if (mPriorBoxParamters.imgH == 0 || mPriorBoxParamters.imgW == 0)
{
	mImgWidth = inputDims[1].d[1];
	mImgHeight =  inputDims[1].d[2];
}
else
{
	mImgWidth = mPriorBoxParamters.imgW;
	mImgHeight =  mPriorBoxParamters.imgH;

}

Dims format should be CHW, so
mLayerHeight = inputDims[0].d[1];
mLayerWidth = inputDims[0].d[2];
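
By the same CHW logic, d[1] is height and d[2] is width for the data input as well, so a hedged sketch of the whole corrected block (assuming, as in the snippet above, that inputDims[1] is the full-resolution data blob) would be:

mLayerHeight = inputDims[0].d[1];   // CHW: d[0]=C, d[1]=H, d[2]=W
mLayerWidth  = inputDims[0].d[2];
if (mPriorBoxParamters.imgH == 0 || mPriorBoxParamters.imgW == 0)
{
	mImgHeight = inputDims[1].d[1];
	mImgWidth  = inputDims[1].d[2];
}
else
{
	mImgHeight = mPriorBoxParamters.imgH;
	mImgWidth  = mPriorBoxParamters.imgW;
}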