Failure using my own ZF Faster R-CNN model in TensorRT 2

Hi,

I tried the provided Faster R-CNN VGG16 example and it works well.
My own ZF caffemodel and definition file work in Caffe, but they fail when used in TensorRT.
The definition file was modified following the provided VGG16 prototxt. All the parameters at the beginning of the example code are the same except input_H, input_W, pooled_H, and pooled_W, which I set to match my own network (see the sketch below).
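The parameters I mean are the constants at the top of the sample code. The following is only a sketch: the names follow the VGG16 sampleFasterRCNN code, and the ZF-specific values are assumptions that have to match your own deploy prototxt and training configuration.

// Sketch only: constant names follow the VGG16 sampleFasterRCNN code; the
// ZF values are assumptions and must match your own deploy prototxt.
static const int INPUT_C = 3;       // channels of the "data" input
static const int INPUT_H = 375;     // must equal the "data" input_shape height
static const int INPUT_W = 500;     // must equal the "data" input_shape width
static const int IM_INFO_SIZE = 3;  // size of the "im_info" input

// ROI pooling output: the VGG16 sample uses 7x7; the stock py-faster-rcnn ZF
// model uses 6x6, so pooled_H / pooled_W change accordingly.
static const int POOLED_H = 6;
static const int POOLED_W = 6;

static const int OUTPUT_CLS_SIZE = 21;                    // classes incl. background
static const int OUTPUT_BBOX_SIZE = OUTPUT_CLS_SIZE * 4;  // 4 box deltas per class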

So

  1. Could you provide your ZF definition prototxt file? I see there is a ZF caffemodel in the model package available for download.

  2. Has anyone succeeded in running their own modified Faster R-CNN definition prototxt file in TensorRT 2?

Thank you.

Hi,
Have you changed the ZF network? I have the same problem.

I'm hitting the same problem. VGG16 works fine.
ZF crashes in:

Thread 1 “my_fasterRCNN_d” received signal SIGSEGV, Segmentation fault.
0x00007fffebd995f7 in CaffeWeightFactory::getBlobProtoData(ditcaffe::BlobProto const&, ditcaffe::Type, std::vector<void*, std::allocator<void*> >&) ()
from /usr/local/nvidia/tensorrt/targets/x86_64-linux-gnu/lib/libnvparsers.so.4.0.2
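For context, the segfault comes from inside the Caffe parser while it converts the caffemodel's weight blobs. In my code it is triggered from the parse() call, roughly like the sketch below; deployFile, modelFile, outputNames, and pluginFactory stand in for the sample's own variables, so this is only an approximation of my setup.

#include <string>
#include <vector>
#include "NvCaffeParser.h"
#include "NvInfer.h"

// Sketch of the parsing step where the crash surfaces (TensorRT Caffe parser API).
bool parseCaffeModel(const char* deployFile, const char* modelFile,
                     const std::vector<std::string>& outputNames,
                     nvinfer1::INetworkDefinition& network,
                     nvcaffeparser1::IPluginFactory& pluginFactory)
{
    nvcaffeparser1::ICaffeParser* parser = nvcaffeparser1::createCaffeParser();
    parser->setPluginFactory(&pluginFactory);

    // The SIGSEGV above (CaffeWeightFactory::getBlobProtoData) is raised inside
    // this call, i.e. while the parser reads the ZF caffemodel's weight blobs.
    const nvcaffeparser1::IBlobNameToTensor* blobNameToTensor =
        parser->parse(deployFile, modelFile, network, nvinfer1::DataType::kFLOAT);

    if (blobNameToTensor == nullptr)   // parse failed without crashing
    {
        parser->destroy();
        return false;
    }

    // Mark the network outputs by blob name before releasing the parser.
    for (const std::string& name : outputNames)
        network.markOutput(*blobNameToTensor->find(name.c_str()));

    parser->destroy();
    return true;
}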

Here is the prototxt file I used:

name: "ZF"

input: "data"
input_shape {
  dim: 2
  dim: 3
  dim: 375
  dim: 500
}

input: "im_info"
input_shape {
  dim: 2
  dim: 1
  dim: 1
  dim: 3
}

#========= conv1-conv5 ============

layer {
	name: "conv1"
	type: "Convolution"
	bottom: "data"
	top: "conv1"
	convolution_param {
		num_output: 96
		kernel_size: 7
		pad: 3
		stride: 2
	}
}
layer {
	name: "relu1"
	type: "ReLU"
	bottom: "conv1"
	top: "conv1"
}
layer {
	name: "norm1"
	type: "LRN"
	bottom: "conv1"
	top: "norm1"
	lrn_param {
		local_size: 3
		alpha: 0.00005
		beta: 0.75
		norm_region: WITHIN_CHANNEL
		engine: CAFFE
	}
}
layer {
	name: "pool1"
	type: "Pooling"
	bottom: "norm1"
	top: "pool1"
	pooling_param {
		kernel_size: 3
		stride: 2
		pad: 1
		pool: MAX
	}
}
layer {
	name: "conv2"
	type: "Convolution"
	bottom: "pool1"
	top: "conv2"
	convolution_param {
		num_output: 256
		kernel_size: 5
		pad: 2
		stride: 2
	}
}
layer {
	name: "relu2"
	type: "ReLU"
	bottom: "conv2"
	top: "conv2"
}
layer {
	name: "norm2"
	type: "LRN"
	bottom: "conv2"
	top: "norm2"
	lrn_param {
		local_size: 3
		alpha: 0.00005
		beta: 0.75
		norm_region: WITHIN_CHANNEL
		engine: CAFFE
	}
}
layer {
	name: "pool2"
	type: "Pooling"
	bottom: "norm2"
	top: "pool2"
	pooling_param {
		kernel_size: 3
		stride: 2
		pad: 1
		pool: MAX
	}
}
layer {
	name: "conv3"
	type: "Convolution"
	bottom: "pool2"
	top: "conv3"
	convolution_param {
		num_output: 384
		kernel_size: 3
		pad: 1
		stride: 1
	}
}
layer {
	name: "relu3"
	type: "ReLU"
	bottom: "conv3"
	top: "conv3"
}
layer {
	name: "conv4"
	type: "Convolution"
	bottom: "conv3"
	top: "conv4"
	convolution_param {
		num_output: 384
		kernel_size: 3
		pad: 1
		stride: 1
	}
}
layer {
	name: "relu4"
	type: "ReLU"
	bottom: "conv4"
	top: "conv4"
}
layer {
	name: "conv5"
	type: "Convolution"
	bottom: "conv4"
	top: "conv5"
	convolution_param {
		num_output: 256
		kernel_size: 3
		pad: 1
		stride: 1
	}
}
layer {
	name: "relu5"
	type: "ReLU"
	bottom: "conv5"
	top: "conv5"
}

#========= RPN ============

layer {
  name: "rpn_conv/3x3"
  type: "Convolution"
  bottom: "conv5"
  top: "rpn/output"
  convolution_param {
    num_output: 256
    kernel_size: 3 pad: 1 stride: 1
  }
}
layer {
  name: "rpn_relu/3x3"
  type: "ReLU"
  bottom: "rpn/output"
  top: "rpn/output"
}

layer {
  name: "rpn_cls_score"
  type: "Convolution"
  bottom: "rpn/output"
  top: "rpn_cls_score"
  convolution_param {
    num_output: 18   # 2(bg/fg) * 9(anchors)
    kernel_size: 1 pad: 0 stride: 1
  }
}
layer {
  name: "rpn_bbox_pred"
  type: "Convolution"
  bottom: "rpn/output"
  top: "rpn_bbox_pred"
  convolution_param {
    num_output: 36   # 4 * 9(anchors)
    kernel_size: 1 pad: 0 stride: 1
  }
}
layer {
   bottom: "rpn_cls_score"
   top: "rpn_cls_score_reshape"
   name: "ReshapeCTo2"
   type: "IPlugin"
}

#========= RoI Proposal ============

layer {
  name: "rpn_cls_prob"
  type: "Softmax"
  bottom: "rpn_cls_score_reshape"
  top: "rpn_cls_prob"
}
layer {
  name: 'ReshapeCTo18'
  type: 'IPlugin'
  bottom: 'rpn_cls_prob'
  top: 'rpn_cls_prob_reshape'
}
layer {
  name: 'RPROIFused'
  type: 'IPlugin'
  bottom: 'rpn_cls_prob_reshape'
  bottom: 'rpn_bbox_pred'
  bottom: 'conv5'
  bottom: 'im_info'
  top: 'rois'
  top: 'roi_pool_conv5'
}
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "roi_pool_conv5"
  top: "fc6"
  inner_product_param {
    num_output: 4096
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  inner_product_param {
    num_output: 4096
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc7"
  top: "fc7"
}
layer {
  name: "cls_score"
  type: "InnerProduct"
  bottom: "fc7"
  top: "cls_score"
  inner_product_param {
    num_output: 21
  }
}
layer {
  name: "bbox_pred"
  type: "InnerProduct"
  bottom: "fc7"
  top: "bbox_pred"
  inner_product_param {
    num_output: 84
  }
}
layer {
  name: "cls_prob"
  type: "Softmax"
  bottom: "cls_score"
  top: "cls_prob"
}
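
One thing to double-check when modifying the prototxt: the three IPlugin layers above (ReshapeCTo2, ReshapeCTo18, RPROIFused) are resolved purely by layer name through the plugin factory that the sample passes to the Caffe parser, so the names must match exactly and the RPROI plugin has to be built with ZF-appropriate parameters. Below is a rough sketch of that mapping; createReshapePlugin and createRPROIPlugin are placeholders for the sample's own plugin constructors, not a documented TensorRT API.

#include <cstring>
#include "NvCaffeParser.h"
#include "NvInfer.h"

// Placeholders for the sample's own plugin constructors (not a TensorRT API).
nvinfer1::IPlugin* createReshapePlugin(int channelsPerGroup);
nvinfer1::IPlugin* createRPROIPlugin(nvinfer1::DimsHW pooledSize);

class PluginFactory : public nvcaffeparser1::IPluginFactory
{
public:
    bool isPlugin(const char* name) override
    {
        return !std::strcmp(name, "ReshapeCTo2")
            || !std::strcmp(name, "ReshapeCTo18")
            || !std::strcmp(name, "RPROIFused");
    }

    // The Caffe parser calls this for every layer of type IPlugin; the layer
    // name in the prototxt is the only key, so it has to match exactly.
    nvinfer1::IPlugin* createPlugin(const char* layerName,
                                    const nvinfer1::Weights* /*weights*/,
                                    int /*nbWeights*/) override
    {
        if (!std::strcmp(layerName, "ReshapeCTo2"))
            return createReshapePlugin(2);
        if (!std::strcmp(layerName, "ReshapeCTo18"))
            return createReshapePlugin(18);
        if (!std::strcmp(layerName, "RPROIFused"))
            // ZF typically uses 6x6 ROI pooling (7x7 in the VGG16 sample); the
            // real plugin also needs feature stride, NMS and anchor settings.
            return createRPROIPlugin(nvinfer1::DimsHW(6, 6));
        return nullptr;
    }
};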

Has anyone gotten results with this?
Does Faster R-CNN run in TensorRT on a Jetson TX2 at 30 FPS?