Hi, I want to implement depthwise separable convolution in TensorRT 2.1. I just followed the samplePlugin example, but the nbWeights parameter is 0 and no weights are passed into the createPlugin function.
My code is as follows:
/// Build-time plugin factory: creates a DepthwiseConvLayer for layers whose
/// names match the depthwise patterns accepted by isPlugin().
/// @param layerName  name of the layer from the network definition
/// @param weights    array of nbWeights weight blobs parsed for this layer
/// @param nbWeights  number of entries in weights (may be 0 — see count guard)
/// @return non-owning pointer to the plugin; ownership stays in depLayers
nvinfer1::IPlugin* PluginFactory::createPlugin(const char* layerName, const nvinfer1::Weights* weights, int nbWeights){
    assert(isPlugin(layerName));
    LayerType layerType = getLayerType(layerName);
    switch (layerType) {
    case DEPTHWISELAYER_S1:
        cout << "layer : " << layerName << " weight num " << nbWeights << endl;
        // Each layer name must be registered at most once.
        assert(std::find_if(depLayers.begin(), depLayers.end(), [&](const DepthwiseLayer& r) {return r.first == layerName;}) == depLayers.end());
        dLayer = std::unique_ptr<DepthwiseConvLayer>(new DepthwiseConvLayer(weights, nbWeights, 1));
        depLayers.push_back(std::make_pair(layerName, std::move(dLayer)));
        // BUG FIX: dLayer was just moved into depLayers, so dLayer.get() is
        // nullptr here. Return the pointer now owned by the container.
        return depLayers.back().second.get();
    case DEPTHWISELAYER_S2:
        cout << "layer : " << layerName << " weight num " << nbWeights;
        // BUG FIX: only dereference weights[0] when there actually is a
        // weight blob — nbWeights can be 0 (e.g. when the parser did not
        // hand weights to the plugin), and weights[0].count was UB then.
        if (nbWeights > 0)
            cout << " count :" << weights[0].count;
        cout << endl;
        assert(std::find_if(depLayers.begin(), depLayers.end(), [&](const DepthwiseLayer& r) {return r.first == layerName;}) == depLayers.end());
        dLayer = std::unique_ptr<DepthwiseConvLayer>(new DepthwiseConvLayer(weights, nbWeights, 2));
        depLayers.push_back(std::make_pair(layerName, std::move(dLayer)));
        // Same use-after-move fix as the stride-1 case above.
        return depLayers.back().second.get();
    default:
        printf("Not supportted layer\n");
        assert(0);
        // BUG FIX: with NDEBUG the assert vanishes and control fell off the
        // end of a value-returning function (undefined behavior).
        return nullptr;
    }
}
/// Returns true when this factory handles the named layer: either one of the
/// fixed custom-layer names matched exactly, or a name containing a
/// depthwise-convolution substring ("depthwise_s1" / "depthwise_s2").
bool PluginFactory::isPlugin(const char *name){
    static const char* const exactNames[] = {
        "preprocess", "postprocess", "reorg", "region_output"};
    for (const char* exact : exactNames) {
        if (strcmp(name, exact) == 0)
            return true;
    }
    // Depthwise layers are matched by substring so per-layer prefixes
    // (e.g. "conv2_1/depthwise_s1") still hit.
    return strstr(name, "depthwise_s1") != nullptr
        || strstr(name, "depthwise_s2") != nullptr;
}
/// Runtime (deserialization) plugin factory: re-creates each plugin from its
/// serialized blob when an engine is loaded.
/// @param layerName    name of the layer being deserialized
/// @param serialData   opaque serialized plugin state
/// @param serialLength size of serialData in bytes
/// @return non-owning pointer to the plugin; ownership stays in the factory
IPlugin* PluginFactory::createPlugin(const char* layerName, const void *serialData, size_t serialLength) {
    assert(isPlugin(layerName));
    LayerType layerType = getLayerType(layerName);
    switch (layerType) {
    case PREPROCESSLAYER:
        assert(preprocessLayer.get() == nullptr);
        preprocessLayer = std::unique_ptr<PreprocessLayer>(new PreprocessLayer());
        return preprocessLayer.get();
    case POSTPROCESSLAYER:
        assert(postProcessLayer.get() == nullptr);
        postProcessLayer = std::unique_ptr<PostProcessLayer>(new PostProcessLayer());
        return postProcessLayer.get();
    case REORGLAYER:
        assert(reorgLayer.get() == nullptr);
        reorgLayer = std::unique_ptr<ReorgLayer>(new ReorgLayer());
        return reorgLayer.get();
    case DEPTHWISELAYER_S1:
    case DEPTHWISELAYER_S2:
        // Each layer name must be deserialized at most once.
        assert(std::find_if(depLayers.begin(), depLayers.end(), [&](const DepthwiseLayer& r) {return r.first == layerName;}) == depLayers.end());
        dLayer = std::unique_ptr<DepthwiseConvLayer>(new DepthwiseConvLayer(serialData, serialLength));
        depLayers.push_back(std::make_pair(layerName, std::move(dLayer)));
        // BUG FIX: dLayer was just moved into depLayers, so dLayer.get()
        // returned nullptr here. Return the container-owned pointer instead.
        return depLayers.back().second.get();
    case OUTPUTLAYER:
        assert(outputLayer.get() == nullptr);
        outputLayer = std::unique_ptr<OutputLayer>(new OutputLayer());
        return outputLayer.get();
    default:
        printf("Not supportted layer\n");
        assert(0);
        // BUG FIX: with NDEBUG the assert vanishes and control fell off the
        // end of a value-returning function (undefined behavior).
        return nullptr;
    }
}
The layer configuration in the prototxt is:
# Depthwise convolution layer. The name must contain "depthwise_s1" so that
# PluginFactory::isPlugin() (which matches by substring) routes it to the
# custom DepthwiseConvLayer plugin with stride 1.
layer {
name: "conv2_1/depthwise_s1"
type: "Convolution"
bottom: "conv1"
top: "conv2_1/dw"
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
# Depthwise: group == num_output == input channels, one filter per channel.
num_output: 32
#num_output: 1
bias_term: false
pad: 1
kernel_size: 3
group: 32
#engine: CAFFE
stride: 1
weight_filler {
type: "msra"
}
}
}