When I implemented the leaky ReLU layer as an IPlugin layer on the TX2 and called the caffeParser, I got the error "assertion 'mLeakyReluLayer.get()==nullptr' failed". I want to ask whether this is because the leakyRelu IPlugin layer is created more than once. Here is the code where I create the leakyRelu IPlugin.
#include "pluginImplement.h"
#include "leakyReluLayer.h"
#include "cudaUtility.h"
#include <fstream>
#include <sstream>
#include <iostream>
#include <cstring> // strncmp
#include <cassert> // assert
using namespace std;
/******************************/
// PluginFactory
/******************************/
nvinfer1::IPlugin* PluginFactory::createPlugin(const char* layerName, const nvinfer1::Weights* weights, int nbWeights)
{
    assert(isPlugin(layerName));
    if (!strncmp(layerName, "leakyRelu", 9))
    {
        // Only a single plugin instance is held by the factory: this assert
        // fires if the parser requests a second leakyRelu layer.
        assert(mLeakyReluLayer.get() == nullptr);
        mLeakyReluLayer = std::unique_ptr<LeakyReluLayer>(new LeakyReluLayer());
        return mLeakyReluLayer.get();
    }
    else
    {
        assert(0);
        return nullptr;
    }
}
nvinfer1::IPlugin* PluginFactory::createPlugin(const char* layerName, const void* serialData, size_t serialLength)
{
    assert(isPlugin(layerName));
    if (!strncmp(layerName, "leakyRelu", 9))
    {
        // Deserialization path: rebuild the plugin from a serialized engine.
        assert(mLeakyReluLayer.get() == nullptr);
        mLeakyReluLayer = std::unique_ptr<LeakyReluLayer>(new LeakyReluLayer(serialData, serialLength));
        return mLeakyReluLayer.get();
    }
    else
    {
        assert(0);
        return nullptr;
    }
}
bool PluginFactory::isPlugin(const char* name)
{
    return (!strncmp(name, "leakyRelu", 9));
}
void PluginFactory::destroyPlugin()
{
    // reset() destroys the held instance so the factory can create a new one;
    // release() would only drop ownership and leak the plugin.
    mLeakyReluLayer.reset();
}
/******************************/
// LeakyRelu Plugin Layer
/******************************/
LeakyReluLayer::LeakyReluLayer(const void* buffer, size_t size)
{
    assert(size == (6 * sizeof(int) + sizeof(float)));
    const int* d = reinterpret_cast<const int*>(buffer);
    dimsData      = DimsCHW{d[0], d[1], d[2]};
    dimsLeakyRelu = DimsCHW{d[3], d[4], d[5]};
    // negative_slope is a float; reading it back as an int would truncate 0.1 to 0.
    negative_slope = *reinterpret_cast<const float*>(d + 6);
}
Dims LeakyReluLayer::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
    // Element-wise activation: the output shape matches the input shape.
    assert(nbInputDims == 1);
    return DimsCHW(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]);
}
int LeakyReluLayer::initialize()
{
    return 0;
}
int LeakyReluLayer::enqueue(int batchSize, const void* const* inputs, void** outputs, void*, cudaStream_t stream)
{
    CHECK(cudaDeviceSynchronize()); // cudaThreadSynchronize() is deprecated
    int width    = dimsData.w();
    int height   = dimsData.h();
    int channels = dimsData.c();
    // Launch the leaky ReLU CUDA kernel on the given stream.
    leakyReluLayer((float*)inputs[0], width, height, channels, batchSize, (float*)outputs[0], negative_slope, stream);
    return 0;
}
void LeakyReluLayer::configure(const Dims* inputs, int, const Dims* outputs, int, int)
{
    // Cache the input dimensions; output dims equal input dims for this layer.
    dimsData      = DimsCHW{inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]};
    dimsLeakyRelu = DimsCHW{inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]};
    negative_slope = 0.1f;
}
void LeakyReluLayer::serialize(void* buffer)
{
    int* d = reinterpret_cast<int*>(buffer);
    d[0] = dimsData.c();
    d[1] = dimsData.h();
    d[2] = dimsData.w();
    d[3] = dimsData.c();
    d[4] = dimsData.h();
    d[5] = dimsData.w();
    // Write the slope as a float; assigning it to an int slot would truncate 0.1 to 0.
    *reinterpret_cast<float*>(d + 6) = negative_slope;
}
size_t LeakyReluLayer::getSerializationSize()
{
    // 6 ints for the two CHW dimensions plus 1 float for the slope.
    return 6 * sizeof(int) + sizeof(float);
}
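If the assertion really does fail because createPlugin is invoked once for every leakyRelu layer in the network, I suppose the factory would need to keep one plugin instance per call instead of a single unique_ptr. A rough sketch of what I mean (the mLeakyReluLayers vector member is just my own naming, not from any sample):

// in PluginFactory, replacing the single unique_ptr member
// (needs #include <vector>)
std::vector<std::unique_ptr<LeakyReluLayer>> mLeakyReluLayers;

nvinfer1::IPlugin* PluginFactory::createPlugin(const char* layerName, const nvinfer1::Weights* weights, int nbWeights)
{
    assert(isPlugin(layerName));
    if (!strncmp(layerName, "leakyRelu", 9))
    {
        // one instance per parsed layer, so a second leakyRelu
        // layer no longer trips an assertion
        mLeakyReluLayers.emplace_back(new LeakyReluLayer());
        return mLeakyReluLayers.back().get();
    }
    assert(0);
    return nullptr;
}

void PluginFactory::destroyPlugin()
{
    mLeakyReluLayers.clear();
}

The deserialization overload of createPlugin would change the same way. Is this the right direction, or is there a recommended pattern for networks with repeated plugin layers?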