Dynamic Onnx to trt: Misc Error in allocateResources: 2

Description

I wrote a DeformableConNetPlugin that inherits from IPluginV2DynamicExt, then used onnx-tensorrt to convert dla34.onnx to a .trt engine.
The ONNX parser can parse dla34.onnx without problems, but when it starts to build the engine, this error occurs:

Building TensorRT engine, FP16 available:0
    Max batch size:     32
    Max workspace size: 1024 MiB

[2020-10-28 09:55:40   ERROR] ../rtSafe/cuda/cudaPluginV2Runner.cpp (28) - Misc Error in allocateResources: 2
[2020-10-28 09:55:40   ERROR] ../rtSafe/cuda/cudaPluginV2Runner.cpp (28) - Misc Error in allocateResources: 2
terminate called after throwing an instance of 'std::runtime_error'

I’m not clear where the problem is.

Environment

TensorRT Version: TensorRT-7.0.0.11
GPU Type: 1080ti
Nvidia Driver Version: 440.36
CUDA Version: 10.2
CUDNN Version: 7.6.5

Relevant Files

I added a dcn_v2 op to onnx-tensorrt and configured the builder for dynamic shapes:

   auto trt_engine = common::infer_object(trt_builder->buildEngineWithConfig(*trt_network.get(),*config));

Here are some relevant files:

// Importer for the custom "dcn_v2" (Deformable Convolution v2) ONNX node.
// Inputs: (0) input, (1) offset, (2) mask as tensors; (3) weight and optional
// (4) bias as initializers. Hyper-parameters come from node attributes.
// Instantiates the "DeformableConNet_TRT" plugin from the plugin registry.
DEFINE_BUILTIN_OP_IMPORTER(dcn_v2)
{
    ASSERT(inputs.at(0).is_tensor(), ErrorCode::kUNSUPPORTED_NODE); // input
    ASSERT(inputs.at(1).is_tensor(), ErrorCode::kUNSUPPORTED_NODE); // offset
    ASSERT(inputs.at(2).is_tensor(), ErrorCode::kUNSUPPORTED_NODE); // mask
    ASSERT(inputs.at(3).is_weights(), ErrorCode::kUNSUPPORTED_NODE); // weight

    // Only the tensor inputs (input/offset/mask) are wired into the plugin
    // layer; the weight/bias initializers travel through PluginFields instead.
    std::vector<nvinfer1::ITensor*> tensors;
    for (auto& input : inputs)
    {
        if(input.is_tensor())
            tensors.push_back(&input.tensor());
    }

    auto kernel_weights = inputs.at(3).weights();
    nvinfer1::Weights bias_weights;
    if( inputs.size() == 5 ) {
        // Optional bias: must be 1-D and match the output-channel count
        // (dimension 0 of the OIHW kernel).
        ASSERT(inputs.at(4).is_weights(), ErrorCode::kUNSUPPORTED_NODE);
        auto shaped_bias_weights = inputs.at(4).weights();
        ASSERT(shaped_bias_weights.shape.nbDims == 1, ErrorCode::kINVALID_NODE);
        ASSERT(shaped_bias_weights.shape.d[0] == kernel_weights.shape.d[0], ErrorCode::kINVALID_NODE);
  
        bias_weights = shaped_bias_weights;
    } else {
        bias_weights = ShapedWeights::empty(kernel_weights.type);
    }

    // Kernel layout is assumed OIHW: [out_channel, in_channel, kH, kW].
    int out_channel,in_channel,kernel_H,kernel_W,deformable_group,dilation,groups,padding,stride;
    out_channel = kernel_weights.shape.d[0];
    in_channel = kernel_weights.shape.d[1];
    kernel_H = kernel_weights.shape.d[2];
    kernel_W = kernel_weights.shape.d[3];

    // Scalar attributes all default to 1 when absent from the node.
    OnnxAttrs attrs(node, ctx);
    deformable_group = attrs.get("deformable_group", 1);
    dilation = attrs.get("dilation", 1);
    groups = attrs.get("groups", 1);
    padding = attrs.get("padding", 1);
    stride = attrs.get("stride", 1);

    // Populate DeformableConNet plugin properties.
    const std::string pluginName = "DeformableConNet_TRT";
    const std::string pluginVersion = "001";
    std::vector<nvinfer1::PluginField> f;

    f.emplace_back("in_channel", &in_channel, nvinfer1::PluginFieldType::kINT32, 1);
    f.emplace_back("out_channel", &out_channel, nvinfer1::PluginFieldType::kINT32, 1);
    f.emplace_back("kernel_H", &kernel_H, nvinfer1::PluginFieldType::kINT32, 1);
    f.emplace_back("kernel_W", &kernel_W, nvinfer1::PluginFieldType::kINT32, 1);
    f.emplace_back("deformable_group", &deformable_group, nvinfer1::PluginFieldType::kINT32, 1);
    f.emplace_back("dilation", &dilation, nvinfer1::PluginFieldType::kINT32, 1);
    f.emplace_back("groups", &groups, nvinfer1::PluginFieldType::kINT32, 1);
    f.emplace_back("padding", &padding, nvinfer1::PluginFieldType::kINT32, 1);
    f.emplace_back("stride", &stride, nvinfer1::PluginFieldType::kINT32, 1);

    // kernel_weights is a ShapedWeights (count() is a method); bias_weights is
    // a plain nvinfer1::Weights (count is a data member).
    f.emplace_back("h_weight", kernel_weights.values, nvinfer1::PluginFieldType::kFLOAT32, kernel_weights.count());
    f.emplace_back("h_bias", bias_weights.values, nvinfer1::PluginFieldType::kFLOAT32, bias_weights.count);

    // Create plugin from registry
    nvinfer1::IPluginV2* plugin = importPluginFromRegistry(ctx, pluginName, pluginVersion, node.name(), f);

    ASSERT(plugin != nullptr && "dcn_v2 plugin was not found in the plugin registry!",
        ErrorCode::kUNSUPPORTED_NODE);

    RETURN_FIRST_OUTPUT(ctx->network()->addPluginV2(tensors.data(), tensors.size(), *plugin));
}

And ‘getOutputDimensions’ from IPluginV2DynamicExt:

class DeformableConNetPlugin final : public nvinfer1::IPluginV2DynamicExt
{}
...
// Computes the output shape for the single plugin output.
// Inputs are (input, offset, mask), all rank-4 NCHW; output is
// [N, out_channel, offset_H, offset_W] (spatial dims taken from the offset
// tensor, which the exporter sized to the conv output).
DimsExprs DeformableConNetPlugin::getOutputDimensions(
    int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder)
{
    assert(nbInputs == 3);
    assert(inputs[0].nbDims == 4);
    assert(inputs[1].nbDims == 4);
    assert(inputs[2].nbDims == 4);
    assert(outputIndex == 0);

    nvinfer1::DimsExprs ret;
    ret.nbDims = 4;
    ret.d[0] = inputs[0].d[0];                       // batch follows the input
    ret.d[1] = exprBuilder.constant(_out_channel);   // channels from the kernel
    ret.d[2] = inputs[1].d[2];
    ret.d[3] = inputs[1].d[3];

    // BUG FIX: the original version of this snippet fell off the end of a
    // non-void function (undefined behavior; the returned DimsExprs was
    // garbage during engine building).
    return ret;
}

Any suggestions?

Hi @guoyazong345,
Can you please share your onnx model so that we can try reproducing the issue at our end?

Thanks!

Thanks, this is my .onnx:
https://drive.google.com/file/d/1AmDrxt-SKuw8KwVo37h35Po85tRpHcGg/view?usp=sharing
And the code about this plugin is :

#include <algorithm>  // std::min
#include <cstring>    // strcmp
#include <iostream>   // std::cout tracing
#include <stdexcept>
#include <vector>

#include "deformableConNetPlugin.h"
#include "dcn_v2_im2col_cuda.h"

using namespace nvinfer1;
using nvinfer1::plugin::DeformableConNetPlugin;
using nvinfer1::plugin::DeformableConNetPluginCreator;

// Evaluates a CUDA runtime call and, on failure, returns the raw cudaError_t
// from the enclosing function. Only usable inside functions whose return type
// can hold a cudaError_t (here: initialize(), which returns int).
// NOTE(review): the enum value leaks out as the plugin's status code; e.g.
// cudaErrorMemoryAllocation (2) surfaces as "Misc Error ...: 2" in TensorRT.
#define CHECK_CUDA(call)                                                                                               \
    do                                                                                                                 \
    {                                                                                                                  \
        cudaError_t status = call;                                                                                     \
        if (status != cudaSuccess)                                                                                     \
        {                                                                                                              \
            return status;                                                                                             \
        }                                                                                                              \
    } while (0)

// Returns a lazily-created cuBLAS handle for the current CUDA device,
// one cached slot per device index (up to 16 devices).
// NOTE(review): not thread-safe (unsynchronized check of init[]); no bounds
// check if cudaGetDevice yields an index >= 16; the statuses of cudaGetDevice
// and cublasCreate are ignored -- confirm these are acceptable here.
cublasHandle_t blas_handle()
{
    static int init[16] = {0};
    static cublasHandle_t handle[16];
    int n = 0;
    cudaError_t status = cudaGetDevice(&n);
    if(!init[n]) {
        cublasCreate(&handle[n]);
        init[n] = 1;
    }
    return handle[n];
}

// Registry identity of the plugin; must match the name/version the ONNX
// importer requests ("DeformableConNet_TRT", "001").
namespace {
    constexpr const char* INSTANCE_PLUGIN_VERSION{"001"};
    constexpr const char* INSTANCE_PLUGIN_NAME{"DeformableConNet_TRT"};
}

// Static storage for the creator's advertised field collection.
PluginFieldCollection DeformableConNetPluginCreator::mFC{};
std::vector<PluginField> DeformableConNetPluginCreator::mPluginAttributes;

// Builds a plugin from already-unpacked host-side weight/bias vectors.
// Used by clone(), so it must capture every serialized field; device buffers
// are allocated later in initialize() (_initialized starts false).
DeformableConNetPlugin::DeformableConNetPlugin( int in_channel,
                                                int out_channel,
                                                int kernel_H,
                                                int kernel_W,
                                                int deformable_group,
                                                int dilation,
                                                int groups,
                                                int padding,
                                                int stride,
                                                const std::vector<float>& h_weight,
                                                const std::vector<float>& h_bias)
    :_in_channel(in_channel), _out_channel(out_channel),_kernel_H(kernel_H),_kernel_W(kernel_W),_deformable_group(deformable_group),
    _dilation(dilation),_groups(groups),_padding(padding),_stride(stride), _initialized(false), _h_weight(h_weight), _h_bias(h_bias)
{}


// Builds a plugin from nvinfer1::Weights (used by the creator's
// createPlugin()). Copies the FP32 data into host vectors immediately, so the
// caller's buffers may be freed after construction. Only kFLOAT data is
// supported; any other dtype throws.
DeformableConNetPlugin::DeformableConNetPlugin( int in_channel,
                                                int out_channel,
                                                int kernel_H,
                                                int kernel_W,
                                                int deformable_group,
                                                int dilation,
                                                int groups,
                                                int padding,
                                                int stride,
                                                nvinfer1::Weights const& weight,
                                                nvinfer1::Weights const& bias)
    :_in_channel(in_channel), _out_channel(out_channel),_kernel_H(kernel_H),_kernel_W(kernel_W),_deformable_group(deformable_group),
    _dilation(dilation),_groups(groups),_padding(padding),_stride(stride), _initialized(false)
{
    if (weight.type == nvinfer1::DataType::kFLOAT){

        _h_weight.assign((float*)weight.values,(float*)weight.values+weight.count);
    } else {

         throw std::runtime_error("Unsupported weight dtype");
    }

    if (bias.type == nvinfer1::DataType::kFLOAT){

        _h_bias.assign((float*)bias.values,(float*)bias.values+bias.count);
    } else { 

        throw std::runtime_error("Unsupported bias dtype");
    }
    // initialize();
}

// Reconstructs a plugin from a serialized engine blob.
// The field order here is a contract: it must match serialize() and
// getSerializationSize() exactly, or deserialization silently corrupts state.
DeformableConNetPlugin::DeformableConNetPlugin(void const* serialData, size_t serialLength) : _initialized(false)
{
    deserialize_value(&serialData, &serialLength, &_in_channel);
    deserialize_value(&serialData, &serialLength, &_out_channel);
    deserialize_value(&serialData, &serialLength, &_kernel_H);
    deserialize_value(&serialData, &serialLength, &_kernel_W);
    deserialize_value(&serialData, &serialLength, &_deformable_group);
    deserialize_value(&serialData, &serialLength, &_dilation);
    deserialize_value(&serialData, &serialLength, &_groups);
    deserialize_value(&serialData, &serialLength, &_padding);
    deserialize_value(&serialData, &serialLength, &_stride);
    deserialize_value(&serialData, &serialLength, &_h_weight);
    deserialize_value(&serialData, &serialLength, &_h_bias);
}

// Releases any device buffers still held (terminate() is a no-op when the
// plugin was never initialized).
DeformableConNetPlugin::~DeformableConNetPlugin()
{
    terminate();
}

// DeformableConNetPlugin returns one output.
int DeformableConNetPlugin::getNbOutputs() const
{
    return 1;
}

// Computes the output shape for the single plugin output.
// Inputs are (input, offset, mask), all rank-4 NCHW.
// Output is [N, out_channel, offset_H, offset_W]: batch follows the input,
// channels come from the kernel, and the spatial dims are taken from the
// offset tensor (which the exporter sized to the convolution output).
DimsExprs DeformableConNetPlugin::getOutputDimensions(
    int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder)
{
    assert(nbInputs == 3);
    assert(inputs[0].nbDims == 4);
    assert(inputs[1].nbDims == 4);
    assert(inputs[2].nbDims == 4);
    assert(outputIndex == 0);

    nvinfer1::DimsExprs ret;
    ret.nbDims = 4;
    ret.d[0] = inputs[0].d[0];
    ret.d[1] = exprBuilder.constant(_out_channel);

    ret.d[2] = inputs[1].d[2];
    ret.d[3] = inputs[1].d[3];

    return ret;
}

// Allocates and fills the device-side buffers (ones vector, im2col scratch,
// weight, bias). Idempotent: returns immediately once initialized.
// Returns 0 on success or the raw cudaError_t of the first failing CUDA call
// (via CHECK_CUDA) -- e.g. cudaErrorMemoryAllocation (2) when the scratch
// allocation below does not fit in device memory, which TensorRT reports as
// "Misc Error in allocateResources: 2".
int DeformableConNetPlugin::initialize()
{
    std::cout << "initialize" << std::endl;
    if (_initialized)
        return 0;

    // size_t ones_size = _output_dims.d[1]*_output_dims.d[2]* sizeof(float);
    // Guess a "large enough" per-image spatial size up front.
    // NOTE(review): if an output plane ever exceeds 1M elements, _d_ones and
    // _d_columns are too small -- confirm against the supported input range.
    const size_t ones_count = 1024 * 1024;
    const size_t ones_size = ones_count * sizeof(float);
    const size_t weight_size = _h_weight.size() * sizeof(float);
    const size_t bias_size = _h_bias.size() * sizeof(float);

    if (_d_ones == nullptr)
    {
        // std::vector instead of new[]: the host buffer is released even when
        // a CHECK_CUDA early return fires below (the original leaked ones_cpu
        // if cudaMemcpy failed after a successful cudaMalloc).
        std::vector<float> ones_cpu(ones_count, 1.0f);
        CHECK_CUDA(cudaMalloc((void**) &_d_ones, ones_size));
        CHECK_CUDA(cudaMemcpy(_d_ones, ones_cpu.data(), ones_size, cudaMemcpyHostToDevice));
    }

    if (_d_columns == nullptr)
    {
        // Promote to size_t BEFORE multiplying: in_channel * kH * kW as int
        // can overflow, and the full product (channels * kernel area * 4 MiB)
        // is enormous -- this cudaMalloc is the most likely source of the
        // allocateResources failure. Consider sizing from real output dims
        // (configurePlugin) or using the enqueue workspace instead.
        const size_t col_size = static_cast<size_t>(_in_channel) * _kernel_H * _kernel_W * ones_size;
        CHECK_CUDA(cudaMalloc((void**) &_d_columns, col_size));
    }
    if (_d_weight == nullptr)
    {
        CHECK_CUDA(cudaMalloc((void**) &_d_weight, weight_size));
        CHECK_CUDA(cudaMemcpy(_d_weight, _h_weight.data(), weight_size, cudaMemcpyHostToDevice));
    }
    if (_d_bias == nullptr)
    {
        CHECK_CUDA(cudaMalloc((void**) &_d_bias, bias_size));
        CHECK_CUDA(cudaMemcpy(_d_bias, _h_bias.data(), bias_size, cudaMemcpyHostToDevice));
    }

    _initialized = true;
    return 0;
}

// Releases the device buffers allocated by initialize(). Safe to call
// repeatedly; a no-op when the plugin was never initialized.
void DeformableConNetPlugin::terminate()
{
    if (!_initialized)
    {
        return;
    }
    // BUG FIX: also reset the pointers to nullptr. initialize() decides
    // whether to allocate by checking "== nullptr", so leaving stale non-null
    // pointers here would make a later initialize() skip reallocation and
    // use freed device memory.
    cudaFree(_d_columns);
    _d_columns = nullptr;
    cudaFree(_d_bias);
    _d_bias = nullptr;
    cudaFree(_d_weight);
    _d_weight = nullptr;
    cudaFree(_d_ones);
    _d_ones = nullptr;
    _initialized = false;
}

// Reports the scratch memory TensorRT should reserve for enqueue():
// one im2col column buffer (C_in * kH * kW rows by outH * outW cols of FP32)
// plus a small slack.
// NOTE(review): enqueue() currently uses the pre-allocated _d_columns from
// initialize() rather than this workspace -- confirm which one is intended.
size_t DeformableConNetPlugin::getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs, int nbInputs, const nvinfer1::PluginTensorDesc* outputs, int nbOutputs) const 
{ 
    std::cout << "getWorkspaceSize" << std::endl;
    const size_t sizeof_dtype = 4; // FP32 only (enforced by supportsFormatCombination)

    const size_t nInputPlane = inputs[0].dims.d[1];
    const size_t outputHeight = outputs[0].dims.d[2];
    const size_t outputWidth = outputs[0].dims.d[3];

    // Compute in size_t from the start: the original multiplied six ints,
    // which can overflow before the widening to the size_t return type.
    // (It also swapped the names kW = _kernel_H / kH = _kernel_W; harmless in
    // a product, but dropped here along with the other unused locals.)
    const size_t col_size =
        nInputPlane * _kernel_H * _kernel_W * outputHeight * outputWidth * sizeof_dtype;

    return col_size + 100 * sizeof(float);
}


// Runs deformable convolution as: (1) broadcast the bias into the output via
// a rank-1 GEMM (ones^T x bias), (2) im2col with learned offsets/masks into
// _d_columns, (3) GEMM weight x columns accumulated onto the bias (beta = 1).
// NOTE(review): only a single image is processed (im2col is called with
// batch 1 and the GEMMs cover one image's output) -- verify behavior for
// batch > 1. cublasSetStream is never called, so the GEMMs may not run on
// `stream`; `workspace` is unused (scratch comes from initialize()).
int DeformableConNetPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
    const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace,
    cudaStream_t stream)
{
    std::cout << "DeformableConNetPlugin::enqueue" << std::endl;
    float alpha ,beta;
    int m, n, k;
    cublasHandle_t handle = blas_handle();
    const float* input = static_cast<const float *>(inputs[0]);
    const float* offset = static_cast<const float *>(inputs[1]);
    const float* mask = static_cast<const float *>(inputs[2]);
    float * output = static_cast<float *>(outputs[0]);
    nvinfer1::Dims input_dims = inputDesc[0].dims;

    // Standard convolution output-size arithmetic.
    int h = input_dims.d[2];
    int w = input_dims.d[3];
    int height_out = (h + 2 * _padding - (_dilation * (_kernel_H - 1) + 1)) / _stride + 1;
    int width_out = (w + 2 * _padding - (_dilation * (_kernel_W - 1) + 1)) / _stride + 1;

    m = _out_channel;
    n = height_out * width_out;
    k = 1;
    alpha = 1.0;
    beta = 0.0;
    /// output  nxm
    /// ones    1xn  T ->> nx1
    /// bias    1xm
    /// ones x bias = nxm
    //  add bias
    cublasSgemm(handle,
                CUBLAS_OP_T, CUBLAS_OP_N,
                n, m, k,&alpha,
                _d_ones, k,
                _d_bias, k,&beta,
                output, n);
    // im2col (offset and mask)
    modulated_deformable_im2col_cuda(stream,input,offset,mask,
                                     1, _in_channel, h, w,
                                     height_out, width_out, _kernel_H, _kernel_W,
                                     _padding, _padding, _stride, _stride, _dilation, _dilation,
                                     _deformable_group, _d_columns);
    m = _out_channel;
    n = height_out * width_out;
    k = _in_channel * _kernel_H * _kernel_W;
    alpha = 1.0;
    beta = 1.0;
    // im2col conv: output += weight (m x k) * columns (k x n); beta = 1 keeps
    // the bias written by the first GEMM.
    cublasSgemm(handle,
                CUBLAS_OP_N, CUBLAS_OP_N,
                n, m, k,&alpha,
                _d_columns, n,
                _d_weight, k,
                &beta,
                output, n);
    return 0;
}

// Total byte size of the serialized plugin state. Must account for exactly
// the fields written by serialize(), in any order, and stay in sync with the
// deserializing constructor.
size_t DeformableConNetPlugin::getSerializationSize() const
{
    std::cout << "getSerializationSize" << std::endl;
    return (serialized_size(_in_channel) +
            serialized_size(_out_channel) +
            serialized_size(_kernel_H) +
            serialized_size(_kernel_W) +
            serialized_size(_deformable_group) +
            serialized_size(_dilation) +
            serialized_size(_groups) +
            serialized_size(_padding) +
            serialized_size(_stride) +
            serialized_size(_h_weight) +
            serialized_size(_h_bias)
            );
}

// Writes the plugin state into `buffer` (sized by getSerializationSize()).
// The write order here is a contract: it must match the read order of the
// deserializing constructor exactly.
void DeformableConNetPlugin::serialize(void *buffer) const
{
    std::cout << "serialize" << std::endl;
    serialize_value(&buffer, _in_channel);
    serialize_value(&buffer, _out_channel);
    serialize_value(&buffer, _kernel_H);
    serialize_value(&buffer, _kernel_W);
    serialize_value(&buffer, _deformable_group);
    serialize_value(&buffer, _dilation);
    serialize_value(&buffer, _groups);
    serialize_value(&buffer, _padding);
    serialize_value(&buffer, _stride);
    serialize_value(&buffer, _h_weight);
    serialize_value(&buffer, _h_bias);
}

// Accepts only linear FP32 for every connection: position 0 (the input
// tensor) anchors the type to kFLOAT, and the remaining inputs and the
// output must match position 0's type.
// BUG FIX: the original switch over a hard-coded 0..3 could fall off the end
// of this non-void function (undefined behavior) and ignored the actual
// nbInputs/nbOutputs counts; this version bounds-checks and always returns.
bool DeformableConNetPlugin::supportsFormatCombination(
    int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs)
{
    std::cout << "supportsFormatCombination" << std::endl;
    assert(0 <= pos && pos < nbInputs + nbOutputs);
    if (pos < 0 || pos >= nbInputs + nbOutputs)
    {
        return false;
    }

    const nvinfer1::PluginTensorDesc& desc = inOut[pos];
    if (desc.format != nvinfer1::TensorFormat::kLINEAR)
    {
        return false;
    }
    return desc.type == (pos == 0 ? DataType::kFLOAT : inOut[0].type);
}

// Registry name of this plugin ("DeformableConNet_TRT").
const char* DeformableConNetPlugin::getPluginType() const
{
    return INSTANCE_PLUGIN_NAME;
}

// Registry version of this plugin ("001").
const char* DeformableConNetPlugin::getPluginVersion() const
{
    return INSTANCE_PLUGIN_VERSION;
}

// TensorRT owns plugin lifetime through this call; the object must be
// heap-allocated (it always is, via the creator or clone()).
void DeformableConNetPlugin::destroy()
{ 
    delete this;
}

// Deep-copies the plugin configuration (weights travel via the host-side
// vectors). The clone starts uninitialized; TensorRT calls initialize() on
// it separately, so device buffers are not shared between clones.
IPluginV2DynamicExt* DeformableConNetPlugin::clone() const
{ 
    std::cout << "clone" << std::endl;
    auto plugin = new DeformableConNetPlugin{_in_channel, _out_channel, _kernel_H, _kernel_W, 
                                            _deformable_group, _dilation, _groups, _padding,
                                            _stride, _h_weight, _h_bias};
    plugin->setPluginNamespace(mPluginNamespace);
    return plugin;
}

// Set plugin namespace.
// NOTE(review): only the pointer is stored, not a copy -- the caller's
// string must outlive this plugin (TensorRT's usual contract, but worth
// confirming for this codebase).
void DeformableConNetPlugin::setPluginNamespace(const char* pluginNamespace)
{
    mPluginNamespace = pluginNamespace;
}

// Returns the namespace set above (used for registry lookup).
const char* DeformableConNetPlugin::getPluginNamespace() const
{
    return mPluginNamespace;
}

// The single output adopts the type of input 0 (which
// supportsFormatCombination constrains to kFLOAT).
nvinfer1::DataType DeformableConNetPlugin::getOutputDataType(
    int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
    std::cout << "getOutputDataType" << std::endl;
    ASSERT(nbInputs == 3);
    ASSERT(inputTypes && nbInputs > 0 && index == 0);
    return inputTypes[0];
}

// Attach the plugin object to an execution context and grant the plugin
// access to some context resources. Unused: this plugin manages its own
// cuBLAS handle via blas_handle() instead of the provided cublasContext.
void DeformableConNetPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}

// Detach the plugin object from its execution context. Nothing to release.
void DeformableConNetPlugin::detachFromContext() {}

// Called by TensorRT with the min/max/opt shape ranges before build/inference.
// Only validates the I/O counts; no per-shape state is derived here.
// NOTE(review): this would be the natural place to size _d_ones/_d_columns
// from real output dims instead of the fixed 1M-element guess in initialize().
void DeformableConNetPlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
    const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs)
{
    ASSERT(nbOutputs == 1);
    ASSERT(nbInputs == 3);
    std::cout << "configurePlugin done" << std::endl;
}

// DeformableConNetPluginCreator methods

// Advertises the plugin's field schema via getFieldNames().
// BUG FIX: the metadata now matches what createPlugin() actually parses --
// the original declared every scalar as kFLOAT32 (createPlugin asserts
// kINT32) and named the weight fields "weight"/"bias" while both the ONNX
// importer and createPlugin() use "h_weight"/"h_bias".
DeformableConNetPluginCreator::DeformableConNetPluginCreator()
{
    mPluginAttributes.emplace_back(PluginField("in_channel", nullptr, PluginFieldType::kINT32, 1));
    mPluginAttributes.emplace_back(PluginField("out_channel", nullptr, PluginFieldType::kINT32, 1));
    mPluginAttributes.emplace_back(PluginField("kernel_H", nullptr, PluginFieldType::kINT32, 1));
    mPluginAttributes.emplace_back(PluginField("kernel_W", nullptr, PluginFieldType::kINT32, 1));
    mPluginAttributes.emplace_back(PluginField("deformable_group", nullptr, PluginFieldType::kINT32, 1));
    mPluginAttributes.emplace_back(PluginField("dilation", nullptr, PluginFieldType::kINT32, 1));
    mPluginAttributes.emplace_back(PluginField("groups", nullptr, PluginFieldType::kINT32, 1));
    mPluginAttributes.emplace_back(PluginField("padding", nullptr, PluginFieldType::kINT32, 1));
    mPluginAttributes.emplace_back(PluginField("stride", nullptr, PluginFieldType::kINT32, 1));
    mPluginAttributes.emplace_back(PluginField("h_weight", nullptr, PluginFieldType::kFLOAT32, 1));
    mPluginAttributes.emplace_back(PluginField("h_bias", nullptr, PluginFieldType::kFLOAT32, 1));

    mFC.nbFields = mPluginAttributes.size();
    mFC.fields = mPluginAttributes.data();
}

// Registry name; must match DeformableConNetPlugin::getPluginType().
const char* DeformableConNetPluginCreator::getPluginName() const
{
    return INSTANCE_PLUGIN_NAME;
}

// Registry version; must match DeformableConNetPlugin::getPluginVersion().
const char* DeformableConNetPluginCreator::getPluginVersion() const
{
    return INSTANCE_PLUGIN_VERSION;
}

// Advertises the field schema populated in the creator's constructor.
const PluginFieldCollection* DeformableConNetPluginCreator::getFieldNames()
{
    return &mFC;
}

// Builds a DeformableConNetPlugin from the PluginFieldCollection assembled by
// the ONNX importer. Scalar fields must be kINT32; "h_weight"/"h_bias" carry
// the FP32 kernel and bias data (copied into the plugin's host vectors by the
// Weights constructor, so the temporaries here may die after construction).
IPluginV2DynamicExt* DeformableConNetPluginCreator::createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc)
{
    // BUG FIX: defaults mirror the importer's attrs.get(..., 1) fallbacks so
    // that an absent field no longer leaves the value read uninitialized
    // (the original declared the ints without initializers).
    int in_channel = 0;
    int out_channel = 0;
    int kernel_H = 1;
    int kernel_W = 1;
    int deformable_group = 1;
    int dilation = 1;
    int groups = 1;
    int padding = 1;
    int stride = 1;
    std::vector<float> h_weight;
    std::vector<float> h_bias;

    const PluginField* fields = fc->fields;
    for (int i = 0; i < fc->nbFields; ++i)
    {
        const char* attrName = fields[i].name;
        const void* data = fields[i].data;
        if (!strcmp(attrName, "in_channel"))
        {
            ASSERT(fields[i].type == PluginFieldType::kINT32);
            in_channel = *static_cast<const int32_t*>(data);
        }
        else if (!strcmp(attrName, "out_channel"))
        {
            ASSERT(fields[i].type == PluginFieldType::kINT32);
            out_channel = *static_cast<const int32_t*>(data);
        }
        else if (!strcmp(attrName, "kernel_H"))
        {
            ASSERT(fields[i].type == PluginFieldType::kINT32);
            kernel_H = *static_cast<const int32_t*>(data);
        }
        else if (!strcmp(attrName, "kernel_W"))
        {
            ASSERT(fields[i].type == PluginFieldType::kINT32);
            kernel_W = *static_cast<const int32_t*>(data);
        }
        else if (!strcmp(attrName, "deformable_group"))
        {
            ASSERT(fields[i].type == PluginFieldType::kINT32);
            deformable_group = *static_cast<const int32_t*>(data);
        }
        else if (!strcmp(attrName, "dilation"))
        {
            ASSERT(fields[i].type == PluginFieldType::kINT32);
            dilation = *static_cast<const int32_t*>(data);
        }
        else if (!strcmp(attrName, "groups"))
        {
            ASSERT(fields[i].type == PluginFieldType::kINT32);
            groups = *static_cast<const int32_t*>(data);
        }
        else if (!strcmp(attrName, "padding"))
        {
            ASSERT(fields[i].type == PluginFieldType::kINT32);
            padding = *static_cast<const int32_t*>(data);
        }
        else if (!strcmp(attrName, "stride"))
        {
            ASSERT(fields[i].type == PluginFieldType::kINT32);
            stride = *static_cast<const int32_t*>(data);
        }
        else if (!strcmp(attrName, "h_weight"))
        {
            ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
            // Bulk copy replaces the original element-by-element push_back loop.
            const auto* w = static_cast<const float*>(data);
            h_weight.assign(w, w + fields[i].length);
        }
        else if (!strcmp(attrName, "h_bias"))
        {
            ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
            const auto* b = static_cast<const float*>(data);
            h_bias.assign(b, b + fields[i].length);
        }
    }

    Weights weightWeights{DataType::kFLOAT, h_weight.data(), (int64_t) h_weight.size()};
    Weights biasWeights{DataType::kFLOAT, h_bias.data(), (int64_t) h_bias.size()};

    DeformableConNetPlugin* obj = new DeformableConNetPlugin(in_channel, out_channel, kernel_H, kernel_W,
     deformable_group, dilation, groups, padding, stride, weightWeights, biasWeights);
    obj->setPluginNamespace(mNamespace.c_str());
    return obj;
}

// Rebuilds a plugin from a serialized engine blob (delegates to the
// deserializing constructor, whose field order must match serialize()).
IPluginV2DynamicExt* DeformableConNetPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
    std::cout << "deserializePlugin" << std::endl;
    DeformableConNetPlugin* obj = new DeformableConNetPlugin{serialData, serialLength}; 
    obj->setPluginNamespace(mNamespace.c_str());
    return obj;
}

I think the problem is in the plugin code I wrote, but I can't tell exactly what went wrong — any suggestions would be appreciated, thank you~

Hi @guoyazong345,
Kindly grant access.

Thanks!