Problem with unsupported operation _AddV2 in MobileNet on TensorRT

Hi.
I want to use TensorRT 6 to accelerate a model I trained with the Keras-TensorFlow framework.

I have trained a MobileNet-SegNet model and saved it as:

mobilenet_segnet.h5

Then I converted mobilenet_segnet.h5 to mobilenet_segnet.uff with this script:

from keras.models import load_model
import tensorflow as tf
import keras
import keras.backend as K
from keras.backend.tensorflow_backend import set_session
import uff
from keras.utils.generic_utils import CustomObjectScope

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
config.log_device_placement = True
sess = tf.Session(config=config)
set_session(sess)
keras.backend.get_session().run(tf.initialize_all_variables())

K.set_learning_phase(0)
with CustomObjectScope({'relu6': keras.applications.mobilenet.relu6,
                        'DepthwiseConv2D': keras.applications.mobilenet.DepthwiseConv2D}):
    model = load_model('./mobilenet_segnet.h5') 
K.set_learning_phase(0)

output_name = model.output.op.name
input_name = model.input.op.name
graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [output_name])
frozen_graph = tf.graph_util.remove_training_nodes(graph_def)

uff_model = uff.from_tensorflow(frozen_graph, output_filename = './mobilenet_segnet.uff')
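
As a quick sanity check before converting, the op types that UFF has no built-in converter for can be listed first (a sketch only, using the frozen_graph produced above):

from collections import Counter

# Sketch: count the op types in the frozen graph that the UFF converter
# has no mapping for (frozen_graph is the GraphDef produced above).
op_counts = Counter(node.op for node in frozen_graph.node)
print({op: n for op, n in op_counts.items() if op in ('AddV2', 'ResizeNearestNeighbor')})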

I can then see mobilenet_segnet.uff in my path, and the conversion prints the following:

NOTE: UFF has been tested with TensorFlow 1.14.0.
WARNING: The version of TensorFlow installed on this system is not guaranteed to work with UFF.
UFF Version 0.6.5
=== Automatically deduced input nodes ===
[name: "input_1"
op: "Placeholder"
attr {
  key: "dtype"
  value {
    type: DT_FLOAT
  }
}
attr {
  key: "shape"
  value {
    shape {
      dim {
        size: -1
      }
      dim {
        size: 224
      }
      dim {
        size: 224
      }
      dim {
        size: 3
      }
    }
  }
}
]
=========================================

=== Automatically deduced output nodes ===
[name: "activation_1/truediv"
op: "RealDiv"
input: "activation_1/Exp"
input: "activation_1/Sum"
attr {
  key: "T"
  value {
    type: DT_FLOAT
  }
}
]
==========================================

Using output node activation_1/truediv
Converting to UFF graph
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: No conversion function registered for layer: AddV2 yet.
Converting batch_normalization_4/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting batch_normalization_4/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: ResizeNearestNeighbor yet.
Converting up_sampling2d_3/ResizeNearestNeighbor as custom op: ResizeNearestNeighbor
Warning: No conversion function registered for layer: AddV2 yet.
Converting batch_normalization_3/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting batch_normalization_3/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: ResizeNearestNeighbor yet.
Converting up_sampling2d_2/ResizeNearestNeighbor as custom op: ResizeNearestNeighbor
Warning: No conversion function registered for layer: AddV2 yet.
Converting batch_normalization_2/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting batch_normalization_2/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: ResizeNearestNeighbor yet.
Converting up_sampling2d_1/ResizeNearestNeighbor as custom op: ResizeNearestNeighbor
Warning: No conversion function registered for layer: AddV2 yet.
Converting batch_normalization_1/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting batch_normalization_1/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_11_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_11_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_11_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_11_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_10_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_10_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_10_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_10_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_9_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_9_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_9_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_9_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_8_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_8_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_8_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_8_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_7_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_7_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_7_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_7_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_6_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_6_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_6_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_6_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_5_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_5_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_5_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_5_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_4_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_4_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_4_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_4_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_3_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_3_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_3_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_3_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_2_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_2_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_2_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_2_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_1_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_pw_1_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_1_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv_dw_1_bn/batchnorm/add as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv1_bn/batchnorm/add_1 as custom op: AddV2
Warning: No conversion function registered for layer: AddV2 yet.
Converting conv1_bn/batchnorm/add as custom op: AddV2
DEBUG [/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter.py:96] Marking ['activation_1/truediv'] as outputs
No. nodes: 499
UFF Output written to ./mobilenet_segnet.uff

Then I tried to run this script in a new terminal:

import tensorrt as trt

model_file = './mobilenet_segnet.uff'

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

def build_engine(model_file):
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
        builder.max_workspace_size = 1 << 20
        parser.register_input("input_1", (224, 224, 3))
        parser.register_output("activation_1/truediv")
        parser.parse(model_file, network)
        return builder.build_cuda_engine(network)


with build_engine(model_file) as engine:
    print('1')

The terminal shows the following:

[TensorRT] ERROR: UffParser: Validator error: batch_normalization_4/batchnorm/add: Unsupported operation _AddV2

Does that mean I should use gs.create_plugin_node to build new TRT nodes to replace the original nodes, such as Relu6, DepthwiseConv2D, and PointwiseConv2D? How do I create a new node for this problem?
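
For reference, the general graphsurgeon pattern for swapping a TensorFlow node for a plugin node looks roughly like this (a sketch only; 'SomePlugin_TRT' and the field value are placeholders, not real plugin names):

import graphsurgeon as gs

# Sketch of the general pattern: wrap an unsupported TF node in a plugin node
# and collapse its namespace onto it. 'SomePlugin_TRT' and someField are
# placeholders, not real plugin names or fields.
dynamic_graph = gs.DynamicGraph(frozen_graph)
plugin_node = gs.create_plugin_node('some_layer/UnsupportedOp',
                                    op='SomePlugin_TRT', someField=1)
dynamic_graph.collapse_namespaces({'some_layer/UnsupportedOp': plugin_node})
trt_graph = dynamic_graph.as_graph_def()

For AddV2 specifically, simply renaming the op turned out to be enough, as shown below.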

Then I tried to create a new mobilenet_segnet.uff in a new terminal, as follows:

from keras.models import load_model
import tensorflow as tf
import keras
import keras.backend as K
from keras.backend.tensorflow_backend import set_session
import uff
from keras.utils.generic_utils import CustomObjectScope

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
config.log_device_placement = True
sess = tf.Session(config=config)
set_session(sess)
keras.backend.get_session().run(tf.initialize_all_variables())

K.set_learning_phase(0)
with CustomObjectScope({'relu6': keras.applications.mobilenet.relu6,
                        'DepthwiseConv2D': keras.applications.mobilenet.DepthwiseConv2D}):
    model = load_model('./mobilenet_segnet.h5') 
K.set_learning_phase(0)

output_name = model.output.op.name
input_name = model.input.op.name
graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [output_name])
frozen_graph = tf.graph_util.remove_training_nodes(graph_def)

# ---------* new code *-------------
import graphsurgeon as gs

# Rewrite every AddV2 node as a plain Add so the UFF converter can handle it.
# The node edits happen in place, so frozen_graph picks them up as well
# (the AddV2 warnings disappear in the log below).
dynamic_graph = gs.DynamicGraph(frozen_graph)
add_nodes = dynamic_graph.find_nodes_by_op('AddV2')
for node in add_nodes:
    node.op = "Add"
# ---------* new code *-------------


uff_model = uff.from_tensorflow(frozen_graph, output_filename = './mobilenet_segnet.uff')

The terminal then shows the following:

NOTE: UFF has been tested with TensorFlow 1.14.0.
WARNING: The version of TensorFlow installed on this system is not guaranteed to work with UFF.
UFF Version 0.6.5
=== Automatically deduced input nodes ===
[name: "input_1"
op: "Placeholder"
attr {
  key: "dtype"
  value {
    type: DT_FLOAT
  }
}
attr {
  key: "shape"
  value {
    shape {
      dim {
        size: -1
      }
      dim {
        size: 224
      }
      dim {
        size: 224
      }
      dim {
        size: 3
      }
    }
  }
}
]
=========================================

=== Automatically deduced output nodes ===
[name: "activation_1/truediv"
op: "RealDiv"
input: "activation_1/Exp"
input: "activation_1/Sum"
attr {
  key: "T"
  value {
    type: DT_FLOAT
  }
}
]
==========================================

Using output node activation_1/truediv
Converting to UFF graph
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: No conversion function registered for layer: ResizeNearestNeighbor yet.
Converting up_sampling2d_3/ResizeNearestNeighbor as custom op: ResizeNearestNeighbor
WARNING:tensorflow:From /usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter.py:179: The name tf.AttrValue is deprecated. Please use tf.compat.v1.AttrValue instead.

Warning: No conversion function registered for layer: ResizeNearestNeighbor yet.
Converting up_sampling2d_2/ResizeNearestNeighbor as custom op: ResizeNearestNeighbor
Warning: No conversion function registered for layer: ResizeNearestNeighbor yet.
Converting up_sampling2d_1/ResizeNearestNeighbor as custom op: ResizeNearestNeighbor
DEBUG [/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter.py:96] Marking ['activation_1/truediv'] as outputs
No. nodes: 499
UFF Output written to ./mobilenet_segnet.uff

Then I tried this again in a new terminal:

import tensorrt as trt

model_file = './mobilenet_segnet.uff'

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

def build_engine(model_file):
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
        builder.max_workspace_size = 1 << 20
        parser.register_input("input_1", (224, 224, 3))
        parser.register_output("activation_1/truediv")
        parser.parse(model_file, network)
        return builder.build_cuda_engine(network)


with build_engine(model_file) as engine:
    print('1')

It shows the following:

[TensorRT] ERROR: UffParser: Validator error: up_sampling2d_1/ResizeNearestNeighbor: Unsupported operation _ResizeNearestNeighbor

How can I solve this?

My platform is a Jetson Nano with JetPack 4.2, Keras 2.1.3, TensorFlow 1.14, and TensorRT 6.

Many thanks.

Hi,

Our UFF parser doesn't support the resize layer.
You can find our support matrix here:

Thanks.

Hi,

I found that there is a ResizeNearestNeighbor plugin implementation in the TensorRT OSS repository on GitHub.

So you can build the OSS plugin library and replace the default plugin library with it to get resize layer support.

Thanks.

Hi AastaLLL,
thank you for your help.

I found that the stock TensorRT already has a ResizeNearest plugin, as shown by the following:

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.INFO)
trt.init_libnvinfer_plugins(TRT_LOGGER, '')

PLUGIN_CREATORS = trt.get_plugin_registry().plugin_creator_list
for plugin_creator in PLUGIN_CREATORS:
    print(plugin_creator.name)

which prints:

RnRes2Br1Br2c_TRT
RnRes2Br2bBr2c_TRT
SingleStepLSTMPlugin
FancyActivation
ResizeNearest
Split
InstanceNormalization
GridAnchor_TRT
GridAnchorRect_TRT
NMS_TRT
Reorg_TRT
Region_TRT
Clip_TRT
LReLU_TRT
PriorBox_TRT
Normalize_TRT
RPROI_TRT
BatchedNMS_TRT
FlattenConcat_TRT

So I tried running this in a new terminal:

import graphsurgeon as gs
import tensorrt as trt
import uff

TRT_LOGGER = trt.Logger(trt.Logger.INFO)
trt.init_libnvinfer_plugins(TRT_LOGGER, '')

# frozen_graph is the frozen GraphDef produced by the freezing script above.
dynamic_graph = gs.DynamicGraph(frozen_graph)

add_nodes = dynamic_graph.find_nodes_by_op('AddV2')
for node in add_nodes:
    node.op = 'Add'

resize_nearest_0 = gs.create_plugin_node('up_sampling2d_1/ResizeNearestNeighbor', op='ResizeNearest', nbInputChannels=512, inputHeight=14, inputWidth= 14)

resize_nearest_1 = gs.create_plugin_node('up_sampling2d_2/ResizeNearestNeighbor', op='ResizeNearest', nbInputChannels=256, inputHeight=28, inputWidth= 28)

resize_nearest_2 = gs.create_plugin_node('up_sampling2d_3/ResizeNearestNeighbor', op='ResizeNearest', nbInputChannels=128, inputHeight=112, inputWidth= 112)

my_relu6 = gs.create_plugin_node('conv1_relu', op='Clip_TRT', clipMin=0.0, clipMax=6.0)

namespace_plugin_map = {
    'up_sampling2d_1/ResizeNearestNeighbor': resize_nearest_0,
    'up_sampling2d_2/ResizeNearestNeighbor': resize_nearest_1,
    'up_sampling2d_3/ResizeNearestNeighbor': resize_nearest_2,
    'conv1_relu': my_relu6,
}

dynamic_graph.collapse_namespaces(namespace_plugin_map)

new_dynamic_graph = dynamic_graph.as_graph_def()

uff_model = uff.from_tensorflow(new_dynamic_graph, output_filename = './mobilenet_segnet.uff')

which prints:

NOTE: UFF has been tested with TensorFlow 1.14.0.
WARNING: The version of TensorFlow installed on this system is not guaranteed to work with UFF.
UFF Version 0.6.5
=== Automatically deduced input nodes ===
[name: "input_1"
op: "Placeholder"
attr {
  key: "dtype"
  value {
    type: DT_FLOAT
  }
}
attr {
  key: "shape"
  value {
    shape {
      dim {
        size: -1
      }
      dim {
        size: 224
      }
      dim {
        size: 224
      }
      dim {
        size: 3
      }
    }
  }
}
]
=========================================

=== Automatically deduced output nodes ===
[name: "activation_1/truediv"
op: "RealDiv"
input: "activation_1/Exp"
input: "activation_1/Sum"
attr {
  key: "T"
  value {
    type: DT_FLOAT
  }
}
]
==========================================

Using output node activation_1/truediv
Converting to UFF graph
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: keepdims is ignored by the UFF Parser and defaults to True
Warning: No conversion function registered for layer: ResizeNearest yet.
Converting up_sampling2d_3/ResizeNearestNeighbor as custom op: ResizeNearest
WARNING:tensorflow:From /usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter.py:179: The name tf.AttrValue is deprecated. Please use tf.compat.v1.AttrValue instead.

Warning: No conversion function registered for layer: ResizeNearest yet.
Converting up_sampling2d_2/ResizeNearestNeighbor as custom op: ResizeNearest
Warning: No conversion function registered for layer: ResizeNearest yet.
Converting up_sampling2d_1/ResizeNearestNeighbor as custom op: ResizeNearest
Warning: No conversion function registered for layer: Clip_TRT yet.
Converting conv1_relu as custom op: Clip_TRT
DEBUG [/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter.py:96] Marking ['activation_1/truediv'] as outputs
No. nodes: 497
UFF Output written to ./mobilenet_segnet.uff

It looks like everything is OK, so I tried parsing once again:

import tensorrt as trt

model_file = './mobilenet_segnet.uff'

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt.init_libnvinfer_plugins(TRT_LOGGER, '')

def build_engine(model_file):
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
        builder.max_workspace_size = 1 << 10
        parser.register_input("input_1", (3, 224, 224))
        parser.register_output("activation_1/truediv")
        parser.parse(model_file, network)
        return builder.build_cuda_engine(network)


with build_engine(model_file) as engine:
    print('1')

which shows this result:

[TensorRT] ERROR: UffParser: Parser error: conv_dw_1_relu/Minimum: Unsupported binary op min with constant right

Why can't TensorRT support ReLU?
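
Or maybe the issue is that only conv1_relu was remapped to Clip_TRT above, while the error points at conv_dw_1_relu/Minimum. A hedged sketch that would remap every ReLU6 activation (assuming they all live in namespaces ending in _relu) is:

# Sketch (assumption: every ReLU6 shows up as a Minimum node inside a *_relu namespace).
relu6_namespaces = {node.name.split('/')[0]
                    for node in dynamic_graph.find_nodes_by_op('Minimum')
                    if node.name.endswith('_relu/Minimum')}
clip_map = {ns: gs.create_plugin_node(ns, op='Clip_TRT', clipMin=0.0, clipMax=6.0)
            for ns in relu6_namespaces}
dynamic_graph.collapse_namespaces(clip_map)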
Many thanks.

Hi,

Yes, ReLU is in our support list.
We guess that something is incorrect when applying the nearest plugin.

This resize plugin is also used in our MaskRCNN sample.
Could you follow that config to see if it helps?
https://github.com/NVIDIA/TensorRT/blob/master/samples/opensource/sampleUffMaskRCNN/converted/config.py#L20
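
In that config the upsampling nodes are mapped to the OSS plugin roughly like this (a sketch, not a verbatim copy; check the linked file for the exact node names and fields):

# Sketch: the OSS plugin registers as "ResizeNearest_TRT" and takes a "scale"
# field instead of explicit channel/height/width values.
resize_node = gs.create_plugin_node('up_sampling2d_1/ResizeNearestNeighbor',
                                    op='ResizeNearest_TRT', scale=2.0)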

Thanks.

Hi AastaLLL,
thank you for your advice.

I found that the ResizeNearest plugin only supports "channels_first" input. So I changed my .h5 model accordingly and converted it to an .engine file, but the terminal shows the following:

Function not implemented
Segmentation fault (core dumped)
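
For reference, the switch to channels_first was roughly the following (a minimal sketch; the full model rebuild and weight reload are omitted):

import keras.backend as K

# Sketch: force NCHW (channels_first) before rebuilding the model, since the
# ResizeNearest plugin expects channels-first input.
K.set_image_data_format('channels_first')
# ...rebuild the mobilenet-segnet architecture and reload the trained weights...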

It looks like this code:

resize_nearest_0 = gs.create_plugin_node('up_sampling2d_1/ResizeNearestNeighbor', op='ResizeNearest', nbInputChannels=512, inputHeight=14, inputWidth= 14)

resize_nearest_1 = gs.create_plugin_node('up_sampling2d_2/ResizeNearestNeighbor', op='ResizeNearest', nbInputChannels=256, inputHeight=28, inputWidth= 28)

resize_nearest_2 = gs.create_plugin_node('up_sampling2d_3/ResizeNearestNeighbor', op='ResizeNearest', nbInputChannels=128, inputHeight=112, inputWidth= 112)

is not working for me.

Could you tell me how to build a .so from resizeNearestPlugin.cpp and then use this plugin from Python?
Thank you very much!

Hi,

Please follow the same approach used for building the FlattenConcat plugin:

$ cd /usr/src/tensorrt/samples/python/uff_ssd/plugin/
$ mkdir -p build
$ cd build
$ cmake ..
$ make

And load the library into Python with ctypes like this:

import ctypes
...
ctypes.CDLL("lib/libflattenconcat.so")
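
Putting it together, loading a custom plugin library before parsing looks roughly like this (a sketch; the library path is just a placeholder):

import ctypes
import tensorrt as trt

# Sketch: load the custom plugin .so first, then register all plugin creators
# so the UFF parser can resolve the custom ops.
ctypes.CDLL('lib/libflattenconcat.so')
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
trt.init_libnvinfer_plugins(TRT_LOGGER, '')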

Thanks.

Also, please make sure the plugin implementation registers itself like this:

namespace
{
const char* FLATTENCONCAT_PLUGIN_VERSION{"1"};
const char* FLATTENCONCAT_PLUGIN_NAME{"FlattenConcat_TRT"};
}

...

PluginFieldCollection FlattenConcatPluginCreator::mFC{};
std::vector<PluginField> FlattenConcatPluginCreator::mPluginAttributes;

REGISTER_TENSORRT_PLUGIN(FlattenConcatPluginCreator);

Thanks.

Hi AastaLLL,
thank you once again.

I tried to follow your recommendation and built libclipplugin.so as follows:

cd /usr/src/tensorrt/samples/python/uff_custom_plugin
sudo mkdir build 
cd build && cmake .. && make -j2

I can find a file named libclipplugin.so in build/.
Then I tried your suggestion as follows:

import ctypes
import tensorrt as trt
ctypes.CDLL('/usr/src/tensorrt/samples/python/uff_custom_plugin/build/libclipplugin.so')
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
PLUGIN_CREATORS = trt.get_plugin_registry().plugin_creator_list
for plugin_creator in PLUGIN_CREATORS:
    print(plugin_creator.name)

which prints these messages:

RnRes2Br1Br2c_TRT
RnRes2Br2bBr2c_TRT
SingleStepLSTMPlugin
FancyActivation
ResizeNearest
Split
InstanceNormalization
CustomClipPlugin

So I can use the "CustomClipPlugin" operation.

But what I actually want to use is the "ResizeNearest_TRT" operation.
So I downloaded resizeNearestPlugin.cpp and resizeNearestPlugin.h from:

https://github.com/NVIDIA/TensorRT/tree/master/plugin/resizeNearestPlugin

and made a new CMakeLists.txt as follows:

# We need cmake >= 3.8, since 3.8 introduced CUDA as a first class language
set(CMAKE_CUDA_COMPILER "/usr/local/cuda-10.0/bin/nvcc")
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)
project(resizeNearestPlugin LANGUAGES CXX CUDA)

# Enable all compile warnings
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wno-long-long -pedantic -Wno-deprecated-declarations")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wno-write-strings")

# Sets variable to a value if variable is unset.
macro(set_ifndef var val)
    if (NOT ${var})
        set(${var} ${val})
    endif()
    message(STATUS "Configurable variable ${var} set to ${${var}}")
endmacro()

# -------- CONFIGURATION --------
set_ifndef(TRT_LIB /usr/lib/aarch64-linux-gnu)
set_ifndef(TRT_INCLUDE /usr/include/aarch64-linux-gnu)

set_ifndef(CUDA_ROOT /usr/local/cuda-10.0)
# Find dependencies:
message("\nThe following variables are derived from the values of the previous variables unless provided explicitly:\n")

# TensorRT's nvinfer lib
find_library(_NVINFER_LIB nvinfer HINTS ${TRT_LIB} PATH_SUFFIXES lib lib64)
set_ifndef(NVINFER_LIB ${_NVINFER_LIB})

find_path(_CUDA_INC_DIR cuda_runtime_api.h HINTS ${CUDA_ROOT} PATH_SUFFIXES include)
set_ifndef(CUDA_INC_DIR ${_CUDA_INC_DIR})

find_library(_CUDA_LIB cudart HINTS ${CUDA_ROOT} PATH_SUFFIXES lib lib64)
set_ifndef(CUDA_LIB ${_CUDA_LIB})

# -------- BUILDING --------

# Add include directories
include_directories(${CUDA_INC_DIR} ${TRT_INCLUDE} ${CMAKE_SOURCE_DIR}/resizeNearestPlugin/  /home/nvidia/TensorRT/plugin/common/kernels/ /home/nvidia/TensorRT/plugin/common/)


# Define resizeNearest plugin library target
add_library(resizeNearestPlugin MODULE
  ${CMAKE_SOURCE_DIR}/resizeNearestPlugin/resizeNearestPlugin.h
  ${CMAKE_SOURCE_DIR}/resizeNearestPlugin/resizeNearestPlugin.cpp
)

# Use C++11
target_compile_features(resizeNearestPlugin PUBLIC cxx_std_11)

# Link TensorRT's nvinfer lib (note that ${CUDA_LIB} is found above but not linked here)
target_link_libraries(resizeNearestPlugin PRIVATE ${NVINFER_LIB})

# We need to explicitly state that we need all CUDA files
# to be built with -dc as the member functions will be called by
# other libraries and executables (in our case, Python inference scripts)
set_target_properties(resizeNearestPlugin PROPERTIES
  CUDA_SEPARABLE_COMPILATION ON
)

I can see a file named libresizeNearestPlugin.so in build/, and I tried the following:

ctypes.CDLL('/home/nvidia/procedure/keras/resizeNearestPlugin/build/libresizeNearestPlugin.so')
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/usr/lib/python3.6/ctypes/__init__.py", line 348, in __init__
    self._handle = _dlopen(self._name, mode)
OSError: /home/nvidia/procedure/keras/resizeNearestPlugin/build/libresizeNearestPlugin.so: undefined symbol: cudaGetLastError

What does this mean? It looks like the library cannot resolve the CUDA runtime symbols (the CMakeLists above links ${NVINFER_LIB} but never ${CUDA_LIB}).
How can I solve this problem?

Many thanks!

Hi, I have solved this problem by moving to the latest JetPack 4.4 for the Nano. Using ResizeNearest_TRT solves the problem.

Thanks!

Good to know.
Thanks for sharing the update with us.