Hi there,
i try to get my custom trained SSD Mobilenetv2 to work on my jetson nano with 1 class.
I manage to convert it to uff by using /usr/lib/python3.6/dist-packages/uff/bin/convert_to_uff.py
with this config.py
import graphsurgeon as gs
import tensorflow as tf
Input = gs.create_node("Input",
op="Placeholder",
dtype=tf.float32,
shape=[1, 3, 300, 300])
PriorBox = gs.create_plugin_node(name="GridAnchor", op="GridAnchor_TRT",
numLayers=6,
minSize=0.2,
maxSize=0.95,
aspectRatios=[1.0, 2.0, 0.5, 3.0, 0.33],
variance=[0.1,0.1,0.2,0.2],
featureMapShapes=[19, 10, 5, 3, 2, 1])
NMS = gs.create_plugin_node(name="NMS", op="NMS_TRT",
shareLocation=1,
varianceEncodedInTarget=0,
backgroundLabelId=0,
confidenceThreshold=1e-8,
nmsThreshold=0.6,
topK=100,
keepTopK=100,
numClasses=1,
inputOrder=[1, 0,2], #021 120 102(this seems to be the right one for me) 012 210
confSigmoid=1,
isNormalized=1)
concat_priorbox = gs.create_node(name="concat_priorbox", op="ConcatV2", dtype=tf.float32, axis=2)
concat_box_loc = gs.create_plugin_node("concat_box_loc", op="FlattenConcat_TRT", dtype=tf.float32, axis=1, ignoreBatch=0)
concat_box_conf = gs.create_plugin_node("concat_box_conf", op="FlattenConcat_TRT", dtype=tf.float32, axis=1, ignoreBatch=0)
namespace_plugin_map = {
"MultipleGridAnchorGenerator": PriorBox,
"Postprocessor": NMS,
"Preprocessor": Input,
"ToFloat": Input,
"image_tensor": Input,
"Concatenate": concat_priorbox,
"concat": concat_box_loc,
"concat_1": concat_box_conf
}
def preprocess(dynamic_graph):
# Now create a new graph by collapsing namespaces
dynamic_graph.collapse_namespaces(namespace_plugin_map)
# Remove the outputs, so we just have a single output node (NMS).
dynamic_graph.remove(dynamic_graph.graph_outputs, remove_exclusive_dependencies=False)
# Disconnect the Input node from NMS, as it expects to have only 3 inputs.
dynamic_graph.find_nodes_by_op("NMS_TRT")[0].input.remove("Input")
getting this warning in the terminal:
Using output node NMS
Converting to UFF graph
Warning: No conversion function registered for layer: NMS_TRT yet.
Converting NMS as custom op: NMS_TRT
Warning: No conversion function registered for layer: GridAnchor_TRT yet.
Converting GridAnchor as custom op: GridAnchor_TRT
Warning: No conversion function registered for layer: FlattenConcat_TRT yet.
Converting concat_box_loc as custom op: FlattenConcat_TRT
Warning: No conversion function registered for layer: FlattenConcat_TRT yet.
Converting concat_box_conf as custom op: FlattenConcat_TRT
No. nodes: 1094
and getting this output as .pbtxt with the -t flag:
version: 1
descriptor_core_version: 1
descriptors {
id: "tensorflow_extension"
version: 1
}
descriptors {
id: "custom"
version: 1
}
graphs {
id: "main"
nodes {
id: "NMS"
inputs: "concat_box_conf"
inputs: "Squeeze"
inputs: "concat_priorbox"
operation: "_NMS_TRT"
fields {
key: "backgroundLabelId_u_int"
value {
i: 0
}
}
fields {
key: "confSigmoid_u_int"
value {
i: 1
}
}
fields {
key: "confidenceThreshold_u_float"
value {
d: 1e-08
}
}
fields {
key: "inputOrder_u_ilist"
value {
i_list {
val: 1
val: 0
val: 2
}
}
}
fields {
key: "isNormalized_u_int"
value {
i: 1
}
}
fields {
key: "keepTopK_u_int"
value {
i: 100
}
}
fields {
key: "nmsThreshold_u_float"
value {
d: 0.6
}
}
fields {
key: "numClasses_u_int"
value {
i: 1
}
}
fields {
key: "shareLocation_u_int"
value {
i: 1
}
}
fields {
key: "topK_u_int"
value {
i: 100
}
}
fields {
key: "varianceEncodedInTarget_u_int"
value {
i: 0
}
}
}
}
fields {
key: "shape"
value {
i_list {
}
}
}
fields {
key: "values"
value {
ref: "weights_FeatureExtractor/MobilenetV2/Conv_1/BatchNorm/batchnorm/add/y"
}
}
}
then I copied /usr/src/tensorrt/samples/python/uff_ssd and changed
numClasses=1,
inputOrder=[1, 0, 2],
in utils/model.py,
the Class List to only one item in utils/coco.py
and the ssd_model_uff_path to my newly created uff model in detect_objets.py.
when I run python3 detect_objects.py …
I get the following error
TensorRT inference engine settings:
* Inference precision - DataType.FLOAT
* Max batch size - 1
Building TensorRT engine. This may take few minutes.
python3: nmsPlugin.cpp:136: virtual void nvinfer1::plugin::DetectionOutput::configureWithFormat(const nvinfer1::Dims*, int, const nvinfer1::Dims*, int, nvinfer1::DataType, nvinfer1::PluginFormat, int): Assertion `numPriors * param.numClasses == inputDims[param.inputOrder[1]].d[0]' failed.
Aborted (core dumped)
I saw on other posts, that some had error 135 when they didn’t change the input order, but no post with error in line 136 of nmsPlugin.cpp.
Did I forget to do something special, to get it working?