Hi,
Trying to convert keras model to TensorRT (and then run on Xavier).
Even naive sample can not run.
I am trying the code below, it seems ok (generates the model file).
import keras.backend as K
from tensorflow.python.framework import graph_io
from tensorflow.python.tools import freeze_graph
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.training import saver as saver_lib
import tensorflow as tf
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Add, Activation, Concatenate, Input
from keras.models import Model
def create_model():
input_layer = Input(shape=(None,None,3),name=‘input’)
x = Conv2D(16, 3, activation=‘linear’, padding=‘same’, strides=(1,1))(input_layer)
return Model(inputs=input_layer,outputs=x)
def convert_keras_to_pb(models_dir, model_filename):
model = create_model()
K.set_learning_phase(0)
sess = K.get_session()
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
checkpoint_path = saver.save(sess, ‘./saved_ckpt’, global_step=0, latest_filename=‘checkpoint_state’)
graph_io.write_graph(sess.graph, '.', 'tmp.pb')
out_names = [node.name for node in tf.get_default_graph().as_graph_def().node]
freeze_graph.freeze_graph('./tmp.pb', '',
False, checkpoint_path, ','.join(out_names),
"save/restore_all", "save/Const:0",
models_dir+model_filename, False, "")
convert_keras_to_pb(‘/home/l/tensorrt/models/’, ‘modelFile’)
BUT: when I am issuing:
/usr/local/bin/convert-to-uff -i models/modelFile -o models/modelFile.uff
I get:
…
Using output node save/restore_all
Converting to UFF graph
Warning: No conversion function registered for layer: NoOp yet.
Converting save/restore_all as custom op: NoOp
Warning: No conversion function registered for layer: Assign yet.
Converting save/Assign_1 as custom op: Assign
Warning: No conversion function registered for layer: RestoreV2 yet.
Converting save/RestoreV2 as custom op: RestoreV2
Traceback (most recent call last):
File “/usr/local/bin/convert-to-uff”, line 11, in
sys.exit(main())
File “/usr/local/lib/python3.5/dist-packages/uff/bin/convert_to_uff.py”, line 89, in main
debug_mode=args.debug
File “/usr/local/lib/python3.5/dist-packages/uff/converters/tensorflow/conversion_helpers.py”, line 187, in from_tensorflow_frozen_model
return from_tensorflow(graphdef, output_nodes, preprocessor, **kwargs)
File “/usr/local/lib/python3.5/dist-packages/uff/converters/tensorflow/conversion_helpers.py”, line 157, in from_tensorflow
debug_mode=debug_mode)
File “/usr/local/lib/python3.5/dist-packages/uff/converters/tensorflow/converter.py”, line 94, in convert_tf2uff_graph
uff_graph, input_replacements, debug_mode=debug_mode)
File “/usr/local/lib/python3.5/dist-packages/uff/converters/tensorflow/converter.py”, line 79, in convert_tf2uff_node
op, name, tf_node, inputs, uff_graph, tf_nodes=tf_nodes, debug_mode=debug_mode)
File “/usr/local/lib/python3.5/dist-packages/uff/converters/tensorflow/converter.py”, line 47, in convert_layer
return cls.registry_[op](name, tf_node, inputs, uff_graph, **kwargs)
File “/usr/local/lib/python3.5/dist-packages/uff/converters/tensorflow/converter_functions.py”, line 27, in convert_const
array = tf2uff.convert_tf2numpy_const_node(tf_node)
File “/usr/local/lib/python3.5/dist-packages/uff/converters/tensorflow/converter.py”, line 151, in convert_tf2numpy_const_node
array = np.frombuffer(data, dtype=np_dtype)
ValueError: itemsize cannot be zero in type
This is the most naive sample I can think of.
Any ideas ?