Description
import tensorflow as tf
import tf2onnx
import numpy as np
import onnxruntime as rt
def build_onnx_range():
    """Build a tiny TF graph whose output is ``tf.range(N)`` and export it to ONNX.

    The graph takes a ``[1, None]`` float32 placeholder, reads its dynamic
    second dimension ``N`` at run time, and outputs ``range(N)``.  The graph
    is converted with tf2onnx and serialized to ``tf_range.onnx``.
    """
    onnx_path = 'tf_range.onnx'
    # Second dimension is dynamic, so N is only known at inference time.
    inputs = tf.placeholder(tf.float32, shape=([1, None]), name='inputs')
    N = tf.shape(inputs)[1]
    outputs = tf.range(N)
    outputs = tf.identity(outputs, name='outputs')
    config = tf.ConfigProto(allow_soft_placement=True,
                            gpu_options=tf.GPUOptions(allow_growth=True))
    sess = tf.Session(config=config)
    try:
        # Convert the TF graph to ONNX and write the serialized model proto.
        onnx_graph = tf2onnx.tfonnx.process_tf_graph(
            sess.graph,
            input_names=["inputs:0"], output_names=["outputs:0"])
        onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)
        model_proto = onnx_graph.make_model("test")
        with open(onnx_path, "wb") as f:
            f.write(model_proto.SerializeToString())
    finally:
        # Ensure the session is released even if the conversion fails.
        sess.close()
def onnx_infer_range():
    """Load ``tf_range.onnx`` with onnxruntime, run it, and print the output.

    Feeds a ``[1, 8]`` all-ones float32 tensor; since the graph outputs
    ``range(N)`` of the dynamic second dimension, the expected result is
    ``[0 1 2 3 4 5 6 7]``.
    """
    onnx_path = 'tf_range.onnx'
    sess = rt.InferenceSession(onnx_path)
    inputs_name = sess.get_inputs()[0].name
    outputs_name = sess.get_outputs()[0].name
    print(inputs_name, outputs_name)
    [outputs_data] = sess.run(
        [outputs_name],
        {inputs_name: np.ones([1, 8], dtype=np.float32)})
    print('onnx_infer', outputs_data)
if __name__ == "__main__":
    # Export the model, then sanity-check it with onnxruntime.
    build_onnx_range()
    onnx_infer_range()
trtexec --explicitBatch --onnx=tf_range.onnx
[04/09/2020-02:50:16] [I] === Model Options ===
[04/09/2020-02:50:16] [I] Format: ONNX
[04/09/2020-02:50:16] [I] Model: /.../tf_range.onnx
[04/09/2020-02:50:16] [I] Output:
[04/09/2020-02:50:16] [I] === Build Options ===
[04/09/2020-02:50:16] [I] Max batch: explicit
[04/09/2020-02:50:16] [I] Workspace: 16 MB
[04/09/2020-02:50:16] [I] minTiming: 1
[04/09/2020-02:50:16] [I] avgTiming: 8
[04/09/2020-02:50:16] [I] Precision: FP32
[04/09/2020-02:50:16] [I] Calibration:
[04/09/2020-02:50:16] [I] Safe mode: Disabled
[04/09/2020-02:50:16] [I] Save engine:
[04/09/2020-02:50:16] [I] Load engine:
[04/09/2020-02:50:16] [I] Inputs format: fp32:CHW
[04/09/2020-02:50:16] [I] Outputs format: fp32:CHW
[04/09/2020-02:50:16] [I] Input build shapes: model
[04/09/2020-02:50:16] [I] === System Options ===
[04/09/2020-02:50:16] [I] Device: 0
[04/09/2020-02:50:16] [I] DLACore:
[04/09/2020-02:50:16] [I] Plugins:
[04/09/2020-02:50:16] [I] === Inference Options ===
[04/09/2020-02:50:16] [I] Batch: Explicit
[04/09/2020-02:50:16] [I] Iterations: 10
[04/09/2020-02:50:16] [I] Duration: 3s (+ 200ms warm up)
[04/09/2020-02:50:16] [I] Sleep time: 0ms
[04/09/2020-02:50:16] [I] Streams: 1
[04/09/2020-02:50:16] [I] ExposeDMA: Disabled
[04/09/2020-02:50:16] [I] Spin-wait: Disabled
[04/09/2020-02:50:16] [I] Multithreading: Disabled
[04/09/2020-02:50:16] [I] CUDA Graph: Disabled
[04/09/2020-02:50:16] [I] Skip inference: Disabled
[04/09/2020-02:50:16] [I] Inputs:
[04/09/2020-02:50:16] [I] === Reporting Options ===
[04/09/2020-02:50:16] [I] Verbose: Disabled
[04/09/2020-02:50:16] [I] Averages: 10 inferences
[04/09/2020-02:50:16] [I] Percentile: 99
[04/09/2020-02:50:16] [I] Dump output: Disabled
[04/09/2020-02:50:16] [I] Profile: Disabled
[04/09/2020-02:50:16] [I] Export timing to JSON file:
[04/09/2020-02:50:16] [I] Export output to JSON file:
[04/09/2020-02:50:16] [I] Export profile to JSON file:
[04/09/2020-02:50:16] [I]
----------------------------------------------------------------
Input filename: /.../tf_range.onnx
ONNX IR version: 0.0.6
Opset version: 8
Producer name: tf2onnx
Producer version: 1.5.5
Domain:
Model version: 0
Doc string:
----------------------------------------------------------------
[04/09/2020-02:50:18] [E] [TRT] (Unnamed Layer* 15) [Constant]: invalid weights type of Bool
[04/09/2020-02:50:18] [E] [TRT] (Unnamed Layer* 15) [Constant]: invalid weights type of Bool
Segmentation fault (core dumped)
How can I fix this problem?
Description of the issue: onnxruntime runs the exported model correctly, but TensorRT 7.0's trtexec fails to parse the same ONNX file (opset 8, produced by tf2onnx 1.5.5) with "(Unnamed Layer* 15) [Constant]: invalid weights type of Bool" and then crashes with a segmentation fault. The model's only non-trivial op is a `Range` over a dynamic input dimension, which tf2onnx at opset 8 lowers to a subgraph containing Bool constants that TensorRT does not accept.
Environment
TensorRT Version: 7.0.0.11
GPU Type: P40
Nvidia Driver Version: 418.56
CUDA Version: 10.0
CUDNN Version: 7.6.5.32
Operating System + Version: ubuntu 16.04
Python Version (if applicable): 3.6.4
TensorFlow Version (if applicable): 1.13.2
Relevant Files
Please attach or include links to any models, data, files, or scripts necessary to reproduce your issue. (Github repo, Google Drive, Dropbox, etc.)
Steps To Reproduce
Please include:
- Exact steps/commands to build your repro
- Exact steps/commands to run your repro
- Full traceback of errors encountered