Description
Hey guys,
I am converting PSENet from PyTorch to ONNX and finally to TensorRT, but I ran into the error mentioned in the topic title. Has anyone met this before and have any idea what causes it?
Here is my code for building the engine:
import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

def build_engine(model_path):
    with trt.Builder(TRT_LOGGER) as builder, \
            builder.create_network(EXPLICIT_BATCH) as network, \
            builder.create_builder_config() as config, \
            trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_workspace_size = 1 << 20
        builder.max_batch_size = 1
        with open(model_path, "rb") as f:
            parser.parse(f.read())
        # network.add_input("data", trt.float32, (1, 3, -1, -1))
        profile = builder.create_optimization_profile()
        profile.set_shape("data", (1, 3, 100, 100), (1, 3, 896, 1312), (1, 3, 2000, 3000))
        config.add_optimization_profile(profile)
        last_layer = network.get_layer(network.num_layers - 1)
        print(config)
        network.mark_output(last_layer.get_output(0))
        engine = builder.build_engine(network, config)
        return engine
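
One thing I noticed afterwards is that I ignore the return value of parser.parse() above. Below is a minimal sketch of how the parser errors could be dumped, in case that helps narrow things down (just illustrative, not part of my actual script; check_onnx_parse is a name I made up):

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

def check_onnx_parse(model_path):
    # Hypothetical helper: parse the ONNX file and print any parser errors.
    with trt.Builder(TRT_LOGGER) as builder, \
            builder.create_network(EXPLICIT_BATCH) as network, \
            trt.OnnxParser(network, TRT_LOGGER) as parser:
        with open(model_path, "rb") as f:
            ok = parser.parse(f.read())
        if not ok:
            # parser.parse() returns False on failure; the reasons can be read back
            for i in range(parser.num_errors):
                print(parser.get_error(i))
        return ok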
And the code for the ONNX conversion:
import torch

def export_onnx_model(model, onnx_path, input_image, input_names=None,
                      output_names=None, dynamic_axes=None):
    inputs = input_image
    model(inputs)
    torch.onnx.export(model,
                      inputs,
                      onnx_path,
                      input_names=input_names,
                      output_names=output_names,
                      dynamic_axes=dynamic_axes)

export_onnx_model(model, onnx_path, image_preprocessed, ["input"], ["output"], {
    "input": [2, 3],
    "output": [2, 3]
})
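
For reference, the input/output names and shapes baked into the exported graph can be checked with the onnx package before setting up the optimization profile (a minimal sketch; "psenet.onnx" is just a placeholder for my onnx_path):

import onnx

# Load the exported model and print the graph input names and shapes,
# so the name passed to profile.set_shape() can be compared against them.
onnx_model = onnx.load("psenet.onnx")  # placeholder path
for inp in onnx_model.graph.input:
    dims = [d.dim_param or d.dim_value for d in inp.type.tensor_type.shape.dim]
    print(inp.name, dims)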
Environment
TensorRT Version: 7.0
GPU Type: V100
Nvidia Driver Version: 440.100
CUDA Version: 10.2
CUDNN Version: 7.6.5
Operating System + Version: Ubuntu 18.04
Python Version (if applicable): 3.7.5
PyTorch Version (if applicable): 1.5.1