[Error] Can't convert model ONNX to model tensorrt

Hi! Im struggling with convert model ONNX to tensorrt as i mentioned in the title. The problem is the code worked fine if i didn’t assign any dynamic_axes parameter in torch.onnx.export

but when i tried to convert vary_resnet50_images.onnx by (had dynamic_axes)

create ONNX model
ONNX_FILE_PATH = ‘vary_resnet50_images.onnx’
torch.onnx.export(model, input[‘image’], ONNX_FILE_PATH, input_names=[‘input’],
output_names=[‘output’], export_params=True,
dynamic_axes={
‘input’: {0: ‘batch_size’, 2: ‘height’, 3: ‘width’},
# Allow the batch size, height, and width to be dynamic
‘output’: {0: ‘batch_size’, 2: ‘height’, 3: ‘width’} # Similarly for the output
}
)
#build tensorrt

def build_engine_and_save(onnx_file_path, engine_file_path):
print(‘start build engine…’)
start_time = time.time()

  # initialize TensorRT engine and parse ONNX model
  builder = trt.Builder(TRT_LOGGER)
  network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
  network = builder.create_network(flags=network_flags)
  parser = trt.OnnxParser(network, TRT_LOGGER)
  # parse ONNX
  with open(onnx_file_path, 'rb') as model:
      print('Beginning ONNX file parsing')
      if not parser.parse(model.read()):
          for error in range(parser.num_errors):
              print(parser.get_error(error))
      print('Completed parsing of ONNX file')

  # allow TensorRT to use up to 1GB of GPU memory for tactic selection
  config = builder.create_builder_config()
  config.max_workspace_size = 10 << 30  # 1GB

  # use FP16 mode if possible
  if builder.platform_has_fast_fp16:
      config.set_flag(trt.BuilderFlag.FP16)

  # generate TensorRT engine optimized for the target platform
  print('Building an engine...')
  engine = builder.build_engine(network, config)
  print('checktype: ', type(engine))

  # Serialize the engine to a file
  with open(engine_file_path, 'wb') as f:
      f.write(engine.serialize())

  print("Completed creating and serializing Engine, time consumed: ", time.time() - start_time)

  return engine

def load_engine(engine_file_path):
with open(engine_file_path, ‘rb’) as f:
runtime = trt.Runtime(TRT_LOGGER)
engine = runtime.deserialize_cuda_engine(f.read())
context = engine.create_execution_context()
return engine, context

engine_file_path = ‘./new_model.trt’
ONNX_FILE_PATH = “./vary_resnet50_images.onnx”
if os.path.exists(engine_file_path):
print(‘Loading serialized engine…’)
engine, context = load_engine(engine_file_path)
else:
print(‘Serialized engine not found, building engine…’)
engine = build_engine_and_save(ONNX_FILE_PATH, engine_file_path)
context = engine.create_execution_context()

i got this error:

Serialized engine not found, building engine…
start build engine…
Beginning ONNX file parsing
Completed parsing of ONNX file
/point/huongntl2/ocrc-train-text-detection/./convert_model.py:376: DeprecationWarning: Use set_memory_pool_limit instead.
config.max_workspace_size = 10 << 30 # 1GB
Building an engine…
/point/huongntl2/ocrc-train-text-detection/./convert_model.py:384: DeprecationWarning: Use build_serialized_network instead.
engine = builder.build_engine(network, config)
Segmentation fault (core dumped)

This is some information about my environment:
tensorrt: 8.6.1
onnx: 1.16.1