Getting an error while using a TensorRT engine

Hi, I want to run inference with my model using the following code:

import cv2
import time
import numpy as np
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
runtime = trt.Runtime(TRT_LOGGER)

host_inputs  = []
cuda_inputs  = []
host_outputs = []
cuda_outputs = []
bindings = []


def Inference(engine):
    image = cv2.imread("/home/jarf/Desktop/pypro/1pic/Images/B_AAQ_AKH_980920_11_1_0_L1.jpg")
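    # convert the HWC uint8 image to CHW float and scale pixels to [-1, 1]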
    image = (2.0 / 255.0) * image.transpose((2, 0, 1)) - 1.0

    np.copyto(host_inputs[0], image.ravel())
    stream = cuda.Stream()
    context = engine.create_execution_context()

    start_time = time.time()
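    # asynchronously copy the input to the GPU, run inference, copy the output back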
    cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
    context.execute_async(bindings=bindings, stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
    stream.synchronize()
    print("execute times "+str(time.time()-start_time))

    output = host_outputs[0].reshape(np.concatenate(([1], engine.get_binding_shape(1))))
    print(np.argmax(output))


def PrepareEngine():
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_workspace_size = 1 << 30
        with open('/home/jarf/Desktop/pypro/1pic/keras2onnx_1pic.onnx', 'rb') as model:
            if not parser.parse(model.read()):
                print('ERROR: Failed to parse the ONNX file.')
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
        engine = builder.build_cuda_engine(network)

        # create buffer
        for binding in engine:
            size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
            host_mem = cuda.pagelocked_empty(shape=[size],dtype=np.float32)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)

            bindings.append(int(cuda_mem))
            if engine.binding_is_input(binding):
                host_inputs.append(host_mem)
                cuda_inputs.append(cuda_mem)
            else:
                host_outputs.append(host_mem)
                cuda_outputs.append(cuda_mem)

        return engine


if __name__ == "__main__":
    engine = PrepareEngine()
    Inference(engine)

but when I run it I get the following error:

[TensorRT] WARNING: onnx2trt_utils.cpp:220: Your ONNX model has been generated with INT64 weights, while TensorRT does not natively support INT64. Attempting to cast down to INT32.
[TensorRT] ERROR: Network has dynamic or shape inputs, but no optimization profile has been defined.
[TensorRT] ERROR: Network validation failed.
Traceback (most recent call last):
  File "/home/jarf/Desktop/pypro/trt_engine.py", line 66, in <module>
    engine = PrepareEngine()
  File "/home/jarf/Desktop/pypro/trt_engine.py", line 49, in PrepareEngine
    for binding in engine:
TypeError: 'NoneType' object is not iterable

I want to know how to fix this.
Thanks for your attention.

Hi,

The error indicates that your model has dynamic input shapes, but the code never defines the corresponding optimization profile, so the builder returns None instead of an engine.
So please add the required profile information first:
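Below is a minimal sketch using the TensorRT 7 builder-config API. The (1, 3, 224, 224) min/opt/max shapes are placeholders, not your model's actual dimensions; query network.get_input(0).shape and substitute the real values:

config = builder.create_builder_config()
config.max_workspace_size = 1 << 30

# describe the shape range the engine must support for the dynamic input
profile = builder.create_optimization_profile()
input_name = network.get_input(0).name
profile.set_shape(input_name,
                  min=(1, 3, 224, 224),   # smallest allowed input shape
                  opt=(1, 3, 224, 224),   # shape the engine is tuned for
                  max=(1, 3, 224, 224))   # largest allowed input shape
config.add_optimization_profile(profile)

engine = builder.build_engine(network, config)

Two follow-up notes: since the network is created with EXPLICIT_BATCH, the execution context should set the concrete input shape before running (e.g. context.set_binding_shape(0, (1, 3, 224, 224))) and call context.execute_async_v2() instead of execute_async(). Also, if engine.get_binding_shape() still reports -1 for a dynamic dimension when you allocate buffers, size them from the profile's max shape instead.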

Thanks.

Hi, thank you so much for your answer. I'm new to this, so could you show me a full sample?

Sorry, we don't have a full sample for this case; you may need to study the API and implement it yourself.

Hi,
I am getting the same error. Were you able to fix it? If so, can you post your code?
Thanks.