How to load a trained ONNX model on Jetson Nano

I have retrained a model using the link. I got an ONNX file, used it to create an engine file, and it works as I expected. For the whole example I used this repo.

For real-time inference I want to avoid using the above repo, so I decided to run the ONNX file directly on the TensorRT runtime.

For that I used the code below.

import cv2
import time
import numpy as np
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
runtime = trt.Runtime(TRT_LOGGER)

host_inputs  = []
cuda_inputs  = []
host_outputs = []
cuda_outputs = []
bindings = []


def Inference(engine):
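    # read the test image and copy it into the page-locked input buffer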
    image = cv2.imread("/usr/src/tensorrt/data/resnet50/airliner.ppm")
    np.copyto(host_inputs[0], image)
    stream = cuda.Stream()
    context = engine.create_execution_context()

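    # copy the input to the device, run inference, and copy the result back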
    start_time = time.time()
    cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
    context.execute_async(bindings=bindings, stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
    stream.synchronize()
    print("execute times "+str(time.time()-start_time))

    output = host_outputs[0].reshape(np.concatenate(([1],engine.get_binding_shape(1))))
    print(np.argmax(output))


def PrepareEngine():
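    # parse the ONNX file and build a CUDA engine from it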
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_workspace_size = 1 << 30
        with open('/usr/src/tensorrt/data/resnet50/ResNet50.onnx', 'rb') as model:
            if not parser.parse(model.read()):
                print('ERROR: Failed to parse the ONNX file.')
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
        engine = builder.build_cuda_engine(network)

        # create buffer
        for binding in engine:
            size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
            host_mem = cuda.pagelocked_empty(shape=[size],dtype=np.float32)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)

            bindings.append(int(cuda_mem))
            if engine.binding_is_input(binding):
                host_inputs.append(host_mem)
                cuda_inputs.append(cuda_mem)
            else:
                host_outputs.append(host_mem)
                cuda_outputs.append(cuda_mem)

        return engine


if __name__ == "__main__":
    engine = PrepareEngine()
    Inference(engine)

I am getting an error in the pre-processing stage.

Below is my stack trace.

[TensorRT] WARNING: onnx2trt_utils.cpp:220: Your ONNX model has been generated with INT64 weights, while TensorRT does not natively support INT64. Attempting to cast down to INT32.
[TensorRT] WARNING: onnx2trt_utils.cpp:246: One or more weights outside the range of INT32 was clamped
[TensorRT] WARNING: onnx2trt_utils.cpp:246: One or more weights outside the range of INT32 was clamped
[TensorRT] WARNING: onnx2trt_utils.cpp:246: One or more weights outside the range of INT32 was clamped
[TensorRT] INFO: Detected 1 inputs and 4 output network tensors.
Traceback (most recent call last):
  File "main.py", line 67, in <module>
    Inference(engine)
  File "main.py", line 23, in Inference
    np.copyto(host_inputs[0], img_data)
  File "<__array_function__ internals>", line 6, in copyto
ValueError: could not broadcast input array from shape (224,224,3) into shape (270000)
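
From the error, host_inputs[0] is a flat buffer of 270000 floats (sized from the engine binding), while the array being copied is still an unflattened 224x224x3 image. One way to check what the engine actually expects is to print its bindings; a minimal sketch, using the engine and trt objects from the script above (same TensorRT 7 API):

# print each binding's shape and dtype so the input image can be
# preprocessed to match what the engine expects
for binding in engine:
    shape = engine.get_binding_shape(binding)
    dtype = trt.nptype(engine.get_binding_dtype(binding))
    kind = "input" if engine.binding_is_input(binding) else "output"
    print(kind, binding, shape, dtype)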

Can someone help me resolve this issue?

I am moving the topic under Inference so it can get better attention.

Hi @thasin,

This looks like a Jetson-related issue. We recommend you post your question in the Jetson forum to get better help.

Thank you.

Refer to the file "/usr/src/tensorrt/samples/python/end_to_end_tensorflow_mnist/sample.py", line 89. In "main.py", replace line 23 (the np.copyto call) with the following.

from PIL import Image
test_case_path = "/usr/src/tensorrt/data/resnet50/airliner.ppm"
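# flatten the image to 1-D so it matches the flat page-locked host buffer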
image = np.array(Image.open(test_case_path)).ravel()
np.copyto(host_inputs[0], 1.0 - image / 255.0)
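
Note that the 1.0 - image / 255.0 normalization is carried over from the MNIST sample; the right preprocessing depends on how your model was trained. The raveled array must also contain exactly as many elements as the input buffer (270000 in the error above), so the image may first need resizing to the engine's input resolution. A hedged sketch, assuming the engine expects a 300x300x3 input and plain 0-1 scaling:

import cv2
import numpy as np

# hypothetical preprocessing: resize to the assumed engine input
# resolution so the flattened element count matches the host buffer
image = cv2.imread("/usr/src/tensorrt/data/resnet50/airliner.ppm")  # BGR order
image = cv2.resize(image, (300, 300)).astype(np.float32) / 255.0
# if the model expects RGB and/or CHW layout, convert before flattening:
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# image = image.transpose(2, 0, 1)
np.copyto(host_inputs[0], image.ravel())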