Run LPRNet with TensorRT in Python

Hi there, I'm trying to run LPRNet with TensorRT in Python on a Jetson NX. The JetPack version is 4.4.0. I downloaded the model from NGC and converted the .etlt model to an .engine model with the following command:
tlt-converter -k nvidia_tlt -p image_input,1x3x48x96,4x3x48x96,16x3x48x96 us_lprnet_baseline18_deployable.etlt -t fp16 -e lpr_us_onnx_b16.engine

Below is the script I used for inference:

import os
import time

import cv2
#import matplotlib.pyplot as plt
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
from PIL import Image


class HostDeviceMem(object):
    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)

    def __repr__(self):
        return self.__str__()


def load_engine(trt_runtime, engine_path):
    with open(engine_path, "rb") as f:
        engine_data = f.read()
    engine = trt_runtime.deserialize_cuda_engine(engine_data)
    return engine

# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    for binding in engine:
        size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
    return inputs, outputs, bindings, stream



def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async(
        batch_size=batch_size, bindings=bindings, stream_handle=stream.handle
    )
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]

# TensorRT logger singleton
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_engine_path = "lpr_us_onnx_b16.engine"

trt_runtime = trt.Runtime(TRT_LOGGER)
trt_engine = load_engine(trt_runtime, trt_engine_path)
# Execution context is needed for inference
context = trt_engine.create_execution_context()
# This allocates memory for network inputs/outputs on both CPU and GPU
inputs, outputs, bindings, stream = allocate_buffers(trt_engine)

image = cv2.imread("license_plate_us.png")
image = cv2.resize(image, (96, 48))

np.copyto(inputs[0].host, image.ravel())

outputs = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
print(outputs)

However, I got the following error when allocating the buffers:
pycuda._driver.MemoryError: cuMemHostAlloc failed: out of memory

When I looked into the issue, I found that engine.get_binding_shape(binding) returns [-1, 3, 48, 96], i.e. the batch dimension is -1 (dynamic), which makes the computed buffer size negative. I'm not sure whether the conversion went wrong or whether this is just how the model works.
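
From what I can tell, the -1 comes from the dynamic min/opt/max profile passed to tlt-converter with -p, so I suspect the batch dimension has to be fixed on the execution context before the buffers are sized. Something like the untested sketch below is what I have in mind (the batch size of 1 and the use of set_binding_shape / execute_async_v2 are just my guesses), but I'm not sure it's the right approach:

# Untested sketch: with a dynamic-shape (explicit-batch) engine, fix the input
# shape on the execution context first, then size the buffers from the context
# shapes instead of the engine shapes (where the batch dimension is -1).
context = trt_engine.create_execution_context()
context.set_binding_shape(0, (1, 3, 48, 96))  # pick a concrete batch size

inputs, outputs, bindings = [], [], []
stream = cuda.Stream()
for idx in range(trt_engine.num_bindings):
    shape = context.get_binding_shape(idx)    # fully specified once input is set
    size = trt.volume(shape)                  # no max_batch_size factor needed
    dtype = trt.nptype(trt_engine.get_binding_dtype(idx))
    host_mem = cuda.pagelocked_empty(size, dtype)
    device_mem = cuda.mem_alloc(host_mem.nbytes)
    bindings.append(int(device_mem))
    if trt_engine.binding_is_input(idx):
        inputs.append(HostDeviceMem(host_mem, device_mem))
    else:
        outputs.append(HostDeviceMem(host_mem, device_mem))

# For explicit-batch engines I believe inference then goes through
# context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# rather than execute_async(batch_size=...).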

Any ideas why this is happening? Is there any sample code that I can follow? Thanks in advance!


Hi @zhao.wang1,

This looks like a Jetson-related issue, so we recommend you post your query in the Jetson forum.

Thank you.

Hello @zhao.wang1,

Did you figure it out? I got arrays of numbers as output, but I'm struggling to understand how to convert them into a license plate.

Thanks,
Daniel

Never mind, I found out that the arrays it outputs are the indexes of the letters/numbers (0-34) and a confidence level for each letter/number.
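
In case it helps anyone else, below is a rough, untested sketch of how the index array can be mapped back to characters. I'm assuming a greedy, CTC-style decode (drop the blank index and collapse repeats) and that the character order matches the characters file that ships with the model on NGC; the per-character confidences can be averaged for an overall plate score.

# Untested sketch: greedy decode of the index output.
# 'chars' is the character list from the model's characters file (in order);
# 'blank' is the index LPRNet emits for "no character" - I'm assuming it is
# the index right after the last real character.
def decode_plate(indexes, chars, blank):
    plate = []
    prev = blank
    for idx in indexes:
        idx = int(idx)
        if idx != blank and idx != prev:  # skip blanks and repeated characters
            plate.append(chars[idx])
        prev = idx
    return "".join(plate)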