Running LPRNet with TensorRT in Python shows pycuda._driver.MemoryError: cuMemHostAlloc failed: out of memory

Hi, I am deploying the LPRNet model downloaded from NGC in Python, but it usually fails with:

root@8b2a8f399e34:/workspace/lpr# python3 trt_new.py
Traceback (most recent call last):
  File "trt_new.py", line 83, in <module>
    inputs, outputs, bindings, stream = allocate_buffers(trt_engine)
  File "trt_new.py", line 43, in allocate_buffers
    host_mem = cuda.pagelocked_empty(size, dtype)
pycuda._driver.MemoryError: cuMemHostAlloc failed: out of memory

I debugged and found that the code stops because the buffer size computed from the engine binding is negative:

> /workspace/lpr/trt_new.py(41)allocate_buffers()
-> dtype = trt.nptype(engine.get_binding_dtype(binding))
(Pdb) p size
-13824
(Pdb) n
> /workspace/lpr/trt_new.py(43)allocate_buffers()
-> host_mem = cuda.pagelocked_empty(size, dtype)
(Pdb) 
pycuda._driver.MemoryError: cuMemHostAlloc failed: out of memory
> /workspace/lpr/trt_new.py(43)allocate_buffers()
-> host_mem = cuda.pagelocked_empty(size, dtype)
(Pdb) 
--Return--
> /workspace/lpr/trt_new.py(43)allocate_buffers()->None

My Python code is below:

import os
import time

import cv2
#import matplotlib.pyplot as plt
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
from PIL import Image
import pdb


class HostDeviceMem(object):
    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)

    def __repr__(self):
        return self.__str__()


def load_engine(trt_runtime, engine_path):
    with open(engine_path, "rb") as f:
        engine_data = f.read()
    engine = trt_runtime.deserialize_cuda_engine(engine_data)
    return engine

# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine, batch_size=1):
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    for binding in engine:
        # pdb.set_trace()
        size = trt.volume(engine.get_binding_shape(binding)) * batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
            print(f"input: shape:{engine.get_binding_shape(binding)} dtype:{engine.get_binding_dtype(binding)}")
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
            print(f"output: shape:{engine.get_binding_shape(binding)} dtype:{engine.get_binding_dtype(binding)}")
    return inputs, outputs, bindings, stream



def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async(
        batch_size=batch_size, bindings=bindings, stream_handle=stream.handle
    )
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]

# TensorRT logger singleton
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_engine_path = "lpr_us_onnx_int8.trt"

trt_runtime = trt.Runtime(TRT_LOGGER)
# pdb.set_trace()
trt_engine = load_engine(trt_runtime, trt_engine_path)
# Execution context is needed for inference
context = trt_engine.create_execution_context()
# This allocates memory for network inputs/outputs on both CPU and GPU
inputs, outputs, bindings, stream = allocate_buffers(trt_engine)

# pdb.set_trace()
image = cv2.imread("./car1.jpg")
image = cv2.resize(image, (96, 48))

np.copyto(inputs[0].host, image.ravel())

outputs = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
print(outputs)

The .trt engine file itself is fine; I can run inference correctly with the lprnet command. I think my problem is the same as the one described in the topic "Python run LPRNet with TensorRT".

One reference for LPRNet inference: see GitHub - NVIDIA-AI-IOT/deepstream_lpr_app (sample app code for LPR deployment on DeepStream), following the LPRNet — Transfer Learning Toolkit 3.0 documentation.

You can have a look, although it is not Python code.

Hi, have you solved the error? I have the same problem.

Hi distantyy,

Please open a new topic for your issue. Thanks

@3204657659 @distantyy
The LPRNet TRT engine has a dynamic shape (you can confirm this with the check sketched after this list).
Please change the following:

  • batch-size
  • pre/post-processing
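
To see where the negative size comes from, you can print the binding shapes of the deserialized engine; the dynamic batch dimension shows up as -1, which makes trt.volume() negative. A minimal check, reusing the trt_engine object from the script above:

# Minimal sketch: confirm the dynamic batch dimension of the engine.
# Reuses the trt_engine object created in the script above.
for binding in trt_engine:
    shape = trt_engine.get_binding_shape(binding)  # e.g. (-1, 3, 48, 96) for the input
    print(binding, shape, trt.volume(shape))       # volume is negative when a dim is -1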

For reference,
change

def allocate_buffers(engine, batch_size=1)

to

def allocate_buffers(engine, batch_size=-1)
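
With the default batch_size=1, the -1 from the dynamic batch dimension stays in the computed size; multiplying by -1 cancels it, so the host buffer is sized for a single batch. A rough illustration, assuming the input binding shape is (-1, 3, 48, 96):

import tensorrt as trt

shape = (-1, 3, 48, 96)        # assumed dynamic input shape
print(trt.volume(shape) * 1)   # -13824 -> cuMemHostAlloc fails on a negative size
print(trt.volume(shape) * -1)  #  13824 -> valid host buffer size for batch 1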

Add the two lines below

input_shape = (1,3,48,96)
context.set_binding_shape(0, input_shape)

before
outputs = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)

In pre-processing, after resizing, divide the image by 255.0 and then transpose the image batch from NHWC to NCHW (see the combined sketch below).
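
Putting the steps together, a minimal sketch of the revised pre-processing and shape binding, assuming the input binding is float32 NCHW with shape (1, 3, 48, 96); it would replace the resize/np.copyto lines in the original script:

# Pre-processing sketch (assumes float32 NCHW input, values scaled to [0, 1]).
image = cv2.imread("./car1.jpg")
image = cv2.resize(image, (96, 48))        # (width, height) -> HWC array of (48, 96, 3)
image = image.astype(np.float32) / 255.0   # scale to [0, 1]
image = np.transpose(image, (2, 0, 1))     # HWC -> CHW
image = np.expand_dims(image, axis=0)      # add batch dim -> NCHW (1, 3, 48, 96)

input_shape = (1, 3, 48, 96)
context.set_binding_shape(0, input_shape)  # fix the dynamic batch dimension before inference

np.copyto(inputs[0].host, image.ravel())
outputs = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)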
