TensorRT C++

Hi! I’m trying to build my app in Docker and I get this error:

/usr/bin/ld: warning: libnvmedia_tensor.so, needed by /usr/lib/gcc/aarch64-linux-gnu/9/../../../aarch64-linux-gnu/libnvinfer.so, not found (try using -rpath or -rpath-link)
/usr/bin/ld: warning: libnvmedia_dla.so, needed by /usr/lib/gcc/aarch64-linux-gnu/9/../../../aarch64-linux-gnu/libnvinfer.so, not found (try using -rpath or -rpath-link)
/usr/bin/ld: /usr/lib/gcc/aarch64-linux-gnu/9/../../../aarch64-linux-gnu/libnvinfer.so: undefined reference to `nvdla::IProfile::setUseSoftMaxOptz(bool)'
/usr/bin/ld: /usr/lib/gcc/aarch64-linux-gnu/9/../../../aarch64-linux-gnu/libnvinfer.so: undefined reference to `nvdla::IPoolingLayer::setPoolingPaddingInclusionType(nvdla::PoolingPaddingInclusionType)'
/usr/bin/ld: /usr/lib/gcc/aarch64-linux-gnu/9/../../../aarch64-linux-gnu/libnvinfer.so: undefined reference to `nvdla::INetwork::addSlice(nvdla::ITensor*, nvdla::Weights, nvdla::Weights, nvdla::Weights, nvdla::Weights, nvdla::SliceLayerMode)'
/usr/bin/ld: /usr/lib/gcc/aarch64-linux-gnu/9/../../../aarch64-linux-gnu/libnvinfer.so: undefined reference to `nvdla::IProfile::setCanCompressStructuredSparseWeights(bool)'
/usr/bin/ld: /usr/lib/gcc/aarch64-linux-gnu/9/../../../aarch64-linux-gnu/libnvinfer.so: undefined reference to `nvdla::INetwork::addResize(nvdla::ITensor*, nvdla::ResizeMode, nvdla::Weights)'
/usr/bin/ld: /usr/lib/gcc/aarch64-linux-gnu/9/../../../aarch64-linux-gnu/libnvinfer.so: undefined reference to `nvdla::ISoftMaxLayer::setAxis(int)'
collect2: error: ld returned 1 exit status
make[2]: *** [CMakeFiles/BerlinTest.dir/build.make:183: Test] Error 1
make[1]: *** [CMakeFiles/Makefile2:73: CMakeFiles/Test.dir/all] Error 2
make: *** [Makefile:84: all] Error 2

This is my Dockerfile:

FROM nvcr.io/nvidia/l4t-tensorrt:r8.4.1.5-devel

ARG DEBIAN_FRONTEND=noninteractive
ARG OPENCV_VERSION=4.5.0

WORKDIR /root

RUN apt-get update && apt-get install -y build-essential \
    cmake \
    git \
    unzip \
    pkg-config \
    libjpeg-dev \
    libpng-dev \
    libtiff-dev \
    libavcodec-dev \
    libavformat-dev \
    libswscale-dev \
    libgtk2.0-dev \
    libcanberra-gtk* \
    python3-dev \
    python3-numpy \
    python3-pip \
    libxvidcore-dev \
    libx264-dev \
    libgtk-3-dev \
    libtbb2 \
    libtbb-dev \
    libdc1394-22-dev \
    gstreamer1.0-tools \
    libv4l-dev \
    v4l-utils \
    libgstreamer1.0-dev \
    libgstreamer-plugins-base1.0-dev \
    libavresample-dev \
    libvorbis-dev \
    libxine2-dev \
    libfaac-dev \
    libmp3lame-dev \
    libtheora-dev \
    libopencore-amrnb-dev \
    libopencore-amrwb-dev \
    libopenblas-dev \
    libatlas-base-dev libblas-dev \
    liblapack-dev \
    libeigen3-dev \
    gfortran \
    libhdf5-dev \
    protobuf-compiler \
    libprotobuf-dev \
    libgoogle-glog-dev \
    libgflags-dev \
    wget

RUN sh -c "echo '/usr/local/cuda/lib64' >> /etc/ld.so.conf.d/nvidia-tegra.conf" && ldconfig
RUN wget -O opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip
RUN wget -O opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip

RUN unzip opencv.zip && unzip opencv_contrib.zip && \
    mv opencv-4.5.0 opencv && mv opencv_contrib-4.5.0 opencv_contrib && \
    rm -rf opencv.zip && rm -rf opencv_contrib.zip

RUN mkdir -p opencv/build && cd opencv/build && \
    cmake -D CMAKE_BUILD_TYPE=RELEASE \
    -D CMAKE_INSTALL_PREFIX=/usr \
    -D OPENCV_EXTRA_MODULES_PATH=~/opencv_contrib/modules \
    -D EIGEN_INCLUDE_PATH=/usr/include/eigen3 \
    -D WITH_OPENCL=OFF \
    -D WITH_CUDA=ON \
    -D CUDA_ARCH_BIN=5.3 \
    -D CUDA_ARCH_PTX="" \
    -D WITH_CUDNN=ON \
    -D WITH_CUBLAS=ON \
    -D ENABLE_FAST_MATH=ON \
    -D CUDA_FAST_MATH=ON \
    -D OPENCV_DNN_CUDA=ON \
    -D ENABLE_NEON=ON \
    -D WITH_QT=OFF \
    -D WITH_OPENMP=ON \
    -D BUILD_TIFF=ON \
    -D WITH_FFMPEG=ON \
    -D WITH_GSTREAMER=ON \
    -D WITH_TBB=ON \
    -D BUILD_TBB=ON \
    -D BUILD_TESTS=OFF \
    -D WITH_EIGEN=ON \
    -D WITH_V4L=ON \
    -D WITH_LIBV4L=ON \
    -D OPENCV_ENABLE_NONFREE=ON \
    -D INSTALL_C_EXAMPLES=OFF \
    -D INSTALL_PYTHON_EXAMPLES=OFF \
    -D BUILD_NEW_PYTHON_SUPPORT=ON \
    -D BUILD_opencv_python3=TRUE \
    -D OPENCV_GENERATE_PKGCONFIG=ON \
    -D BUILD_EXAMPLES=OFF .. && \
    make -j1 && make install

WORKDIR Test/

RUN apt-get install -y libmicrohttpd-dev

ADD . .

RUN mkdir build && cd build && cmake .. && make -j1

EXPOSE 3000:3000

CMD ["./build/Test"]

This is my CMakeLists.txt:

cmake_minimum_required(VERSION 3.10)
project(Test)

set(CMAKE_CXX_STANDARD 17)
set(CUDA_TOOLKIT_ROOT_DIR /usr/local/cuda)

include_directories(/usr/include/opencv4)
include_directories(/usr/lib/aarch64-linux-gnu)

find_package(OpenCV REQUIRED)
find_package(CUDA REQUIRED)

add_executable(Test main.cpp infer/logger.cpp infer/TRTInference.cpp)

target_include_directories(Test PUBLIC ${OpenCV_INCLUDE_DIRS} ${CUDA_INCLUDE_DIRS})
target_link_libraries(Test ${OpenCV_LIBS} ${CUDA_LIBRARIES})
target_link_libraries(Test nvinfer nvonnxparser microhttpd)

I’ve already set the default Docker runtime to nvidia (via /etc/docker/daemon.json, shown below).
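The standard nvidia-container-runtime configuration for this looks like the following (this is the documented default-runtime setup, which is what I’m using):

{
    "runtimes": {
        "nvidia": {
            "path": "nvidia-container-runtime",
            "runtimeArgs": []
        }
    },
    "default-runtime": "nvidia"
}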

Hi,

TensorRT 8.4 is available for JetPack 5 users.
However, JetPack 5 doesn’t support Jetson Nano.

Could you try nvcr.io/nvidia/l4t-tensorrt:r8.2.1-runtime instead?
Please note that this is a runtime container, so you will need to install the TensorRT development headers manually via apt.
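For example, something along these lines at the top of your Dockerfile should work (the package names below are the usual TensorRT dev packages; this assumes the NVIDIA/L4T apt repository is reachable from inside the image, otherwise you will need to add it first):

FROM nvcr.io/nvidia/l4t-tensorrt:r8.2.1-runtime

# Install the TensorRT headers and dev symlinks that the runtime image does not ship.
# Package names assume the NVIDIA apt repository is configured in the base image.
RUN apt-get update && apt-get install -y \
        libnvinfer-dev \
        libnvinfer-plugin-dev \
        libnvonnxparsers-dev && \
    rm -rf /var/lib/apt/lists/*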

Thanks.
