TensorRT C++

Hi! I try to run tensorrt inference pipeline in c++
This is my code:

#include <iostream>
#include <fstream>

#include <opencv2/opencv.hpp>
#include <cuda_runtime_api.h>
#include <NvInfer.h>
#include <NvOnnxParser.h>

class Logger : public nvinfer1::ILogger {
public:
    void log(Severity severity, const char* msg) noexcept override {
        if ((severity == Severity::kWARNING) || (severity == Severity::kINTERNAL_ERROR)) {
            std::cout << msg << std::endl;
        }
    }
} logger;

int main() {
    std::string onnx_path = "./yolov5.onnx";
    std::string engine_path = "trt.engine";

    nvinfer1::ICudaEngine* engine;
    nvonnxparser::IParser* parser;
    nvinfer1::IBuilderConfig* config;
    nvinfer1::INetworkDefinition* network;
    nvinfer1::IBuilder* builder;
    nvinfer1::IRuntime* infer;

    // Building engine
    builder = nvinfer1::createInferBuilder(logger);
    builder->setMaxBatchSize(4);

    network = builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));

    parser = nvonnxparser::createParser(*network, logger);
    if (!(parser->parseFromFile(onnx_path.c_str(), static_cast<uint32_t>(nvinfer1::ILogger::Severity::kWARNING)))) {
        //we use parseFromFile instead of parse, since it has better error logging
        for (int i = 0; i < parser->getNbErrors(); i++) {
            std::cout << parser->getError(i)->desc() << std::endl;
        }
        throw std::runtime_error("Could not parse onnx model from file");
    };
    config = builder->createBuilderConfig();

    config->setMaxWorkspaceSize(1<<25);
    config->setFlag(nvinfer1::BuilderFlag::kFP16);

    nvinfer1::IHostMemory* serialized_engine = builder->buildSerializedNetwork(*network, *config);
}

This is my cmakelists file:

cmake_minimum_required(VERSION 3.10)
project(Test)

set(CMAKE_CXX_STANDARD 17)
set(CUDA_TOOLKIT_ROOT_DIR /usr/local/cuda)

include_directories(/usr/include/opencv4/)
include_directories(/usr/lib/aarch64-linux-gnu)

find_package(OpenCV REQUIRED)
find_package(CUDA REQUIRED)

link_directories(/usr/lib/aarch64-linux-gnu/tegra)

add_executable(Test main.cpp)

target_include_directories(Test PUBLIC ${OpenCV_INCLUDE_DIRS} ${CUDA_INCLUDE_DIRS})
target_link_libraries(Test PUBLIC ${OpenCV_LIBS} ${CUDA_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} ${NVONNXPARSER_LIBRARY})

And when I run I have this errors:

CMakeFiles/Test.dir/main.cpp.o: In function `main':
main.cpp:(.text.startup+0x88): undefined reference to `createInferBuilder_INTERNAL'
collect2: error: ld returned 1 exit status
CMakeFiles/Test.dir/build.make:164: recipe for target 'Test' failed
make[3]: *** [BerlinTest] Error 1
CMakeFiles/Makefile2:67: recipe for target 'CMakeFiles/Test.dir/all' failed
make[2]: *** [CMakeFiles/Test.dir/all] Error 2
CMakeFiles/Makefile2:79: recipe for target 'CMakeFiles/Test.dir/rule' failed
make[1]: *** [CMakeFiles/Test.dir/rule] Error 2
Makefile:118: recipe for target 'Test' failed
make: *** [Test] Error 2

Search for the undefined reference:

createInferBuilder_INTERNAL is defined in libnvinfer.so

According to undefined reference to symbol 'createInferBuilder_INTERNAL' - #10 by oren.bell, add ${NVINFER_LIBRARY} to your target_link_libraries

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.