Sorry, I have been stuck on this problem for a long time and have no idea how to solve it.
I need your help.
My situation is that I downloaded the NVIDIA/TensorRT repository from GitHub, and then I followed the steps to install it.
Currently I can pass the cmake step.
This is the command I typed:
cmake .. -DCMAKE_CUDA_COMPILER=/usr/local/cuda-10.2/bin/nvcc -DTRT_LIB_DIR=/home/chieh/Downloads/TensorRT-7.0.0.11/lib -DTRT_BIN_DIR=`pwd`/out
Its output is
Building for TensorRT version: 7.0.0.1, library version: 7.0.0
-- The CXX compiler identification is GNU 7.4.0
-- The CUDA compiler identification is NVIDIA 10.2.89
-- Check for working CXX compiler: /usr/bin/g++
-- Check for working CXX compiler: /usr/bin/g++ -- works
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Check for working CUDA compiler: /usr/local/cuda-10.2/bin/nvcc
-- Check for working CUDA compiler: /usr/local/cuda-10.2/bin/nvcc -- works
-- Detecting CUDA compiler ABI info
-- Detecting CUDA compiler ABI info - done
-- Targeting TRT Platform: x86_64
-- GPU_ARCHS is not defined. Generating CUDA code for default SMs: 35;53;61;70;75
-- CUDA version set to 10.2
-- cuDNN version set to 7.6
-- Protobuf version set to 3.0.0
-- Looking for C++ include pthread.h
-- Looking for C++ include pthread.h - found
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed
-- Looking for pthread_create in pthreads
-- Looking for pthread_create in pthreads - not found
-- Looking for pthread_create in pthread
-- Looking for pthread_create in pthread - found
-- Found Threads: TRUE
-- Found PkgConfig: /usr/bin/pkg-config (found version "0.29.1")
-- Checking for one of the modules 'zlib'
-- Found CUDA: /usr/local/cuda-10.2 (found suitable version "10.2", minimum required is "10.2")
-- Using libprotobuf /home/chieh/github/TensorRT_fourth/build/third_party.protobuf/lib/libprotobuf.a
-- ========================= Importing and creating target nvinfer ==========================
-- Looking for library nvinfer
-- Library that was found /home/chieh/Downloads/TensorRT-7.0.0.11/lib/libnvinfer.so
-- ==========================================================================================
-- ========================= Importing and creating target nvuffparser ==========================
-- Looking for library nvparsers
-- Library that was found /home/chieh/Downloads/TensorRT-7.0.0.11/lib/libnvparsers.so
-- ==========================================================================================
-- Protobuf proto/trtcaffe.proto -> proto/trtcaffe.pb.cc proto/trtcaffe.pb.h
-- /home/chieh/github/TensorRT_fourth/build/parsers/caffe
-- The C compiler identification is GNU 7.4.0
-- Check for working C compiler: /usr/bin/cc
-- Check for working C compiler: /usr/bin/cc -- works
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Detecting C compile features
-- Detecting C compile features - done
-- Build type not set - defaulting to Release
Generated: /home/chieh/github/TensorRT_fourth/build/parsers/onnx/third_party/onnx/onnx/onnx_onnx2trt_onnx-ml.proto
Generated: /home/chieh/github/TensorRT_fourth/build/parsers/onnx/third_party/onnx/onnx/onnx-operators_onnx2trt_onnx-ml.proto
--
-- ******** Summary ********
-- CMake version : 3.16.2
-- CMake command : /usr/local/bin/cmake
-- System : Linux
-- C++ compiler : /usr/bin/g++
-- C++ compiler version : 7.4.0
-- CXX flags : -Wno-deprecated-declarations -DBUILD_SYSTEM=cmake_oss -Wall -Wno-deprecated-declarations -Wno-unused-function -Wnon-virtual-dtor
-- Build type : Release
-- Compile definitions : _PROTOBUF_INSTALL_DIR=/home/chieh/github/TensorRT_fourth/build;ONNX_NAMESPACE=onnx2trt_onnx
-- CMAKE_PREFIX_PATH :
-- CMAKE_INSTALL_PREFIX : /home/chieh/Downloads/TensorRT-7.0.0.11/lib/..
-- CMAKE_MODULE_PATH :
--
-- ONNX version : 1.6.0
-- ONNX NAMESPACE : onnx2trt_onnx
-- ONNX_BUILD_TESTS : OFF
-- ONNX_BUILD_BENCHMARKS : OFF
-- ONNX_USE_LITE_PROTO : OFF
-- ONNXIFI_DUMMY_BACKEND : OFF
-- ONNXIFI_ENABLE_EXT : OFF
--
-- Protobuf compiler :
-- Protobuf includes :
-- Protobuf libraries :
-- BUILD_ONNX_PYTHON : OFF
-- Found TensorRT headers at /home/chieh/github/TensorRT_fourth/include
-- Find TensorRT libs at /home/chieh/Downloads/TensorRT-7.0.0.11/lib/libnvinfer.so;/home/chieh/Downloads/TensorRT-7.0.0.11/lib/libnvinfer_plugin.so;/home/chieh/Downloads/TensorRT-7.0.0.11/lib/libmyelin.so
-- Found TENSORRT: /home/chieh/github/TensorRT_fourth/include
-- Adding new sample: sample_char_rnn
-- - Parsers Used: none
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_dynamic_reshape
-- - Parsers Used: onnx
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_fasterRCNN
-- - Parsers Used: caffe
-- - InferPlugin Used: ON
-- - Licensing: opensource
-- Adding new sample: sample_googlenet
-- - Parsers Used: caffe
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_int8
-- - Parsers Used: caffe
-- - InferPlugin Used: ON
-- - Licensing: opensource
-- Adding new sample: sample_int8_api
-- - Parsers Used: onnx
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_mlp
-- - Parsers Used: caffe
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_mnist
-- - Parsers Used: caffe
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_mnist_api
-- - Parsers Used: caffe
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_movielens
-- - Parsers Used: uff
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_movielens_mps
-- - Parsers Used: uff
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_nmt
-- - Parsers Used: none
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_onnx_mnist
-- - Parsers Used: onnx
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_plugin
-- - Parsers Used: caffe
-- - InferPlugin Used: ON
-- - Licensing: opensource
-- Adding new sample: sample_reformat_free_io
-- - Parsers Used: caffe
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_ssd
-- - Parsers Used: caffe
-- - InferPlugin Used: ON
-- - Licensing: opensource
-- Adding new sample: sample_uff_fasterRCNN
-- - Parsers Used: uff
-- - InferPlugin Used: ON
-- - Licensing: opensource
-- Adding new sample: sample_uff_maskRCNN
-- - Parsers Used: uff
-- - InferPlugin Used: ON
-- - Licensing: opensource
-- Adding new sample: sample_uff_mnist
-- - Parsers Used: uff
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_uff_plugin_v2_ext
-- - Parsers Used: uff
-- - InferPlugin Used: OFF
-- - Licensing: opensource
-- Adding new sample: sample_uff_ssd
-- - Parsers Used: uff
-- - InferPlugin Used: ON
-- - Licensing: opensource
-- Adding new sample: trtexec
-- - Parsers Used: caffe;uff;onnx
-- - InferPlugin Used: ON
-- - Licensing: opensource
-- Configuring done
-- Generating done
-- Build files have been written to: /home/chieh/github/TensorRT_fourth/build
I also followed the instruction from https://devtalk.nvidia.com/default/topic/1064407/transfer-learning-toolkit/how-to-export-model-using-tlt-converter-for-jetson-nano/post/5393964/#5393964.
$ vim CMakeFiles/third_party.protobuf.dir/build.make
and commented out line 90:
90 #cd /home/nvidia/trt-oss/TensorRT/build/third_party.protobuf/src && /usr/local/bin/cmake -P /home/nvidia/trt-oss/TensorRT/build/third_party.protobuf/src/third_party.protobuf-stamp/download-third_party.protobuf.cmake
Subsequently, I typed
make -j$(nproc)
Then I got the following errors:
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9534:87: note: suggested alternative: ‘cudaError_t’
extern __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out);
^~~~~~~~~~~
cudaError_t
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9534:107: error: ‘cudaGraphNode_t’ was not declared in this scope
extern __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out);
^~~~~~~~~~~~~~~
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9534:107: note: suggested alternative: ‘cudaGraphGetNodes’
extern __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out);
^~~~~~~~~~~~~~~
cudaGraphGetNodes
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9534:124: error: ‘hErrorNode_out’ was not declared in this scope
extern __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out);
^~~~~~~~~~~~~~
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9534:140: error: expected primary-expression before ‘enum’
extern __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out);
^~~~
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9534:188: error: expression list treated as compound expression in initializer [-fpermissive]
extern __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out);
^
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9559:55: error: ‘cudaGraphExec_t’ was not declared in this scope
extern __host__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream);
^~~~~~~~~~~~~~~
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9559:55: note: suggested alternative: ‘cudaGraphExecUpdate’
extern __host__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream);
^~~~~~~~~~~~~~~
cudaGraphExecUpdate
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9559:95: error: expected primary-expression before ‘stream’
extern __host__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream);
^~~~~~
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9559:101: error: expression list treated as compound expression in initializer [-fpermissive]
extern __host__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream);
^
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9580:60: error: ‘cudaGraphExec_t’ was not declared in this scope
extern __host__ cudaError_t CUDARTAPI cudaGraphExecDestroy(cudaGraphExec_t graphExec);
^~~~~~~~~~~~~~~
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9580:60: note: suggested alternative: ‘cudaGraphExecUpdate’
extern __host__ cudaError_t CUDARTAPI cudaGraphExecDestroy(cudaGraphExec_t graphExec);
^~~~~~~~~~~~~~~
cudaGraphExecUpdate
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9600:56: error: ‘cudaGraph_t’ was not declared in this scope
extern __host__ cudaError_t CUDARTAPI cudaGraphDestroy(cudaGraph_t graph);
^~~~~~~~~~~
/usr/local/cuda-10.2/include/cuda_runtime_api.h:9600:56: note: suggested alternative: ‘cudaError_t’
extern __host__ cudaError_t CUDARTAPI cudaGraphDestroy(cudaGraph_t graph);
^~~~~~~~~~~
cudaError_t
plugin/CMakeFiles/nvinfer_plugin_static.dir/build.make:205: recipe for target 'plugin/CMakeFiles/nvinfer_plugin_static.dir/proposalPlugin/proposalPlugin.cpp.o' failed
make[2]: *** [plugin/CMakeFiles/nvinfer_plugin_static.dir/proposalPlugin/proposalPlugin.cpp.o] Error 1
CMakeFiles/Makefile2:1356: recipe for target 'plugin/CMakeFiles/nvinfer_plugin_static.dir/all' failed
make[1]: *** [plugin/CMakeFiles/nvinfer_plugin_static.dir/all] Error 2
Makefile:151: recipe for target 'all' failed
make: *** [all] Error 2
Here is some additional information about a command I ran:
$whereis libnvinfer_plugin.so
libnvinfer_plugin: /usr/lib/x86_64-linux-gnu/libnvinfer_plugin.so
My environment information:
- Ubuntu 18.04
- Cuda 10.2
- cmake version 3.16.2
- TensorRT 7
- gcc version 7.4.0
Is there any suggestion for solving this problem?
Thank you!!