Jetson Xavier NX
4.5.1 JetPack
I cloned the inference v1.0 repo from GitHub and followed the instructions to install the plugin, loadgen, and harness using the make command line. An error was found on “make build_plugins”.
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/softmaxScore.cpp: In function ‘ssdStatus_t nvinfer1::plugin::softmaxScore(cudaStream_t, int, int, int, int, DType_t, const void*, void*, cudnnHandle_t, cudnnTensorDescriptor_t, cudnnTensorDescriptor_t)’:
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/softmaxScore.cpp:43:19: warning: variable ‘status’ set but not used [-Wunused-but-set-variable]
cudnnStatus_t status;
^~~~~~
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/nmsPluginOpt.h(99): warning: function “nvinfer1::IPluginV2Ext::configurePlugin(const nvinfer1::Dims *, int, const nvinfer1::Dims *, int, const nvinfer1::DataType *, const nvinfer1::DataType *, const __nv_bool *, const __nv_bool *, nvinfer1::PluginFormat, int)” is hidden by “nvinfer1::plugin::DetectionOutputOptStatic::configurePlugin” – virtual function override intended?
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/nmsPluginOpt.h(115): warning: function “nvinfer1::IPluginV2::getOutputDimensions(int, const nvinfer1::Dims *, int)” is hidden by “nvinfer1::plugin::DetectionOutputOptDynamic::getOutputDimensions” – virtual function override intended?
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/nmsPluginOpt.h(118): warning: function “nvinfer1::IPluginV2Ext::configurePlugin(const nvinfer1::Dims *, int, const nvinfer1::Dims *, int, const nvinfer1::DataType *, const nvinfer1::DataType *, const __nv_bool *, const __nv_bool *, nvinfer1::PluginFormat, int)” is hidden by “nvinfer1::plugin::DetectionOutputOptDynamic::configurePlugin” – virtual function override intended?
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/nmsPluginOpt.h(120): warning: function “nvinfer1::IPluginV2::getWorkspaceSize(int) const” is hidden by “nvinfer1::plugin::DetectionOutputOptDynamic::getWorkspaceSize” – virtual function override intended?
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/nmsPluginOpt.h(122): warning: function “nvinfer1::IPluginV2::enqueue(int, const void *const *, void **, void *, cudaStream_t)” is hidden by “nvinfer1::plugin::DetectionOutputOptDynamic::enqueue” – virtual function override intended?
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/allClassNMSOpt.cu(139): warning: variable “result_active_count” was set but never used
detected during instantiation of “ssdStatus_t nvinfer1::plugin::allClassNMSOpt_gpu<T_SCORE,T_BBOX>(cudaStream_t, int, int, int, int, float, __nv_bool, __nv_bool, void *, void *, void *, void *, void *, void *, void *, __nv_bool) [with T_SCORE=float, T_BBOX=float]”
(409): here
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/nmsPluginOpt.h(99): warning: function “nvinfer1::IPluginV2Ext::configurePlugin(const nvinfer1::Dims *, int, const nvinfer1::Dims *, int, const nvinfer1::DataType *, const nvinfer1::DataType *, const __nv_bool *, const __nv_bool *, nvinfer1::PluginFormat, int)” is hidden by “nvinfer1::plugin::DetectionOutputOptStatic::configurePlugin” – virtual function override intended?
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/nmsPluginOpt.h(115): warning: function “nvinfer1::IPluginV2::getOutputDimensions(int, const nvinfer1::Dims *, int)” is hidden by “nvinfer1::plugin::DetectionOutputOptDynamic::getOutputDimensions” – virtual function override intended?
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/NMSOptPlugin/src/nmsPluginOpt.h(118): warning: function “nvinfer1::IPluginV2Ext::configurePlugin(const nvinfer1::Dims *, int, const nvinfer1::Dims *, int, const nvinfer1::DataType *, const nvinfer1::DataType *, const __nv_bool *, const __nv_bool *, nvinfer1::PluginFormat, int)” is hidden by “nvinfer1::plugin::DetectionOutputOptDynamic::configurePlugin” – virtual function override intended?
…
…
…
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/RNNTOptPlugin/src/decoderPlugin.cu: In member function ‘virtual bool nvinfer1::plugin::RNNTDecoderPlugin::supportsFormatCombination(int, const nvinfer1::PluginTensorDesc*, int, int)’:
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/RNNTOptPlugin/src/decoderPlugin.cu:186:44: warning: ‘kNCHW’ is deprecated [-Wdeprecated-declarations]
if (inOut[pos].format != TensorFormat::kNCHW)
^~~~~
/usr/include/aarch64-linux-gnu/NvInferRuntimeCommon.h:259:1: note: declared here
kNCHW TRT_DEPRECATED_ENUM = kLINEAR, //!< Deprecated name of kLINEAR, provided for backwards compatibility
^ ~
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/RNNTOptPlugin/src/decoderPlugin.cu:186:44: warning: ‘kNCHW’ is deprecated [-Wdeprecated-declarations]
if (inOut[pos].format != TensorFormat::kNCHW)
^~~~~
/usr/include/aarch64-linux-gnu/NvInferRuntimeCommon.h:259:1: note: declared here
kNCHW TRT_DEPRECATED_ENUM = kLINEAR, //!< Deprecated name of kLINEAR, provided for backwards compatibility
^ ~
…
…
…
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/instanceNormalization3DPlugin/src/instanceNormalization3DPlugin.cu(253): error: enum “nvinfer1::TensorFormat” has no member “kDHWC8”
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/instanceNormalization3DPlugin/src/instanceNormalization3DPlugin.cu(254): error: enum “nvinfer1::TensorFormat” has no member “kCDHW32”
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/instanceNormalization3DPlugin/src/instanceNormalization3DPlugin.cu(362): error: enum “nvinfer1::TensorFormat” has no member “kDHWC8”
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/instanceNormalization3DPlugin/src/instanceNormalization3DPlugin.cu(363): error: enum “nvinfer1::TensorFormat” has no member “kCDHW32”
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/instanceNormalization3DPlugin/src/instanceNormalization3DPlugin.cu(454): error: enum “nvinfer1::TensorFormat” has no member “kDHWC8”
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/plugin/instanceNormalization3DPlugin/src/instanceNormalization3DPlugin.cu(459): error: enum “nvinfer1::TensorFormat” has no member “kCDHW32”
6 errors detected in the compilation of “/tmp/tmpxft_00002399_00000000-8_instanceNormalization3DPlugin.compute_75.cpp1.ii”.
CMakeFiles/instancenorm3dplugin.dir/build.make:82: recipe for target ‘CMakeFiles/instancenorm3dplugin.dir/src/instanceNormalization3DPlugin.cu.o’ failed
make[3]: *** [CMakeFiles/instancenorm3dplugin.dir/src/instanceNormalization3DPlugin.cu.o] Error 1
make[3]: *** Waiting for unfinished jobs…
make[3]: Leaving directory ‘/home/ubuntu/inference_results_v1.0/closed/NVIDIA/build/plugins/instanceNormalization3DPlugin’
CMakeFiles/Makefile2:95: recipe for target ‘CMakeFiles/instancenorm3dplugin.dir/all’ failed
make[2]: *** [CMakeFiles/instancenorm3dplugin.dir/all] Error 2
make[2]: Leaving directory ‘/home/ubuntu/inference_results_v1.0/closed/NVIDIA/build/plugins/instanceNormalization3DPlugin’
Makefile:103: recipe for target ‘all’ failed
make[1]: *** [all] Error 2
make[1]: Leaving directory ‘/home/ubuntu/inference_results_v1.0/closed/NVIDIA/build/plugins/instanceNormalization3DPlugin’
Makefile:464: recipe for target ‘build_plugins’ failed
make: *** [build_plugins] Error 2
Another error was found when running “make build_loadgen”:
Installing collected packages: mlperf-loadgen
Exception:
Traceback (most recent call last):
File “/usr/lib/python3/dist-packages/pip/basecommand.py”, line 215, in main
status = self.run(options, args)
File “/usr/lib/python3/dist-packages/pip/commands/install.py”, line 360, in run
prefix=options.prefix_path,
File “/usr/lib/python3/dist-packages/pip/req/req_set.py”, line 784, in install
**kwargs
File “/usr/lib/python3/dist-packages/pip/req/req_install.py”, line 851, in install
self.move_wheel_files(self.source_dir, root=root, prefix=prefix)
File “/usr/lib/python3/dist-packages/pip/req/req_install.py”, line 1064, in move_wheel_files
isolated=self.isolated,
File “/usr/lib/python3/dist-packages/pip/wheel.py”, line 247, in move_wheel_files
prefix=prefix,
File “/usr/lib/python3/dist-packages/pip/locations.py”, line 153, in distutils_scheme
i.finalize_options()
File “/usr/lib/python3.6/distutils/command/install.py”, line 274, in finalize_options
raise DistutilsOptionError("can’t combine user with prefix, "
distutils.errors.DistutilsOptionError: can’t combine user with prefix, exec_prefix/home, or install_(plat)base
Makefile:498: recipe for target ‘build_loadgen’ failed
Again, “make buid_harness” errors found
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h: In function ‘int64_t lwis::volume(const nvinfer1::Dims&, const nvinfer1::TensorFormat&, bool)’:
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h:83:38: error: ‘kDHWC8’ is not a member of ‘nvinfer1::TensorFormat’
case nvinfer1::TensorFormat::kDHWC8: spv = 8; channelDim = d_new.nbDims - 4; break;
^~~~~~
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h:86:38: error: ‘kCDHW32’ is not a member of ‘nvinfer1::TensorFormat’
case nvinfer1::TensorFormat::kCDHW32: spv = 32; channelDim = d_new.nbDims - 4; break;
^~~~~~~
In file included from /home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/harness_bert/bert_server.h:34:0,
from /home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/harness_bert/bert_server.cc:17:
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h: In function ‘int64_t lwis::volume(const nvinfer1::Dims&, const nvinfer1::TensorFormat&, bool)’:
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h:83:38: error: ‘kDHWC8’ is not a member of ‘nvinfer1::TensorFormat’
case nvinfer1::TensorFormat::kDHWC8: spv = 8; channelDim = d_new.nbDims - 4; break;
^~~~~~
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h:86:38: error: ‘kCDHW32’ is not a member of ‘nvinfer1::TensorFormat’
case nvinfer1::TensorFormat::kCDHW32: spv = 32; channelDim = d_new.nbDims - 4; break;
^~~~~~~
In file included from /home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/harness_bert/bert_server.h:34:0,
from /home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/harness_bert/bert_core_vs.h:21,
from /home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/harness_bert/bert_core_vs.cc:17:
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h: In function ‘int64_t lwis::volume(const nvinfer1::Dims&, const nvinfer1::TensorFormat&, bool)’:
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h:83:38: error: ‘kDHWC8’ is not a member of ‘nvinfer1::TensorFormat’
case nvinfer1::TensorFormat::kDHWC8: spv = 8; channelDim = d_new.nbDims - 4; break;
^~~~~~
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h:86:38: error: ‘kCDHW32’ is not a member of ‘nvinfer1::TensorFormat’
case nvinfer1::TensorFormat::kCDHW32: spv = 32; channelDim = d_new.nbDims - 4; break;
^~~~~~~
In file included from /home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/harness_bert/bert_server.h:34:0,
from /home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/harness_bert/main_bert.cc:26:
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h: In function ‘int64_t lwis::volume(const nvinfer1::Dims&, const nvinfer1::TensorFormat&, bool)’:
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h:83:38: error: ‘kDHWC8’ is not a member of ‘nvinfer1::TensorFormat’
case nvinfer1::TensorFormat::kDHWC8: spv = 8; channelDim = d_new.nbDims - 4; break;
^~~~~~
/home/ubuntu/inference_results_v1.0/closed/NVIDIA/code/harness/lwis/include/lwis_buffers.h:86:38: error: ‘kCDHW32’ is not a member of ‘nvinfer1::TensorFormat’
case nvinfer1::TensorFormat::kCDHW32: spv = 32; channelDim = d_new.nbDims - 4; break;
^~~~~~~
CMakeFiles/harness_bert.dir/build.make:82: recipe for target ‘CMakeFiles/harness_bert.dir/harness_bert/main_bert.cc.o’ failed
make[3]: *** [CMakeFiles/harness_bert.dir/harness_bert/main_bert.cc.o] Error 1
make[3]: *** Waiting for unfinished jobs…
CMakeFiles/harness_bert.dir/build.make:108: recipe for target ‘CMakeFiles/harness_bert.dir/harness_bert/bert_core_vs.cc.o’ failed
make[3]: *** [CMakeFiles/harness_bert.dir/harness_bert/bert_core_vs.cc.o] Error 1
CMakeFiles/harness_bert.dir/build.make:95: recipe for target ‘CMakeFiles/harness_bert.dir/harness_bert/bert_server.cc.o’