This is the next error I hit after applying the fix from https://devtalk.nvidia.com/default/topic/1069559/tensorrt/check-allinputdimensionsspecified-for-second-profile-fail/post/5419783/#5419783
Error:
[E] [TRT] Parameter check failed at: engine.cpp::enqueueV2::466, condition: bindings[x] != nullptr
My code:
#include "NvInfer.h"
#include "NvOnnxParser.h"
#include "NvUtils.h"
#include "common/buffers.h"
#include "common/logger.h"

#include <functional>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

using namespace nvinfer1;
// Path of the ONNX model parsed in main(); resolved relative to the working directory.
std::string model_path = "detection_model.onnx";
// Copy the first nbDims entries of dims.d into v; v is resized to match.
void convert_dims_to_vect(const nvinfer1::Dims& dims, std::vector<int>& v){
    v.assign(dims.d, dims.d + dims.nbDims);
}
// Resolve the fully-specified runtime shape of every tensor for the profile
// this context is bound to. Bindings of profile k > 0 carry a " [profile k]"
// suffix in their names; profile 0 binding names are unsuffixed.
void make_explicit_shapes(IExecutionContext* context,const std::vector<std::string>& tensorNames, std::vector<std::vector<int>>& explicit_shapes){
    const int profile = context->getOptimizationProfile();
    const std::string suffix =
        (profile == 0) ? std::string{} : " [profile " + std::to_string(profile) + "]";

    explicit_shapes.clear();
    explicit_shapes.reserve(tensorNames.size());
    for (const auto& name : tensorNames){
        const int binding = context->getEngine().getBindingIndex((name + suffix).c_str());
        std::vector<int> shape;
        convert_dims_to_vect(context->getBindingDimensions(binding), shape);
        explicit_shapes.push_back(std::move(shape));
    }
}
int main(int argc, char** argv) {
auto builder = createInferBuilder(gLogger);
auto config = builder->createBuilderConfig();
Dims4 dims1(1,10,10,1);
Dims4 dims2(1,100,100,1);
Dims4 dims3(1,200,200,1);
std::string input_name = "fts_input_images:0";
for (int i=0; i<2; ++i){
auto profile = builder->createOptimizationProfile();
profile->setDimensions(input_name.c_str(), OptProfileSelector::kMIN, dims1);
profile->setDimensions(input_name.c_str(), OptProfileSelector::kOPT, dims2);
profile->setDimensions(input_name.c_str(), OptProfileSelector::kMAX, dims3);
config->addOptimizationProfile(profile);
}
auto network = builder->createNetworkV2(1U << static_cast<int>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
auto parser = nvonnxparser::createParser(*network, gLogger);
parser->parseFromFile(model_path.c_str(), 3);
auto engine = builder->buildEngineWithConfig(*network,*config);
std::vector<std::string> tensorNames;
for (int i=0; i<engine->getNbBindings(); ++i){
std::string name(engine->getBindingName(i));
if (name.find("[profile")==-1){
tensorNames.emplace_back(name);
}
}
std::vector<IExecutionContext*> contexts;
for (int i=0; i<2; ++i){
contexts.emplace_back(engine->createExecutionContext());
auto context = contexts.back();
context->setOptimizationProfile(i);
// std::cout<<"allInputDimensionsSpecified: "<<context->allInputDimensionsSpecified()<<"\n";
int index;
if (i==0)
index = engine->getBindingIndex((input_name).c_str());
else
index = engine->getBindingIndex((input_name+" [profile "+std::to_string(i)+"]").c_str());
context->setBindingDimensions(index, dims2);
// std::cout<<"allInputDimensionsSpecified must equal 1: "<<context->allInputDimensionsSpecified()<<"\n";
std::vector<std::vector<int>> explicit_shapes;
make_explicit_shapes(context, tensorNames, explicit_shapes);
std::vector<samplesCommon::DeviceBuffer> deviceBuffers;
std::vector<samplesCommon::HostBuffer> hostBuffers;
for (int i=0; i<tensorNames.size(); ++i){
size_t allocationSize = std::accumulate(explicit_shapes[i].begin(), explicit_shapes[i].end(), 1, std::multiplies<int>()) * 4;
hostBuffers.emplace_back(allocationSize);
// std::cout<<"allocationSize: "<<allocationSize<<"\n";
deviceBuffers.emplace_back(allocationSize);
}
std::vector<void*> mDeviceBindings;
for (auto& buffer:(deviceBuffers)){
// std::cout<<buffer.data()<<" buffer\n";
mDeviceBindings.emplace_back(buffer.data());
}
cudaStream_t stream;
CHECK(cudaStreamCreate(&stream));
if (!context->enqueueV2(mDeviceBindings.data(), stream, nullptr)){
std::cout<<"error when run graph TensorRT\n";
}
cudaStreamSynchronize(stream);
}
}
My model: https://1drv.ms/u/s!AhFk3ICqlZI2irgOxoSOSIY80QLWHA?e=5idBBf
How can I fix this? I am using TensorRT 7.