context_->setDeviceMemory() segmentation fault

Description

I ran my application in Docker with `sudo docker run --runtime nvidia …`, but it gets a segmentation fault at `context_->setDeviceMemory`. Here is my code:
```cpp
bool TensorRT::InitFromEngine(std::string &model_data) {
  runtime_ = nvinfer1::createInferRuntime(*ilogger_ptr_);
  engine_ = runtime_->deserializeCudaEngine(model_data.data(),
                                            model_data.size(), nullptr);
  ASSERT_TRUE_AND_RETURN(engine_ != nullptr);

  // context_ = engine_->createExecutionContext();
  context_ = engine_->createExecutionContextWithoutDeviceMemory();
  ASSERT_TRUE_AND_RETURN(context_ != nullptr);
  return true;
}

bool TensorRT::InitFromModel(std::string &model_data, const BuildConfig &build_config) {
  nvinfer1::IBuilder *builder = nvinfer1::createInferBuilder(*ilogger_ptr_);
  const auto explicit_batch = 1U << static_cast<uint32_t>(
      nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
  nvinfer1::INetworkDefinition *network = builder->createNetworkV2(explicit_batch);

  nvonnxparser::IParser *parser = nvonnxparser::createParser(*network, *ilogger_ptr_);
  parser->parse(model_data.data(), model_data.size());
  for (int i = 0; i < parser->getNbErrors(); ++i) {
    ilogger_ptr_->log(nvinfer1::ILogger::Severity::kERROR,
                      parser->getError(i)->desc());
  }

  SetExtraNetworkOptions(network, build_config._input_infos,
                         build_config._output_infos);
  builder->setMaxBatchSize(build_config._max_batchsize);

  nvinfer1::IBuilderConfig *config = builder->createBuilderConfig();
  config->setMaxWorkspaceSize(1 << 20);
  SetExtraConfigOptions(config, builder, build_config._builder_flags,
                        build_config._profile_infos);

  engine_ = builder->buildEngineWithConfig(*network, *config);
  ASSERT_TRUE_AND_RETURN(engine_ != nullptr);

  config->destroy();
  network->destroy();
  builder->destroy();

  // context_ = engine_->createExecutionContext();
  context_ = engine_->createExecutionContextWithoutDeviceMemory();
  ASSERT_TRUE_AND_RETURN(context_ != nullptr);
  return true;
}

int TensorRT::Run(std::vector<void *> &devices,
                  const std::vector<std::vector<size_t>> &input_shapes,
                  const int batch_size) {
  for (size_t i = 0; i < input_shapes.size(); ++i) {
    nvinfer1::Dims dim;
    VectorToNvDims(input_shapes[i], dim);
    context_->setBindingDimensions(static_cast<int>(i), dim);
  }

  // Scratch memory for the execution context created without device memory.
  void *data = nullptr;
  cudaMalloc(&data, engine_->getDeviceMemorySize());
  context_->setDeviceMemory(data);

  bool status = context_->executeV2(devices.data());
  if (!status) {
    throw std::runtime_error("failed to execute tensorrt");
  }
  return 0;
}
```
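For reference, this is roughly how the allocation step could be error-checked; the `CheckCuda` helper and the standalone `AllocateContextMemory` function below are only illustrative sketches, not part of my actual code:

```cpp
#include <NvInfer.h>
#include <cuda_runtime_api.h>
#include <stdexcept>
#include <string>

// Hypothetical helper: turn a failed CUDA runtime call into an exception.
static void CheckCuda(cudaError_t err, const char *what) {
  if (err != cudaSuccess) {
    throw std::runtime_error(std::string(what) + ": " + cudaGetErrorString(err));
  }
}

// Error-checked variant of the allocation done in Run(); the returned pointer
// must stay valid for as long as the execution context may use it.
void *AllocateContextMemory(nvinfer1::ICudaEngine *engine,
                            nvinfer1::IExecutionContext *context) {
  void *data = nullptr;
  CheckCuda(cudaMalloc(&data, engine->getDeviceMemorySize()), "cudaMalloc");
  context->setDeviceMemory(data);
  return data;  // caller is responsible for cudaFree once the context is done
}
```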

Environment

TensorRT Version: 7.2.2
GPU Type: Tesla V100
Nvidia Driver Version: 495
CUDA Version: 11.1
CUDNN Version: 8
Operating System + Version: Ubuntu 18.04
Base Docker image: nvidia/cuda:11.1.1-cudnn8-devel-ubuntu18.04

Question

Can anyone help me solve this problem? If my code is correct, did I miss some configuration?

Hi,

Could you please try the latest TRT version container? If you still face this issue, please share the complete verbose logs with us for better debugging.

Also, please refer to the following samples.

Thank you.

Thanks for the reply. I have found the problem: I was not using the logger correctly. However, when I switch to a model with dynamic shapes, I get an error: the first time the program runs `context_->setDeviceMemory` (inferring the first picture) it works well, but when it runs `context_->setDeviceMemory` again (inferring the second picture) I get this error:
[error screenshot]

Do you know why this happens? (When I use trtexec to profile my model, it works well.)
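In case it is related, I am considering restructuring `Run` for the dynamic-shape case so that the context scratch memory is allocated only once, using the size reported by `getDeviceMemorySize()` (which, as I understand it, already covers the largest optimization profile), and reused across inferences instead of calling `cudaMalloc` every time. This is just a sketch; the `device_memory_` member is hypothetical:

```cpp
int TensorRT::Run(std::vector<void *> &devices,
                  const std::vector<std::vector<size_t>> &input_shapes,
                  const int batch_size) {
  for (size_t i = 0; i < input_shapes.size(); ++i) {
    nvinfer1::Dims dim;
    VectorToNvDims(input_shapes[i], dim);
    context_->setBindingDimensions(static_cast<int>(i), dim);
  }

  // Allocate the context scratch memory once and keep it for later calls;
  // device_memory_ is a hypothetical void* member initialised to nullptr
  // and freed with cudaFree in the destructor.
  if (device_memory_ == nullptr) {
    cudaMalloc(&device_memory_, engine_->getDeviceMemorySize());
    context_->setDeviceMemory(device_memory_);
  }

  bool status = context_->executeV2(devices.data());
  if (!status) {
    throw std::runtime_error("failed to execute tensorrt");
  }
  return 0;
}
```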

Hi,

Could you please share the issue repro model/script and the steps to reproduce it, so we can try from our end for better debugging?

Thank you.