#include "argsParser.h"
#include "buffers.h"
#include "common.h"
#include "logger.h"
#include "NvCaffeParser.h"
#include "NvInfer.h"

// NOTE(review): the original angle-bracket include targets were lost in
// extraction; the set below is reconstructed from usage (CUDA runtime,
// iostream/fstream, containers, OpenCV core/CUDA modules) — verify against
// the original build before committing.
#include <cuda_runtime_api.h>

#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

// utilities ----------------------------------------------------------------------------------------------------------
// class to log errors, warnings, and other information during the build and inference phases

// destroy TensorRT objects if something goes wrong
struct TRTDestroy
{
    template <class T>
    void operator()(T* obj) const
    {
        if (obj)
        {
            obj->destroy();
        }
    }
};

template <class T>
using TRTUniquePtr = std::unique_ptr<T, TRTDestroy>;

// calculate size of tensor (number of elements = product of all dimensions)
size_t getSizeByDim(const nvinfer1::Dims& dims)
{
    size_t size = 1;
    // nbDims is a (signed) int — iterate with int to avoid a signed/unsigned mismatch
    for (int i = 0; i < dims.nbDims; ++i)
    {
        size *= dims.d[i];
    }
    return size;
}

// deleter for raw TensorRT objects (calls the API's destroy(), not delete)
template <class T>
struct TrtDestroyer
{
    void operator()(T* t) const
    {
        if (t) // guard null, consistent with TRTDestroy above
        {
            t->destroy();
        }
    }
};

// unique_ptr flavor used for engine/context objects; deleter type inferred from
// the SampleUniquePtr(ptr, samplesCommon::InferDeleter()) construction in getEngine
template <class T>
using SampleUniquePtr = std::unique_ptr<T, samplesCommon::InferDeleter>;

template <class T>
SampleUniquePtr<T> makeUnique(T* t)
{
    return SampleUniquePtr<T>{t};
}

template <class T>
using TrtUniquePtr = std::unique_ptr<T, TrtDestroyer<T>>;

// preprocessing stage ------------------------------------------------------------------------------------------------
// Loads an image from disk, uploads it to the GPU, scales pixel values to [0, 1]
// and writes the result in planar CHW layout directly into gpu_input.
// gpu_input must point to device memory of at least C*H*W floats (from dims).
void preprocessImage(const std::string& image_path, float* gpu_input, const nvinfer1::Dims& dims)
{
    // read input image
    cv::Mat frame = cv::imread(image_path);
    if (frame.empty())
    {
        std::cerr << "Input image " << image_path << " load failed\n";
        return;
    }

    cv::cuda::GpuMat gpu_frame;
    // upload image to GPU
    gpu_frame.upload(frame);

    std::cout << dims.d[0] << "\t" << dims.d[1] << "\t" << dims.d[2] << "\t" << dims.d[3] << std::endl;

    // binding dims are NCHW: d[1]=channels, d[2]=height, d[3]=width
    auto input_width = dims.d[3];
    auto input_height = dims.d[2];
    auto channels = dims.d[1];
    auto input_size = cv::Size(input_width, input_height);
    // NOTE(review): the frame is never resized to input_size — this assumes the
    // input image already matches the network resolution (512x512); confirm or
    // add a cv::cuda::resize before the split.

    // normalize to [0, 1]
    cv::cuda::GpuMat flt_image;
    gpu_frame.convertTo(flt_image, CV_32FC3, 1.f / 255.f);

    // to tensor: split interleaved HWC into per-channel planes that alias the
    // destination buffer, so cv::cuda::split writes CHW directly into gpu_input
    std::vector<cv::cuda::GpuMat> chw;
    for (int i = 0; i < channels; ++i)
    {
        chw.emplace_back(cv::cuda::GpuMat(input_size, CV_32FC1, gpu_input + i * input_width * input_height));
    }
    cv::cuda::split(flt_image, chw);
}

// post-processing stage ----------------------------------------------------------------------------------------------
// Copies the 2-channel score map from the GPU, takes a per-pixel argmax over the
// two channels (class 1 wins -> 255, else 0) and writes the binary mask to fileName.
// Output dims are NCHW; height/width are taken from dims instead of hard-coding 512,
// which preserves the original behavior for the 512x512 model.
void postprocessResults(float* gpu_output, const nvinfer1::Dims& dims, int batch_size, std::string fileName)
{
    // copy results from GPU to CPU
    std::vector<float> cpu_output(getSizeByDim(dims) * batch_size, 0);
    cudaMemcpy(cpu_output.data(), gpu_output, cpu_output.size() * sizeof(float), cudaMemcpyDeviceToHost);

    const int height = dims.d[2];
    const int width = dims.d[3];
    const int area = height * width;

    cv::Mat mask(height, width, CV_8UC1, cv::Scalar(0));
    for (int ii = 0; ii < area; ii++)
    {
        // channel 0 scores occupy [0, area), channel 1 scores occupy [area, 2*area)
        if (cpu_output[ii] >= cpu_output[ii + area])
        {
            mask.at<uchar>(ii) = 0;
        }
        else
        {
            mask.at<uchar>(ii) = 255;
        }
    }
    cv::imwrite(fileName, mask);
}

// Deserializes a TensorRT engine from a file.
// Returns nullptr (and reports on err) if the file cannot be read or deserialized.
// DLACore == -1 means "no DLA core".
SampleUniquePtr<nvinfer1::ICudaEngine> getEngine(const std::string& engine, int DLACore, std::ostream& err)
{
    std::ifstream engineFile(engine, std::ios::binary);
    if (!engineFile)
    {
        err << "Error opening engine file: " << engine << std::endl;
        return nullptr;
    }

    // determine file size, then read the whole blob
    engineFile.seekg(0, engineFile.end);
    long int fsize = engineFile.tellg();
    engineFile.seekg(0, engineFile.beg);

    std::vector<char> engineData(fsize);
    engineFile.read(engineData.data(), fsize);
    if (!engineFile)
    {
        err << "Error loading engine file: " << engine << std::endl;
        return nullptr;
    }

    TrtUniquePtr<nvinfer1::IRuntime> runtime{createInferRuntime(sample::gLogger.getTRTLogger())};
    if (!runtime)
    {
        err << "Error creating TensorRT runtime" << std::endl;
        return nullptr;
    }
    if (DLACore != -1)
    {
        runtime->setDLACore(DLACore);
    }

    auto temp = runtime->deserializeCudaEngine(engineData.data(), fsize, nullptr);
    return SampleUniquePtr<nvinfer1::ICudaEngine>(temp, samplesCommon::InferDeleter());
}

// main pipeline ------------------------------------------------------------------------------------------------------
int main(int argc, char* argv[])
{
    std::string image_path = "D:/TensorRt/artefact_model_10x/model2/TRT_Output/1.png";
    std::string enginePath = "D:/TensorRt/artefact_model_10x/model2/TRT_Engine/model2.engine";
    std::string out_String = "D:/TensorRt/artefact_model_10x/model2/TRT_Output/1_out.png";

    auto mEngine = getEngine(enginePath, -1, std::cerr);
    if (!mEngine)
    {
        // BUG FIX: was `return false;` — that converts to 0, signalling SUCCESS
        // to the shell on failure; return a non-zero exit code instead
        return -1;
    }

    auto mContext = SampleUniquePtr<nvinfer1::IExecutionContext>(mEngine->createExecutionContext());
    if (!mContext)
    {
        return -1; // same fix as above
    }

    int batch_size = 1;

    // get sizes of input and output and allocate memory required for input data and for output data
    std::vector<nvinfer1::Dims> input_dims;  // we expect only one input
    std::vector<nvinfer1::Dims> output_dims; // and one output
    std::cout << "mEngine->getNbBindings() : " << mEngine->getNbBindings() << std::endl;

    std::vector<void*> buffers(mEngine->getNbBindings()); // buffers for input and output data
    // getNbBindings() returns int — iterate with int to avoid signed/unsigned mix
    for (int i = 0; i < mEngine->getNbBindings(); ++i)
    {
        auto bindDim = mEngine->getBindingDimensions(i);
        std::cout << "bindDim : " << bindDim << std::endl;
        auto binding_size = getSizeByDim(bindDim) * batch_size * sizeof(float);
        std::cout << "binding_size : " << binding_size << std::endl;
        cudaMalloc(&buffers[i], binding_size);
        //cudaMemset(&buffers[i], 0, binding_size);
        if (mEngine->bindingIsInput(i))
        {
            input_dims.emplace_back(mEngine->getBindingDimensions(i));
            std::cout << "mEngine->getBindingDimensions(i) : " << mEngine->getBindingDimensions(i) << std::endl;
        }
        else
        {
            output_dims.emplace_back(mEngine->getBindingDimensions(i));
            std::cout << "mEngine->getBindingDimensions(i) : " << mEngine->getBindingDimensions(i) << std::endl;
        }
    }
    if (input_dims.empty() || output_dims.empty())
    {
        std::cerr << "Expect at least one input and one output for network\n";
        return -1;
    }

    // NOTE(review): assumes binding 0 is the input and binding 1 is the output —
    // holds for this engine, but verify if the model changes
    preprocessImage(image_path, (float*)buffers[0], input_dims[0]);

    // inference
    auto out = mContext->enqueue(batch_size, buffers.data(), 0, nullptr);
    std::cout << "Out : " << out << std::endl;

    // postprocess results
    std::cout << "output_dims[0] : " << output_dims[0] << std::endl;
    postprocessResults((float*)buffers[1], output_dims[0], batch_size, out_String);

    for (void* buf : buffers)
    {
        cudaFree(buf);
    }
    mEngine.reset();
    input_dims.clear();
    output_dims.clear();
    buffers.clear();
    return 0;
}