/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//!
//! sampleOnnxMNIST.cpp
//! This file contains the implementation of the ONNX MNIST sample. It creates the network using
//! the MNIST onnx model.
//! It can be run with the following command line:
//! Command: ./sample_onnx_mnist [-h or --help] [-d=/path/to/data/dir or --datadir=/path/to/data/dir]
//! [--useDLACore=<int>]
//!

#include "argsParser.h"
#include "buffers.h"
#include "common.h"
#include "logger.h"
#include "parserOnnxConfig.h"

#include "NvInfer.h"
#include <cuda_runtime_api.h>

#include <atomic>
#include <chrono>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <thread>

using namespace nvinfer1;
using samplesCommon::SampleUniquePtr;

const std::string gSampleName = "TensorRT.sample_onnx_mnist";

//! Number of timed inference runs per worker.
const int RUNS = 1000;

//! \brief The SampleOnnxMNIST class implements the ONNX MNIST sample
//!
//! \details It creates the network using an ONNX model
//!
class SampleOnnxMNIST
{
public:
    SampleOnnxMNIST(const samplesCommon::OnnxSampleParams& params)
        : mParams(params)
        , mEngine(nullptr)
    {
    }

    //!
    //! \brief Function builds the network engine
    //!
    bool build();

    //!
    //! \brief Runs the TensorRT inference engine for this sample
    //!
    //! \return the total runtime of the timed inference loop in microseconds, or 0 on failure
    //!
    int infer();

private:
    samplesCommon::OnnxSampleParams mParams; //!< The parameters for the sample.

    nvinfer1::Dims mInputDims;  //!< The dimensions of the input to the network.
    nvinfer1::Dims mOutputDims; //!< The dimensions of the output to the network.
    int mNumber{0};             //!< The number to classify

    std::shared_ptr<nvinfer1::ICudaEngine> mEngine; //!< The TensorRT engine used to run the network

    //!
    //! \brief Parses an ONNX model for MNIST and creates a TensorRT network
    //!
    bool constructNetwork(SampleUniquePtr<nvinfer1::IBuilder>& builder,
        SampleUniquePtr<nvinfer1::INetworkDefinition>& network, SampleUniquePtr<nvinfer1::IBuilderConfig>& config,
        SampleUniquePtr<nvonnxparser::IParser>& parser);

    //!
    //! \brief Reads the input and stores the result in a managed buffer
    //!
    bool processInput(const samplesCommon::BufferManager& buffers);

    //!
    //! \brief Classifies digits and verifies the result
    //!
    bool verifyOutput(const samplesCommon::BufferManager& buffers);
};

//!
//! \brief Creates the network, configures the builder and creates the network engine
//!
//! \details This function creates the Onnx MNIST network by parsing the Onnx model and builds
//!          the engine that will be used to run MNIST (mEngine)
//!
//! \return true if the engine was created successfully and false otherwise
//!
bool SampleOnnxMNIST::build()
{
    auto builder = SampleUniquePtr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(sample::gLogger.getTRTLogger()));
    if (!builder)
    {
        return false;
    }

    const auto explicitBatch = 1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    auto network = SampleUniquePtr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(explicitBatch));
    if (!network)
    {
        return false;
    }

    auto config = SampleUniquePtr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
    if (!config)
    {
        return false;
    }

    auto parser
        = SampleUniquePtr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, sample::gLogger.getTRTLogger()));
    if (!parser)
    {
        return false;
    }

    auto constructed = constructNetwork(builder, network, config, parser);
    if (!constructed)
    {
        return false;
    }

    // CUDA stream used for profiling by the builder.
    auto profileStream = samplesCommon::makeCudaStream();
    if (!profileStream)
    {
        return false;
    }
    config->setProfileStream(*profileStream);

    SampleUniquePtr<IHostMemory> plan{builder->buildSerializedNetwork(*network, *config)};
    if (!plan)
    {
        return false;
    }

    SampleUniquePtr<IRuntime> runtime{createInferRuntime(sample::gLogger.getTRTLogger())};
    if (!runtime)
    {
        return false;
    }

    mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
        runtime->deserializeCudaEngine(plan->data(), plan->size()), samplesCommon::InferDeleter());
    if (!mEngine)
    {
        return false;
    }

    ASSERT(network->getNbInputs() == 1);
    mInputDims = network->getInput(0)->getDimensions();
    ASSERT(mInputDims.nbDims == 4);

    ASSERT(network->getNbOutputs() == 1);
    mOutputDims = network->getOutput(0)->getDimensions();
    ASSERT(mOutputDims.nbDims == 2);

    return true;
}

//!
//! \brief Uses an ONNX parser to create the Onnx MNIST Network and marks the
//!        output layers
//!
//! \param network Pointer to the network that will be populated with the Onnx MNIST network
//!
//! \param builder Pointer to the engine builder
//!
bool SampleOnnxMNIST::constructNetwork(SampleUniquePtr<nvinfer1::IBuilder>& builder,
    SampleUniquePtr<nvinfer1::INetworkDefinition>& network, SampleUniquePtr<nvinfer1::IBuilderConfig>& config,
    SampleUniquePtr<nvonnxparser::IParser>& parser)
{
    auto parsed = parser->parseFromFile(locateFile(mParams.onnxFileName, mParams.dataDirs).c_str(),
        static_cast<int>(sample::gLogger.getReportableSeverity()));
    if (!parsed)
    {
        return false;
    }

    if (mParams.fp16)
    {
        config->setFlag(BuilderFlag::kFP16);
    }
    if (mParams.int8)
    {
        config->setFlag(BuilderFlag::kINT8);
        samplesCommon::setAllDynamicRanges(network.get(), 127.0f, 127.0f);
    }

    samplesCommon::enableDLA(builder.get(), config.get(), mParams.dlaCore);

    return true;
}

//!
//! \brief Runs the TensorRT inference engine for this sample
//!
//! \details This function is the main execution function of the sample. It allocates the buffer,
//!          sets inputs and executes the engine, timing RUNS back-to-back executions.
//!
//! \return the total runtime of the timed loop in microseconds, or 0 on failure
//!
int SampleOnnxMNIST::infer()
{
    // Create RAII buffer manager object
    samplesCommon::BufferManager buffers(mEngine);

    auto context = SampleUniquePtr<nvinfer1::IExecutionContext>(mEngine->createExecutionContext());
    if (!context)
    {
        return 0;
    }

    // Read the input data into the managed buffers
    ASSERT(mParams.inputTensorNames.size() == 1);
    if (!processInput(buffers))
    {
        return 0;
    }

    // Memcpy from host input buffers to device input buffers
    buffers.copyInputToDevice();

    // Time RUNS synchronous executions; executeV2() blocks until the inference has completed.
    std::size_t total_duration = 0;
    for (int i = 0; i < RUNS; i++)
    {
        auto start = std::chrono::high_resolution_clock::now();
        bool status = context->executeV2(buffers.getDeviceBindings().data());
        auto stop = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
        total_duration += duration.count();
        if (!status)
        {
            return 0;
        }
    }

    // Memcpy from device output buffers to host output buffers
    buffers.copyOutputToHost();

    /*
    // Verify results
    if (!verifyOutput(buffers))
    {
        return false;
    }
    */

    return total_duration;
}
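
//!
//! NOTE: Illustrative sketch only; nothing in this sample calls it. The std::chrono timing in
//! infer() measures host-side wall time around the synchronous executeV2() call, which includes
//! launch overhead. If GPU-side time is of interest, CUDA events recorded around an asynchronous
//! enqueueV2() launch on an explicit stream are one alternative. The helper name and the
//! caller-provided stream parameter are assumptions, not part of the original sample.
//!
float timeSingleInferWithCudaEvents(nvinfer1::IExecutionContext& context, void** deviceBindings, cudaStream_t stream)
{
    cudaEvent_t start{nullptr};
    cudaEvent_t stop{nullptr};
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Record events around the asynchronous launch, then wait for the stop event to complete.
    cudaEventRecord(start, stream);
    context.enqueueV2(deviceBindings, stream, nullptr);
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop);

    // Elapsed GPU time between the two events, in milliseconds.
    float milliseconds{0.0f};
    cudaEventElapsedTime(&milliseconds, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return milliseconds;
}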
//!
//! \brief Reads the input and stores the result in a managed buffer
//!
bool SampleOnnxMNIST::processInput(const samplesCommon::BufferManager& buffers)
{
    const int inputH = mInputDims.d[2];
    const int inputW = mInputDims.d[3];

    // Read a random digit file
    srand(unsigned(time(nullptr)));
    std::vector<uint8_t> fileData(inputH * inputW);
    mNumber = rand() % 10;
    readPGMFile(locateFile(std::to_string(mNumber) + ".pgm", mParams.dataDirs), fileData.data(), inputH, inputW);

    // Print an ascii representation
    //
    // sample::gLogInfo << "Input:" << std::endl;
    // for (int i = 0; i < inputH * inputW; i++)
    // {
    //     sample::gLogInfo << (" .:-=+*#%@"[fileData[i] / 26]) << (((i + 1) % inputW) ? "" : "\n");
    // }
    // sample::gLogInfo << std::endl;

    // Normalize the 8-bit pixel values to [0, 1] and invert them before copying into the host buffer.
    float* hostDataBuffer = static_cast<float*>(buffers.getHostBuffer(mParams.inputTensorNames[0]));
    for (int i = 0; i < inputH * inputW; i++)
    {
        hostDataBuffer[i] = 1.0 - float(fileData[i] / 255.0);
    }

    return true;
}

//!
//! \brief Classifies digits and verifies the result
//!
//! \return whether the classification output matches expectations
//!
bool SampleOnnxMNIST::verifyOutput(const samplesCommon::BufferManager& buffers)
{
    const int outputSize = mOutputDims.d[1];
    float* output = static_cast<float*>(buffers.getHostBuffer(mParams.outputTensorNames[0]));
    float val{0.0f};
    int idx{0};

    // Calculate Softmax
    float sum{0.0f};
    for (int i = 0; i < outputSize; i++)
    {
        output[i] = exp(output[i]);
        sum += output[i];
    }

    sample::gLogInfo << "Output:" << std::endl;
    for (int i = 0; i < outputSize; i++)
    {
        output[i] /= sum;
        val = std::max(val, output[i]);
        if (val == output[i])
        {
            idx = i;
        }

        sample::gLogInfo << " Prob " << i << "  " << std::fixed << std::setw(5) << std::setprecision(4) << output[i]
                         << " "
                         << "Class " << i << ": " << std::string(int(std::floor(output[i] * 10 + 0.5f)), '*')
                         << std::endl;
    }
    sample::gLogInfo << std::endl;

    return idx == mNumber && val > 0.9f;
}

//!
//! \brief Initializes members of the params struct using the command line args
//!
samplesCommon::OnnxSampleParams initializeSampleParams(const samplesCommon::Args& args)
{
    samplesCommon::OnnxSampleParams params;
    if (args.dataDirs.empty()) // Use default directories if user hasn't provided directory paths
    {
        params.dataDirs.push_back("data/mnist/");
        params.dataDirs.push_back("data/samples/mnist/");
    }
    else // Use the data directory provided by the user
    {
        params.dataDirs = args.dataDirs;
    }
    params.onnxFileName = "mnist.onnx";
    params.inputTensorNames.push_back("Input3");
    params.outputTensorNames.push_back("Plus214_Output_0");
    params.dlaCore = args.useDLACore;
    params.int8 = args.runInInt8;
    params.fp16 = args.runInFp16;

    return params;
}

//!
//! \brief Prints the help information for running this sample
//!
void printHelpInfo()
{
    std::cout << "Usage: ./sample_onnx_mnist [-h or --help] [-d or --datadir=<path to data>] [--useDLACore=<int>]"
              << std::endl;
    std::cout << "--help          Display help information" << std::endl;
    std::cout << "--datadir       Specify path to a data directory, overriding the default. This option can be used "
                 "multiple times to add multiple directories. If no data directories are given, the default is to use "
                 "(data/samples/mnist/, data/mnist/)"
              << std::endl;
    std::cout << "--useDLACore=N  Specify a DLA engine for layers that support DLA. Value can range from 0 to n-1, "
                 "where n is the number of DLA engines on the platform."
              << std::endl;
    std::cout << "--int8          Run in Int8 mode." << std::endl;
    std::cout << "--fp16          Run in FP16 mode." << std::endl;
}
std::atomic<bool> READY_FLAG{false};

bool worker(int worker_id, const samplesCommon::Args& args)
{
    cudaSetDevice(worker_id);

    SampleOnnxMNIST sample(initializeSampleParams(args));

    sample::gLogInfo << "Building and running a GPU inference engine for Onnx MNIST" << std::endl;

    if (!sample.build())
    {
        return false;
    }

    // Warming up
    if (!sample.infer())
    {
        return false;
    }

    // Busy-wait so that all threads start their timed runs at the same time.
    while (!READY_FLAG)
    {
        std::this_thread::sleep_for(std::chrono::microseconds(10));
    }

    auto total_duration = sample.infer();
    auto mean_duration = static_cast<double>(total_duration) / static_cast<double>(RUNS);
    std::cout << "worker: " << worker_id << " - mean infer runtime (microseconds): " << mean_duration << std::endl;

    return true;
}

int main(int argc, char** argv)
{
    sample::setReportableSeverity(sample::Logger::Severity::kERROR);

    samplesCommon::Args args;
    bool argsOK = samplesCommon::parseArgs(args, argc, argv);
    if (!argsOK)
    {
        sample::gLogError << "Invalid arguments" << std::endl;
        printHelpInfo();
        return EXIT_FAILURE;
    }
    if (args.help)
    {
        printHelpInfo();
        return EXIT_SUCCESS;
    }

    int num_devices = 0;
    cudaGetDeviceCount(&num_devices);

    // One worker thread per visible CUDA device.
    std::vector<std::thread> threads;
    for (int i = 0; i < num_devices; i++)
    {
        threads.push_back(std::thread(worker, i, args));
    }

    // Wait for all threads to complete engine build and warm-up.
    std::this_thread::sleep_for(std::chrono::seconds(60));
    std::cout << "LAUNCH!" << std::endl;

    // LAUNCH!
    READY_FLAG = true;

    for (auto& thread : threads)
    {
        thread.join();
    }

    std::cout << "DONE" << std::endl;
    return 0;
}
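
//!
//! NOTE: Illustrative sketch only; main() above is unchanged. The fixed 60-second sleep assumes
//! every worker finishes build() and its warm-up run within that window. A shared readiness
//! counter would let main() release the workers as soon as the last one reports ready: each
//! worker would increment it after its warm-up infer(), and main() would wait on it before
//! setting READY_FLAG. The names READY_WORKERS and waitForWorkers are assumptions introduced
//! for illustration only.
//!
std::atomic<int> READY_WORKERS{0};

void waitForWorkers(int num_workers)
{
    // Poll until every worker has incremented READY_WORKERS after completing its warm-up run.
    while (READY_WORKERS.load() < num_workers)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
}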