Converting a yolov8 ONNX model to TensorRT format with C++
Author: 爱喝生椰的程序员
This article walks through how to convert a yolov8 ONNX model to TensorRT format with C++. The example code is explained in detail; interested readers can follow along.
When using TensorRT for accelerated inference, the ONNX model must first be converted into TensorRT's engine format. Below is the C++ conversion code together with the corresponding CMakeLists.txt file.
Operating system: Ubuntu 20.04
C++ code:
// main.cpp
#include <iostream>
#include <memory>
#include <fstream>
#include <assert.h>

#include "NvInfer.h"
#include "NvOnnxParser.h"
#include "common.h"

class Logger : public nvinfer1::ILogger
{
    void log(Severity severity, const char* msg) noexcept override
    {
        // suppress info-level messages
        if (severity <= Severity::kWARNING)
            std::cout << msg << std::endl;
    }
} logger;

int main(int argc, char** argv)
{
    if (argc != 2)
    {
        std::cerr << "usage: ./build [onnx_file_path]" << std::endl;
        return -1;
    }
    // Path to the ONNX file
    char* onnx_file_path = argv[1];

    // ========== 1. Create the builder ==========
    auto builder = std::unique_ptr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(logger));
    if (!builder)
    {
        std::cerr << "Failed to create builder" << std::endl;
        return -1;
    }

    // ========== 2. Create the network (explicit batch) ==========
    const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    auto network = std::unique_ptr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(explicitBatch));
    if (!network)
    {
        std::cerr << "Failed to create network" << std::endl;
        return -1;
    }

    // ========== 3. Create the ONNX parser and parse the model ==========
    auto parser = std::unique_ptr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, logger));
    // Parse the ONNX file with parseFromFile
    auto parsed = parser->parseFromFile(onnx_file_path, static_cast<int>(nvinfer1::ILogger::Severity::kWARNING));
    if (!parsed)
    {
        std::cerr << "Failed to parse onnx file" << std::endl;
        return -1;
    }

    // Configure the input dimensions via an optimization profile
    auto input = network->getInput(0);
    auto profile = builder->createOptimizationProfile();
    profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMIN, nvinfer1::Dims4{1, 3, 960, 960}); // minimum shape
    profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT, nvinfer1::Dims4{1, 3, 960, 960}); // optimal shape
    profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMAX, nvinfer1::Dims4{1, 3, 960, 960}); // maximum shape

    // ========== 4. Create the builder config ==========
    auto config = std::unique_ptr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
    if (!config)
    {
        std::cerr << "Failed to create config" << std::endl;
        return -1;
    }
    config->addOptimizationProfile(profile);
    // Set the precision to FP16; INT8 would additionally require a calibrator
    config->setFlag(nvinfer1::BuilderFlag::kFP16);
    // Set the maximum batch size (deprecated and ignored for explicit-batch networks)
    builder->setMaxBatchSize(1);
    // Set the maximum workspace size (1 GiB)
    config->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, 1 << 30);

    // Create a CUDA stream used while building the profile
    auto profileStream = samplesCommon::makeCudaStream();
    if (!profileStream)
    {
        return -1;
    }
    config->setProfileStream(*profileStream);

    // ========== 5. Build the serialized engine ==========
    auto plan = std::unique_ptr<nvinfer1::IHostMemory>(builder->buildSerializedNetwork(*network, *config));
    if (!plan)
    {
        std::cerr << "Failed to create engine" << std::endl;
        return -1;
    }

    // ========== 6. Save the serialized engine to disk ==========
    std::ofstream engine_file("./yolov8x.engine", std::ios::binary);
    assert(engine_file.is_open() && "Failed to open engine file");
    engine_file.write((char*)plan->data(), plan->size());
    engine_file.close();

    std::cout << "Engine build success!" << std::endl;
    return 0;
}
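Once the engine file has been written out, it can be deserialized at inference time with the TensorRT runtime API. The following is a minimal sketch (not part of the original article's code) of how the generated yolov8x.engine might be loaded; the file name and the Logger class are assumed to match the ones above, and no actual inference is performed.

// load_engine.cpp -- sketch: deserialize the engine built by main.cpp
#include <fstream>
#include <iostream>
#include <iterator>
#include <memory>
#include <vector>
#include "NvInfer.h"

// Same minimal logger as in main.cpp
class Logger : public nvinfer1::ILogger
{
    void log(Severity severity, const char* msg) noexcept override
    {
        if (severity <= Severity::kWARNING)
            std::cout << msg << std::endl;
    }
} logger;

int main()
{
    // Read the serialized engine from disk into memory
    std::ifstream engine_file("./yolov8x.engine", std::ios::binary);
    if (!engine_file)
    {
        std::cerr << "Failed to open engine file" << std::endl;
        return -1;
    }
    std::vector<char> engine_data((std::istreambuf_iterator<char>(engine_file)),
                                  std::istreambuf_iterator<char>());

    // Deserialize into an ICudaEngine and create an execution context
    auto runtime = std::unique_ptr<nvinfer1::IRuntime>(nvinfer1::createInferRuntime(logger));
    auto engine = std::unique_ptr<nvinfer1::ICudaEngine>(
        runtime->deserializeCudaEngine(engine_data.data(), engine_data.size()));
    if (!engine)
    {
        std::cerr << "Failed to deserialize engine" << std::endl;
        return -1;
    }
    auto context = std::unique_ptr<nvinfer1::IExecutionContext>(engine->createExecutionContext());

    std::cout << "Engine loaded, number of IO tensors: " << engine->getNbIOTensors() << std::endl;
    return 0;
}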
CMakeLists.txt:
cmake_minimum_required(VERSION 3.10)
project(TensorRT_Test LANGUAGES CXX CUDA)

set(CMAKE_CUDA_STANDARD 14)
set(CMAKE_CXX_STANDARD 14)

# Header search paths for CUDA and TensorRT
include_directories(/usr/local/cuda-11.8/include)
include_directories(/xxx/tensorRT/TensorRT-8.6.1.6/include)
include_directories(/xxx/tensorRT/TensorRT-8.6.1.6/samples/common/)

# Library search paths for CUDA and TensorRT
link_directories(/usr/local/cuda-11.8/lib64)
link_directories(/xxx/tensorRT/TensorRT-8.6.1.6/lib)

add_executable(build main.cpp)
target_link_libraries(build nvinfer nvonnxparser cudart)
Note:
The CUDA and TensorRT include and library paths in CMakeLists.txt must be changed to match your own installation.
Usage:
Taking yolov8 as an example, building with cmake produces the build executable. Run the following command and wait for the yolov8x.engine file to be generated:
./build <onnx_path>
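For reference, a typical out-of-source CMake build and conversion run might look like the sequence below. This is only a sketch: the build directory name and the ONNX model path are placeholders to be adapted to your own setup.

# configure and compile (the directory name "out" is an assumption)
mkdir -p out && cd out
cmake ..
make
# convert the ONNX model to a TensorRT engine (path is a placeholder)
./build /path/to/yolov8x.onnx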
This concludes the article on converting a yolov8 ONNX model to TensorRT format with C++.