采用ONNX格式的deeplabv3+网络进行语义分割的C++代码
时间: 2024-03-08 14:48:30 浏览: 126
以下是使用ONNX格式的deeplabv3+网络进行语义分割的C++代码示例:
```c++
#include <iostream>
#include <string>
#include <vector>

#include <onnxruntime_cxx_api.h>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
using namespace Ort;
int main()
{
// 加载ONNX模型
Env env(ORT_LOGGING_LEVEL_WARNING, "test");
SessionOptions session_options;
session_options.SetIntraOpNumThreads(1);
session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
session_options.SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL);
session_options.SetLogSeverityLevel(4);
session_options.SetOptimizationLevel(OptimizationLevel::ORT_ENABLE_EXTENDED);
session_options.SetInterOpNumThreads(1);
session_options.SetSessionGraphOptimizationLevel(SessionGraphOptimizationLevel::ORT_ENABLE_ALL);
session_options.SetSessionLogLevel(ORT_LOGGING_LEVEL_WARNING);
session_options.SetCPUExecutionProviderOptions(0, {{"openmp_threads", "1"}});
OrtSession session(env, "deeplabv3plus.onnx", session_options);
// 获取输入输出信息
auto input_info = session.GetInputTypeInfo(0);
auto output_info = session.GetOutputTypeInfo(0);
auto input_name = input_info.GetOnnxName();
auto output_name = output_info.GetOnnxName();
auto input_dims = input_info.GetTensorTypeAndShapeInfo().GetShape();
auto output_dims = output_info.GetTensorTypeAndShapeInfo().GetShape();
auto input_type = input_info.GetTensorElementType();
// 打印输入输出信息
cout << "input_name: " << input_name << endl;
cout << "output_name: " << output_name << endl;
cout << "input_dims: ";
for (auto dim : input_dims)
cout << dim << " ";
cout << endl;
cout << "output_dims: ";
for (auto dim : output_dims)
cout << dim << " ";
cout << endl;
cout << "input_type: " << input_type << endl;
// 加载测试图像
Mat image = imread("test.png");
// 图像预处理
int img_h = input_dims[2];
int img_w = input_dims[3];
Mat input_image;
resize(image, input_image, Size(img_w, img_h));
input_image.convertTo(input_image, CV_32FC3, 1.0 / 255.0);
Mat mean = (Mat_<float>(1, 1, 3) << 0.485, 0.456, 0.406);
Mat std = (Mat_<float>(1, 1, 3) << 0.229, 0.224, 0.225);
subtract(input_image, mean, input_image);
divide(input_image, std, input_image);
input_image = input_image.t();
// 创建输入tensor
vector<int64_t> input_shape = {1, 3, img_h, img_w};
auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeDefault);
OrtValue input_tensor = OrtValue::CreateTensor(memory_info, input_image.data, input_image.total() * input_image.elemSize(), input_shape.data(), input_shape.size(), input_type);
// 执行推理
auto output_tensors = session.Run(Ort::RunOptions{nullptr}, input_name.c_str(), &input_tensor, 1, &output_name, 1);
// 获取输出tensor
auto output_tensor = output_tensors.front().Get<Tensor>();
auto output_data = output_tensor.Data<float>();
// 后处理
Mat output_image(img_h, img_w, CV_8UC1);
for (int i = 0; i < img_h; i++)
{
for (int j = 0; j < img_w; j++)
{
int idx = i * img_w + j;
output_image.at<uchar>(i, j) = output_data[idx] > 0.5 ? 255 : 0; // 阈值为0.5,二值化
}
}
// 显示结果
imshow("input", image);
imshow("output", output_image);
waitKey();
return 0;
}
```
需要注意的是,该代码仅作为示例,具体实现可能会因模型、输入输出信息等因素而有所不同。在实际使用中,需要根据具体情况进行调整和优化。
阅读全文