用C++写一个LeNet-5识别手写数字的程序并带注释
时间: 2024-03-05 21:49:35 浏览: 118
好的,下面是一个简单的LeNet-5识别手写数字的C++程序,带有注释解释每个部分的功能和作用。代码仅供参考。
```c++
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
using namespace std;
// 定义LeNet-5的每一层的结构
// One convolution layer: geometry hyper-parameters plus learned parameters.
struct ConvLayer {
    // Channel counts, square-kernel side length, stride and zero-padding (pixels).
    int in_channels, out_channels, kernel_size, stride, padding;
    // weights[out_channel][in_channel][ky * kernel_size + kx] — each kernel
    // is stored row-major in a flat vector of kernel_size*kernel_size values.
    vector<vector<vector<double>>> weights;
    // One bias per output channel.
    vector<double> bias;
};
// Max-pooling layer: a kernel_size x kernel_size window slid with the given
// stride over each channel independently. No learned parameters.
struct PoolLayer {
    int kernel_size, stride;
};
// Fully-connected layer: output = weights * input + bias.
struct FCLayer {
    int in_features, out_features;
    // weights[out_feature][in_feature] — one row per output neuron.
    vector<vector<double>> weights;
    // One bias per output feature.
    vector<double> bias;
};
// 定义LeNet-5的整体结构
class LeNet5 {
private:
ConvLayer conv1, conv2;
PoolLayer pool1, pool2;
FCLayer fc1, fc2, fc3;
// 激活函数
double relu(double x) {
return max(0.0, x);
}
// Softmax函数
vector<double> softmax(vector<double> x) {
vector<double> result(x.size());
double sum = 0.0;
for (int i = 0; i < x.size(); i++) {
result[i] = exp(x[i]);
sum += result[i];
}
for (int i = 0; i < x.size(); i++) {
result[i] = result[i] / sum;
}
return result;
}
// 卷积运算
vector<vector<vector<double>>> convolve(vector<vector<vector<double>>> input, ConvLayer conv) {
int in_height = input.size(), in_width = input[0].size();
int out_height = (in_height - conv.kernel_size + 2 * conv.padding) / conv.stride + 1;
int out_width = (in_width - conv.kernel_size + 2 * conv.padding) / conv.stride + 1;
vector<vector<vector<double>>> output(conv.out_channels, vector<vector<double>>(out_height, vector<double>(out_width, 0.0)));
for (int i = 0; i < conv.out_channels; i++) {
for (int j = 0; j < out_height; j++) {
for (int k = 0; k < out_width; k++) {
double sum = 0.0;
for (int l = 0; l < conv.in_channels; l++) {
for (int m = 0; m < conv.kernel_size; m++) {
for (int n = 0; n < conv.kernel_size; n++) {
int p = j * conv.stride + m - conv.padding;
int q = k * conv.stride + n - conv.padding;
if (p >= 0 && p < in_height && q >= 0 && q < in_width) {
sum += input[l][p][q] * conv.weights[i][l][m * conv.kernel_size + n];
}
}
}
}
sum += conv.bias[i];
output[i][j][k] = relu(sum);
}
}
}
return output;
}
// 池化运算
vector<vector<vector<double>>> pool(vector<vector<vector<double>>> input, PoolLayer pool) {
int in_height = input[0].size(), in_width = input[0][0].size();
int out_height = (in_height - pool.kernel_size) / pool.stride + 1;
int out_width = (in_width - pool.kernel_size) / pool.stride + 1;
vector<vector<vector<double>>> output(input.size(), vector<vector<double>>(out_height, vector<double>(out_width, 0.0)));
for (int i = 0; i < input.size(); i++) {
for (int j = 0; j < out_height; j++) {
for (int k = 0; k < out_width; k++) {
double max_val = -1e9;
for (int l = 0; l < pool.kernel_size; l++) {
for (int m = 0; m < pool.kernel_size; m++) {
int p = j * pool.stride + l;
int q = k * pool.stride + m;
if (input[i][p][q] > max_val) {
max_val = input[i][p][q];
}
}
}
output[i][j][k] = max_val;
}
}
}
return output;
}
// 全连接层运算
vector<double> fc(vector<double> input, FCLayer fc) {
vector<double> output(fc.out_features, 0.0);
for (int i = 0; i < fc.out_features; i++) {
double sum = 0.0;
for (int j = 0; j < fc.in_features; j++) {
sum += input[j] * fc.weights[i][j];
}
sum += fc.bias[i];
output[i] = relu(sum);
}
return output;
}
public:
LeNet5() {
// 初始化每一层的结构
conv1.in_channels = 1;
conv1.out_channels = 6;
conv1.kernel_size = 5;
conv1.stride = 1;
conv1.padding = 0;
conv1.weights = vector<vector<vector<double>>>(conv1.out_channels, vector<vector<double>>(conv1.in_channels, vector<double>(conv1.kernel_size * conv1.kernel_size, 0.0)));
conv1.bias = vector<double>(conv1.out_channels, 0.0);
conv2.in_channels = 6;
conv2.out_channels = 16;
conv2.kernel_size = 5;
conv2.stride = 1;
conv2.padding = 0;
conv2.weights = vector<vector<vector<double>>>(conv2.out_channels, vector<vector<double>>(conv2.in_channels, vector<double>(conv2.kernel_size * conv2.kernel_size, 0.0)));
conv2.bias = vector<double>(conv2.out_channels, 0.0);
pool1.kernel_size = 2;
pool1.stride = 2;
pool2.kernel_size = 2;
pool2.stride = 2;
fc1.in_features = 16 * 5 * 5;
fc1.out_features = 120;
fc1.weights = vector<vector<double>>(fc1.out_features, vector<double>(fc1.in_features, 0.0));
fc1.bias = vector<double>(fc1.out_features, 0.0);
fc2.in_features = 120;
fc2.out_features = 84;
fc2.weights = vector<vector<double>>(fc2.out_features, vector<double>(fc2.in_features, 0.0));
fc2.bias = vector<double>(fc2.out_features, 0.0);
fc3.in_features = 84;
fc3.out_features = 10;
fc3.weights = vector<vector<double>>(fc3.out_features, vector<double>(fc3.in_features, 0.0));
fc3.bias = vector<double>(fc3.out_features, 0.0);
}
// 加载权重参数
void load_weights(string filename) {
ifstream fin(filename);
for (int i = 0; i < conv1.out_channels; i++) {
for (int j = 0; j < conv1.in_channels; j++) {
for (int k = 0; k < conv1.kernel_size * conv1.kernel_size; k++) {
fin >> conv1.weights[i][j][k];
}
}
}
for (int i = 0; i < conv1.out_channels; i++) {
fin >> conv1.bias[i];
}
for (int i = 0; i < conv2.out_channels; i++) {
for (int j = 0; j < conv2.in_channels; j++) {
for (int k = 0; k < conv2.kernel_size * conv2.kernel_size; k++) {
fin >> conv2.weights[i][j][k];
}
}
}
for (int i = 0; i < conv2.out_channels; i++) {
fin >> conv2.bias[i];
}
for (int i = 0; i < fc1.out_features; i++) {
for (int j = 0; j < fc1.in_features; j++) {
fin >> fc1.weights[i][j];
}
}
for (int i = 0; i < fc1.out_features; i++) {
fin >> fc1.bias[i];
}
for (int i = 0; i < fc2.out_features; i++) {
for (int j = 0; j < fc2.in_features; j++) {
fin >> fc2.weights[i][j];
}
}
for (int i = 0; i < fc2.out_features; i++) {
fin >> fc2.bias[i];
}
for (int i = 0; i < fc3.out_features; i++) {
for (int j = 0; j < fc3.in_features; j++) {
fin >> fc3.weights[i][j];
}
}
for (int i = 0; i < fc3.out_features; i++) {
fin >> fc3.bias[i];
}
fin.close();
}
// 前向传播
vector<double> forward(vector<vector<double>> input) {
// 卷积层1
auto conv1_output = convolve(vector<vector<vector<double>>>{input}, conv1);
// 激活函数
for (int i = 0; i < conv1_output.size(); i++) {
for (int j = 0; j < conv1_output[i].size(); j++) {
for (int k = 0; k < conv1_output[i][j].size(); k++) {
conv1_output[i][j][k] = relu(conv1_output[i][j][k]);
}
}
}
// 池化层1
auto pool1_output = pool(conv1_output, pool1);
// 卷积层2
auto conv2_output = convolve(pool1_output, conv2);
// 激活函数
for (int i = 0; i < conv2_output.size(); i++) {
for (int j = 0; j < conv2_output[i].size(); j++) {
for (int k = 0; k < conv2_output[i][j].size(); k++) {
conv2_output[i][j][k] = relu(conv2_output[i][j][k]);
}
}
}
// 池化层2
auto pool2_output = pool(conv2_output, pool2);
// 将池化层的输出展平
vector<double> fc_input(pool2_output.size() * pool2_output[0].size() * pool2_output[0][0].size());
int idx = 0;
for (int i = 0; i < pool2_output.size(); i++) {
for (int j = 0; j < pool2_output[i].size(); j++) {
for (int k = 0; k < pool2_output[i][j].size(); k++) {
fc_input[idx++] = pool2_output[i][j][k];
}
}
}
// 全连接层1
auto fc1_output = fc(fc_input, fc1);
// 全连接层2
auto fc2_output = fc(fc1_output, fc2);
// 全连接层3
auto fc3_output = fc(fc2_output, fc3);
// Softmax
return softmax(fc3_output);
}
};
// Entry point: load pretrained weights, classify every sample in
// test_data.txt, print each prediction and the overall accuracy.
// test_data.txt format: sample count, then per sample 28*28 pixel values
// followed by the integer ground-truth label.
int main() {
    LeNet5 model;
    model.load_weights("weights.txt");
    std::ifstream fin("test_data.txt");
    // BUG FIX: the original never checked the stream, silently evaluating
    // garbage when the file was missing.
    if (!fin) {
        std::cerr << "cannot open test_data.txt" << std::endl;
        return 1;
    }
    int num_samples = 0;
    fin >> num_samples;
    int correct = 0;
    for (int i = 0; i < num_samples; i++) {
        // Read one 28x28 grayscale image.
        std::vector<std::vector<double>> input(28, std::vector<double>(28, 0.0));
        for (int r = 0; r < 28; r++)
            for (int c = 0; c < 28; c++)
                fin >> input[r][c];
        const std::vector<double> output = model.forward(input);
        // argmax over the 10 class probabilities = predicted digit.
        int pred = 0;
        for (std::size_t j = 1; j < output.size(); j++)
            if (output[j] > output[pred])
                pred = static_cast<int>(j);
        std::cout << "Predicted: " << pred << std::endl;
        int label;
        fin >> label;
        if (pred == label)
            correct++;
    }
    // Guard against division by zero when the file holds no samples.
    if (num_samples > 0)
        std::cout << "Accuracy: " << static_cast<double>(correct) / num_samples << std::endl;
    return 0;
}
```
这个程序实现了一个简单的LeNet-5模型,可以识别手写数字。具体来说,它包含了卷积层、池化层和全连接层。在程序中,我们使用了一些结构体来表示每一层的结构,方便后续的实现和维护。在主函数中,我们首先加载了预先训练好的权重参数,然后读取测试数据,对每个样本进行前向传播,并计算模型的准确率。
阅读全文