bp代码c++
时间: 2023-07-09 13:18:04 浏览: 97
BP算法c++代码
下面是一个简单的C++实现BP算法的代码,假设我们要训练一个多层感知器(MLP)来实现二分类问题:
```c++
#include <iostream>
#include <vector>
#include <cmath>
using namespace std;
// Minimal multilayer perceptron: one hidden layer, sigmoid activations,
// trained sample-by-sample with error backpropagation (SGD, squared error).
class MLP
{
public:
    // Builds the network and fills every weight and bias with a uniform
    // random value in [0, 1).  rand() is never seeded here, so results
    // are reproducible unless the caller invokes srand().
    MLP(int inputSize, int hiddenSize, int outputSize)
    {
        for (int i = 0; i < hiddenSize; ++i)
        {
            vector<double> w;
            for (int j = 0; j < inputSize; ++j)
                w.push_back(rand() / double(RAND_MAX));
            m_w1.push_back(w);
            m_b1.push_back(rand() / double(RAND_MAX));
        }
        for (int i = 0; i < outputSize; ++i)
        {
            vector<double> w;
            for (int j = 0; j < hiddenSize; ++j)
                w.push_back(rand() / double(RAND_MAX));
            m_w2.push_back(w);
            m_b2.push_back(rand() / double(RAND_MAX));
        }
    }
    // Forward pass: returns the output-layer activations for input x.
    // The hidden-layer activations are cached in m_h because backward()
    // needs them to form the weight gradients.
    vector<double> forward(const vector<double>& x)
    {
        m_h.assign(m_w1.size(), 0.0);
        for (int i = 0; i < (int)m_h.size(); ++i)
        {
            double z = 0.0;
            for (int j = 0; j < (int)x.size(); ++j)
                z += m_w1[i][j] * x[j];
            z += m_b1[i];
            m_h[i] = sigmoid(z);
        }
        vector<double> y(m_w2.size());
        for (int i = 0; i < (int)y.size(); ++i)
        {
            double z = 0.0;
            for (int j = 0; j < (int)m_h.size(); ++j)
                z += m_w2[i][j] * m_h[j];
            z += m_b2[i];
            y[i] = sigmoid(z);
        }
        return y;
    }
    // One SGD step of backpropagation for the squared-error loss.
    //   x            - the input sample
    //   y            - the result of the matching forward(x) call
    //   t            - the target output
    //   learningRate - gradient-descent step size
    // Must be called right after forward(x): it relies on the hidden
    // activations m_h cached by that call.
    void backward(const vector<double>& x, const vector<double>& y, const vector<double>& t,
        double learningRate)
    {
        // Output-layer error term: dE/dz2 = (y - t) * sigmoid'(z2),
        // expressed through the activation as y * (1 - y).
        vector<double> delta2(m_w2.size());
        for (int i = 0; i < (int)delta2.size(); ++i)
            delta2[i] = (y[i] - t[i]) * sigmoidDerivative(y[i]);
        // Hidden-layer error term, backpropagated through m_w2.
        // BUG FIX: the derivative must be evaluated at the hidden
        // activation m_h[i]; the original used the bias m_b1[i].
        vector<double> delta1(m_w1.size());
        for (int i = 0; i < (int)delta1.size(); ++i)
        {
            double z = 0.0;
            for (int j = 0; j < (int)delta2.size(); ++j)
                z += m_w2[j][i] * delta2[j];
            delta1[i] = z * sigmoidDerivative(m_h[i]);
        }
        // Output-layer update.  BUG FIX: the gradient w.r.t. m_w2[i][j]
        // is delta2[i] * m_h[j] (hidden activation); the original used
        // m_w1[j][i], which is both the wrong quantity and reads out of
        // bounds whenever outputSize > inputSize.
        for (int i = 0; i < (int)m_w2.size(); ++i)
        {
            for (int j = 0; j < (int)m_w2[i].size(); ++j)
                m_w2[i][j] -= learningRate * delta2[i] * m_h[j];
            m_b2[i] -= learningRate * delta2[i];
        }
        // Hidden-layer update: gradient w.r.t. m_w1[i][j] is delta1[i] * x[j].
        for (int i = 0; i < (int)m_w1.size(); ++i)
        {
            for (int j = 0; j < (int)m_w1[i].size(); ++j)
                m_w1[i][j] -= learningRate * delta1[i] * x[j];
            m_b1[i] -= learningRate * delta1[i];
        }
    }
private:
    vector<vector<double>> m_w1; // hidden-layer weights [hidden][input]
    vector<double> m_b1;         // hidden-layer biases
    vector<vector<double>> m_w2; // output-layer weights [output][hidden]
    vector<double> m_b2;         // output-layer biases
    vector<double> m_h;          // hidden activations cached by the last forward()
    // Logistic sigmoid activation.
    double sigmoid(double x)
    {
        return 1.0 / (1.0 + exp(-x));
    }
    // Sigmoid derivative expressed via the activation value y = sigmoid(z).
    double sigmoidDerivative(double y)
    {
        return y * (1 - y);
    }
};
// Trains the network on the XOR truth table, then prints its prediction
// for every sample alongside the expected value.
int main()
{
    // XOR truth table: (input pair, expected output).
    const vector<pair<vector<double>, vector<double>>> trainData = {
        {{0, 0}, {0}},
        {{0, 1}, {1}},
        {{1, 0}, {1}},
        {{1, 1}, {0}}
    };
    MLP mlp(2, 3, 1); // 2 input nodes, 3 hidden nodes, 1 output node
    // Per-sample (stochastic) gradient descent over the whole table.
    const int epochs = 10000;
    const double learningRate = 0.1;
    for (int epoch = 0; epoch < epochs; ++epoch)
    {
        for (const auto& sample : trainData)
        {
            const vector<double> prediction = mlp.forward(sample.first);
            mlp.backward(sample.first, prediction, sample.second, learningRate);
        }
    }
    // Report the trained network's output on each training sample.
    for (const auto& sample : trainData)
    {
        const vector<double>& input = sample.first;
        const vector<double> prediction = mlp.forward(input);
        cout << "x = (" << input[0] << ", " << input[1] << "), y = "
             << prediction[0] << ", t = " << sample.second[0] << endl;
    }
    return 0;
}
```
在上面的代码中,MLP类封装了一个具有输入、隐藏和输出层的多层感知器。forward()方法实现了前向传播,backward()方法实现了误差反向传播,并根据学习率来调整网络中的权值和偏置。在主函数中,我们使用训练数据(XOR真值表)对模型进行训练,训练结束后在同一组样本上输出模型的预测值,与目标值进行对比。
阅读全文