Implementing a BP Neural Network in C++
Here are the steps to implement a BP (backpropagation) neural network in C++:
1. Define the neuron struct
```c++
#include <vector>
#include <cassert>
#include <cmath>
#include <iostream>
using namespace std;

struct Neuron;                // forward declaration so Layer can be named below
typedef vector<Neuron> Layer; // a layer is simply a vector of neurons

struct Neuron {
    Neuron(unsigned numOutputs, unsigned myIndex); // constructor used by Net's constructor
    void setOutputVal(double val) { output = val; }
    double getOutputVal() const { return output; }
    void feedForward(const Layer &prevLayer);         // weighted sum + activation
    double sumDOW(const Layer &nextLayer) const;      // sum of contributions to the next layer's errors
    void calcOutputGradients(double targetVal);       // gradient of an output-layer neuron
    void calcHiddenGradients(const Layer &nextLayer); // gradient of a hidden-layer neuron
    void updateInputWeights(Layer &prevLayer);        // update the weights feeding this neuron
    double output;                     // neuron output value
    double gradient;                   // neuron gradient
    vector<double> outputWeights;      // weights on outgoing connections
    vector<double> deltaOutputWeights; // most recent weight changes (for momentum)
    unsigned index;                    // index of this neuron within its layer
};
```
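The member functions above are only declared; a minimal sketch of their definitions follows, assuming the common tanh activation and a gradient-descent-with-momentum update. The constants `eta` (learning rate) and `alpha` (momentum), the random weight initialization, and the helper `transferFunction` are illustrative choices, not fixed by the steps in this article.
```c++
#include <cstdlib> // rand(), RAND_MAX

static const double eta = 0.15;  // learning rate (assumed value)
static const double alpha = 0.5; // momentum factor (assumed value)

// tanh activation; its derivative with respect to the pre-activation sum,
// expressed in terms of the output value y = tanh(x), is 1 - y^2.
static double transferFunction(double x) { return tanh(x); }
static double transferFunctionDerivative(double y) { return 1.0 - y * y; }

Neuron::Neuron(unsigned numOutputs, unsigned myIndex)
    : output(0.0), gradient(0.0), index(myIndex) {
    for (unsigned c = 0; c < numOutputs; ++c) {
        outputWeights.push_back(rand() / double(RAND_MAX)); // random initial weight in [0, 1]
        deltaOutputWeights.push_back(0.0);
    }
}

void Neuron::feedForward(const Layer &prevLayer) {
    double sum = 0.0;
    // Weighted sum of the previous layer's outputs, including its bias neuron.
    for (unsigned n = 0; n < prevLayer.size(); ++n) {
        sum += prevLayer[n].getOutputVal() * prevLayer[n].outputWeights[index];
    }
    output = transferFunction(sum);
}

double Neuron::sumDOW(const Layer &nextLayer) const {
    double sum = 0.0;
    // Sum of this neuron's contributions to the errors of the next layer
    // (the next layer's bias neuron is excluded; nothing feeds into it).
    for (unsigned n = 0; n < nextLayer.size() - 1; ++n) {
        sum += outputWeights[n] * nextLayer[n].gradient;
    }
    return sum;
}

void Neuron::calcOutputGradients(double targetVal) {
    double delta = targetVal - output;
    gradient = delta * transferFunctionDerivative(output);
}

void Neuron::calcHiddenGradients(const Layer &nextLayer) {
    gradient = sumDOW(nextLayer) * transferFunctionDerivative(output);
}

void Neuron::updateInputWeights(Layer &prevLayer) {
    // The weights to update are stored in the previous layer's neurons.
    for (unsigned n = 0; n < prevLayer.size(); ++n) {
        Neuron &neuron = prevLayer[n];
        double oldDeltaWeight = neuron.deltaOutputWeights[index];
        double newDeltaWeight = eta * neuron.getOutputVal() * gradient // gradient descent step
                              + alpha * oldDeltaWeight;                // momentum term
        neuron.deltaOutputWeights[index] = newDeltaWeight;
        neuron.outputWeights[index] += newDeltaWeight;
    }
}
```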
2. Define the network class
```c++
class Net {
public:
    Net(const vector<unsigned> &topology); // build the layers from a topology, e.g. {2, 4, 1}
    void feedForward(const vector<double> &inputVals); // forward propagation
    void backProp(const vector<double> &targetVals);   // backpropagation
    void getResults(vector<double> &resultVals) const; // read the output layer
    double getRecentAverageError() const { return m_recentAverageError; } // recent average error
private:
    vector<Layer> m_layers; // the network's layers: m_layers[layerNum][neuronNum]
    double m_error;         // RMS error on the latest training sample
    double m_recentAverageError;           // running average of the error
    double m_recentAverageSmoothingFactor; // smoothing factor for that average
};
```
3. Implement the constructor
```c++
Net::Net(const vector<unsigned> &topology) {
unsigned numLayers = topology.size();
for (unsigned layerNum = 0; layerNum < numLayers; ++layerNum) {
m_layers.push_back(Layer());
unsigned numOutputs = layerNum == topology.size() - 1 ? 0 : topology[layerNum + 1];
for (unsigned neuronNum = 0; neuronNum <= topology[layerNum]; ++neuronNum) {
m_layers.back().push_back(Neuron(numOutputs, neuronNum));
cout << "Made a Neuron!" << endl;
}
m_layers.back().back().setOutputVal(1.0);
}
}
```
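Note the `<=` in the inner loop: every layer, including the output layer, receives one extra bias neuron whose output is pinned to 1.0. A topology of `{2, 4, 1}`, for example, therefore creates layers of 3, 5, and 2 neurons.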
4. Implement forward propagation
```c++
void Net::feedForward(const vector<double> &inputVals) {
    // Expect one input value per input neuron, not counting the bias neuron.
    assert(inputVals.size() == m_layers[0].size() - 1);
    // Latch the input values onto the input-layer neurons.
    for (unsigned i = 0; i < inputVals.size(); ++i) {
        m_layers[0][i].setOutputVal(inputVals[i]);
    }
    // Propagate forward layer by layer; the bias neurons are never recomputed.
    for (unsigned layerNum = 1; layerNum < m_layers.size(); ++layerNum) {
        Layer &prevLayer = m_layers[layerNum - 1];
        for (unsigned n = 0; n < m_layers[layerNum].size() - 1; ++n) {
            m_layers[layerNum][n].feedForward(prevLayer);
        }
    }
}
```
5. Implement backpropagation
```c++
void Net::backProp(const vector<double> &targetVals) {
    // Compute the RMS error over the output neurons (excluding the bias).
    Layer &outputLayer = m_layers.back();
    m_error = 0.0;
    for (unsigned n = 0; n < outputLayer.size() - 1; ++n) {
        double delta = targetVals[n] - outputLayer[n].getOutputVal();
        m_error += delta * delta;
    }
    m_error /= outputLayer.size() - 1;
    m_error = sqrt(m_error); // RMS
    // Running average of the error, for monitoring training progress.
    m_recentAverageError = (m_recentAverageError * m_recentAverageSmoothingFactor + m_error)
                         / (m_recentAverageSmoothingFactor + 1.0);
    // Output-layer gradients.
    for (unsigned n = 0; n < outputLayer.size() - 1; ++n) {
        outputLayer[n].calcOutputGradients(targetVals[n]);
    }
    // Hidden-layer gradients, from the last hidden layer back to the first.
    for (unsigned layerNum = m_layers.size() - 2; layerNum > 0; --layerNum) {
        Layer &hiddenLayer = m_layers[layerNum];
        Layer &nextLayer = m_layers[layerNum + 1];
        for (unsigned n = 0; n < hiddenLayer.size(); ++n) {
            hiddenLayer[n].calcHiddenGradients(nextLayer);
        }
    }
    // Update connection weights, from the output layer back to the first hidden layer.
    for (unsigned layerNum = m_layers.size() - 1; layerNum > 0; --layerNum) {
        Layer &layer = m_layers[layerNum];
        Layer &prevLayer = m_layers[layerNum - 1];
        for (unsigned n = 0; n < layer.size() - 1; ++n) {
            layer[n].updateInputWeights(prevLayer);
        }
    }
}
```
6. Retrieve the results from the output layer
```c++
void Net::getResults(vector<double> &resultVals) const {
    resultVals.clear();
    // Copy the output-layer values, excluding the bias neuron.
    for (unsigned n = 0; n < m_layers.back().size() - 1; ++n) {
        resultVals.push_back(m_layers.back()[n].getOutputVal());
    }
}
```
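Putting the pieces together, a minimal usage sketch is shown below (C++11; the XOR training set, the 2-4-1 topology, and the epoch count are illustrative choices, not part of the steps above):
```c++
int main() {
    // A 2-4-1 network: 2 inputs, one hidden layer of 4 neurons, 1 output.
    vector<unsigned> topology = {2, 4, 1};
    Net myNet(topology);

    // Train on XOR (illustrative data set).
    vector<vector<double>> inputs  = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    vector<double>         targets = {0, 1, 1, 0};

    for (int epoch = 0; epoch < 2000; ++epoch) {
        for (size_t i = 0; i < inputs.size(); ++i) {
            myNet.feedForward(inputs[i]);
            myNet.backProp(vector<double>{targets[i]});
        }
    }

    // Check the trained network's outputs.
    vector<double> results;
    for (size_t i = 0; i < inputs.size(); ++i) {
        myNet.feedForward(inputs[i]);
        myNet.getResults(results);
        cout << inputs[i][0] << " XOR " << inputs[i][1]
             << " -> " << results[0] << endl;
    }
    return 0;
}
```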