Implementing a BP Neural Network in C++
A BP (backpropagation) neural network is a classic feed-forward artificial neural network trained with the backpropagation algorithm, commonly used for classification and regression problems. Below is a simple C++ implementation, built up step by step.
First, define the network's hyperparameters: the number of nodes in the input, hidden, and output layers, the learning rate, and the maximum number of training epochs. The headers needed by the snippets that follow are included here as well.
```c++
#include <cmath>    // exp, pow
#include <cstdio>   // printf
#include <cstdlib>  // rand, srand, RAND_MAX
#include <cstring>  // memcpy
#include <ctime>    // time

const int inputLayerSize = 2;
const int hiddenLayerSize = 3;
const int outputLayerSize = 1;
const double learningRate = 0.1;
const int maxEpochs = 10000;
```
Next, define the network's storage: an activation array for each layer, plus the weight matrices and bias vectors connecting them. For simplicity these are fixed-size global arrays; production code would typically use std::vector instead.
```c++
double inputLayer[inputLayerSize];                      // input activations
double hiddenLayer[hiddenLayerSize];                    // hidden activations
double outputLayer[outputLayerSize];                    // output activations
double hiddenWeights[inputLayerSize][hiddenLayerSize];  // input -> hidden weights
double hiddenBiases[hiddenLayerSize];                   // hidden-layer biases
double outputWeights[hiddenLayerSize][outputLayerSize]; // hidden -> output weights
double outputBiases[outputLayerSize];                   // output-layer biases
```
Then, implement the activation function; here we use the sigmoid function.
```c++
// Sigmoid activation: maps any real input into the range (0, 1).
double sigmoid(double x)
{
    return 1.0 / (1.0 + exp(-x));
}
```
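A property the backpropagation step below relies on is that the sigmoid's derivative can be written in terms of its own output: $\sigma'(x) = \sigma(x)\,(1 - \sigma(x))$. The small helper below is not part of the original snippets and is added purely for illustration:
```c++
// Derivative of the sigmoid expressed via its output value:
// if y = sigmoid(x), then sigmoid'(x) = y * (1 - y).
double sigmoidDerivative(double y)
{
    return y * (1.0 - y);
}
```
The backpropagation code later inlines this expression directly, e.g. as `outputLayer[0] * (1 - outputLayer[0])`.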
Next, implement the forward-propagation function, which computes the network's output for the current input.
```c++
void feedForward()
{
    // Hidden layer: weighted sum of inputs plus bias, passed through sigmoid.
    for (int i = 0; i < hiddenLayerSize; ++i)
    {
        double sum = 0.0;
        for (int j = 0; j < inputLayerSize; ++j)
        {
            sum += inputLayer[j] * hiddenWeights[j][i];
        }
        sum += hiddenBiases[i];
        hiddenLayer[i] = sigmoid(sum);
    }
    // Output layer: weighted sum of hidden activations plus bias, then sigmoid.
    for (int i = 0; i < outputLayerSize; ++i)
    {
        double sum = 0.0;
        for (int j = 0; j < hiddenLayerSize; ++j)
        {
            sum += hiddenLayer[j] * outputWeights[j][i];
        }
        sum += outputBiases[i];
        outputLayer[i] = sigmoid(sum);
    }
}
```
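In equation form, with $\sigma$ the sigmoid and $W^{(1)}, b^{(1)}, W^{(2)}, b^{(2)}$ introduced here merely as labels for hiddenWeights, hiddenBiases, outputWeights, and outputBiases, the forward pass computes:

$$
h_i = \sigma\Big(\sum_{j} x_j\, W^{(1)}_{ji} + b^{(1)}_i\Big),
\qquad
y_k = \sigma\Big(\sum_{i} h_i\, W^{(2)}_{ik} + b^{(2)}_k\Big)
$$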
Next, implement the backpropagation step, which updates the network's weights and biases from the error on a single training sample.
```c++
void backpropagation(double targetOutput)
{
    // Output-layer error and delta (gradient of the squared error with
    // respect to the output neuron's net input): delta = (t - y) * y * (1 - y).
    double outputError = targetOutput - outputLayer[0];
    double outputDelta = outputError * outputLayer[0] * (1 - outputLayer[0]);
    // Hidden-layer deltas must use the *pre-update* output weights,
    // so compute them before those weights are modified.
    double hiddenError[hiddenLayerSize];
    for (int i = 0; i < hiddenLayerSize; ++i)
    {
        double sum = 0.0;
        for (int j = 0; j < outputLayerSize; ++j)
        {
            sum += outputDelta * outputWeights[i][j];
        }
        hiddenError[i] = sum * hiddenLayer[i] * (1 - hiddenLayer[i]);
    }
    // Update output weights and bias.
    for (int i = 0; i < hiddenLayerSize; ++i)
    {
        outputWeights[i][0] += learningRate * hiddenLayer[i] * outputDelta;
    }
    outputBiases[0] += learningRate * outputDelta;
    // Update hidden weights and biases.
    for (int i = 0; i < inputLayerSize; ++i)
    {
        for (int j = 0; j < hiddenLayerSize; ++j)
        {
            hiddenWeights[i][j] += learningRate * inputLayer[i] * hiddenError[j];
        }
    }
    for (int i = 0; i < hiddenLayerSize; ++i)
    {
        hiddenBiases[i] += learningRate * hiddenError[i];
    }
}
```
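The delta rule used above follows from differentiating the squared error $E = \frac{1}{2}(t - y)^2$ with respect to the output neuron's net input $z$, where $y = \sigma(z)$:

$$
\frac{\partial E}{\partial z} = -(t - y)\,\sigma'(z) = -(t - y)\,y\,(1 - y)
$$

Gradient descent moves against this derivative, which is why the code *adds* learningRate × (t − y) × y × (1 − y) times the incoming activation to each weight. The hidden-layer deltas follow the same pattern, with the error term replaced by the weighted sum of downstream deltas.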
Finally, implement the training function, which repeatedly runs forward and backward passes over the training set until the error converges or the epoch limit is reached.
```c++
void train(double input[][inputLayerSize], double targetOutput[], int numSamples)
{
    // Initialize weights and biases to random values in [0, 1].
    // (Symmetric initialization in [-1, 1] is also common.)
    srand(time(NULL));
    for (int i = 0; i < inputLayerSize; ++i)
    {
        for (int j = 0; j < hiddenLayerSize; ++j)
        {
            hiddenWeights[i][j] = (double)rand() / RAND_MAX;
        }
    }
    for (int i = 0; i < hiddenLayerSize; ++i)
    {
        for (int j = 0; j < outputLayerSize; ++j)
        {
            outputWeights[i][j] = (double)rand() / RAND_MAX;
        }
    }
    for (int i = 0; i < hiddenLayerSize; ++i)
    {
        hiddenBiases[i] = (double)rand() / RAND_MAX;
    }
    for (int i = 0; i < outputLayerSize; ++i)
    {
        outputBiases[i] = (double)rand() / RAND_MAX;
    }
    // Train with per-sample (stochastic) weight updates.
    for (int epoch = 0; epoch < maxEpochs; ++epoch)
    {
        double error = 0.0;
        for (int i = 0; i < numSamples; ++i)
        {
            memcpy(inputLayer, input[i], sizeof(double) * inputLayerSize);
            double target = targetOutput[i];
            feedForward();
            backpropagation(target);
            error += pow(target - outputLayer[0], 2);
        }
        error /= numSamples;  // mean squared error for this epoch
        if (epoch % 1000 == 0)
        {
            printf("Epoch %d, error = %lf\n", epoch, error);
        }
        if (error < 0.01)
        {
            printf("Converged after %d epochs, error = %lf\n", epoch, error);
            break;
        }
    }
}
```
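For reference, the per-epoch error accumulated in the loop above is the mean squared error over the $N$ training samples:

$$
E = \frac{1}{N}\sum_{n=1}^{N}\big(t_n - y_n\big)^2
$$

Training stops as soon as $E$ drops below 0.01, or after maxEpochs passes otherwise.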
Example usage (learning the XOR function):
```c++
int main()
{
    // XOR truth table: four input pairs and their expected outputs.
    double input[][inputLayerSize] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    double targetOutput[] = {0, 1, 1, 0};
    train(input, targetOutput, 4);
    // Print the trained network's prediction for each input pair.
    for (int i = 0; i < 4; ++i)
    {
        memcpy(inputLayer, input[i], sizeof(double) * inputLayerSize);
        feedForward();
        printf("%lf XOR %lf = %lf\n", inputLayer[0], inputLayer[1], outputLayer[0]);
    }
    return 0;
}
```
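If the snippets above are concatenated in order into a single file (say, a hypothetical bp.cpp), they form a complete program and should build with any standard C++ compiler, for example `g++ -O2 bp.cpp -o bp` (file name and flags here are just an illustration).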
Sample output (exact values vary from run to run because the weights are initialized randomly):
```
Epoch 0, error = 0.287794
Converged after 1687 epochs, error = 0.009997
0.000000 XOR 0.000000 = 0.004068
0.000000 XOR 1.000000 = 0.994727
1.000000 XOR 0.000000 = 0.994837
1.000000 XOR 1.000000 = 0.005150
```
The above is a simple C++ implementation of a BP neural network, provided for reference only. In real applications you would typically refine the code further (e.g., dynamic layer sizes, better weight initialization, vectorized math) and tune the hyperparameters to improve performance and accuracy.