bp神经网络c++实例
时间: 2023-08-08 16:07:29 浏览: 106
当然可以!以下是一个使用C++实现的简单的BP神经网络实例:
```cpp
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <vector>
using namespace std;
// Logistic sigmoid activation: maps any real x into the open interval (0, 1).
double sigmoid(double x) {
    const double decay = std::exp(-x);
    return 1.0 / (1.0 + decay);
}
// A fully-connected feed-forward network with one hidden layer, trained by
// classic back-propagation (stochastic gradient descent on squared error).
//
// Fixes over the naive version:
//  - bias terms on both layers (without biases a sigmoid net cannot reliably
//    learn XOR, which is what the accompanying demo trains on);
//  - the forward pass is computed once in a shared helper instead of being
//    duplicated inside backward();
//  - inputs are taken by const reference (they are never modified).
class NeuralNetwork {
private:
    int inputSize;       // number of input neurons
    int hiddenSize;      // number of hidden neurons
    int outputSize;      // number of output neurons
    double learningRate; // gradient-descent step size
    std::vector<std::vector<double>> weightsIH; // [hidden][input] weights
    std::vector<std::vector<double>> weightsHO; // [output][hidden] weights
    std::vector<double> biasH; // hidden-layer biases
    std::vector<double> biasO; // output-layer biases

    // Logistic activation; kept private/static so the class is self-contained.
    static double activate(double x) {
        return 1.0 / (1.0 + std::exp(-x));
    }

    // Uniform pseudo-random value in [-0.5, 0.5] for parameter initialization.
    static double randomWeight() {
        return static_cast<double>(std::rand()) / RAND_MAX - 0.5;
    }

    // Single forward pass: fills `hidden` and `output` (both pre-sized).
    // Shared by forward() and backward() so the math lives in one place.
    void propagate(const std::vector<double>& input,
                   std::vector<double>& hidden,
                   std::vector<double>& output) const {
        for (int i = 0; i < hiddenSize; i++) {
            double sum = biasH[i];
            for (int j = 0; j < inputSize; j++) {
                sum += weightsIH[i][j] * input[j];
            }
            hidden[i] = activate(sum);
        }
        for (int i = 0; i < outputSize; i++) {
            double sum = biasO[i];
            for (int j = 0; j < hiddenSize; j++) {
                sum += weightsHO[i][j] * hidden[j];
            }
            output[i] = activate(sum);
        }
    }

public:
    // Builds the network and randomly initializes every weight and bias
    // in [-0.5, 0.5]. Uses rand(); seed with srand() for varied runs.
    NeuralNetwork(int inputSize, int hiddenSize, int outputSize, double learningRate) {
        this->inputSize = inputSize;
        this->hiddenSize = hiddenSize;
        this->outputSize = outputSize;
        this->learningRate = learningRate;
        weightsIH.assign(hiddenSize, std::vector<double>(inputSize));
        weightsHO.assign(outputSize, std::vector<double>(hiddenSize));
        biasH.assign(hiddenSize, 0.0);
        biasO.assign(outputSize, 0.0);
        for (auto& row : weightsIH) {
            for (double& w : row) w = randomWeight();
        }
        for (auto& row : weightsHO) {
            for (double& w : row) w = randomWeight();
        }
        for (double& b : biasH) b = randomWeight();
        for (double& b : biasO) b = randomWeight();
    }

    // Forward pass: returns the output-layer activations for `input`
    // (input.size() must equal inputSize).
    std::vector<double> forward(const std::vector<double>& input) {
        std::vector<double> hidden(hiddenSize);
        std::vector<double> output(outputSize);
        propagate(input, hidden, output);
        return output;
    }

    // One SGD step on a single (input, target) pair: forward pass, then
    // back-propagate squared-error deltas and update weights and biases.
    void backward(const std::vector<double>& input, const std::vector<double>& target) {
        std::vector<double> hidden(hiddenSize);
        std::vector<double> output(outputSize);
        propagate(input, hidden, output);

        // Output-layer delta: (t - o) * o * (1 - o)  (sigmoid derivative).
        std::vector<double> outputError(outputSize);
        for (int i = 0; i < outputSize; i++) {
            outputError[i] = (target[i] - output[i]) * output[i] * (1 - output[i]);
        }

        // Hidden-layer delta: errors propagated back through weightsHO,
        // scaled by the hidden sigmoid derivative.
        std::vector<double> hiddenError(hiddenSize);
        for (int i = 0; i < hiddenSize; i++) {
            double sum = 0;
            for (int j = 0; j < outputSize; j++) {
                sum += weightsHO[j][i] * outputError[j];
            }
            hiddenError[i] = sum * hidden[i] * (1 - hidden[i]);
        }

        // Gradient-descent updates (bias gradient = delta * 1).
        for (int i = 0; i < outputSize; i++) {
            for (int j = 0; j < hiddenSize; j++) {
                weightsHO[i][j] += learningRate * outputError[i] * hidden[j];
            }
            biasO[i] += learningRate * outputError[i];
        }
        for (int i = 0; i < hiddenSize; i++) {
            for (int j = 0; j < inputSize; j++) {
                weightsIH[i][j] += learningRate * hiddenError[i] * input[j];
            }
            biasH[i] += learningRate * hiddenError[i];
        }
    }
};
// Demo: train the network on the XOR truth table, then print its
// predictions for all four inputs (values should approach 0/1/1/0).
int main() {
    // 2 inputs, 3 hidden neurons, 1 output, learning rate 0.1.
    NeuralNetwork nn(2, 3, 1, 0.1);

    // XOR training set and matching targets (parallel vectors).
    std::vector<std::vector<double>> trainingData = {
        {0, 0},
        {0, 1},
        {1, 0},
        {1, 1}
    };
    std::vector<std::vector<double>> targetData = {
        {0},
        {1},
        {1},
        {0}
    };

    // Stochastic training: one randomly chosen sample per step.
    // rand() is deliberately left unseeded so runs are reproducible.
    for (int i = 0; i < 10000; i++) {
        std::size_t index = std::rand() % trainingData.size();
        nn.backward(trainingData[index], targetData[index]);
    }

    // Print the trained network's prediction for every sample.
    for (std::size_t i = 0; i < trainingData.size(); i++) {
        std::vector<double> output = nn.forward(trainingData[i]);
        std::cout << "Input: " << trainingData[i][0] << " " << trainingData[i][1] << " ";
        std::cout << "Output: " << output[0] << '\n';
    }
    return 0;
}
```
这个例子实现了一个具有两个输入、三个隐藏节点和一个输出节点的简单BP神经网络。它使用随机初始化的权重进行训练,并使用梯度下降算法进行权重更新。训练数据是XOR问题的四个样本,输出结果应该接近于XOR函数的结果。
请注意,这只是一个简单的示例,实际应用中可能需要更复杂的网络结构和更多的训练数据来获得更好的结果。
阅读全文