BP神经网络C语言实现
时间: 2023-11-20 13:05:53 浏览: 76
BP神经网络的C语言实现
3星 · 编辑精心推荐
以下是一个简单的BP神经网络的C语言实现,其中包括了前向传播和反向传播两个过程:
```c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define INPUT_NUM 2 // 输入层节点数
#define HIDDEN_NUM 3 // 隐藏层节点数
#define OUTPUT_NUM 1 // 输出层节点数
#define LEARNING_RATE 0.5 // 学习率
#define EPOCHS 10000 // 迭代次数
// sigmoid函数
double sigmoid(double x) {
return 1.0 / (1.0 + exp(-x));
}
/*
 * Forward pass: propagate one input vector through the network.
 *
 * input  - INPUT_NUM activations fed into the network (read-only in effect)
 * hidden - out: HIDDEN_NUM hidden-layer activations (sigmoid of weighted sums)
 * output - out: OUTPUT_NUM output-layer activations
 * w1     - input->hidden weights, indexed [input][hidden]
 * w2     - hidden->output weights, indexed [hidden][output]
 */
void forward(double input[INPUT_NUM], double hidden[HIDDEN_NUM], double output[OUTPUT_NUM], double w1[INPUT_NUM][HIDDEN_NUM], double w2[HIDDEN_NUM][OUTPUT_NUM]) {
    /* Hidden layer: each node takes the weighted sum of all inputs. */
    for (int h = 0; h < HIDDEN_NUM; h++) {
        double acc = 0.0;
        for (int in = 0; in < INPUT_NUM; in++) {
            acc += w1[in][h] * input[in];
        }
        hidden[h] = sigmoid(acc);
    }
    /* Output layer: each node takes the weighted sum of all hidden activations. */
    for (int o = 0; o < OUTPUT_NUM; o++) {
        double acc = 0.0;
        for (int h = 0; h < HIDDEN_NUM; h++) {
            acc += w2[h][o] * hidden[h];
        }
        output[o] = sigmoid(acc);
    }
}
/*
 * Backward pass: one gradient-descent step for a single training sample.
 *
 * Computes the output- and hidden-layer error terms (deltas) for the
 * squared-error loss with sigmoid activations, then updates w1 and w2
 * in place with step size LEARNING_RATE.
 *
 * Note: the hidden deltas are computed from w2 BEFORE w2 is updated,
 * which is the correct ordering for backpropagation.
 */
void backward(double input[INPUT_NUM], double hidden[HIDDEN_NUM], double output[OUTPUT_NUM], double target[OUTPUT_NUM], double w1[INPUT_NUM][HIDDEN_NUM], double w2[HIDDEN_NUM][OUTPUT_NUM]) {
    double out_delta[OUTPUT_NUM];
    double hid_delta[HIDDEN_NUM];

    /* Output deltas: (target - actual) times the sigmoid derivative y*(1-y). */
    for (int o = 0; o < OUTPUT_NUM; o++) {
        const double y = output[o];
        out_delta[o] = (target[o] - y) * y * (1.0 - y);
    }

    /* Hidden deltas: error backpropagated through w2, times sigmoid derivative. */
    for (int h = 0; h < HIDDEN_NUM; h++) {
        double back_err = 0.0;
        for (int o = 0; o < OUTPUT_NUM; o++) {
            back_err += out_delta[o] * w2[h][o];
        }
        hid_delta[h] = hidden[h] * (1.0 - hidden[h]) * back_err;
    }

    /* Gradient step on the input->hidden weights. */
    for (int in = 0; in < INPUT_NUM; in++) {
        for (int h = 0; h < HIDDEN_NUM; h++) {
            w1[in][h] += LEARNING_RATE * hid_delta[h] * input[in];
        }
    }
    /* Gradient step on the hidden->output weights. */
    for (int h = 0; h < HIDDEN_NUM; h++) {
        for (int o = 0; o < OUTPUT_NUM; o++) {
            w2[h][o] += LEARNING_RATE * out_delta[o] * hidden[h];
        }
    }
}
/*
 * Train the 2-3-1 network on the XOR truth table, then print the
 * network's output for each of the four input patterns.
 *
 * Fix: the original called rand() without ever seeding the generator,
 * so every run started from the identical weight initialization.
 * We now seed with the current time (srand), giving a different
 * random start each run.
 *
 * NOTE(review): the network has no bias terms, so XOR may converge
 * slowly or stall for some initializations — adding biases would
 * require changing the forward/backward signatures, so it is only
 * flagged here.
 */
int main() {
    /* Seed the PRNG so weight initialization differs between runs. */
    srand((unsigned)time(NULL));

    /* Initialize all weights uniformly in [-0.5, 0.5). */
    double w1[INPUT_NUM][HIDDEN_NUM];
    double w2[HIDDEN_NUM][OUTPUT_NUM];
    for (int i = 0; i < INPUT_NUM; i++) {
        for (int j = 0; j < HIDDEN_NUM; j++) {
            w1[i][j] = (double)rand() / RAND_MAX - 0.5;
        }
    }
    for (int i = 0; i < HIDDEN_NUM; i++) {
        for (int j = 0; j < OUTPUT_NUM; j++) {
            w2[i][j] = (double)rand() / RAND_MAX - 0.5;
        }
    }

    /* XOR training set: inputs and matching targets. */
    double input[4][INPUT_NUM] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    double target[4][OUTPUT_NUM] = {{0}, {1}, {1}, {0}};

    /* Online (per-sample) training loop. */
    for (int epoch = 0; epoch < EPOCHS; epoch++) {
        double error = 0.0;
        for (int i = 0; i < 4; i++) {
            double hidden[HIDDEN_NUM];
            double output[OUTPUT_NUM];
            forward(input[i], hidden, output, w1, w2);
            backward(input[i], hidden, output, target[i], w1, w2);
            /* Accumulate squared-error loss for progress reporting. */
            error += 0.5 * (target[i][0] - output[0]) * (target[i][0] - output[0]);
        }
        if (epoch % 1000 == 0) {
            printf("Epoch %d, error = %f\n", epoch, error);
        }
    }

    /* Evaluate the trained network on the four XOR patterns. */
    double test_input[4][INPUT_NUM] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    for (int i = 0; i < 4; i++) {
        double hidden[HIDDEN_NUM];
        double output[OUTPUT_NUM];
        forward(test_input[i], hidden, output, w1, w2);
        printf("%f %f -> %f\n", test_input[i][0], test_input[i][1], output[0]);
    }
    return 0;
}
```
阅读全文