写一个C语言的bp神经网络
时间: 2024-03-13 11:22:20 浏览: 66
用C语言实现bp神经网络.zip
5星 · 资源好评率100%
好的，以下是一个简单的 C 语言实现的 BP 神经网络示例代码（三层前馈网络：3 个输入、4 个隐层、2 个输出，使用 sigmoid 激活函数和随机梯度下降训练）：
```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define INPUT_LAYER_SIZE 3 // 输入层神经元数量
#define HIDDEN_LAYER_SIZE 4 // 隐层神经元数量
#define OUTPUT_LAYER_SIZE 2 // 输出层神经元数量
#define LEARNING_RATE 0.5 // 学习率
// Sigmoid 函数
double sigmoid(double x)
{
return 1.0 / (1.0 + exp(-x));
}
/*
 * Halved sum-of-squares error over one sample:
 *     E = 1/2 * sum_i (output[i] - expected_output[i])^2
 * The 1/2 factor is conventional: it cancels when differentiating, which
 * gives the clean (y - t) term used in the output-layer delta in main().
 * Both arrays must hold OUTPUT_LAYER_SIZE elements.
 */
double calculate_error(const double *output, const double *expected_output)
{
    double err = 0.0;
    for (int i = 0; i < OUTPUT_LAYER_SIZE; i++) {
        /* diff * diff instead of pow(diff, 2): exact and avoids a libm call */
        double diff = output[i] - expected_output[i];
        err += diff * diff;
    }
    return err / 2.0;
}
int main()
{
int epochs = 10000; // 迭代次数
double input_data[][INPUT_LAYER_SIZE] = {{0, 0, 1}, {0, 1, 1}, {1, 0, 1}, {1, 1, 1}}; // 输入数据
double output_data[][OUTPUT_LAYER_SIZE] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}}; // 期望输出
double input[INPUT_LAYER_SIZE];
double hidden[HIDDEN_LAYER_SIZE];
double output[OUTPUT_LAYER_SIZE];
// 初始化权重
double w1[INPUT_LAYER_SIZE][HIDDEN_LAYER_SIZE];
double w2[HIDDEN_LAYER_SIZE][OUTPUT_LAYER_SIZE];
for (int i = 0; i < INPUT_LAYER_SIZE; i++) {
for (int j = 0; j < HIDDEN_LAYER_SIZE; j++) {
w1[i][j] = ((double) rand() / (RAND_MAX)) - 0.5;
}
}
for (int i = 0; i < HIDDEN_LAYER_SIZE; i++) {
for (int j = 0; j < OUTPUT_LAYER_SIZE; j++) {
w2[i][j] = ((double) rand() / (RAND_MAX)) - 0.5;
}
}
// 开始训练
for (int epoch = 0; epoch < epochs; epoch++) {
for (int i = 0; i < 4; i++) {
// 前向传播
for (int j = 0; j < INPUT_LAYER_SIZE; j++) {
input[j] = input_data[i][j];
}
for (int j = 0; j < HIDDEN_LAYER_SIZE; j++) {
double net = 0.0;
for (int k = 0; k < INPUT_LAYER_SIZE; k++) {
net += input[k] * w1[k][j];
}
hidden[j] = sigmoid(net);
}
for (int j = 0; j < OUTPUT_LAYER_SIZE; j++) {
double net = 0.0;
for (int k = 0; k < HIDDEN_LAYER_SIZE; k++) {
net += hidden[k] * w2[k][j];
}
output[j] = sigmoid(net);
}
// 反向传播
double delta_output[OUTPUT_LAYER_SIZE];
for (int j = 0; j < OUTPUT_LAYER_SIZE; j++) {
delta_output[j] = (output[j] - output_data[i][j]) * output[j] * (1 - output[j]);
}
double delta_hidden[HIDDEN_LAYER_SIZE];
for (int j = 0; j < HIDDEN_LAYER_SIZE; j++) {
double error = 0.0;
for (int k = 0; k < OUTPUT_LAYER_SIZE; k++) {
error += w2[j][k] * delta_output[k];
}
delta_hidden[j] = error * hidden[j] * (1 - hidden[j]);
}
// 更新权重
for (int j = 0; j < INPUT_LAYER_SIZE; j++) {
for (int k = 0; k < HIDDEN_LAYER_SIZE; k++) {
w1[j][k] -= LEARNING_RATE * input[j] * delta_hidden[k];
}
}
for (int j = 0; j < HIDDEN_LAYER_SIZE; j++) {
for (int k = 0; k < OUTPUT_LAYER_SIZE; k++) {
w2[j][k] -= LEARNING_RATE * hidden[j] * delta_output[k];
}
}
// 输出误差
double err = calculate_error(output, output_data[i]);
printf("Epoch: %d, Sample: %d, Error: %f\n", epoch, i, err);
}
}
// 测试模型
printf("\nTest Model:\n");
for (int i = 0; i < 4; i++) {
for (int j = 0; j < INPUT_LAYER_SIZE; j++) {
input[j] = input_data[i][j];
}
for (int j = 0; j < HIDDEN_LAYER_SIZE; j++) {
double net = 0.0;
for (int k = 0; k < INPUT_LAYER_SIZE; k++) {
net += input[k] * w1[k][j];
}
hidden[j] = sigmoid(net);
}
for (int j = 0; j < OUTPUT_LAYER_SIZE; j++) {
double net = 0.0;
for (int k = 0; k < HIDDEN_LAYER_SIZE; k++) {
net += hidden[k] * w2[k][j];
}
output[j] = sigmoid(net);
}
printf("Input: %f, %f, %f, ", input_data[i][0], input_data[i][1], input_data[i][2]);
printf("Expected output: %f, %f, ", output_data[i][0], output_data[i][1]);
printf("Output: %f, %f\n", output[0], output[1]);
}
return 0;
}
```
希望对你有帮助!
阅读全文