基于神经网络的优化计算例子C语言完整代码
时间: 2023-06-27 17:06:39 浏览: 80
以下是一个基于神经网络的优化计算的C语言代码示例:
```
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define INPUT_SIZE 2
#define HIDDEN_SIZE 3
#define OUTPUT_SIZE 1
#define LEARNING_RATE 0.1
#define ITERATIONS 10000
double sigmoid(double x) {
return 1.0 / (1.0 + exp(-x));
}
/*
 * Derivative of the logistic function, expressed in terms of its OUTPUT:
 * given y = sigmoid(x), dy/dx = y * (1 - y).  Callers must pass the
 * already-activated value, not the raw pre-activation input.
 */
double sigmoid_derivative(double y) {
    double one_minus_y = 1.0 - y;
    return y * one_minus_y;
}
int main() {
// Input data
double inputs[INPUT_SIZE][2] = {{0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0}};
double outputs[4] = {0.0, 1.0, 1.0, 0.0};
// Initialize weights with random values
double w1[INPUT_SIZE][HIDDEN_SIZE];
double w2[HIDDEN_SIZE][OUTPUT_SIZE];
for (int i = 0; i < INPUT_SIZE; i++) {
for (int j = 0; j < HIDDEN_SIZE; j++) {
w1[i][j] = (double) rand() / RAND_MAX;
}
}
for (int i = 0; i < HIDDEN_SIZE; i++) {
for (int j = 0; j < OUTPUT_SIZE; j++) {
w2[i][j] = (double) rand() / RAND_MAX;
}
}
// Train the neural network
for (int iteration = 0; iteration < ITERATIONS; iteration++) {
for (int i = 0; i < 4; i++) {
// Forward propagation
double hidden[HIDDEN_SIZE];
double output = 0.0;
for (int j = 0; j < HIDDEN_SIZE; j++) {
hidden[j] = 0.0;
for (int k = 0; k < INPUT_SIZE; k++) {
hidden[j] += inputs[i][k] * w1[k][j];
}
hidden[j] = sigmoid(hidden[j]);
}
for (int j = 0; j < OUTPUT_SIZE; j++) {
for (int k = 0; k < HIDDEN_SIZE; k++) {
output += hidden[k] * w2[k][j];
}
output = sigmoid(output);
}
// Backpropagation
double error = outputs[i] - output;
double output_delta = error * sigmoid_derivative(output);
double hidden_delta[HIDDEN_SIZE];
for (int j = 0; j < HIDDEN_SIZE; j++) {
hidden_delta[j] = sigmoid_derivative(hidden[j]) * output_delta * w2[j][0];
}
for (int j = 0; j < HIDDEN_SIZE; j++) {
for (int k = 0; k < OUTPUT_SIZE; k++) {
w2[j][k] += LEARNING_RATE * hidden[j] * output_delta;
}
}
for (int j = 0; j < INPUT_SIZE; j++) {
for (int k = 0; k < HIDDEN_SIZE; k++) {
w1[j][k] += LEARNING_RATE * inputs[i][j] * hidden_delta[k];
}
}
}
}
// Test the neural network
printf("0 XOR 0 = %lf\n", sigmoid(w2[0][0] * sigmoid(w1[0][0] * 0.0 + w1[1][0] * 0.0) + w2[1][0] * sigmoid(w1[0][1] * 0.0 + w1[1][1] * 0.0) + w2[2][0] * sigmoid(w1[0][2] * 0.0 + w1[1][2] * 0.0)));
printf("0 XOR 1 = %lf\n", sigmoid(w2[0][0] * sigmoid(w1[0][0] * 0.0 + w1[1][0] * 1.0) + w2[1][0] * sigmoid(w1[0][1] * 0.0 + w1[1][1] * 1.0) + w2[2][0] * sigmoid(w1[0][2] * 0.0 + w1[1][2] * 1.0)));
printf("1 XOR 0 = %lf\n", sigmoid(w2[0][0] * sigmoid(w1[0][0] * 1.0 + w1[1][0] * 0.0) + w2[1][0] * sigmoid(w1[0][1] * 1.0 + w1[1][1] * 0.0) + w2[2][0] * sigmoid(w1[0][2] * 1.0 + w1[1][2] * 0.0)));
printf("1 XOR 1 = %lf\n", sigmoid(w2[0][0] * sigmoid(w1[0][0] * 1.0 + w1[1][0] * 1.0) + w2[1][0] * sigmoid(w1[0][1] * 1.0 + w1[1][1] * 1.0) + w2[2][0] * sigmoid(w1[0][2] * 1.0 + w1[1][2] * 1.0)));
return 0;
}
```
此代码实现了一个简单的神经网络来解决XOR问题。它使用随机初始化的权重,并使用反向传播算法进行训练。最后,它测试了该网络的结果并输出了每个输入的预测值。
阅读全文