Regression Models: A Multi-Dimensional BP Neural Network in C++ (Class-Based Implementation with an Example)
A multi-dimensional BP (back-propagation) neural network is a common regression model for learning the mapping between multi-dimensional inputs and multi-dimensional outputs. Below is an example of a class-based C++ implementation of such a network:
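For reference, the training step in the code performs the standard delta-rule updates for a one-hidden-layer sigmoid network (a sketch of the underlying math; the symbols mirror the roles of the variables in the code, with $\eta$ the learning rate, $t_k$ the targets, and $y_k$ the output activations):

$$
a_j = \sigma\Big(\sum_i w_{ji}\, x_i + b_j\Big), \qquad \sigma(x) = \frac{1}{1 + e^{-x}}
$$

$$
\delta_k^{\text{out}} = (t_k - y_k)\, y_k (1 - y_k), \qquad
\delta_j^{\text{hid}} = a_j (1 - a_j) \sum_k \delta_k^{\text{out}} w_{kj}, \qquad
w \leftarrow w + \eta\, \delta \cdot \text{input}
$$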
```cpp
#include <iostream>
#include <vector>
#include <cmath>
#include <ctime>
#include <cstdlib>
using namespace std;
// A single neuron: one weight per input plus a bias.
class Neuron {
public:
    Neuron(int num_inputs) {
        // Random initialization in [0, 1); small random values are enough for this demo.
        for (int i = 0; i < num_inputs; i++) {
            weights.push_back(rand() / double(RAND_MAX));
        }
        bias = rand() / double(RAND_MAX);
    }
    // Weighted sum of the inputs plus the bias (pre-activation value).
    double fire(const vector<double>& inputs) {
        double activation = 0.0;
        for (size_t i = 0; i < inputs.size(); i++) {
            activation += inputs[i] * weights[i];
        }
        activation += bias;
        return activation;
    }
    // Gradient step: w += lr * delta * input, bias += lr * delta.
    void update_weights(const vector<double>& inputs, double delta, double learning_rate) {
        for (size_t i = 0; i < inputs.size(); i++) {
            weights[i] += learning_rate * delta * inputs[i];
        }
        bias += learning_rate * delta;
    }
    // Accessor used when back-propagating the error to the previous layer.
    double get_weight(size_t i) const { return weights[i]; }
private:
    vector<double> weights;
    double bias;
};
// A fully connected network with one hidden layer, trained with back-propagation.
class NeuralNetwork {
public:
    NeuralNetwork(int num_inputs, int num_hidden, int num_outputs, double learning_rate)
        : learning_rate(learning_rate) {
        // Construct each neuron separately so every neuron gets its own random weights
        // (a fill-constructed vector would copy one prototype neuron many times).
        for (int i = 0; i < num_hidden; i++) hidden_layer.push_back(Neuron(num_inputs));
        for (int i = 0; i < num_outputs; i++) output_layer.push_back(Neuron(num_hidden));
    }
    // Forward pass: input -> hidden -> output, sigmoid activation on every layer.
    vector<double> predict(const vector<double>& inputs) {
        vector<double> hidden_activations;
        for (size_t i = 0; i < hidden_layer.size(); i++) {
            hidden_activations.push_back(sigmoid(hidden_layer[i].fire(inputs)));
        }
        vector<double> output_activations;
        for (size_t i = 0; i < output_layer.size(); i++) {
            output_activations.push_back(sigmoid(output_layer[i].fire(hidden_activations)));
        }
        return output_activations;
    }
    // One step of stochastic gradient descent on a single (inputs, targets) pair.
    void train(const vector<double>& inputs, const vector<double>& targets) {
        // Forward pass, kept explicit so the activations are available for the backward pass.
        vector<double> hidden_activations;
        for (size_t i = 0; i < hidden_layer.size(); i++) {
            hidden_activations.push_back(sigmoid(hidden_layer[i].fire(inputs)));
        }
        vector<double> output_activations;
        for (size_t i = 0; i < output_layer.size(); i++) {
            output_activations.push_back(sigmoid(output_layer[i].fire(hidden_activations)));
        }
        // Output-layer deltas: (target - output) * sigmoid'(output).
        vector<double> output_deltas;
        for (size_t i = 0; i < output_layer.size(); i++) {
            double error = targets[i] - output_activations[i];
            output_deltas.push_back(error * sigmoid_derivative(output_activations[i]));
        }
        // Hidden-layer deltas: back-propagate the output deltas through the output weights.
        vector<double> hidden_deltas;
        for (size_t i = 0; i < hidden_layer.size(); i++) {
            double error = 0.0;
            for (size_t j = 0; j < output_layer.size(); j++) {
                error += output_deltas[j] * output_layer[j].get_weight(i);
            }
            hidden_deltas.push_back(error * sigmoid_derivative(hidden_activations[i]));
        }
        // Update both layers with the delta rule.
        for (size_t i = 0; i < output_layer.size(); i++) {
            output_layer[i].update_weights(hidden_activations, output_deltas[i], learning_rate);
        }
        for (size_t i = 0; i < hidden_layer.size(); i++) {
            hidden_layer[i].update_weights(inputs, hidden_deltas[i], learning_rate);
        }
    }
private:
    double learning_rate;
    vector<Neuron> hidden_layer;
    vector<Neuron> output_layer;
    double sigmoid(double x) {
        return 1.0 / (1.0 + exp(-x));
    }
    // Derivative of the sigmoid expressed through its output a = sigmoid(x): a * (1 - a).
    // Callers pass already-activated values, so re-applying sigmoid here would be wrong.
    double sigmoid_derivative(double a) {
        return a * (1.0 - a);
    }
};
int main() {
    srand(time(NULL));
    // XOR truth table: two inputs, one output.
    vector<vector<double>> inputs = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    vector<vector<double>> targets = {{0}, {1}, {1}, {0}};
    // 2 inputs, 10 hidden neurons, 1 output, learning rate 0.1.
    NeuralNetwork nn(2, 10, 1, 0.1);
    // Train on randomly chosen samples (stochastic single-sample updates).
    for (int i = 0; i < 10000; i++) {
        int rand_index = rand() % inputs.size();
        nn.train(inputs[rand_index], targets[rand_index]);
    }
    // Print the network's output for each input pattern.
    for (size_t i = 0; i < inputs.size(); i++) {
        vector<double> output = nn.predict(inputs[i]);
        cout << "Input: " << inputs[i][0] << ", " << inputs[i][1]
             << " Output: " << output[0] << endl;
    }
    return 0;
}
```
The example above implements a small BP neural network and applies it to the XOR problem. The Neuron class represents a single neuron: weights holds one weight per input and bias is the bias term. The NeuralNetwork class holds the hidden layer (hidden_layer) and the output layer (output_layer); predict runs the forward pass and train performs one step of back-propagation. sigmoid is the activation function and sigmoid_derivative is its derivative, expressed in terms of the sigmoid's output. In main, the network is trained on the four XOR patterns. To use the same classes for a genuine multi-input, multi-output regression task, only the constructor arguments and the training data need to change, as shown in the sketch below.
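A minimal usage sketch for a multi-dimensional regression setting, assuming the includes and the Neuron/NeuralNetwork classes from the listing above. The data here is made up for illustration, and the inputs and targets are assumed to already be scaled into [0, 1] because the output layer uses a sigmoid activation:
```cpp
// Hypothetical example: 3-input, 2-output regression with made-up data.
// Assumes the includes and the Neuron/NeuralNetwork classes defined above;
// all values are assumed to be pre-scaled into [0, 1] (sigmoid output layer).
int main() {
    srand(time(NULL));
    vector<vector<double>> X = {{0.1, 0.2, 0.3}, {0.4, 0.5, 0.6}, {0.7, 0.8, 0.9}};
    vector<vector<double>> Y = {{0.2, 0.3}, {0.5, 0.6}, {0.8, 0.9}};
    NeuralNetwork reg(3, 16, 2, 0.05);      // 3 inputs, 16 hidden neurons, 2 outputs
    for (int epoch = 0; epoch < 20000; epoch++) {
        int k = rand() % X.size();          // stochastic single-sample updates
        reg.train(X[k], Y[k]);
    }
    vector<double> y_hat = reg.predict(X[0]);
    cout << "Prediction for X[0]: " << y_hat[0] << ", " << y_hat[1] << endl;
    return 0;
}
```
If the real targets fall outside [0, 1], a common choice is to normalize them before training and invert the scaling on the predictions, or to replace the output-layer sigmoid with a linear activation.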