用python定义一个类,实现有三层感知器的BP神经网络算法,使其将数据集划分为训练集和预测集,并输出多个评价指标:R2,RMSE,MSE,MAPE和MAE
时间: 2024-09-11 10:07:48 浏览: 27
要使用Python定义一个三层感知器的BP(Back Propagation)神经网络并实现训练、划分数据集以及计算多个评价指标,我们可以按照以下步骤进行:
1. 导入所需的库:
```python
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, mean_absolute_percentage_error
```
2. 定义一个简单的神经元类,用于创建网络层:
```python
class Neuron:
    """A single sigmoid unit: forward(x) = sigmoid(x . weights + bias).

    Fix over the original: ``weights``/``bias`` were hard-coded to ``None``
    and never initialized anywhere, so any call to ``forward`` crashed in
    ``np.dot``.  Construction is backward compatible — ``Neuron()`` still
    leaves the parameters unset for callers that assign them directly —
    but ``Neuron(n_inputs)`` now gives a usable small random initialization.
    """

    def __init__(self, n_inputs=None):
        if n_inputs is None:
            # Original behavior: parameters left for the caller to assign.
            self.weights = None
            self.bias = None
        else:
            # Small random weights break symmetry between units; zero bias
            # is the conventional starting point.
            self.weights = np.random.randn(n_inputs) * 0.01
            self.bias = 0.0

    def sigmoid(self, x):
        """Logistic activation: maps any real input into (0, 1)."""
        return 1 / (1 + np.exp(-x))

    def forward(self, inputs):
        """Return sigmoid(inputs . weights + bias).

        Raises:
            ValueError: if the neuron was constructed without ``n_inputs``
                and no weights were assigned afterwards (the original code
                failed here with an opaque TypeError from ``np.dot``).
        """
        if self.weights is None:
            raise ValueError(
                "Neuron weights are uninitialized; construct with n_inputs "
                "or assign weights/bias before calling forward()"
            )
        return self.sigmoid(np.dot(inputs, self.weights) + self.bias)
```
3. 创建隐藏层和输出层的类:
```python
class HiddenLayer(Neuron):
    """Hidden-layer neuron; behavior is inherited unchanged from Neuron.

    The subclass exists only to give hidden units a distinct type name.
    """
    pass
class OutputLayer(Neuron):
    """Whole output layer: a (input_size, output_size) weight matrix.

    The inherited ``forward`` computes sigmoid(inputs @ weights + bias)
    for all output units at once.

    Fix over the original: the weight matrix's input dimension was
    hard-coded to 1, so the layer could only ever consume a single hidden
    activation.  ``input_size`` is now a parameter; its default of 1
    preserves the original shape for existing callers.
    """

    def __init__(self, output_size, input_size=1):
        super().__init__()
        self.output_size = output_size
        # Small random weights break symmetry; zero bias is standard.
        self.weights = np.random.randn(input_size, output_size) * 0.01
        self.bias = np.zeros(output_size)
class NeuralNetwork:
    """Three-layer perceptron: input -> sigmoid hidden -> sigmoid output.

    The original per-neuron implementation did not run: ``feedforward``
    iterated ``for neuron in layer`` where ``layer`` was a single Neuron
    object (TypeError), ``backpropagation`` referenced the undefined
    ``self.hidden_layers`` (typo) and the undefined ``layer_outputs``,
    hidden-unit weights were never initialized, and ``momentum`` was
    accepted but ignored.  This version keeps the same public interface
    but stores each layer as a weight matrix, making both passes
    vectorized and correct, and actually applies the momentum term.
    """

    def __init__(self, input_size, hidden_layer_size, output_size):
        # Small random weights break symmetry between units; zero biases.
        self.w_hidden = np.random.randn(input_size, hidden_layer_size) * 0.01
        self.b_hidden = np.zeros(hidden_layer_size)
        self.w_output = np.random.randn(hidden_layer_size, output_size) * 0.01
        self.b_output = np.zeros(output_size)
        # One momentum (velocity) buffer per parameter array.
        self._velocities = [
            np.zeros_like(p)
            for p in (self.w_hidden, self.b_hidden, self.w_output, self.b_output)
        ]

    @staticmethod
    def _sigmoid(x):
        """Logistic activation, applied element-wise."""
        return 1.0 / (1.0 + np.exp(-x))

    def feedforward(self, inputs):
        """Return network outputs, shape (n_samples, output_size).

        ``inputs`` may be a single sample (1-D) or a batch (2-D,
        n_samples x input_size); a single sample is promoted to a
        1-row batch.
        """
        inputs = np.atleast_2d(inputs)
        hidden = self._sigmoid(inputs @ self.w_hidden + self.b_hidden)
        return self._sigmoid(hidden @ self.w_output + self.b_output)

    def backpropagation(self, inputs, targets, learning_rate=0.1, momentum=0.9):
        """One gradient step (with classic momentum) on squared error.

        Args:
            inputs: batch of samples, (n_samples, input_size).
            targets: desired outputs, (n_samples, output_size).
            learning_rate: step size for the parameter update.
            momentum: exponential decay factor for the velocity buffers.
        """
        inputs = np.atleast_2d(inputs)
        targets = np.atleast_2d(targets)
        # Forward pass, keeping the hidden activations for the backward pass.
        hidden = self._sigmoid(inputs @ self.w_hidden + self.b_hidden)
        outputs = self._sigmoid(hidden @ self.w_output + self.b_output)
        # Output error term: (t - o) * sigmoid'(z) with sigmoid' = o*(1-o).
        delta_out = (targets - outputs) * outputs * (1 - outputs)
        # Hidden error term, propagated back through the output weights.
        delta_hid = (delta_out @ self.w_output.T) * hidden * (1 - hidden)
        # Steps that *reduce* the squared error (the (t - o) factor already
        # carries the descent sign), summed over the batch.
        steps = (
            inputs.T @ delta_hid,   # for w_hidden
            delta_hid.sum(axis=0),  # for b_hidden
            hidden.T @ delta_out,   # for w_output
            delta_out.sum(axis=0),  # for b_output
        )
        params = (self.w_hidden, self.b_hidden, self.w_output, self.b_output)
        for velocity, param, step in zip(self._velocities, params, steps):
            # Classic momentum: v = momentum*v + lr*step; p += v (in place
            # so the attribute references stay valid).
            velocity *= momentum
            velocity += learning_rate * step
            param += velocity
```
4. 分割数据集和训练模型:
```python
def split_data(X, y, test_ratio=0.2):
    """Partition (X, y) into train and test sets.

    A fixed random_state keeps the split reproducible between runs.

    Returns:
        (X_train, X_test, y_train, y_test) tuple.
    """
    split = train_test_split(X, y, test_size=test_ratio, random_state=42)
    train_X, test_X, train_y, test_y = split
    return train_X, test_X, train_y, test_y
def train_network(network, X_train, y_train, epochs=1000, batch_size=32):
    """Run mini-batch backpropagation over the training set.

    Performs ``epochs`` full passes; within each pass, samples are fed
    to ``network.backpropagation`` in consecutive slices of at most
    ``batch_size`` (the final slice may be shorter).
    """
    n_samples = len(X_train)
    for _ in range(epochs):
        for start in range(0, n_samples, batch_size):
            stop = start + batch_size
            network.backpropagation(X_train[start:stop], y_train[start:stop])
# Train the network on the dataset.
# NOTE(review): X, y, input_size, hidden_layer_size and output_size are not
# defined anywhere in this snippet — they must come from the caller's data
# loading / preprocessing step before these lines can run.
X_train, X_test, y_train, y_test = split_data(X, y)
nn = NeuralNetwork(input_size, hidden_layer_size, output_size)
train_network(nn, X_train, y_train)
```
5. 预测和计算评价指标:
```python
def predict(network, X_test):
    """Return the network's outputs for the held-out feature matrix."""
    return network.feedforward(X_test)
# Compute the predictions once.  The original called predict(nn, X_test)
# five separate times, re-running the full forward pass for every metric.
y_pred = predict(nn, X_test)
r2 = r2_score(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)  # RMSE derived from the already-computed MSE
mape = mean_absolute_percentage_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
print(f"R2 Score: {r2}")
print(f"RMSE: {rmse}")
print(f"MSE: {mse}")
print(f"MAPE: {mape}")
print(f"MAE: {mae}")
```
现在你可以根据你的具体需求调整`input_size`、`hidden_layer_size`、`output_size`,`epochs`和`batch_size`等参数。这个示例没有包含实际的数据读取和预处理,你需要先准备好你的数据。