Using Python, write a BP network that uses Xavier initialization, a regularization method, and MSGD (8 inputs, 1 output, 5 hidden nodes), and write a main that feeds the training set into the network for training and, after training, feeds the test set in to obtain the predicted outputs. The training and test data are both stored in the same sheet of the same xlsx file. In the main module, plot an R2 chart to show the model's goodness of fit, output the MAE, the MSE, and the mean relative error percentage between the predicted and true values, and plot fitted line charts for each input and the output; the data used in the R2 chart and the line charts should all be untransformed.
OK, here is one possible implementation; it uses the pandas, numpy, sklearn, and matplotlib libraries:
```python
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
# Xavier (Glorot) initialization
def xavier_init(in_dim, out_dim):
    xavier_stddev = np.sqrt(2.0 / (in_dim + out_dim))
    return np.random.normal(0, xavier_stddev, (in_dim, out_dim))
# L2 regularization penalty
def l2_regularization(lamda, w1, w2):
    return lamda * (np.sum(w1 ** 2) + np.sum(w2 ** 2))
# Sigmoid activation
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
# BP network class
class BPNet:
    def __init__(self, input_dim, hidden_dim, output_dim, lr, lamda, momentum=0.9):
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.lr = lr
        self.lamda = lamda
        self.momentum = momentum
        self.w1 = xavier_init(self.input_dim, self.hidden_dim)
        self.b1 = np.zeros((1, self.hidden_dim))
        self.w2 = xavier_init(self.hidden_dim, self.output_dim)
        self.b2 = np.zeros((1, self.output_dim))
        # Velocity buffers for the momentum-SGD (MSGD) updates
        self.vw1 = np.zeros_like(self.w1)
        self.vb1 = np.zeros_like(self.b1)
        self.vw2 = np.zeros_like(self.w2)
        self.vb2 = np.zeros_like(self.b2)
    # Forward pass
    def forward(self, X):
        self.z1 = np.dot(X, self.w1) + self.b1
        self.a1 = sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.w2) + self.b2
        y_pred = self.z2  # linear output layer for regression
        return y_pred
    # Backward pass with L2 regularization and momentum (MSGD) updates
    def backward(self, X, y_true, y_pred):
        m = X.shape[0]
        delta2 = (y_pred - y_true) / m
        dw2 = np.dot(self.a1.T, delta2) + self.lamda * self.w2
        db2 = np.sum(delta2, axis=0, keepdims=True)
        delta1 = np.dot(delta2, self.w2.T) * self.a1 * (1 - self.a1)
        dw1 = np.dot(X.T, delta1) + self.lamda * self.w1
        db1 = np.sum(delta1, axis=0, keepdims=True)
        # Momentum update: v = momentum * v - lr * grad, then w = w + v
        self.vw2 = self.momentum * self.vw2 - self.lr * dw2
        self.vb2 = self.momentum * self.vb2 - self.lr * db2
        self.vw1 = self.momentum * self.vw1 - self.lr * dw1
        self.vb1 = self.momentum * self.vb1 - self.lr * db1
        self.w2 += self.vw2
        self.b2 += self.vb2
        self.w1 += self.vw1
        self.b1 += self.vb1
    # Training loop
    def train(self, X_train, y_train, epochs):
        for i in range(epochs):
            y_pred = self.forward(X_train)
            loss = mean_squared_error(y_train, y_pred) + l2_regularization(self.lamda, self.w1, self.w2)
            self.backward(X_train, y_train, y_pred)
            if i % 100 == 0:
                print("Epoch:", i, "Loss:", loss)
    # Prediction
    def predict(self, X_test):
        y_pred = self.forward(X_test)
        return y_pred
# Read the data (training and test rows share one sheet)
data = pd.read_excel("data.xlsx", sheet_name="Sheet1")
X = data.iloc[:, :8].values
y = data.iloc[:, -1].values.reshape(-1, 1)
# Normalize inputs and output with separate scalers so each can be inverted later
x_scaler = MinMaxScaler()
y_scaler = MinMaxScaler()
X = x_scaler.fit_transform(X)
y = y_scaler.fit_transform(y)
# Split into training and test sets (first 100 rows for training)
X_train, X_test = X[:100], X[100:]
y_train, y_test = y[:100], y[100:]
# Train the model
bpnet = BPNet(input_dim=8, hidden_dim=5, output_dim=1, lr=0.1, lamda=0.01)
bpnet.train(X_train, y_train, epochs=1000)
# Predict, then invert the normalization so metrics and plots use original-scale data
y_pred = bpnet.predict(X_test)
y_test = y_scaler.inverse_transform(y_test)
y_pred = y_scaler.inverse_transform(y_pred)
X_test_orig = x_scaler.inverse_transform(X_test)
# Compute error metrics
r2 = r2_score(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
error_pct = np.mean(np.abs(y_test - y_pred) / np.abs(y_test)) * 100
# R2 (parity) plot: true vs. predicted values on the original scale
plt.plot(y_test, y_test, "r--")
plt.scatter(y_test, y_pred, alpha=0.5)
plt.title(f"R2 Score: {r2:.4f}")
plt.xlabel("True Value")
plt.ylabel("Predicted Value")
plt.show()
# Fitted line charts: each original-scale input vs. true and predicted output
inputs = ["Input 1", "Input 2", "Input 3", "Input 4", "Input 5", "Input 6", "Input 7", "Input 8"]
fig, axs = plt.subplots(nrows=2, ncols=4, figsize=(16, 8))
for i, ax in enumerate(axs.flatten()):
    order = np.argsort(X_test_orig[:, i])
    ax.plot(X_test_orig[order, i], y_test[order, 0], label="True Value")
    ax.plot(X_test_orig[order, i], y_pred[order, 0], label="Predicted Value")
    ax.set_xlabel(inputs[i])
    ax.set_ylabel("Output")
    ax.legend()
plt.tight_layout()
plt.show()
# Print error metrics
print(f"MAE: {mae:.4f}")
print(f"MSE: {mse:.4f}")
print(f"Error Percentage: {error_pct:.2f}%")
```
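The "MSGD" in the question is read here as SGD with momentum. The velocity update that `backward` applies can be written in isolation as the following minimal sketch; the momentum coefficient 0.9 and the toy quadratic objective are illustrative assumptions, not part of the original code:

```python
import numpy as np

def msgd_step(w, grad, v, lr=0.1, momentum=0.9):
    """One momentum-SGD update: v <- momentum * v - lr * grad, then w <- w + v."""
    v = momentum * v - lr * grad  # exponentially decaying accumulation of past gradients
    w = w + v                     # move the parameters along the velocity
    return w, v

# Toy usage: minimize f(w) = w^2, whose gradient is 2w
w, v = np.array([5.0]), np.zeros(1)
for _ in range(200):
    w, v = msgd_step(w, 2 * w, v)
print(w)  # decays (with some oscillation) toward 0
```

The velocity smooths successive gradients, which is why the network above can tolerate the same learning rate as plain gradient descent while converging more steadily.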
Note that MinMaxScaler is used to normalize the input and output data (with a separate scaler for each), so the predictions and true values have to be inverse-transformed before the error metrics are computed and the charts are drawn; the R2 chart and the line charts therefore use the untransformed data. Also, because the data set is small, no further model evaluation such as cross-validation is performed here.
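If a more careful evaluation is wanted despite the small data set, a k-fold cross-validation loop can be wrapped around the same `BPNet` class. Below is a minimal sketch using sklearn's `KFold`, assuming the normalized `X`, `y` and the `y_scaler` from the code above are still in scope:

```python
from sklearn.model_selection import KFold

kf = KFold(n_splits=5, shuffle=True, random_state=0)
r2_scores = []
for train_idx, test_idx in kf.split(X):
    # Re-initialize the network for each fold so no fold sees the others' training data
    net = BPNet(input_dim=8, hidden_dim=5, output_dim=1, lr=0.1, lamda=0.01)
    net.train(X[train_idx], y[train_idx], epochs=1000)
    fold_pred = y_scaler.inverse_transform(net.predict(X[test_idx]))
    fold_true = y_scaler.inverse_transform(y[test_idx])
    r2_scores.append(r2_score(fold_true, fold_pred))
print("Mean R2 over 5 folds:", np.mean(r2_scores))
```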