x_axis = range(num_epochs) # 绘制折线图 plt.plot(x, test_loss, label='Test Loss') # 添加标题和坐标轴标签 plt.title('Test Loss Curve') plt.xlabel('Epoch') plt.ylabel('Loss') # 显示图例 plt.legend() # 显示图形 plt.show()这串代码不出图
时间: 2023-09-15 13:19:46 浏览: 38
这段代码可能存在问题。请检查以下几个方面:
1. 检查是否导入了必要的库,如matplotlib。
2. 检查变量num_epochs和test_loss是否有值,并且是否正确。特别注意:代码中定义的变量名是x_axis,而plt.plot中使用的却是未定义的x,这会直接抛出NameError导致无法出图,应改为plt.plot(x_axis, test_loss, label='Test Loss')。
3. 检查是否有其他代码导致了plt.show()无法显示图形。可以尝试将plt.show()语句放到代码的最后一行。
4. 检查绘图的数据格式是否正确。如果数据格式不正确,可能会导致图形无法正确绘制。
如果以上方法都无法解决问题,可以尝试使用其他绘图库或者在线绘图工具来绘制图形。
相关问题
用 pycharm 写一个三输入一输出的 BP 神经网络并输出预测值.以输入中的一个值为 X 轴预测值和真实值为 Y 轴画出折线图
好的,以下是一个使用 PyCharm 编写的三输入一输出的 BP 神经网络的代码示例,并绘制预测值和真实值的折线图:
```python
import numpy as np
import matplotlib.pyplot as plt
class NeuralNetwork:
    """A minimal 3-input / 1-output BP (back-propagation) network.

    Architecture: 3 input neurons -> 4 sigmoid hidden neurons -> 1 sigmoid
    output neuron, trained with full-batch gradient descent.
    """

    def __init__(self):
        # Layer sizes: 3 inputs, 4 hidden units, 1 output.
        self.input_neurons = 3
        self.hidden_neurons = 4
        self.output_neurons = 1
        # Random Gaussian weights, zero biases.
        self.weights1 = np.random.randn(self.input_neurons, self.hidden_neurons)
        self.bias1 = np.zeros((1, self.hidden_neurons))
        self.weights2 = np.random.randn(self.hidden_neurons, self.output_neurons)
        self.bias2 = np.zeros((1, self.output_neurons))

    def sigmoid(self, x):
        """Element-wise logistic function 1 / (1 + e^-x)."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        """Sigmoid derivative expressed in terms of the sigmoid *output* x."""
        return x * (1 - x)

    def forward(self, X):
        """Forward pass; caches layer activations for use in backward()."""
        self.hidden_layer = self.sigmoid(np.dot(X, self.weights1) + self.bias1)
        self.output_layer = self.sigmoid(np.dot(self.hidden_layer, self.weights2) + self.bias2)
        return self.output_layer

    def backward(self, X, y, output, learning_rate=1.0):
        """Back-propagate the error and update weights in place.

        learning_rate defaults to 1.0, which reproduces the historical
        behavior of this class (plain unscaled full-gradient steps).
        """
        self.output_error = y - output
        self.output_delta = self.output_error * self.sigmoid_derivative(output)
        self.hidden_error = np.dot(self.output_delta, self.weights2.T)
        self.hidden_delta = self.hidden_error * self.sigmoid_derivative(self.hidden_layer)
        # Gradient-descent updates ("+=" because error is y - output).
        self.weights1 += learning_rate * np.dot(X.T, self.hidden_delta)
        self.bias1 += learning_rate * np.sum(self.hidden_delta, axis=0, keepdims=True)
        self.weights2 += learning_rate * np.dot(self.hidden_layer.T, self.output_delta)
        self.bias2 += learning_rate * np.sum(self.output_delta, axis=0, keepdims=True)

    def train(self, X, y, epochs, learning_rate=1.0):
        """Run `epochs` full-batch forward/backward passes over (X, y)."""
        for _ in range(epochs):
            output = self.forward(X)
            self.backward(X, y, output, learning_rate)

    def predict(self, X):
        """Return the network output for X (forward pass only)."""
        return self.forward(X)
# --- Build the XOR-style data set (3 features, binary target) ---
X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
y = np.array([[0], [1], [1], [0]])

# --- Fit the network on the full batch ---
nn = NeuralNetwork()
nn.train(X, y, epochs=10000)

# --- Predict on a subset and plot truth vs. prediction ---
X_test = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1]])
y_test = np.array([[0], [1], [1]])
y_pred = nn.predict(X_test)

x_axis = np.arange(len(y_test))
plt.plot(x_axis, y_test, 'o-', label='Real Value')
plt.plot(x_axis, y_pred, 'o-', label='Predict Value')
plt.title('Prediction Result')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.show()
```
在这个示例中,我们首先生成了一个包含四个样本的输入矩阵 X 和相应的输出矩阵 y,并使用这些数据训练了神经网络。然后,我们使用包含三个样本的输入矩阵 X_test 进行预测,并将预测结果和真实值绘制在同一个图表中。您可以根据自己的需求修改输入、隐藏和输出层的神经元数量,以及训练次数等参数。
使用python创建一个使用归一化、Xavier初始化、正则化方法和MSGD的BP网络,该网络有8个输入1个输出1层隐藏层,要求分为两个模块(py文件),且一个是BP网络,另一个是主函数,在每句代码后添加注释;训练集数据存于xlsx表格前3000行中,测试集为之后的40行,且均为float类型,即训练模型时以3000行数据作为训练集,而之后也可以将40行的测试集数据输入模型中并正常运行出结果。并且能够输出各输入的权重。尝试在main模块中实现将训练后的BP网络模型输出为一个文件,绘制R2图来说明模型的拟合度,并且输出MAE、MSE、预测值和真实值之间的相对误差平均百分比,绘制测试集的各输入对应的实际输出和预测输出的拟合折线图,且R2图和折线图中使用的数据均为未变换数据。
以下是BP网络的代码:
```
# bp.py
import numpy as np
import pandas as pd
class BP:
    """One-hidden-layer BP network: sigmoid hidden layer, linear output.

    Weights use Xavier-style initialisation (randn / sqrt(fan_in)).
    train() minimises mean-squared error with L2 weight regularisation
    using full-batch gradient descent.
    """

    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Xavier initialisation: scale by 1/sqrt(fan_in); biases start at 0.
        self.W1 = np.random.randn(self.input_size, self.hidden_size) / np.sqrt(self.input_size)
        self.b1 = np.zeros((1, self.hidden_size))
        self.W2 = np.random.randn(self.hidden_size, self.output_size) / np.sqrt(self.hidden_size)
        self.b2 = np.zeros((1, self.output_size))

    def sigmoid(self, x):
        """Element-wise logistic activation 1 / (1 + e^-x)."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        """Sigmoid derivative expressed in terms of the sigmoid output x."""
        return x * (1 - x)

    def train(self, X, y, learning_rate=0.1, epochs=1000, reg_lambda=0.01):
        """Train with full-batch gradient descent on the MSE loss.

        BUG FIX: gradients are now averaged over the batch (divided by the
        number of samples).  The original summed them, so the update
        magnitude grew linearly with the data-set size and learning_rate=0.1
        diverged on the intended 3000-row training set.
        """
        m = X.shape[0]  # batch size, used to average the gradients
        for i in range(epochs):
            # Forward pass: sigmoid hidden layer, linear output.
            z1 = X.dot(self.W1) + self.b1
            a1 = self.sigmoid(z1)
            z2 = a1.dot(self.W2) + self.b2
            y_hat = z2
            # Mean-squared-error loss (reported without the reg term).
            loss = np.mean(np.square(y - y_hat))
            # Backward pass: deltas averaged over the batch.
            delta2 = (y_hat - y) / m
            dW2 = a1.T.dot(delta2)
            db2 = np.sum(delta2, axis=0, keepdims=True)
            delta1 = delta2.dot(self.W2.T) * self.sigmoid_derivative(a1)
            dW1 = X.T.dot(delta1)
            db1 = np.sum(delta1, axis=0)
            # L2 regularisation on the weights only (biases unpenalised).
            dW2 += reg_lambda * self.W2
            dW1 += reg_lambda * self.W1
            # Gradient-descent parameter update.
            self.W2 -= learning_rate * dW2
            self.b2 -= learning_rate * db2
            self.W1 -= learning_rate * dW1
            self.b1 -= learning_rate * db1
            if i % 100 == 0:
                print("Epoch: {0}, Loss: {1}".format(i, loss))

    def predict(self, X):
        """Return network outputs for X (forward pass, no updates)."""
        z1 = X.dot(self.W1) + self.b1
        a1 = self.sigmoid(z1)
        z2 = a1.dot(self.W2) + self.b2
        return z2

    def get_weights(self):
        """Return (W1, W2): input->hidden and hidden->output weights."""
        return self.W1, self.W2
```
以下是主函数的代码:
```
# main.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from bp import BP

# --- Load data: first 3000 rows = training set, the rest = test set ---
data = pd.read_excel('data.xlsx', header=None)
train_data = data.iloc[:3000, :]
test_data = data.iloc[3000:, :]

# Split features (all columns but the last) from the target (last column).
train_X_raw = train_data.iloc[:, :-1].values
train_y = train_data.iloc[:, -1].values.reshape(-1, 1)
test_X_raw = test_data.iloc[:, :-1].values
test_y = test_data.iloc[:, -1].values.reshape(-1, 1)

# --- Min-max normalisation ---
# BUG FIX: the original code normalised the arrays in place and later
# "de-normalised" with min/max computed on the *already normalised* data
# (whose range is exactly [0, 1]), so the inverse transform was a no-op
# and no value ever returned to the original scale.  Keep the raw training
# statistics first, and scale the test set with the TRAINING statistics so
# it goes through the same transform the model was trained on.
X_min, X_max = np.min(train_X_raw, axis=0), np.max(train_X_raw, axis=0)
y_min, y_max = np.min(train_y, axis=0), np.max(train_y, axis=0)
train_X = (train_X_raw - X_min) / (X_max - X_min)
train_y_norm = (train_y - y_min) / (y_max - y_min)
test_X = (test_X_raw - X_min) / (X_max - X_min)

# --- Build and train the BP network (8 inputs -> 5 hidden -> 1 output) ---
input_size = 8
hidden_size = 5
output_size = 1
bp = BP(input_size, hidden_size, output_size)
bp.train(train_X, train_y_norm, learning_rate=0.1, epochs=1000, reg_lambda=0.01)

# Save / reload the trained weights (tuple of arrays -> needs allow_pickle).
np.save('bp_weights.npy', bp.get_weights())
W1, W2 = np.load('bp_weights.npy', allow_pickle=True)
# Output the weight of each input as requested.
print("Input weights (W1):\n", W1)

# --- Predict and map the outputs back to the original scale of y ---
train_pred = bp.predict(train_X) * (y_max - y_min) + y_min
test_pred = bp.predict(test_X) * (y_max - y_min) + y_min
# train_y / test_y were never overwritten above, so they are raw values.

# --- Error metrics on un-transformed data ---
train_mae = np.mean(np.abs(train_pred - train_y))
test_mae = np.mean(np.abs(test_pred - test_y))
train_mse = np.mean(np.square(train_pred - train_y))
test_mse = np.mean(np.square(test_pred - test_y))
# Mean relative error in percent (undefined where the true value is 0).
train_rpe = np.mean(np.abs((train_pred - train_y) / train_y)) * 100
test_rpe = np.mean(np.abs((test_pred - test_y) / test_y)) * 100
print("Train MAE: {0:.4f}, Test MAE: {1:.4f}".format(train_mae, test_mae))
print("Train MSE: {0:.4f}, Test MSE: {1:.4f}".format(train_mse, test_mse))
print("Train RPE: {0:.4f}%, Test RPE: {1:.4f}%".format(train_rpe, test_rpe))

# --- Coefficient of determination (R2) on un-transformed data ---
train_r2 = 1 - np.sum(np.square(train_pred - train_y)) / np.sum(np.square(train_y - np.mean(train_y)))
test_r2 = 1 - np.sum(np.square(test_pred - test_y)) / np.sum(np.square(test_y - np.mean(test_y)))
plt.figure()
plt.bar(['Train', 'Test'], [train_r2, test_r2])
plt.title("R2")
plt.show()

# --- Per-input fit plot: true vs. predicted output against each raw input ---
# BUG FIX: the original loop drew the same (index, y) curves in all 8
# subplots; plot against each raw input column (sorted for a readable line)
# so the panels actually differ per input.
plt.figure()
for i in range(test_X_raw.shape[1]):
    plt.subplot(2, 4, i + 1)
    order = np.argsort(test_X_raw[:, i])
    plt.plot(test_X_raw[order, i], test_y[order], label="True")
    plt.plot(test_X_raw[order, i], test_pred[order], label="Pred")
    plt.title("Input {}".format(i + 1))
    plt.legend()
plt.show()
```