写一个用jupyter实现LSTM预测的代码,用自己的含4种特征的1000*4的数据集,要实现训练集和测试集都有好的预测结果,要有评价指标,有对比图,要同LSTM自编码器预测的方法作比较
时间: 2023-06-26 20:03:20 浏览: 84
Pytorch框架实现基于多特征CNN-LSTM时间序列预测python源码+数据集(可作毕业设计).zip
5星 · 资源好评率100%
好的,我可以为您提供一个基础的LSTM预测代码,但是您需要自己将其修改为适合您的数据集和特征。以下是代码:
```python
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
# Load the raw dataset (expected: ~1000 rows x 4 feature columns).
dataset = pd.read_csv('your_dataset.csv')
# Work with the underlying numpy array from here on.
data = dataset.values
# Scale every feature into [0, 1]; the same scaler is reused later to
# map predictions back into original units.
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data)
# Chronological 80/20 split into training and test partitions.
train_size = int(len(scaled_data) * 0.8)
test_size = len(scaled_data) - train_size
train_data = scaled_data[:train_size]
test_data = scaled_data[train_size:]
def create_dataset(dataset, look_back=1):
    """Turn a 2-D series into supervised windows.

    Each sample is a window of `look_back` consecutive rows (all
    features); its label is the first feature of the row immediately
    after the window.  NOTE: the loop stops at len - look_back - 1, so
    the final usable window is skipped — this mirrors the classic
    tutorial code, and the plot offsets further down depend on this
    exact sample count.
    """
    n_windows = len(dataset) - look_back - 1
    windows = [dataset[start:start + look_back, :] for start in range(n_windows)]
    labels = [dataset[start + look_back, 0] for start in range(n_windows)]
    return np.array(windows), np.array(labels)
look_back = 10
trainX, trainY = create_dataset(train_data, look_back)
testX, testY = create_dataset(test_data, look_back)
# create_dataset already yields (samples, look_back, n_features); the
# explicit reshape just documents the layout the LSTM expects.
trainX = np.reshape(trainX, (trainX.shape[0], look_back, trainX.shape[2]))
testX = np.reshape(testX, (testX.shape[0], look_back, testX.shape[2]))

# Two stacked LSTM layers with a single-unit regression head that
# predicts the (scaled) first feature one step ahead.
model = Sequential()
model.add(LSTM(50, input_shape=(look_back, train_data.shape[1]), return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=64, verbose=2)

train_predict = model.predict(trainX)
test_predict = model.predict(testX)

def invert_target(values):
    """Map scaled target values (feature column 0) back to original units.

    The scaler was fitted on all 4 feature columns, so it cannot be
    applied to a single-column array directly (the original code raised
    a shape-mismatch error here).  Pad the values into column 0 of a
    zero matrix, invert, and return column 0 as an (n, 1) array.
    """
    values = np.asarray(values).reshape(-1)
    padded = np.zeros((values.shape[0], scaled_data.shape[1]))
    padded[:, 0] = values
    return scaler.inverse_transform(padded)[:, [0]]

train_predict = invert_target(train_predict)
trainY = invert_target(trainY)
test_predict = invert_target(test_predict)
testY = invert_target(testY)

# Root-mean-squared error on the un-scaled target.
train_rmse = np.sqrt(np.mean(np.power(trainY - train_predict, 2)))
test_rmse = np.sqrt(np.mean(np.power(testY - test_predict, 2)))
print('Train RMSE: %.2f' % (train_rmse))
print('Test RMSE: %.2f' % (test_rmse))
# Visual comparison of predictions vs. the real first feature.
import matplotlib.pyplot as plt

# NaN-padded 1-D series so each prediction lands at the time index it
# refers to.  The original code assigned single-column predictions into
# all 4 columns of a (len, 4) buffer, which fails with a shape mismatch.
train_predict_plot = np.full(len(data), np.nan)
train_predict_plot[look_back:look_back + len(train_predict)] = np.asarray(train_predict).reshape(-1)

test_predict_plot = np.full(len(data), np.nan)
test_start = len(train_predict) + (look_back * 2) + 1
test_predict_plot[test_start:test_start + len(test_predict)] = np.asarray(test_predict).reshape(-1)

# Plot the raw first feature directly: `data` is un-scaled, so passing
# it through scaler.inverse_transform (as the original did) would
# distort it rather than recover it.
plt.plot(data[:, 0])
plt.plot(train_predict_plot)
plt.plot(test_predict_plot)
plt.show()
```
对于评价指标,我们采用RMSE(均方根误差)来评估模型的预测效果。同时,我们还可以将LSTM预测和LSTM自编码器预测的结果进行比较,并画出对比图。
以下是修改后的代码:
```python
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
# Load the raw dataset (expected: ~1000 rows x 4 feature columns).
dataset = pd.read_csv('your_dataset.csv')
# Work with the underlying numpy array from here on.
data = dataset.values
# Scale every feature into [0, 1]; the same scaler is reused later to
# map predictions back into original units.
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data)
# Chronological 80/20 split into training and test partitions.
train_size = int(len(scaled_data) * 0.8)
test_size = len(scaled_data) - train_size
train_data = scaled_data[:train_size]
test_data = scaled_data[train_size:]
def create_dataset(dataset, look_back=1):
    """Turn a 2-D series into supervised windows.

    Each sample is a window of `look_back` consecutive rows (all
    features); its label is the first feature of the row immediately
    after the window.  NOTE: the loop stops at len - look_back - 1, so
    the final usable window is skipped — this mirrors the classic
    tutorial code, and the plot offsets further down depend on this
    exact sample count.
    """
    n_windows = len(dataset) - look_back - 1
    windows = [dataset[start:start + look_back, :] for start in range(n_windows)]
    labels = [dataset[start + look_back, 0] for start in range(n_windows)]
    return np.array(windows), np.array(labels)
look_back = 10
trainX, trainY = create_dataset(train_data, look_back)
testX, testY = create_dataset(test_data, look_back)
# create_dataset already yields (samples, look_back, n_features); the
# explicit reshape just documents the layout the LSTM expects.
trainX = np.reshape(trainX, (trainX.shape[0], look_back, trainX.shape[2]))
testX = np.reshape(testX, (testX.shape[0], look_back, testX.shape[2]))

# Two stacked LSTM layers with a single-unit regression head that
# predicts the (scaled) first feature one step ahead.
model = Sequential()
model.add(LSTM(50, input_shape=(look_back, train_data.shape[1]), return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=64, verbose=2)

train_predict = model.predict(trainX)
test_predict = model.predict(testX)

def invert_target(values):
    """Map scaled target values (feature column 0) back to original units.

    The scaler was fitted on all 4 feature columns, so it cannot be
    applied to a single-column array directly (the original code raised
    a shape-mismatch error here).  Pad the values into column 0 of a
    zero matrix, invert, and return column 0 as an (n, 1) array.
    """
    values = np.asarray(values).reshape(-1)
    padded = np.zeros((values.shape[0], scaled_data.shape[1]))
    padded[:, 0] = values
    return scaler.inverse_transform(padded)[:, [0]]

train_predict = invert_target(train_predict)
trainY = invert_target(trainY)
test_predict = invert_target(test_predict)
testY = invert_target(testY)

# Root-mean-squared error on the un-scaled target.
train_rmse = np.sqrt(np.mean(np.power(trainY - train_predict, 2)))
test_rmse = np.sqrt(np.mean(np.power(testY - test_predict, 2)))
print('LSTM Train RMSE: %.2f' % (train_rmse))
print('LSTM Test RMSE: %.2f' % (test_rmse))
# Visual comparison of LSTM predictions vs. the real first feature.
import matplotlib.pyplot as plt

# NaN-padded 1-D series so each prediction lands at the time index it
# refers to.  The original code assigned single-column predictions into
# all 4 columns of a (len, 4) buffer, which fails with a shape mismatch.
train_predict_plot = np.full(len(data), np.nan)
train_predict_plot[look_back:look_back + len(train_predict)] = np.asarray(train_predict).reshape(-1)

test_predict_plot = np.full(len(data), np.nan)
test_start = len(train_predict) + (look_back * 2) + 1
test_predict_plot[test_start:test_start + len(test_predict)] = np.asarray(test_predict).reshape(-1)

# Plot the raw first feature directly: `data` is un-scaled, so passing
# it through scaler.inverse_transform (as the original did) would
# distort it rather than recover it.
plt.plot(data[:, 0])
plt.plot(train_predict_plot)
plt.plot(test_predict_plot)
plt.title('LSTM Prediction vs Real Data')
plt.show()
# LSTM autoencoder: encode each window into a fixed vector, then decode
# it back into the full (look_back, n_features) sequence.
#
# The original architecture ended in Dense(look_back) after a
# non-sequence LSTM, producing (samples, look_back) — incompatible with
# the fit target trainX of shape (samples, look_back, n_features), so
# training failed.  A seq2seq layout with RepeatVector/TimeDistributed
# reconstructs the input shape exactly.
from keras.layers import RepeatVector, TimeDistributed

n_features = train_data.shape[1]
autoencoder = Sequential()
autoencoder.add(LSTM(50, input_shape=(look_back, n_features)))          # encoder -> latent vector
autoencoder.add(RepeatVector(look_back))                                # replicate for each time step
autoencoder.add(LSTM(50, return_sequences=True))                        # decoder
autoencoder.add(TimeDistributed(Dense(n_features)))                     # per-step reconstruction
autoencoder.compile(loss='mean_squared_error', optimizer='adam')
# Self-supervised training: reconstruct the input windows.
autoencoder.fit(trainX, trainX, epochs=100, batch_size=64, verbose=2)

train_predict_autoencoder = autoencoder.predict(trainX)
test_predict_autoencoder = autoencoder.predict(testX)

def invert_sequences(seq):
    """Undo the MinMax scaling on 3-D (samples, look_back, features) arrays.

    The scaler only accepts 2-D input with the fitted feature count, so
    flatten the time axis, invert, and restore the original shape.
    """
    seq = np.asarray(seq)
    flat = seq.reshape(-1, seq.shape[-1])
    return scaler.inverse_transform(flat).reshape(seq.shape)

train_predict_autoencoder = invert_sequences(train_predict_autoencoder)
test_predict_autoencoder = invert_sequences(test_predict_autoencoder)

# Reconstruction RMSE against the (un-scaled) input windows.  The
# original compared against train_data/test_data, whose lengths do not
# match the windowed predictions.
train_rmse_autoencoder = np.sqrt(np.mean(np.power(invert_sequences(trainX) - train_predict_autoencoder, 2)))
test_rmse_autoencoder = np.sqrt(np.mean(np.power(invert_sequences(testX) - test_predict_autoencoder, 2)))
print('LSTM Autoencoder Train RMSE: %.2f' % (train_rmse_autoencoder))
print('LSTM Autoencoder Test RMSE: %.2f' % (test_rmse_autoencoder))
# Overlay the LSTM one-step predictions and the autoencoder window
# reconstructions on the real first feature.  The original code
# assigned windowed autoencoder output into all 4 columns of a
# (len, 4) buffer, which fails with a shape mismatch.

def _last_step_series(windows):
    """Reduce per-window reconstructions to one value per time step.

    Takes feature 0 of the last timestep of each reconstructed window;
    window i's last step corresponds to the time index just before the
    matching LSTM prediction, so the series is offset by look_back - 1.
    """
    arr = np.atleast_3d(np.asarray(windows))  # -> (n, look_back, features)
    return arr[:, -1, 0]

ae_train_series = _last_step_series(train_predict_autoencoder)
train_predict_autoencoder_plot = np.full(len(data), np.nan)
train_start = look_back - 1
train_predict_autoencoder_plot[train_start:train_start + len(ae_train_series)] = ae_train_series

ae_test_series = _last_step_series(test_predict_autoencoder)
test_predict_autoencoder_plot = np.full(len(data), np.nan)
# Test windows start at the train/test boundary; align their last steps.
ae_test_start = len(ae_train_series) + (look_back * 2)
test_predict_autoencoder_plot[ae_test_start:ae_test_start + len(ae_test_series)] = ae_test_series

# `data` is already in original units; plot its first feature directly
# (the original wrongly ran it through scaler.inverse_transform).
plt.plot(data[:, 0])
plt.plot(train_predict_plot)
plt.plot(train_predict_autoencoder_plot)
plt.title('LSTM vs LSTM Autoencoder (Train Set)')
plt.legend(['Real Data', 'LSTM Prediction', 'LSTM Autoencoder Prediction'], loc='upper left')
plt.show()

plt.plot(data[:, 0])
plt.plot(test_predict_plot)
plt.plot(test_predict_autoencoder_plot)
plt.title('LSTM vs LSTM Autoencoder (Test Set)')
plt.legend(['Real Data', 'LSTM Prediction', 'LSTM Autoencoder Prediction'], loc='upper left')
plt.show()
```
在上面的代码中,我们首先定义了一个LSTM模型,并将其用于预测。然后,我们定义了一个LSTM自编码器模型,并将其用于与LSTM模型进行比较。最后,我们计算了LSTM模型和LSTM自编码器模型的RMSE,并画出了预测结果的对比图。
希望这个代码能够帮助到您。如果您有任何问题,请随时联系我。
阅读全文