python实现lstm预测模型
时间: 2023-09-03 15:12:06 浏览: 115
以下为一个简单的LSTM预测模型示例:
```python
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
# Load the raw time series from disk.
data = pd.read_csv('data.csv')

# Scale the 'value' column into [0, 1] so the LSTM trains stably.
scaler = MinMaxScaler()
data['scaled'] = scaler.fit_transform(data['value'].values.reshape(-1, 1))

# Chronological 80/20 split into training and test partitions
# (no shuffling — order matters for a time series).
train_size = int(len(data) * 0.8)
test_size = len(data) - train_size
train, test = data.iloc[:train_size], data.iloc[train_size:]
print(train.shape, test.shape)
# Build supervised-learning samples from a univariate series: each input is
# `look_back` consecutive values and the target is the value that follows.
def create_dataset(dataset, look_back=1):
    """Slice a (n, 1) array into sliding windows for sequence prediction.

    Args:
        dataset: 2-D array of shape (n, 1) holding the scaled series.
        look_back: number of past time steps used as input features.

    Returns:
        (X, Y): X has shape (n - look_back, look_back); Y has shape
        (n - look_back,), where Y[i] is the value following window X[i].
    """
    X, Y = [], []
    # The original loop ran to len(dataset) - look_back - 1, which silently
    # dropped the last usable window (off-by-one). This range uses them all.
    for i in range(len(dataset) - look_back):
        X.append(dataset[i:i + look_back, 0])
        Y.append(dataset[i + look_back, 0])
    return np.array(X), np.array(Y)
# Turn each scaled split into supervised (window, next-value) samples and
# add the trailing feature axis Keras expects: (samples, timesteps, features).
look_back = 3

def _to_windows(frame):
    # One-line purpose: window a split's scaled column and shape it for the LSTM.
    xs, ys = create_dataset(frame['scaled'].values.reshape(-1, 1), look_back)
    return xs.reshape(-1, look_back, 1), ys

train_X, train_Y = _to_windows(train)
test_X, test_Y = _to_windows(test)
# Single-layer LSTM regressor: 50 recurrent units feeding one linear output.
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(50, input_shape=(look_back, 1)))
model.add(tf.keras.layers.Dense(1))

# Mean-squared-error loss with the Adam optimizer.
model.compile(loss='mean_squared_error', optimizer='adam')

# Fit on the training windows; batch_size=1 trains one sample at a time.
model.fit(train_X, train_Y, epochs=100, batch_size=1, verbose=2)
# Generate in-sample and out-of-sample predictions.
train_predict = model.predict(train_X)
test_predict = model.predict(test_X)

# Undo the MinMax scaling so errors are reported in the original units.
# BUG FIX: the original wrapped the targets as [train_Y], producing shape
# (1, n); subtracting that from the (n, 1) predictions broadcast to an
# (n, n) matrix, so both printed RMSEs were wrong. Inverse-transform the
# targets as column vectors instead, matching the predictions' shape.
train_predict = scaler.inverse_transform(train_predict)
train_Y = scaler.inverse_transform(train_Y.reshape(-1, 1))
test_predict = scaler.inverse_transform(test_predict)
test_Y = scaler.inverse_transform(test_Y.reshape(-1, 1))

# Root-mean-squared error on each split, in original units.
train_score = np.sqrt(np.mean((train_predict - train_Y) ** 2))
test_score = np.sqrt(np.mean((test_predict - test_Y) ** 2))
print('Train Score: %.2f RMSE' % (train_score))
print('Test Score: %.2f RMSE' % (test_score))
```
阅读全文