Rewrite the code after the colon as an nn.Module class:
```python
data1 = pd.read_csv("终极1.csv", usecols=[17], encoding='gb18030')
df = data1.fillna(method='ffill')
data = df.values.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
data = scaler.fit_transform(data)
train_size = int(len(data) * 0.8)
test_size = len(data) - train_size
train, test = data[0:train_size, :], data[train_size:len(data), :]

def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)

look_back = 30
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

model = Sequential()
model.add(LSTM(50, input_shape=(1, look_back), return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=6, batch_size=1, verbose=2)

trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
```
Here is an example of rewriting the code as a PyTorch nn.Module class:
```python
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
class LSTMModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super(LSTMModel, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        # batch_first=True: inputs are (batch, seq_len, input_dim)
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # zero-initialized hidden and cell states for each forward pass
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim, device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim, device=x.device)
        out, (hn, cn) = self.lstm(x, (h0, c0))
        # keep only the last time step and map it to a single output value
        out = self.fc(out[:, -1, :])
        return out
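
# Note: num_layers=2 with hidden_dim=50 mirrors the two stacked LSTM(50) layers
# of the original Keras model, and the final Linear layer plays the role of Dense(1).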
# Read the data
data1 = pd.read_csv("终极1.csv", usecols=[17], encoding='gb18030')
df = data1.ffill()  # forward-fill missing values
data = df.values.reshape(-1, 1)
# Scale to the range (0, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
data = scaler.fit_transform(data)
# Split into training and test sets (80/20)
train_size = int(len(data) * 0.8)
test_size = len(data) - train_size
train, test = data[0:train_size, :], data[train_size:len(data), :]

# Build sliding-window samples: look_back past values -> next value
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)
look_back = 30
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
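# After the reshape, trainX / testX have shape (samples, look_back, 1), i.e.
# (batch, seq_len, features), which is the layout the batch_first LSTM above expects.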
# Train the model
input_dim = 1
hidden_dim = 50
output_dim = 1
num_layers = 2
model = LSTMModel(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# PyTorch works on float32 tensors, not numpy arrays
trainX_t = torch.from_numpy(trainX).float()
trainY_t = torch.from_numpy(trainY).float().unsqueeze(1)  # (N,) -> (N, 1) to match the model output
testX_t = torch.from_numpy(testX).float()

num_epochs = 6
for epoch in range(num_epochs):
    model.train()
    outputs = model(trainX_t)
    optimizer.zero_grad()
    loss = criterion(outputs, trainY_t)
    loss.backward()
    optimizer.step()
    print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))
# Generate predictions
model.eval()
with torch.no_grad():
    trainPredict = model(trainX_t).numpy()
    testPredict = model(testX_t).numpy()
# Undo the MinMax scaling so everything is back in the original units
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
```
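Note that the loop above does full-batch updates, whereas the original Keras call trains with batch_size=1; wrapping trainX_t/trainY_t in a TensorDataset and DataLoader would reproduce mini-batch training if that matters. If you also want an error metric in the original units (not part of the snippet above), a minimal sketch could look like this, assuming the variable names produced by the code above:
```python
import math
from sklearn.metrics import mean_squared_error

# Illustrative follow-up: RMSE after the inverse transform.
# trainY / testY have shape (1, N) after inverse_transform; the predictions have shape (N, 1).
train_rmse = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
test_rmse = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print("Train RMSE: %.3f, Test RMSE: %.3f" % (train_rmse, test_rmse))
```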