for epoch in range(100): for batch in dataloader: optimizer.zero_grad() x = batch[:, :-1, :] y = batch[:, 1:, :] raises TypeError: list indices must be integers or slices, not tuple
Posted: 2024-03-09 15:46:03 · Views: 151
This error means that `batch` is a Python list (or tuple) rather than a tensor: multi-dimensional indexing such as `batch[:, :-1, :]` is only supported by tensors and NumPy arrays, not by plain Python sequences, and `list(batch)` would fail in exactly the same way. It typically happens when the dataset's `__getitem__` returns a list or tuple, so the DataLoader's default collate hands you a list of tensors. Stack the batch into a single tensor before slicing, for example:
```python
for epoch in range(100):
    for batch in dataloader:
        optimizer.zero_grad()
        # If the DataLoader yields a list/tuple of equally shaped tensors,
        # stack them into one (batch, seq_len, features) tensor first
        if isinstance(batch, (list, tuple)):
            batch = torch.stack(batch)
        x = batch[:, :-1, :]
        y = batch[:, 1:, :]
```
Alternatively, have the DataLoader return a tensor directly by passing a `collate_fn` that stacks the samples:
```python
dataloader = DataLoader(
    dataset,
    batch_size=32,
    collate_fn=lambda samples: torch.stack([torch.as_tensor(s) for s in samples]),
)
```
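For reference, here is a minimal standalone reproduction of the failure and the fix; the shapes are made up for illustration, and no DataLoader is needed:
```python
import torch

# Two sequence samples, as a DataLoader might yield them in a list
batch = [torch.zeros(10, 4), torch.zeros(10, 4)]

try:
    batch[:, :-1, :]  # tuple indexing on a plain list
except TypeError as e:
    print(e)  # list indices must be integers or slices, not tuple

stacked = torch.stack(batch)  # shape (2, 10, 4)
x = stacked[:, :-1, :]        # works: tensors support multi-dimensional indexing
y = stacked[:, 1:, :]
print(x.shape, y.shape)       # torch.Size([2, 9, 4]) torch.Size([2, 9, 4])
```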
Related questions
```python
for epoch in range(num_epochs):
    model.train()
    train_loss = 0
    train_mae = 0
    for batch_data, batch_target in train_dataloader:
        optimizer.zero_grad()
        output = model(batch_data)  # forward pass
        loss = loss_fn(output, batch_target)  # compute the loss
        train_loss += loss.item()
        mae = mean_absolute_error(output.detach().cpu().numpy(), batch_target.detach().cpu().numpy())  # compute MAE
        train_mae += mae
        loss.backward()  # backpropagate
        optimizer.step()  # update the parameters
    train_loss /= len(train_dataloader)
    train_mae /= len(train_dataloader)
    model.eval()  # switch to evaluation mode
    eval_loss = 0
    eval_mae = 0
    with torch.no_grad():
        for batch_data, batch_target in eval_dataloader:
            output = model(batch_data)
            loss = loss_fn(output, batch_target)
            eval_loss += loss.item()
            mae = mean_absolute_error(output.detach().cpu().numpy(), batch_target.detach().cpu().numpy())
            eval_mae += mae
    eval_loss /= len(eval_dataloader)
    eval_mae /= len(eval_dataloader)
    print(f"Epoch {epoch+1}/{num_epochs}, Train Loss: {train_loss:.4f}, Train MAE: {train_mae:.4f}, Eval Loss: {eval_loss:.4f}, Eval MAE: {eval_mae:.4f}")
    if eval_loss < best_eval_loss:
        best_eval_loss = eval_loss
        torch.save(model.state_dict(), save_path)
        print("Save model successfully!")
```
Add a test-loss computation to this code:
```python
class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size, batch_size, device):
        super().__init__()
        self.device = device
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.num_directions = 1  # unidirectional LSTM
        self.batch_size = batch_size
        self.lstm = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True)
        self.linear = nn.Linear(65536, self.output_size)

    def forward(self, input_seq):
        h_0 = torch.randn(self.num_directions * self.num_layers, self.batch_size, self.hidden_size).to(self.device)
        c_0 = torch.randn(self.num_directions * self.num_layers, self.batch_size, self.hidden_size).to(self.device)
        output, _ = self.lstm(input_seq, (h_0, c_0))
        pred = self.linear(output.contiguous().view(self.batch_size, -1))
        return pred


if __name__ == '__main__':
    # load previously saved model weights
    saved_model_path = '/content/drive/MyDrive/危急值/model/dangerous.pth'
    device = 'cuda:0'
    lstm_model = LSTM(input_size=1, hidden_size=64, num_layers=1, output_size=3, batch_size=256, device='cuda:0').to(device)
    state_dict = torch.load(saved_model_path)
    lstm_model.load_state_dict(state_dict)
    dataset = ECGDataset(X_train_df.to_numpy())
    dataloader = DataLoader(dataset, batch_size=256, shuffle=True, num_workers=0, drop_last=True)
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.SGD(lstm_model.parameters(), lr=1e-4)
    for epoch in range(200000):
        print(f'epoch:{epoch}')
        lstm_model.train()
        epoch_bar = tqdm(dataloader)
        for x, y in epoch_bar:
            optimizer.zero_grad()
            x_out = lstm_model(x.to(device).type(torch.cuda.FloatTensor))
            loss = loss_fn(x_out, y.long().to(device))
            loss.backward()
            epoch_bar.set_description(f'loss:{loss.item():.4f}')
            optimizer.step()
        if epoch % 100 == 0 or epoch == epoch - 1:
            torch.save(lstm_model.state_dict(), "/content/drive/MyDrive/危急值/model/dangerous.pth")
            print("Weights saved")
```
Here is the code with the test-loss feature added:
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm

# ECGDataset, X_train_df and X_test_df are assumed to be defined elsewhere, as in the question.


class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size, batch_size, device):
        super().__init__()
        self.device = device
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.num_directions = 1  # unidirectional LSTM
        self.batch_size = batch_size
        self.lstm = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True)
        self.linear = nn.Linear(65536, self.output_size)

    def forward(self, input_seq):
        # Use the actual batch size so the last (possibly smaller) test batch works,
        # since the test loader is created with drop_last=False.
        batch_size = input_seq.size(0)
        h_0 = torch.randn(self.num_directions * self.num_layers, batch_size, self.hidden_size).to(self.device)
        c_0 = torch.randn(self.num_directions * self.num_layers, batch_size, self.hidden_size).to(self.device)
        output, _ = self.lstm(input_seq, (h_0, c_0))
        pred = self.linear(output.contiguous().view(batch_size, -1))
        return pred


if __name__ == '__main__':
    # Load the previously saved model weights
    saved_model_path = '/content/drive/MyDrive/危急值/model/dangerous.pth'
    device = 'cuda:0'
    num_epochs = 200000
    lstm_model = LSTM(input_size=1, hidden_size=64, num_layers=1, output_size=3,
                      batch_size=256, device=device).to(device)
    state_dict = torch.load(saved_model_path)
    lstm_model.load_state_dict(state_dict)
    # Load the training dataset
    dataset = ECGDataset(X_train_df.to_numpy())
    dataloader = DataLoader(dataset, batch_size=256, shuffle=True, num_workers=0, drop_last=True)
    # Load the test dataset
    test_dataset = ECGDataset(X_test_df.to_numpy())
    test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False, num_workers=0, drop_last=False)
    # Define the loss function and optimizer
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.SGD(lstm_model.parameters(), lr=1e-4)
    # Start training
    for epoch in range(num_epochs):
        print(f'epoch:{epoch}')
        lstm_model.train()
        epoch_bar = tqdm(dataloader)
        for x, y in epoch_bar:
            optimizer.zero_grad()
            x_out = lstm_model(x.to(device).float())
            loss = loss_fn(x_out, y.long().to(device))
            loss.backward()
            epoch_bar.set_description(f'loss:{loss.item():.4f}')
            optimizer.step()
        # Compute the average loss on the test set
        test_loss = 0.0
        lstm_model.eval()
        with torch.no_grad():
            for x, y in test_loader:
                x_out = lstm_model(x.to(device).float())
                loss = loss_fn(x_out, y.long().to(device))
                test_loss += loss.item() * x.size(0)  # weight by batch size
        test_loss /= len(test_dataset)
        print(f'Test Loss: {test_loss:.4f}')
        if epoch % 100 == 0 or epoch == num_epochs - 1:
            torch.save(lstm_model.state_dict(), "/content/drive/MyDrive/危急值/model/dangerous.pth")
            print("Weights saved")
```
In this code we load the test dataset `test_dataset` and its loader `test_loader` alongside the training data. At the end of each epoch we compute the average loss on the test set: `lstm_model.eval()` switches the model to evaluation mode, and `torch.no_grad()` disables gradient tracking, which saves memory and speeds up the pass. Each batch loss is weighted by the batch size before dividing by `len(test_dataset)`, so the average is exact even though `drop_last=False` lets the final batch be smaller; for the same reason, `forward` now reads the batch size from the input rather than using the fixed `self.batch_size`. The average test loss is printed every epoch, and the weights are still saved every 100 epochs and at the final epoch.
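If you also want to keep only the checkpoint with the lowest test loss, the same pattern as the `best_eval_loss` check in the first snippet applies. A minimal sketch, assuming `best_test_loss` is initialized once before the epoch loop and using a hypothetical `dangerous_best.pth` output path:
```python
best_test_loss = float('inf')  # initialize once, before the epoch loop

# ... inside the epoch loop, after test_loss has been computed ...
if test_loss < best_test_loss:
    best_test_loss = test_loss
    # hypothetical path for the best checkpoint, separate from the periodic save
    torch.save(lstm_model.state_dict(), "/content/drive/MyDrive/危急值/model/dangerous_best.pth")
    print(f"New best test loss {best_test_loss:.4f}; checkpoint saved")
```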