用 Pytorch 定义MLP模型训练测试模型并记录优秀模型的代码
时间: 2023-06-04 17:07:14 浏览: 258
好的,以下是用 PyTorch 定义 MLP 模型,训练测试模型并记录优秀模型的相关代码:
```
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
# 定义MLP模型
class MLP(nn.Module):
    """Simple two-layer perceptron classifier.

    Args:
        input_dim: number of input features.
        hidden_dim: width of the single hidden layer.
        output_dim: number of classes; ``forward`` returns raw logits.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # BUG FIX: the original applied softmax here and the script then fed
        # the result to nn.CrossEntropyLoss, which applies log-softmax itself.
        # The double softmax flattens gradients and stalls training.
        # CrossEntropyLoss expects raw logits, so return them directly;
        # argmax-based predictions are unaffected (softmax is monotonic).
        x = nn.functional.relu(self.fc1(x))
        return self.fc2(x)
# Training loop for a single epoch.
def train(model, train_dataloader, criterion, optimizer):
    """Run one optimization pass over ``train_dataloader``.

    Returns the mean per-batch loss for the epoch.
    """
    model.train()
    running_loss = 0.0
    for inputs, targets in train_dataloader:
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    return running_loss / len(train_dataloader)
# 定义测试函数
def test(model, test_dataloader, criterion):
model.eval()
test_loss = 0
num_correct = 0
with torch.no_grad():
for i, (inputs, targets) in enumerate(test_dataloader):
outputs = model(inputs)
test_loss += criterion(outputs, targets).item()
_, predicted = torch.max(outputs.data, 1)
num_correct += (predicted == targets).sum().item()
test_loss = test_loss / len(test_dataloader)
accuracy = num_correct / len(test_dataloader.dataset)
return test_loss, accuracy
# Full training driver.
def train_process(model, train_dataloader, test_dataloader, criterion, optimizer, n_epochs, save_path):
    """Train for ``n_epochs``, checkpointing to ``save_path`` whenever the
    test loss reaches a new minimum."""
    best_test_loss = float("inf")
    for epoch in range(n_epochs):
        tr_loss = train(model, train_dataloader, criterion, optimizer)
        te_loss, acc = test(model, test_dataloader, criterion)
        print("Epoch: {}, Train Loss: {:.6f}, Test Loss: {:.6f}, Accuracy: {:.6f}".format(epoch, tr_loss, te_loss, acc))
        if te_loss >= best_test_loss:
            continue  # no improvement -> keep previous checkpoint
        torch.save(model.state_dict(), save_path)
        print("Save model, test loss: {:.6f}".format(te_loss))
        best_test_loss = te_loss
# In-memory dataset over parallel feature/label sequences.
class MyDataset(torch.utils.data.Dataset):
    """Wraps feature and label containers (lists, numpy arrays or tensors).

    ``__getitem__`` yields ``(float32 feature tensor, 0-dim int64 label)``.
    """

    def __init__(self, inputs, targets):
        self.inputs = inputs
        self.targets = targets

    def __len__(self):
        return len(self.targets)

    def __getitem__(self, idx):
        # as_tensor accepts lists/arrays/tensors alike and avoids the
        # legacy torch.FloatTensor(...) constructor (also: don't shadow
        # the builtin `input`).
        features = torch.as_tensor(self.inputs[idx], dtype=torch.float32)
        # BUG FIX: the original returned torch.LongTensor([t]) -- shape
        # (1,) -- which the default collate turns into (batch, 1) targets
        # and makes nn.CrossEntropyLoss fail against (batch, num_classes)
        # logits.  A 0-dim long tensor collates to the expected (batch,).
        label = torch.as_tensor(self.targets[idx], dtype=torch.long)
        return features, label
# ---- Hyper-parameters ----
input_dim, hidden_dim, output_dim = 10, 100, 2
n_epochs = 10
batch_size = 64
learning_rate = 0.001

# ---- Model ----
model = MLP(input_dim, hidden_dim, output_dim)

# ---- Synthetic data: random features, random binary labels ----
train_inputs = torch.randn(1000, input_dim)
train_targets = (torch.rand(1000) * 2).long()
test_inputs = torch.randn(100, input_dim)
test_targets = (torch.rand(100) * 2).long()

train_dataset = MyDataset(train_inputs, train_targets)
test_dataset = MyDataset(test_inputs, test_targets)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# ---- Loss and optimizer ----
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# ---- Run training; the best checkpoint is written to model.pth ----
train_process(model, train_dataloader, test_dataloader, criterion, optimizer, n_epochs, "model.pth")
```
上述代码中,我们定义了一个简单的 MLP 模型,以及训练和测试函数。随后我们设定了若干超参数(如学习率、训练轮数等),再初始化数据、损失函数和优化器,依次进入训练和测试循环;每当测试损失创下新低时,就把当前模型保存到 "model.pth"。
希望这个回答对您有所帮助!
阅读全文