Code for defining and training an MLP model with PyTorch
Below is an example of code that defines and trains an MLP model with PyTorch:
```
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

class MLP(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

# define hyperparameters
input_size = 784    # image size 28x28=784
hidden_size = 128
output_size = 10    # 10 classes (0-9)
learning_rate = 0.001
batch_size = 64
num_epochs = 10

# use GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# load data
train_data = ...
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)

# define model and move it to the chosen device
model = MLP(input_size, hidden_size, output_size).to(device)

# define loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# train model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # flatten images and move tensors to the same device as the model
        images = images.reshape(-1, input_size).to(device)
        labels = labels.to(device)

        # forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # backward pass and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # print status
        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))

print('Training finished.')
```
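The data-loading step is left as `train_data = ...` above. As a minimal sketch, assuming the MNIST dataset from torchvision (the 28x28 images and 10 classes in the hyperparameters suggest it), the data could be loaded as shown below; the `./data` path and the test-set evaluation loop are illustrative additions, not part of the original answer.
```
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# assumption: MNIST via torchvision, downloaded to './data'
transform = transforms.ToTensor()  # converts PIL images to float tensors in [0, 1]
train_data = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_data = datasets.MNIST(root='./data', train=False, download=True, transform=transform)

train_loader = DataLoader(dataset=train_data, batch_size=64, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=False)

# after training with the code above, accuracy on the test set can be checked like this
# (reuses the model, device, and input_size defined in the training script)
model.eval()
correct, total = 0, 0
with torch.no_grad():
    for images, labels in test_loader:
        images = images.reshape(-1, 784).to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)  # index of the highest logit is the predicted class
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Test accuracy: {:.2f}%'.format(100 * correct / total))
```
Calling `model.eval()` and wrapping the loop in `torch.no_grad()` disables gradient tracking during evaluation, which saves memory and makes inference faster.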