pytorch双标签训练例子
时间: 2023-09-06 16:14:23 浏览: 37
以下是一个使用PyTorch进行双标签分类任务的例子:
首先,我们需要导入必要的库:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
```
接下来,我们需要定义我们的数据集。这里我们使用CIFAR-10数据集。我们将标签分成两个部分:前5个类别为一个标签,后5个类别为另一个标签。
```python
class CIFAR10(Dataset):
    """CIFAR-10 wrapped as a two-label dataset.

    The 10 CIFAR classes are split into two 5-way label spaces:
    images with original label < 5 get target1 = label, target2 = 0;
    images with original label >= 5 get target1 = 0, target2 = label - 5.

    NOTE(review): the "inactive" head is filled with 0, which is
    indistinguishable from a genuine class-0 target for that head —
    confirm this labeling scheme is intended.
    """

    def __init__(self, root, transform=None, train=True):
        """Load CIFAR-10 from `root` and build both target lists.

        Args:
            root: directory where CIFAR-10 is stored / downloaded to.
            transform: optional callable applied to each PIL image.
            train: which CIFAR-10 split to load (default True, matching
                the original behavior).
        """
        self.data = []
        self.targets1 = []  # targets for the first 5-way head
        self.targets2 = []  # targets for the second 5-way head
        self.transform = transform
        cifar_data = torchvision.datasets.CIFAR10(root=root, train=train, download=True)
        for image, label in cifar_data:
            self.data.append(image)
            if label < 5:
                self.targets1.append(label)
                self.targets2.append(0)  # second head inactive -> 0
            else:
                self.targets1.append(0)  # first head inactive -> 0
                self.targets2.append(label - 5)  # remap 5..9 -> 0..4

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        """Return (image, target1, target2) for the given index."""
        image = self.data[index]
        target1 = self.targets1[index]
        target2 = self.targets2[index]
        if self.transform:
            image = self.transform(image)
        return image, target1, target2
```
接下来,我们定义我们的模型。这里我们使用一个简单的卷积神经网络。我们的输出有两个部分,分别对应于两个标签。
```python
class Net(nn.Module):
    """Small CNN with a shared trunk and two 5-way classification heads.

    Expects 3x32x32 input images (CIFAR-sized); the hard-coded
    16 * 5 * 5 flatten only works for that resolution.
    Requires `torch.nn.functional as F` to be imported at module level
    (the original file used F without importing it).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 5)  # head for the first label (5 classes)
        self.fc4 = nn.Linear(84, 5)  # head for the second label (5 classes)

    def forward(self, x):
        """Return (logits_head1, logits_head2), each of shape (N, 5)."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)  # flatten: valid only for 32x32 input
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        out1 = self.fc3(x)  # first label logits
        out2 = self.fc4(x)  # second label logits
        return out1, out2
```
然后我们需要定义我们的训练和测试函数:
```python
def train(model, train_loader, criterion, optimizer, device):
    """Run one training epoch, summing the losses of both label heads.

    Each batch from `train_loader` is an (images, target1, target2)
    triple; the model must return two logit tensors.
    """
    model.train()
    for images, labels_a, labels_b in train_loader:
        images = images.to(device)
        labels_a = labels_a.to(device)
        labels_b = labels_b.to(device)
        optimizer.zero_grad()
        logits_a, logits_b = model(images)
        # Joint objective: sum of the two per-head cross-entropy losses.
        total_loss = criterion(logits_a, labels_a) + criterion(logits_b, labels_b)
        total_loss.backward()
        optimizer.step()
def test(model, test_loader, criterion, device):
model.eval()
test_loss = 0
correct1 = 0
correct2 = 0
with torch.no_grad():
for data, target1, target2 in test_loader:
data, target1, target2 = data.to(device), target1.to(device), target2.to(device)
output1, output2 = model(data)
test_loss += criterion(output1, target1).item() + criterion(output2, target2).item()
pred1 = output1.argmax(dim=1, keepdim=True)
pred2 = output2.argmax(dim=1, keepdim=True)
correct1 += pred1.eq(target1.view_as(pred1)).sum().item()
correct2 += pred2.eq(target2.view_as(pred2)).sum().item()
test_loss /= len(test_loader.dataset)
correct1 /= len(test_loader.dataset)
correct2 /= len(test_loader.dataset)
print('Average loss: {:.4f}, Accuracy 1: {}/{} ({:.2f}%), Accuracy 2: {}/{} ({:.2f}%)\n'.format(
test_loss, correct1, len(test_loader.dataset), 100. * correct1, correct2, len(test_loader.dataset), 100. * correct2))
```
最后,我们定义我们的训练和测试循环:
```python
# Training configuration.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 128
epochs = 10

# Map PIL images to tensors normalized to [-1, 1] per channel.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# NOTE(review): both loaders wrap the same dataset construction, so the
# model is evaluated on the data it trains on — confirm this is intended.
train_dataset = CIFAR10(root='./data', transform=transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = CIFAR10(root='./data', transform=transform)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# Model, joint loss, and optimizer.
model = Net().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Alternate one training epoch with one evaluation pass.
for epoch in range(1, epochs + 1):
    train(model, train_loader, criterion, optimizer, device)
    test(model, test_loader, criterion, device)
```
这样就完成了一个使用PyTorch进行双标签分类任务的例子。