Vision Transformer in Practice
The Vision Transformer (ViT) is an image-classification model built on the self-attention mechanism. It follows the Transformer architecture: the input image is split into fixed-size patches, the resulting patch embeddings pass through a stack of Transformer encoder blocks, and the output of a special classification token is mapped to class scores. Below, a short sketch of the patching idea is followed by a simple step-by-step Vision Transformer workflow:
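As a minimal, self-contained sketch (illustrative only, not part of the model defined below), here is how a 224×224 image decomposes into 16×16 patches, giving 14 × 14 = 196 tokens per image:
```
import torch

# A dummy batch: 1 image, 3 channels, 224x224 pixels.
x = torch.randn(1, 3, 224, 224)
patch_size = 16

# Carve out non-overlapping 16x16 patches: 224 / 16 = 14 per side.
patches = x.unfold(2, patch_size, patch_size).unfold(3, patch_size, patch_size)
patches = patches.contiguous().view(1, 3, -1, patch_size, patch_size)
print(patches.shape)  # torch.Size([1, 3, 196, 16, 16]) -> 196 tokens per image
```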
1. Import the required libraries and load the dataset
```
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision import transforms

# Resize CIFAR-10 images from 32x32 to 224x224 to match the ViT input size,
# then normalize with ImageNet statistics.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

train_dataset = CIFAR10(root='./data', train=True, transform=transform, download=True)
test_dataset = CIFAR10(root='./data', train=False, transform=transform, download=True)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
```
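As a quick sanity check (illustrative, not required), you can inspect one batch from the loader; after the Resize transform each batch should have shape (64, 3, 224, 224):
```
# Illustrative check: pull one batch and verify its shape.
images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([64, 3, 224, 224])
print(labels.shape)  # torch.Size([64])
```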
2. Define the Vision Transformer model
```
class VisionTransformer(nn.Module):
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=10,
                 embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0):
        super(VisionTransformer, self).__init__()
        self.patch_size = patch_size
        self.num_patches = (img_size // patch_size) ** 2
        # Patch embedding: a strided convolution that maps each 16x16 patch to a vector.
        self.patch_embed = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        # Learnable [CLS] token and positional embeddings (one slot per patch plus [CLS]).
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=0.5)
        # batch_first=True so the encoder accepts (batch, sequence, embed_dim) tensors.
        self.transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=embed_dim, nhead=num_heads,
                                       dim_feedforward=int(embed_dim * mlp_ratio),
                                       batch_first=True),
            num_layers=depth)
        self.norm = nn.LayerNorm(embed_dim)
        self.fc = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        B = x.size(0)
        # (B, C, H, W) -> (B, embed_dim, H/P, W/P) -> (B, num_patches, embed_dim)
        x = self.patch_embed(x).flatten(2).transpose(1, 2)
        # Prepend the [CLS] token and add positional embeddings.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed[:, :(self.num_patches + 1)]
        x = self.pos_drop(x)
        x = self.transformer(x)
        x = self.norm(x)
        # Classify from the [CLS] token's final representation.
        cls_tokens = x[:, 0]
        x = self.fc(cls_tokens)
        return x
```
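With img_size=224 and patch_size=16 there are (224/16)² = 196 patches, so the encoder sees a sequence of 197 tokens including [CLS]. A minimal shape check on random input (illustrative):
```
# Illustrative shape check on a dummy batch of 2 images.
vit = VisionTransformer(num_classes=10)
dummy = torch.randn(2, 3, 224, 224)
out = vit(dummy)
print(vit.num_patches)  # 196
print(out.shape)        # torch.Size([2, 10])
```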
3. Define the loss function and optimizer
```
# Instantiate the model and move it to the GPU (a CUDA device is assumed throughout).
model = VisionTransformer(num_classes=10).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
```
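Before training, a quick sanity check (illustrative, not required): with random weights, the cross-entropy loss over 10 classes should start near -ln(1/10) ≈ 2.3026.
```
import math

# Illustrative check: the untrained model's loss should be close to ln(10).
model.eval()
with torch.no_grad():
    inputs, labels = next(iter(train_loader))
    initial_loss = criterion(model(inputs.cuda()), labels.cuda()).item()
model.train()
print('initial loss: %.4f (expected ~ %.4f)' % (initial_loss, math.log(10)))
```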
4. Train the model
```
num_epochs = 10
for epoch in range(num_epochs):
    for i, (inputs, labels) in enumerate(train_loader):
        inputs, labels = inputs.cuda(), labels.cuda()
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
    # Report the last batch's loss every other epoch.
    if epoch % 2 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))
```
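If you want to keep the trained weights around, a standard PyTorch checkpointing snippet looks like this (the filename here is arbitrary):
```
# Save the trained weights (filename is arbitrary).
torch.save(model.state_dict(), 'vit_cifar10.pth')
# To restore later:
# model.load_state_dict(torch.load('vit_cifar10.pth'))
```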
5. Evaluate the model
```
model.eval()  # Disable dropout for evaluation
with torch.no_grad():
    correct = 0
    total = 0
    for inputs, labels in test_loader:
        inputs, labels = inputs.cuda(), labels.cuda()
        outputs = model(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: %d %%' % (
        100 * correct / total))
```
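To inspect individual predictions, you can run the model on a single test image (illustrative; the class names follow the standard CIFAR-10 label ordering):
```
# Illustrative single-image inference on the first test sample.
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']
image, label = test_dataset[0]
with torch.no_grad():
    logits = model(image.unsqueeze(0).cuda())
pred = logits.argmax(dim=1).item()
print('predicted: %s, ground truth: %s' % (classes[pred], classes[label]))
```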