PyTorch multi-GPU parallel training examples
In machine learning and deep learning, PyTorch has become the framework of choice for many data scientists, researchers, and developers. Its dynamic computation graph and interactive style make it easy to use and debug. PyTorch also supports multi-GPU parallel computation, which can significantly shorten training time.
Below are some examples of multi-GPU parallelism in PyTorch:
1. Using nn.DataParallel
nn.DataParallel is the simplest way PyTorch offers to train a model on multiple GPUs. It runs in a single process: the model is replicated onto each visible GPU and every input batch is split across the replicas, so it is a good fit when the model fits on a single GPU and you mainly want to process larger batches with minimal code changes. The core pattern is only a few lines, as the sketch below shows.
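For orientation, here is a minimal sketch of the pattern; the `nn.Linear` stand-in model and the random input batch are placeholders, not part of the full example that follows.
```
import torch
import torch.nn as nn

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = nn.Linear(10, 2).to(device)              # stand-in for a real model
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)               # replicate the model on all visible GPUs
outputs = model(torch.randn(8, 10).to(device))   # each batch is split across the replicas
```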
Here is a complete example: fine-tuning a ResNet model to classify the CIFAR-10 dataset. Multi-GPU parallel training is enabled with the following code.
```
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import copy
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if not os.path.isdir('data'):
    os.mkdir('data')
# ImageNet-style preprocessing: CIFAR-10 images are cropped/resized to 224x224
# so they match the input size expected by the pretrained ResNet used below
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
data_dir = 'data/cifar10'
# The CIFAR-10 test split (train=False) is used as the validation set here
image_datasets = {x: datasets.CIFAR10(data_dir, train=(x == 'train'),
                                      download=True, transform=data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=32,
                                              shuffle=True, num_workers=4)
               for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training phase and a validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                # Track gradients only in the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # Keep a copy of the weights that perform best on the validation set
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
    # Reload the best weights before returning the model
    model.load_state_dict(best_model_wts)
    return model
# Fine-tune a pretrained ResNet-18: replace its final layer with a 10-class head for CIFAR-10
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# Wrap the model with DataParallel so each input batch is split across all visible GPUs
model_ft = nn.DataParallel(model_ft)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)
```
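Two practical notes on nn.DataParallel, shown as a hedged sketch (the device_ids choice and checkpoint filename are illustrative assumptions, not part of the example above): by default all visible GPUs are used, but the wrap can be restricted with device_ids; and because the wrapper keeps the original network under .module, checkpoints are usually saved from model_ft.module so the state-dict keys carry no "module." prefix.
```
# Alternative to the plain nn.DataParallel(model_ft) call above:
# restrict the replicas to specific GPUs (by default every visible GPU is used)
model_ft = nn.DataParallel(model_ft, device_ids=[0, 1])

# After training, save the underlying model so the checkpoint keys have no "module." prefix
torch.save(model_ft.module.state_dict(), 'resnet18_cifar10.pth')
```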
2. Using nn.parallel.DistributedDataParallel
To speed up training further, consider nn.parallel.DistributedDataParallel (DDP). DDP runs one process per GPU, keeps a full replica of the model in each process, and synchronizes gradients across processes during the backward pass, which generally scales better than nn.DataParallel.
Multi-GPU training with DistributedDataParallel involves the following steps:
(1) Launch multiple processes
First, launch multiple processes, for example with the torch.distributed.launch utility:
```
python3 -m torch.distributed.launch --nproc_per_node=2 train.py --dist-url='tcp://127.0.0.1:8000' --world-size=2
```
This starts the main script as two processes, one per GPU, and the two processes communicate with each other during training. Note that --dist-url and --world-size in this command are arguments consumed by train.py itself, not by the launcher; a sketch of how the script can pick them up, together with the rank injected by the launcher, follows.
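torch.distributed.launch injects the local rank into each process as a --local_rank command-line argument (or as the LOCAL_RANK environment variable when --use_env is passed). The following is a minimal sketch, assuming train.py parses the flags shown in the command above and runs on a single node, of how each launched process could initialize the process group:
```
import argparse
import torch
import torch.distributed as dist

parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=0)  # injected by torch.distributed.launch
parser.add_argument('--dist-url', default='tcp://127.0.0.1:8000')
parser.add_argument('--world-size', type=int, default=2)
args = parser.parse_args()

# On a single node the local rank equals the global rank
torch.cuda.set_device(args.local_rank)
dist.init_process_group(backend='nccl', init_method=args.dist_url,
                        world_size=args.world_size, rank=args.local_rank)
```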
(2) Write the code
The main script looks like the following (this version creates its own worker processes with mp.spawn, so it can also be started directly as python3 train.py instead of through the launcher):
```
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP

def setup(rank, world_size):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '8000'
    # initialize the process group
    dist.init_process_group("nccl", rank=rank, world_size=world_size)

def cleanup():
    dist.destroy_process_group()

class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

def train(rank, world_size):
    setup(rank, world_size)
    # Each process owns one GPU: move the model there and wrap it with DDP
    model = MyModel().to(rank)
    model = DDP(model, device_ids=[rank])
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
    # get_data_loader is a user-supplied helper; it should build a DataLoader
    # whose sampler is a DistributedSampler(num_replicas=world_size, rank=rank)
    train_loader = get_data_loader(num_replicas=world_size, rank=rank)
    for epoch in range(25):
        for batch_idx, (data, target) in enumerate(train_loader):
            optimizer.zero_grad()
            output = model(data.to(rank))
            loss = criterion(output, target.to(rank))
            loss.backward()   # DDP synchronizes gradients across processes here
            optimizer.step()
        exp_lr_scheduler.step()
    cleanup()

if __name__ == '__main__':
    world_size = 2
    # mp.spawn passes the process rank as the first argument to train()
    mp.spawn(train, args=(world_size,), nprocs=world_size, join=True)
```
Here, setup() initializes the process group so that all processes can communicate with one another, and cleanup() destroys the process group once training has finished.
`MyModel` is a simple convolutional neural network. Wrapping it with DistributedDataParallel spreads the workload across the GPUs, and with two GPUs each epoch can run close to twice as fast, although the actual speedup depends on the model size and communication overhead. The example relies on a get_data_loader helper that is not shown; a possible implementation is sketched below.
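get_data_loader is not part of PyTorch; it is a user-supplied helper. One possible implementation for CIFAR-10 (the dataset, transform, and batch size are illustrative assumptions) uses DistributedSampler so that each process trains on a disjoint shard of the data:
```
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

def get_data_loader(num_replicas, rank, batch_size=32):
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    dataset = torchvision.datasets.CIFAR10('data', train=True,
                                           download=True, transform=transform)
    sampler = DistributedSampler(dataset, num_replicas=num_replicas, rank=rank)
    # shuffle stays False when a sampler is given; the DistributedSampler shuffles instead
    return DataLoader(dataset, batch_size=batch_size, sampler=sampler,
                      num_workers=2, pin_memory=True)
```
It is also common to call sampler.set_epoch(epoch) at the start of each epoch so that the shuffling order differs between epochs.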
To sum up, PyTorch's multi-GPU training options are fairly flexible, and the right choice depends on your needs: nn.DataParallel is the quickest to set up, while nn.parallel.DistributedDataParallel generally scales better. Either way, model training can be accelerated across multiple GPUs.