DCGAN-DenseNet图像分类代码
时间: 2023-10-10 12:14:55 浏览: 47
这里是一个使用 PyTorch 实现的 DCGAN（图像生成）和 DenseNet（图像分类）的代码示例：
DCGAN代码示例:
```python
import torch
import torch.nn as nn
# 定义生成器 Generator
class Generator(nn.Module):
    """DCGAN generator.

    Maps a latent noise tensor of shape (N, nz, 1, 1) to an image batch of
    shape (N, nc, 64, 64) with values in [-1, 1] (Tanh output).

    Args:
        nz:  size of the latent vector.
        ngf: base number of generator feature maps.
        nc:  number of output image channels.
    """

    def __init__(self, nz, ngf, nc):
        super(Generator, self).__init__()
        # Stem: project the latent vector to a (ngf*8) x 4 x 4 feature map.
        layers = [
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
        ]
        # Three upsampling stages, each doubling spatial size and halving width:
        # ngf*8 -> ngf*4 -> ngf*2 -> ngf.
        for mult in (8, 4, 2):
            layers += [
                nn.ConvTranspose2d(ngf * mult, ngf * mult // 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ngf * mult // 2),
                nn.ReLU(True),
            ]
        # Final stage: upsample to 64x64 and squash to [-1, 1].
        layers += [
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Generate images from latent vectors of shape (N, nz, 1, 1)."""
        return self.main(input)
# 定义判别器 Discriminator
class Discriminator(nn.Module):
    """DCGAN discriminator.

    Scores a batch of (N, nc, 64, 64) images, returning a tensor of shape
    (N,) with per-sample probabilities in (0, 1) (Sigmoid output).

    Args:
        nc:  number of input image channels.
        ndf: base number of discriminator feature maps.
    """

    def __init__(self, nc, ndf):
        super(Discriminator, self).__init__()
        # Stem: no batch-norm on the first layer, per the DCGAN recipe.
        blocks = [
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Three downsampling stages, each halving spatial size and doubling
        # width: ndf -> ndf*2 -> ndf*4 -> ndf*8.
        for mult in (1, 2, 4):
            blocks += [
                nn.Conv2d(ndf * mult, ndf * mult * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * mult * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Head: collapse the 4x4 map to a single logit, then Sigmoid.
        blocks += [
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*blocks)

    def forward(self, input):
        """Return per-sample real/fake probabilities with shape (N,)."""
        return self.main(input).view(-1, 1).squeeze(1)
# 定义训练过程
def train(netG, netD, dataloader, criterion, optimizerG, optimizerD, device, nz, ngf, ndf, epochs):
for epoch in range(epochs):
for i, data in enumerate(dataloader, 0):
# 训练判别器
netD.zero_grad()
real_imgs = data[0].to(device)
b_size = real_imgs.size(0)
label = torch.full((b_size,), 1, dtype=torch.float, device=device)
output = netD(real_imgs).view(-1)
errD_real = criterion(output, label)
noise = torch.randn(b_size, nz, 1, 1, device=device)
fake_imgs = netG(noise)
label.fill_(0)
output = netD(fake_imgs.detach()).view(-1)
errD_fake = criterion(output, label)
errD = errD_real + errD_fake
errD.backward()
optimizerD.step()
# 训练生成器
netG.zero_grad()
label.fill_(1)
output = netD(fake_imgs).view(-1)
errG = criterion(output, label)
errG.backward()
optimizerG.step()
# 输出训练状态
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f'
% (epoch, epochs, i, len(dataloader),
errD.item(), errG.item()))
# Hyperparameters.
# Fix: the original snippet used `torchvision` below without ever importing
# it, so the script crashed with a NameError at dataset construction.
import torchvision

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
nz = 100          # latent noise vector size
ngf = 64          # generator base feature-map width
ndf = 64          # discriminator base feature-map width
epochs = 5
lr = 0.0002       # Adam learning rate recommended by the DCGAN paper
beta1 = 0.5       # Adam beta1 recommended by the DCGAN paper
batch_size = 128
image_size = 64   # images are resized to 64x64 to match the networks
nc = 3            # RGB channels

# Load the dataset (downloads CIFAR-10 on first run).
dataset = torchvision.datasets.CIFAR10(
    root='./data', download=True,
    transform=torchvision.transforms.Compose([
        torchvision.transforms.Resize(image_size),
        torchvision.transforms.ToTensor(),
        # Map pixels from [0, 1] to [-1, 1] to match the generator's Tanh range.
        torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=True, num_workers=2)

# Initialize the networks, loss, and optimizers.
netG = Generator(nz, ngf, nc).to(device)
netD = Discriminator(nc, ndf).to(device)
criterion = nn.BCELoss()  # matches the discriminator's Sigmoid output
optimizerG = torch.optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerD = torch.optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))

# Train the networks.
train(netG, netD, dataloader, criterion, optimizerG, optimizerD, device, nz, ngf, ndf, epochs)
```
DenseNet代码示例:
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
# 定义DenseNet模型
class DenseNet(nn.Module):
    """Small DenseNet-style classifier for 32x32 RGB inputs (e.g. CIFAR-10).

    Bug fixes versus the original:
    - 12 dense blocks of growth 32 on a 64-channel stem produce
      64 + 12*32 = 448 channels, but the transition hard-coded
      BatchNorm2d(256) / Conv2d(256, ...), so the forward pass crashed.
      The transition width is now derived from the dense-block arithmetic.
    - AvgPool2d(kernel_size=8, stride=1) on the 16x16 feature map left a
      9x9 map (128*81 features), incompatible with the 128-feature
      classifier. A global average pool is used instead.

    Args:
        num_classes: number of output classes (default 10 for CIFAR-10,
            backward compatible with the original no-argument constructor).
    """

    # Channels appended per dense layer; must match what Bottleneck concatenates.
    GROWTH_RATE = 32

    def __init__(self, num_classes=10):
        super(DenseNet, self).__init__()
        num_blocks = 12
        stem_channels = 64
        dense_out = stem_channels + num_blocks * self.GROWTH_RATE  # 448
        self.features = nn.Sequential(
            nn.Conv2d(3, stem_channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(stem_channels),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 32x32 -> 16x16
            nn.Sequential(*self._make_dense_layers(stem_channels, num_blocks)),
            nn.BatchNorm2d(dense_out),
            nn.ReLU(inplace=True),
            nn.Conv2d(dense_out, 128, kernel_size=1, stride=1, bias=False),  # transition
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d(1),  # global pool -> (N, 128, 1, 1) for any input size
        )
        self.classifier = nn.Linear(128, num_classes)

    def forward(self, x):
        """Return class logits of shape (N, num_classes)."""
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten (N, 128, 1, 1) -> (N, 128)
        x = self.classifier(x)
        return x

    def _make_dense_layers(self, in_channels, num_blocks):
        """Build `num_blocks` Bottleneck layers, each widening by GROWTH_RATE."""
        layers = []
        for _ in range(num_blocks):
            layers.append(Bottleneck(in_channels))
            in_channels += self.GROWTH_RATE
        return layers
# 定义Bottleneck模块
class Bottleneck(nn.Module):
    """Dense layer: computes 32 new feature maps and concatenates them onto
    the input, so the output has `in_channels + 32` channels.

    Fix: the original forward pass constructed a fresh ``nn.ReLU(inplace=True)``
    module on every call; this uses the numerically identical in-place
    functional ``torch.relu_`` instead.

    Args:
        in_channels: number of channels of the incoming feature map.
    """

    def __init__(self, in_channels):
        super(Bottleneck, self).__init__()
        # 1x1 reduction to the growth width, then 3x3 spatial convolution.
        self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(32)

    def forward(self, x):
        """Return cat([x, f(x)], dim=1) with shape (N, in_channels + 32, H, W)."""
        out = torch.relu_(self.bn1(self.conv1(x)))
        out = torch.relu_(self.bn2(self.conv2(out)))
        # Dense connectivity: new features are appended, never replace the input.
        return torch.cat((x, out), 1)
# 训练模型
def train(net, trainloader, criterion, optimizer, device, epochs):
    """Train a classifier with standard mini-batch SGD-style updates.

    Runs `epochs` passes over `trainloader`, performing one optimizer step
    per batch and printing the running average loss every 2000 batches.

    Args:
        net:         the model to train (modified in place).
        trainloader: yields (inputs, labels) batches.
        criterion:   classification loss, e.g. nn.CrossEntropyLoss.
        optimizer:   optimizer over net.parameters().
        device:      device the batches are moved to.
        epochs:      number of passes over the data.
    """
    for epoch in range(epochs):
        window_loss = 0.0
        for step, batch in enumerate(trainloader, 0):
            inputs = batch[0].to(device)
            targets = batch[1].to(device)
            optimizer.zero_grad()
            loss = criterion(net(inputs), targets)
            loss.backward()
            optimizer.step()
            window_loss += loss.item()
            if step % 2000 == 1999:  # report the average over the last 2000 batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, step + 1, window_loss / 2000))
                window_loss = 0.0
# Hyperparameters for CIFAR-10 classification training.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
epochs = 10
lr = 0.1  # initial SGD learning rate; no decay schedule is applied
momentum = 0.9
batch_size = 128
image_size = 32  # CIFAR-10 native resolution
# Load the dataset. The training set gets random-crop + horizontal-flip
# augmentation; the test set is only normalized. Normalize maps pixels
# from [0, 1] to roughly [-1, 1].
transform_train = transforms.Compose([
transforms.RandomCrop(image_size, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=2)
# NOTE(review): testloader is built but never used below — no evaluation
# loop is included; add one after training to report test accuracy.
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=2)
# Initialize the network, loss, and optimizer.
net = DenseNet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)
# Train the network.
train(net, trainloader, criterion, optimizer, device, epochs)
```
这里的 DCGAN 代码可用于 CIFAR-10 图像的生成式训练，DenseNet 代码可用于 CIFAR-10 的分类训练；示例中虽然构建了 testloader，但并未包含测试/评估循环，需在训练结束后自行补充。另外请注意根据各模型的输入要求调整图像尺寸（DCGAN 为 64×64，DenseNet 为 32×32）。