使用 PyTorch 训练一个基于多尺度自编码网络、实现红外与可见光图像融合的模型(包括训练与测试等代码)
时间: 2024-01-22 12:18:48 浏览: 91
使用深度学习框架(Pytorch) 的 红外和可见光图像融合_Jupyter_python代码_下载
5星 · 资源好评率100%
这是一个比较复杂的任务,需要耐心阅读并理解以下代码。首先需要安装 PyTorch 和一些常用的 Python 库(如 Numpy、Pillow 等)。
训练代码:
```python
import os

import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
class MultiScaleAutoencoder(nn.Module):
    """Two-branch encoder / single-decoder network that fuses an infrared
    image with a visible-light image into one RGB output.

    encoder1 (deep branch, 256 channels) encodes the infrared input and
    encoder2 (shallow branch, 64 channels) encodes the visible input; both
    reduce spatial resolution by a factor of 16 so their feature maps can
    be concatenated channel-wise and decoded back to the input resolution.
    Inputs are expected as (B, 3, H, W) with H and W divisible by 16 and
    values in [0, 1] (the decoder ends in a Sigmoid).
    """

    def __init__(self):
        super(MultiScaleAutoencoder, self).__init__()
        # Deep branch: 4 conv+pool stages, 3 -> 256 channels, H/16 x W/16.
        self.encoder1 = nn.Sequential(
            nn.Conv2d(3, 32, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(32, 64, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(64, 128, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(128, 256, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
        )
        # Shallow branch: 4 conv+pool stages, 3 -> 64 channels, H/16 x W/16.
        # A 4th stage is required so both branches reach the same spatial
        # size: the original 3-stage version produced H/8 features that
        # could not be concatenated with encoder1's H/16 features.
        self.encoder2 = nn.Sequential(
            nn.Conv2d(3, 16, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(16, 32, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(32, 64, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(64, 64, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
        )
        # Decoder: 4 stride-2 transposed convs give x16 upsampling, so the
        # output matches the input resolution. The original 5-stage decoder
        # upsampled x32 and produced an output twice the input size, which
        # broke the MSE loss against the target image.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(256 + 64, 128, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 3, 3, stride=2, padding=1, output_padding=1),
            nn.Sigmoid(),
        )

    def forward(self, x1, x2):
        """Fuse x1 (infrared) and x2 (visible); returns a (B, 3, H, W)
        tensor with values in [0, 1]."""
        # The original also computed encoder2(x1) and encoder1(x2) but never
        # used the results; that dead computation is dropped.
        feat_ir = self.encoder1(x1)
        feat_vis = self.encoder2(x2)
        fused = torch.cat((feat_ir, feat_vis), dim=1)
        return self.decoder(fused)
class IRVISDataset(Dataset):
    """Dataset of paired infrared/visible-light images.

    Expects ir_dir and vis_dir to contain files with matching names, so
    that sorting both listings pairs the i-th IR image with the i-th VIS
    image. Each item is a pair of float CHW tensors scaled to [0, 1] to
    match the model's Sigmoid output range.
    """

    def __init__(self, ir_dir, vis_dir):
        # sorted(): os.listdir order is arbitrary and filesystem-dependent,
        # so without sorting the IR/VIS pairing would be essentially random.
        self.ir_files = sorted(os.path.join(ir_dir, f) for f in os.listdir(ir_dir))
        self.vis_files = sorted(os.path.join(vis_dir, f) for f in os.listdir(vis_dir))

    def __len__(self):
        return len(self.ir_files)

    def __getitem__(self, idx):
        ir_image = Image.open(self.ir_files[idx]).convert('RGB')
        vis_image = Image.open(self.vis_files[idx]).convert('RGB')
        # HWC uint8 -> CHW float in [0, 1]. The original fed raw 0-255
        # values to a Sigmoid-output model, making the MSE target unreachable.
        ir_tensor = torch.from_numpy(np.transpose(np.array(ir_image), (2, 0, 1))).float() / 255.0
        vis_tensor = torch.from_numpy(np.transpose(np.array(vis_image), (2, 0, 1))).float() / 255.0
        return ir_tensor, vis_tensor
def train(model, train_loader, criterion, optimizer, device):
    """Run one training epoch and return the mean loss over all batches."""
    model.train()
    total_loss = 0.0
    for ir_batch, vis_batch in train_loader:
        ir_batch = ir_batch.to(device)
        vis_batch = vis_batch.to(device)
        optimizer.zero_grad()
        fused = model(ir_batch, vis_batch)
        batch_loss = criterion(fused, vis_batch)
        batch_loss.backward()
        optimizer.step()
        total_loss += batch_loss.item()
    return total_loss / len(train_loader)
def test(model, test_loader, criterion, device):
model.eval()
running_loss = 0.0
with torch.no_grad():
for i, data in enumerate(test_loader, 0):
ir_images, vis_images = data
ir_images, vis_images = ir_images.to(device), vis_images.to(device)
outputs = model(ir_images, vis_images)
loss = criterion(outputs, vis_images)
running_loss += loss.item()
return running_loss / len(test_loader)
if __name__ == '__main__':
    # Placeholder directories; point them at the real paired IR/VIS data.
    ir_dir = 'path/to/ir/dir/'
    vis_dir = 'path/to/vis/dir/'

    # 80/20 random split of the paired dataset into train and test sets.
    dataset = IRVISDataset(ir_dir, vis_dir)
    n_train = int(0.8 * len(dataset))
    n_test = len(dataset) - n_train
    train_dataset, test_dataset = torch.utils.data.random_split(dataset, [n_train, n_test])
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=32, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    model = MultiScaleAutoencoder().to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Train for 50 epochs, reporting train/test loss after each one.
    for epoch in range(50):
        train_loss = train(model, train_loader, criterion, optimizer, device)
        test_loss = test(model, test_loader, criterion, device)
        print('[Epoch %d] Train loss: %.4f | Test loss: %.4f' % (epoch + 1, train_loss, test_loss))

    torch.save(model.state_dict(), 'model.pth')
```
测试代码:
```python
import torch
import torch.nn as nn
from PIL import Image
import numpy as np
class MultiScaleAutoencoder(nn.Module):
    """Two-branch encoder / single-decoder fusion network (must match the
    architecture used at training time so the saved state_dict loads).

    encoder1 (deep branch, 256 channels) encodes the infrared input and
    encoder2 (shallow branch, 64 channels) encodes the visible input; both
    reduce spatial resolution by a factor of 16 so their feature maps can
    be concatenated channel-wise and decoded back to the input resolution.
    Inputs are expected as (B, 3, H, W) with H and W divisible by 16 and
    values in [0, 1] (the decoder ends in a Sigmoid).
    """

    def __init__(self):
        super(MultiScaleAutoencoder, self).__init__()
        # Deep branch: 4 conv+pool stages, 3 -> 256 channels, H/16 x W/16.
        self.encoder1 = nn.Sequential(
            nn.Conv2d(3, 32, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(32, 64, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(64, 128, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(128, 256, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
        )
        # Shallow branch: 4 conv+pool stages, 3 -> 64 channels, H/16 x W/16.
        # A 4th stage is required so both branches reach the same spatial
        # size: the original 3-stage version produced H/8 features that
        # could not be concatenated with encoder1's H/16 features.
        self.encoder2 = nn.Sequential(
            nn.Conv2d(3, 16, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(16, 32, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(32, 64, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(64, 64, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
        )
        # Decoder: 4 stride-2 transposed convs give x16 upsampling, so the
        # output matches the input resolution. The original 5-stage decoder
        # upsampled x32 and produced an output twice the input size.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(256 + 64, 128, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 3, 3, stride=2, padding=1, output_padding=1),
            nn.Sigmoid(),
        )

    def forward(self, x1, x2):
        """Fuse x1 (infrared) and x2 (visible); returns a (B, 3, H, W)
        tensor with values in [0, 1]."""
        # The original also computed encoder2(x1) and encoder1(x2) but never
        # used the results; that dead computation is dropped.
        feat_ir = self.encoder1(x1)
        feat_vis = self.encoder2(x2)
        fused = torch.cat((feat_ir, feat_vis), dim=1)
        return self.decoder(fused)
def test(model_path, ir_path, vis_path, output_path):
    """Load a trained MultiScaleAutoencoder and fuse one IR/VIS image pair.

    Parameters
    ----------
    model_path : path to the saved state_dict ('model.pth').
    ir_path, vis_path : paths to the infrared and visible input images
        (their dimensions should be equal and divisible by 16).
    output_path : where the fused RGB image is written.
    """
    model = MultiScaleAutoencoder()
    model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
    model.eval()
    ir_image = Image.open(ir_path).convert('RGB')
    vis_image = Image.open(vis_path).convert('RGB')
    # HWC uint8 -> CHW float in [0, 1], matching the training-time
    # preprocessing. The original fed raw 0-255 values to a model whose
    # decoder outputs values in [0, 1], saturating the Sigmoid.
    ir_tensor = torch.from_numpy(np.transpose(np.array(ir_image), (2, 0, 1))).float() / 255.0
    vis_tensor = torch.from_numpy(np.transpose(np.array(vis_image), (2, 0, 1))).float() / 255.0
    with torch.no_grad():
        output = model(ir_tensor.unsqueeze(0), vis_tensor.unsqueeze(0))
    # [0, 1] float -> uint8 image; clip guards against float rounding
    # pushing a value marginally outside the valid byte range.
    output = output.squeeze(0).permute(1, 2, 0).numpy()
    output = np.clip(output * 255.0, 0, 255).astype(np.uint8)
    Image.fromarray(output).save(output_path)
if __name__ == '__main__':
    # Placeholder paths; point them at a trained checkpoint and a real
    # IR/VIS image pair before running.
    test(
        'path/to/model.pth',
        'path/to/ir/image.jpg',
        'path/to/vis/image.jpg',
        'path/to/output/image.jpg',
    )
```
以上代码仅供参考,具体实现需要根据数据集和具体任务进行修改。
阅读全文