Structural Consistency SSIM Loss Function
### Applications of the Structural Consistency SSIM Loss Function
The Structural Similarity Index (SSIM) is widely used in image processing, and in deep learning it is a popular optimization target, for example when training generative adversarial networks (GANs) [^1]. Unlike the traditional mean squared error (MSE), SSIM is modeled on properties of the human visual system and therefore reflects the perceptual difference between two images more faithfully.
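For reference, the standard single-scale definition (the formula the code below implements; it is not spelled out in the original text) compares two image patches $x$ and $y$ through their local statistics:

$$
\mathrm{SSIM}(x, y) = \frac{(2\mu_x\mu_y + C_1)(2\sigma_{xy} + C_2)}{(\mu_x^2 + \mu_y^2 + C_1)(\sigma_x^2 + \sigma_y^2 + C_2)},
\qquad C_1 = (0.01\,L)^2,\; C_2 = (0.03\,L)^2,
$$

where $\mu$, $\sigma^2$, and $\sigma_{xy}$ are local means, variances, and covariance computed under a Gaussian window, and $L$ is the dynamic range of the pixel values. Since SSIM is a similarity (1 means identical), the quantity minimized during training is typically $1 - \mathrm{SSIM}$.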
#### Application Scenarios
- **Super-resolution reconstruction**: improve the quality of low-resolution images.
- **Denoising**: remove noise while keeping edge details sharp.
- **Style transfer**: change the artistic style while preserving the original content.
- **Medical image analysis**: help clinicians make more precise diagnoses.

All of these scenarios depend on high-quality image restoration, and an SSIM-based loss function fits that requirement well.
### Implementation
An SSIM loss function (together with a multi-scale MS-SSIM variant) can be implemented in PyTorch as follows:
```python
import torch
import torch.nn.functional as F
from math import exp

def gaussian(window_size, sigma):
    # 1D Gaussian kernel, normalized to sum to 1
    gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
    return gauss / gauss.sum()


def create_window(window_size, channel=1):
    # Build a 2D Gaussian window and expand it to one filter per channel
    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
    window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
    return window

def ssim(img1, img2, val_range=None, window_size=11, window=None, size_average=True, full=False):
    # Infer the dynamic range L from the data when it is not given explicitly
    if val_range is None:
        max_val = 255 if torch.max(img1) > 128 else 1
        min_val = -1 if torch.min(img1) < -0.5 else 0
        L = max_val - min_val
    else:
        L = val_range

    padd = 0
    (_, channel, height, width) = img1.size()
    if window is None:
        real_size = min(window_size, height, width)
        window = create_window(real_size, channel=channel).to(img1.device)

    # Local means via Gaussian filtering (one filter group per channel)
    mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
    mu2 = F.conv2d(img2, window, padding=padd, groups=channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2

    # Local variances and covariance
    sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2

    # Stabilizing constants from the SSIM definition
    C1 = (0.01 * L) ** 2
    C2 = (0.03 * L) ** 2

    v1 = 2.0 * sigma12 + C2
    v2 = sigma1_sq + sigma2_sq + C2
    cs = torch.mean(v1 / v2)  # contrast-structure term, used by MS-SSIM

    ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)

    if size_average:
        ret = ssim_map.mean()
    else:
        ret = ssim_map.mean(1).mean(1).mean(1)

    if full:
        return ret, cs
    return ret

class SSIM(torch.nn.Module):
    def __init__(self, window_size=11, size_average=True, val_range=None):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.val_range = val_range

        # Assume 1 channel; the window is rebuilt in forward() when the input differs
        self.channel = 1
        self.window = create_window(window_size)

    def forward(self, img1, img2):
        (_, channel, _, _) = img1.size()

        if channel == self.channel and self.window.dtype == img1.dtype:
            window = self.window.to(img1.device)
        else:
            window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
            self.window = window
            self.channel = channel

        return ssim(img1, img2, val_range=self.val_range, window=window,
                    window_size=self.window_size, size_average=self.size_average)

def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
    device = img1.device
    # Per-scale weights from the multi-scale SSIM paper
    weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
    levels = weights.size()[0]
    mssim = []
    mcs = []
    for _ in range(levels):
        sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average,
                       val_range=val_range, full=True)
        mssim.append(sim)
        mcs.append(cs)

        # Downsample by a factor of two before the next scale
        img1 = F.avg_pool2d(img1, kernel_size=2, stride=2)
        img2 = F.avg_pool2d(img2, kernel_size=2, stride=2)

    mssim = torch.stack(mssim)
    mcs = torch.stack(mcs)

    # Normalize to avoid NaNs when training unstable models
    # (not compliant with the original MS-SSIM definition)
    if normalize:
        mssim = (mssim + 1) / 2
        mcs = (mcs + 1) / 2

    # Contrast-structure terms at the coarser scales, full SSIM at the last scale
    pow1 = mcs ** weights
    pow2 = mssim ** weights
    output = torch.prod(pow1[:-1]) * pow2[-1]
    return output

class MSSSIM(torch.nn.Module):
    def __init__(self, window_size=11, size_average=True, channel=3):
        super(MSSSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = channel

    def forward(self, img1, img2):
        # TODO: store windows between calls if possible.
        return msssim(img1, img2, window_size=self.window_size,
                      size_average=self.size_average, normalize=True)
```
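The `msssim` function implements the multi-scale form of the index: at each of the five scales it records the contrast-structure term $cs_j$ returned by `ssim(..., full=True)` and then downsamples both images by a factor of two. Under the standard multi-scale SSIM definition (stated here for context; it is not given in the original text), the per-scale terms are combined as

$$
\mathrm{MS\text{-}SSIM}(x, y) = \big[\mathrm{SSIM}_M(x, y)\big]^{w_M} \prod_{j=1}^{M-1} cs_j(x, y)^{w_j},
$$

with $M = 5$ and the weights $w = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333)$ hard-coded above. The `normalize=True` path first maps both terms from $[-1, 1]$ to $[0, 1]$ so that fractional powers of negative values do not produce NaNs early in training.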
This code defines a simple `SSIM` module along with a multi-scale version, `MSSSIM`; both compute the (MS-)SSIM value between two input images. Minimizing $1 - \mathrm{SSIM}$ between the prediction and the target during training narrows the structural gap between them and tends to improve the perceptual quality of the final output.
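A minimal usage sketch (assuming the classes defined above and prediction/target tensors normalized to $[0, 1]$; the tensor shapes and the `val_range` value are illustrative only):

```python
import torch

# SSIM is a similarity score (1 means identical images),
# so the quantity to minimize is 1 - SSIM.
criterion = SSIM(window_size=11, val_range=1.0)  # assumes inputs in [0, 1]

pred = torch.rand(4, 3, 64, 64, requires_grad=True)  # stand-in for a network output
target = torch.rand(4, 3, 64, 64)                    # stand-in for ground truth

loss = 1 - criterion(pred, target)
loss.backward()
print(loss.item())
```

In practice the SSIM term is often mixed with a pixel-wise loss such as L1 (e.g. `loss = 0.84 * (1 - ssim_val) + 0.16 * F.l1_loss(pred, target)`); that weighting is a common heuristic from the image-restoration literature, not something prescribed by the code above.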