```
t1 = time.time()

# Note that compactness is defined differently because a grid is not used.
# Lower compactness for maskSLIC is equivalent
segments = seg.slic(img, compactness=10, seed_type='nplace', mask=roi, n_segments=120,
                    recompute_seeds=True, plot_examples=True, enforce_connectivity=True)
print("Time: {:.2f} s".format(time.time() - t1))

plt.figure()
plt.imshow(mark_boundaries(img, segments))
plt.contour(roi, contours=1, colors='red', linewidths=0.5)
plt.axis('off')
plt.savefig(r'I:\18Breakageratecalculation\mask-slic use\maskSLIC-master\result\split\result3\maskslic.png')
```
How can I save each superpixel region as a separate image?
You can use the following code to save each superpixel region to disk:
```
import cv2
import numpy as np

for segment_id in np.unique(segments):
    mask = segments == segment_id                    # boolean mask of this superpixel
    masked_image = img.copy()
    masked_image[~mask] = 0                          # black out everything outside it
    # cv2.imwrite expects BGR channel order, so convert the RGB image before saving
    cv2.imwrite('segment_{}.png'.format(segment_id),
                cv2.cvtColor(masked_image, cv2.COLOR_RGB2BGR))
```
This loops over all superpixel IDs, applies each superpixel's mask to the original image, and writes the result to disk. You can adjust the output file names and paths to suit your needs. If the label map contains a background value (for example -1 for pixels outside the ROI), you may want to skip that ID inside the loop.
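If you only need the pixels that belong to each superpixel rather than a full-size image with a black background, a small variation of the loop (a sketch assuming `img` is the RGB array loaded earlier and `segments` is the maskSLIC label map, with negative labels outside the ROI) crops each region to its bounding box before saving:
```
import numpy as np
from skimage.io import imsave

for segment_id in np.unique(segments):
    if segment_id < 0:
        continue                                    # skip pixels outside the ROI mask
    ys, xs = np.where(segments == segment_id)       # coordinates of this superpixel
    crop = img[ys.min():ys.max() + 1, xs.min():xs.max() + 1].copy()
    crop_mask = segments[ys.min():ys.max() + 1, xs.min():xs.max() + 1] == segment_id
    crop[~crop_mask] = 0                            # black out neighbouring pixels inside the crop
    imsave('segment_crop_{}.png'.format(segment_id), crop)
```
This keeps each output file small instead of writing a mostly black full-frame image per superpixel.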
Related questions
```
img = imread(r'I:\18Breakageratecalculation\mask-slic use\maskSLIC-master\1\056.jpg')

# The ROI is also stored as an image for viewing convenience
# But the roi input to maskSLIC should be a binary image with the same spatial
# dimensions as the image (in this case 300x451)
roi = imread(r'I:\18Breakageratecalculation\mask-slic use\maskSLIC-master\1\0562.png')

# The alpha channel is used to store the ROI in this case and is converted
# into a logical array of 0s and 1s
roi = roi[:, :, 3] > 0

# Alternatively a mask could be created manually with for example a disk:
# roi = np.zeros((img.shape[0], img.shape[1]))
# a, b = 150, 150
# r = 100
# y, x = np.ogrid[-a:img.shape[0]-a, -b:img.shape[1]-b]
# mask = x*x + y*y <= r*r
# roi[mask] = 1

# ~~~~~~~~~~~~ Example 1: maskSLIC ~~~~~~~~~~~~~
t1 = time.time()
# Note that compactness is defined differently because a grid is not used.
# Lower compactness for maskSLIC is equivalent
segments = seg.slic(img, compactness=10, seed_type='nplace', mask=roi, n_segments=120,
                    recompute_seeds=True, plot_examples=True, enforce_connectivity=True)
print("Time: {:.2f} s".format(time.time() - t1))

plt.figure()
plt.imshow(mark_boundaries(img, segments))
plt.contour(roi, contours=1, colors='red', linewidths=0.5)
plt.axis('off')

# ~~~~~~~~~~~ Example 2: SLIC ~~~~~~~~~~~~~~~~~
t1 = time.time()
segments = seg.slic(img, compactness=10, seed_type='grid', n_segments=100,
                    plot_examples=False, enforce_connectivity=True)
# segments[roi==0] = -1
print("Time: {:.2f} s".format(time.time() - t1))

plt.figure()
plt.imshow(mark_boundaries(img, segments))
plt.contour(roi, contours=1, colors='red', linewidths=1)
plt.axis('off')

plt.show()
```
How do I save these results?
To save the segmentation results, you can use matplotlib's `savefig` function to write the displayed figure to a file (or `plt.imsave` if you only want the raw image array without axes).
For example, you can add the following lines to save the segmentation result of Example 1:
```
plt.imshow(mark_boundaries(img, segments))
plt.contour(roi, contours=1, colors='red', linewidths=0.5)
plt.axis('off')
plt.savefig('example1.png')  # save the figure; call this before plt.show()
plt.show()
```
This creates a file named example1.png in the current directory containing the segmentation result with the ROI contour drawn on top. To save the result of Example 2, add a similar savefig call before its plt.show().
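If you prefer to save the overlay at the original image resolution, without the margins that `savefig` adds around the axes, a minimal sketch (assuming `img` and `segments` come from Example 1 above) writes the boundary image directly with `skimage.io.imsave`:
```
import numpy as np
from skimage.io import imsave
from skimage.segmentation import mark_boundaries

overlay = mark_boundaries(img, segments)              # float RGB image in [0, 1]
imsave('example1_overlay.png', (overlay * 255).astype(np.uint8))
```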
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import skimage.segmentation as seg
import numpy as np
from PIL import Image

# Hyperparameters
num_superpixels = 1000
compactness = 10
sigma = 1

# Define the model
class SuperpixelSegmentation(nn.Module):
    def __init__(self):
        super(SuperpixelSegmentation, self).__init__()
        self.convs = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, num_superpixels, kernel_size=1, stride=1)
        )

    def forward(self, x):
        x = self.convs(x)
        return x

# Load the images
imgA = Image.open('1.png').convert('RGB')
imgB = Image.open('2.jpg').convert('RGB')

# Superpixel segmentation
imgA_np = np.array(imgA)
segments = seg.slic(imgA_np, n_segments=num_superpixels, compactness=compactness, sigma=sigma)
segments = torch.from_numpy(segments).unsqueeze(0).unsqueeze(0).float()
segments = F.interpolate(segments, size=(imgA.height, imgA.width), mode='nearest').long()

# Apply the superpixel regions to image B
imgB_np = np.array(imgB)
for i in range(num_superpixels):
    mask = (segments == i)
    imgB_np[mask.expand(3, -1, -1)] = np.mean(imgB_np[mask.expand(3, -1, -1)], axis=1, keepdims=True)

# Visualise the superpixel segmentation of image A
imgA_segments = np.zeros_like(imgA_np)
for i in range(num_superpixels):
    mask = (segments == i)
    imgA_segments[mask.expand(3, -1, -1)] = np.random.randint(0, 255, size=(3,))
imgA_segments = Image.fromarray(imgA_segments.astype(np.uint8))
imgB_segments = Image.fromarray(imgB_np)

# Show the images
transforms.ToPILImage()(imgA).show()
transforms.ToPILImage()(imgB).show()
imgA_segments.show()
imgB_segments.show()
```
The code above raises the following error: RuntimeError: expand(CPUBoolType{[1, 1, 512, 512]}, size=[3, -1, -1]): the number of sizes provided (3) must be greater or equal to the number of dimensions in the tensor (4)
The error is raised by this line:
```
imgB_np[mask.expand(3, -1, -1)] = np.mean(imgB_np[mask.expand(3, -1, -1)], axis=1, keepdims=True)
```
`mask` here is a 4-D tensor of shape `[1, 1, 512, 512]`, so `mask.expand(3, -1, -1)` fails: `expand` must be given at least as many sizes as the tensor has dimensions. In addition, `imgB_np` is a NumPy array of shape `[512, 512, 3]`, which cannot be indexed directly with a 4-D torch mask. One fix is to squeeze the mask down to a 2-D `[512, 512]` array, move the channel axis of `imgB_np` to the front, and index with the 2-D mask. The modified code looks like this:
```
imgB_np = np.transpose(imgB_np, (2, 0, 1))            # (H, W, 3) -> (3, H, W)
for i in range(num_superpixels):
    mask = (segments == i).squeeze().numpy()          # 2-D boolean mask, shape (H, W)
    if mask.any():                                    # skip labels that do not occur
        imgB_np[:, mask] = np.mean(imgB_np[:, mask], axis=1, keepdims=True)
imgB_np = np.transpose(imgB_np, (1, 2, 0))            # back to (H, W, 3)
```
Here `imgB_np` is first transposed so that the channel axis comes first, giving shape `[3, 512, 512]`; the torch mask is squeezed and converted to a 2-D NumPy array so it can index the array directly; after the per-superpixel averaging, the array is transposed back to its original `[512, 512, 3]` layout.
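Alternatively, a minimal sketch that avoids the transposes entirely (assuming `segments` is the `[1, 1, H, W]` torch label map built above and `imgB_np` is the original `(H, W, 3)` NumPy image):
```
import numpy as np

seg_2d = segments.squeeze().numpy()               # (H, W) integer label map
for label in np.unique(seg_2d):
    mask = seg_2d == label                        # (H, W) boolean mask
    imgB_np[mask] = imgB_np[mask].mean(axis=0)    # average colour inside this superpixel
```
Indexing with a boolean `(H, W)` mask selects an `(N, 3)` block of pixels, so the per-superpixel mean broadcasts back without any layout changes, and iterating over `np.unique` skips label values that never occur.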