```python
import cv2 as cv
from numpy import *
import numpy as np
from skimage import io
import glob
```
This is a Python code snippet that imports the following libraries:
- cv2: the OpenCV library, used for image and video processing;
- numpy: Python's scientific-computing library, which provides the multi-dimensional array object, various derived objects (such as masked arrays and matrices), and functions for array operations;
- skimage: a Python-based image-processing library offering a large collection of image-processing routines; here only its io submodule is imported;
- glob: one of Python's standard-library modules, used to find path names matching a given pattern.
Note that numpy is imported twice (once with `from numpy import *` and again as `np`), which is redundant and can cause name clashes; importing it once as `np` is enough. Also, `io` is not a function defined in this snippet but the `skimage.io` submodule, which is imported here without being used.
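For reference, a minimal sketch of how these imports are typically used together (the `images/*.jpg` pattern below is a placeholder, not taken from the original snippet):
```python
import glob

import cv2 as cv
import numpy as np
from skimage import io

# Collect all JPEG files in a (hypothetical) image folder.
paths = glob.glob('images/*.jpg')

for path in paths:
    img = io.imread(path)                        # read the image as a NumPy array (RGB order)
    gray = cv.cvtColor(img, cv.COLOR_RGB2GRAY)   # OpenCV operates directly on NumPy arrays
    print(path, img.shape, gray.dtype, np.mean(gray))
```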
Related question
```python
import numpy as np
from skimage import io
from glob import glob
from matplotlib import pyplot as plt
import cv2

x = []
y = []

for i in glob('man_woman/a_resized/*.jpg')[:800]:
    try:
        image = io.imread(i)
        x.append(image)
        y.append(0)
    except:
        continue

for i in glob('man_woman/b_resized/*.jpg')[:800]:
    try:
        image = io.imread(i)
        x.append(image)
        y.append(1)
    except:
        continue

plt.subplot(221)
plt.imshow(x[0])
plt.title('Male')
plt.subplot(222)
plt.imshow(x[1])
plt.title('Male')
plt.subplot(223)
plt.imshow(x[-1])
plt.title('FeMale')
plt.subplot(224)
plt.imshow(x[-2])
plt.title('FeMale')
plt.show()
```
This code is the data-loading and visualization part of a simple classifier for distinguishing male and female face images. It first uses the `glob` module to collect the image file paths in each folder, then reads each file with `io.imread` from `skimage` and appends the image to the list `x`. At the same time, a label is appended to the list `y` according to the folder the image came from: 0 for male images and 1 for female images. Files that fail to load are skipped by the `try/except` block.
Next, the code uses `matplotlib.pyplot` to display four images in a single window via `plt.subplot`: the first two entries of `x` are male face images, and the last two are female face images.
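If the images are all the same size (which the `_resized` folder names suggest), the two lists can be stacked into NumPy arrays and split for later training; a minimal sketch continuing from the code above (the 80/20 split is an illustrative choice, not part of the original code):
```python
import numpy as np

# Stack the Python lists into arrays: images -> (N, H, W, 3), labels -> (N,)
x_arr = np.stack(x).astype('float32') / 255.0  # scale pixel values to [0, 1]
y_arr = np.array(y, dtype='int64')

# Shuffle and split into training and validation sets (80/20, illustrative choice).
idx = np.random.permutation(len(x_arr))
split = int(0.8 * len(idx))
x_train, x_val = x_arr[idx[:split]], x_arr[idx[split:]]
y_train, y_val = y_arr[idx[:split]], y_arr[idx[split:]]

print(x_train.shape, y_train.shape, x_val.shape, y_val.shape)
```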
Convert the following code to PaddlePaddle 2.2.2:
```python
from __future__ import division
import os, time, scipy.io
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import glob
import cv2
import argparse
from PIL import Image
from skimage.measure import compare_psnr, compare_ssim
from tensorboardX import SummaryWriter
from models import RViDeNet
from utils import *

parser = argparse.ArgumentParser(description='Pretrain denoising model')
parser.add_argument('--gpu_id', dest='gpu_id', type=int, default=0, help='gpu id')
parser.add_argument('--num_epochs', dest='num_epochs', type=int, default=33, help='num_epochs')
parser.add_argument('--patch_size', dest='patch_size', type=int, default=128, help='patch_size')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=1, help='batch_size')
args = parser.parse_args()

os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)

save_dir = './pretrain_model'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)

gt_paths1 = glob.glob('./data/SRVD_data/raw_clean/MOT17-02_raw/*.tiff')
gt_paths2 = glob.glob('./data/SRVD_data/raw_clean/MOT17-09_raw/*.tiff')
gt_paths3 = glob.glob('./data/SRVD_data/raw_clean/MOT17-10_raw/*.tiff')
gt_paths4 = glob.glob('./data/SRVD_data/raw_clean/MOT17-11_raw/*.tiff')
gt_paths = gt_paths1 + gt_paths2 + gt_paths3 + gt_paths4

ps = args.patch_size  # patch size for training
batch_size = args.batch_size  # batch size for training
```
Converting the original code to PaddlePaddle 2.2.2 gives the following:
```python
import os
import time
import argparse
import glob
import cv2
import numpy as np
from PIL import Image
from skimage.measure import compare_psnr, compare_ssim
from tensorboardX import SummaryWriter
from models import RViDeNet
from utils import *
import paddle
from paddle import nn
import paddle.optimizer as optim
parser = argparse.ArgumentParser(description='Pretrain denoising model')
parser.add_argument('--gpu_id', dest='gpu_id', type=int, default=0, help='gpu id')
parser.add_argument('--num_epochs', dest='num_epochs', type=int, default=33, help='num_epochs')
parser.add_argument('--patch_size', dest='patch_size', type=int, default=128, help='patch_size')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=1, help='batch_size')
args = parser.parse_args()
paddle.set_device('gpu:{}'.format(args.gpu_id))  # select the GPU given by --gpu_id
save_dir = './pretrain_model'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
gt_paths1 = glob.glob('./data/SRVD_data/raw_clean/MOT17-02_raw/*.tiff')
gt_paths2 = glob.glob('./data/SRVD_data/raw_clean/MOT17-09_raw/*.tiff')
gt_paths3 = glob.glob('./data/SRVD_data/raw_clean/MOT17-10_raw/*.tiff')
gt_paths4 = glob.glob('./data/SRVD_data/raw_clean/MOT17-11_raw/*.tiff')
gt_paths = gt_paths1 + gt_paths2 + gt_paths3 + gt_paths4
ps = args.patch_size # patch size for training
batch_size = args.batch_size # batch size for training
num_epochs = args.num_epochs
train_dataset = DatasetDenoising(gt_paths, ps=ps)  # dataset class assumed to come from utils (see note below)
train_loader = paddle.io.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
model = RViDeNet()
model.train()
optimizer = optim.Adam(learning_rate=1e-4, parameters=model.parameters())
writer = SummaryWriter()
for epoch in range(num_epochs):
    epoch_start_time = time.time()
    epoch_loss = 0
    for i, (noisy_patches, gt_patches) in enumerate(train_loader()):
        noisy_patches = paddle.to_tensor(noisy_patches)
        gt_patches = paddle.to_tensor(gt_patches)
        output = model(noisy_patches)
        loss = nn.functional.mse_loss(output, gt_patches)
        optimizer.clear_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += float(loss)  # accumulate as a Python float instead of a tensor
    epoch_time = time.time() - epoch_start_time
    epoch_loss = epoch_loss / len(train_loader)
    print("Epoch [{}/{}] Loss: {:.5f} [{:.2f}s]".format(epoch + 1, num_epochs, epoch_loss, epoch_time))
    writer.add_scalar("Loss/train", epoch_loss, epoch + 1)
    if (epoch + 1) % 10 == 0:
        model_path = os.path.join(save_dir, 'RViDeNet_epoch{}.pdparams'.format(epoch + 1))
        paddle.save(model.state_dict(), model_path)
        print("Saving model to: {}".format(model_path))
writer.close()
```
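Note that `DatasetDenoising` is not defined in the original script; it is assumed to be provided by `utils`. If it is not, a `paddle.io.Dataset` subclass along the following lines would be needed. This is a minimal sketch with an illustrative random-crop strategy and a hypothetical Gaussian noise model, not the original RViDeNet raw-data pipeline:
```python
import cv2
import numpy as np
import paddle


class DatasetDenoising(paddle.io.Dataset):
    """Hypothetical dataset: crops a random patch from each clean frame and adds synthetic noise."""

    def __init__(self, gt_paths, ps=128, noise_std=0.05):
        super().__init__()
        self.gt_paths = gt_paths
        self.ps = ps
        self.noise_std = noise_std  # assumed Gaussian noise level, not from the original code

    def __len__(self):
        return len(self.gt_paths)

    def __getitem__(self, idx):
        # Read the clean frame and scale it to [0, 1].
        gt = cv2.imread(self.gt_paths[idx], cv2.IMREAD_UNCHANGED).astype('float32')
        gt = gt / gt.max()

        # Crop a random ps x ps patch.
        h, w = gt.shape[:2]
        y = np.random.randint(0, h - self.ps + 1)
        x = np.random.randint(0, w - self.ps + 1)
        gt_patch = gt[y:y + self.ps, x:x + self.ps]

        # Synthesize a noisy input (placeholder for the real raw-noise model).
        noisy_patch = gt_patch + np.random.normal(0, self.noise_std, gt_patch.shape).astype('float32')

        # Add a channel dimension: (1, ps, ps) for single-channel raw frames.
        return noisy_patch[None, ...], gt_patch[None, ...]
```
Also note that `compare_psnr` and `compare_ssim` were removed from `skimage.measure` in scikit-image 0.18; on newer versions the equivalent functions are `peak_signal_noise_ratio` and `structural_similarity` in `skimage.metrics`.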