train_data = train_gen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
This code creates a generator object that yields image data from the target directory. Specifically, it reads images from that directory batch by batch (rather than loading the whole dataset into memory at once) and resizes each one to the size given by `img_width` and `img_height` (note that Keras interprets `target_size` as (height, width)). It groups the images into batches of `batch_size` and, because `class_mode='categorical'` is used, pairs each batch with one-hot labels inferred from the sub-folder names. The returned generator can be used to train the model batch by batch, for example by passing it to `fit_generator()` (or to `fit()` in current versions of Keras, where `fit_generator()` is deprecated).
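As a quick illustration, here is a minimal, self-contained sketch of how such a generator is typically created and consumed. The directory path, image size, and batch size are made-up placeholders standing in for the variables used above, and the tiny model exists only so the `fit()` call is concrete; it is not the model from the question below.

import tensorflow as tf

# Hypothetical placeholders for the variables referenced in the snippet above.
img_width, img_height = 128, 128
batch_size = 32
train_data_dir = 'data/train'  # one sub-folder per class, e.g. data/train/cat, data/train/dog

# Rescale pixel values to [0, 1]; labels are inferred from the sub-folder names.
train_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
train_data = train_gen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),   # Keras treats target_size as (height, width)
    batch_size=batch_size,
    class_mode='categorical')              # yields one-hot label vectors

# A placeholder model, just to show the generator being consumed batch by batch.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(img_height, img_width, 3)),
    tf.keras.layers.Dense(train_data.num_classes, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# In current Keras the generator goes straight to fit(); fit_generator() is deprecated.
model.fit(train_data, epochs=5)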
Related question
Help me convert the following code from TensorFlow to PyTorch:

import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

base_dir = 'E:/direction/datasetsall/'
train_dir = os.path.join(base_dir, 'train_img/')
validation_dir = os.path.join(base_dir, 'val_img/')
train_cats_dir = os.path.join(train_dir, 'down')
train_dogs_dir = os.path.join(train_dir, 'up')
validation_cats_dir = os.path.join(validation_dir, 'down')
validation_dogs_dir = os.path.join(validation_dir, 'up')

batch_size = 64
epochs = 50
IMG_HEIGHT = 128
IMG_WIDTH = 128

num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val

train_image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
validation_image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                           directory=train_dir,
                                                           shuffle=True,
                                                           target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                           class_mode='categorical')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
                                                               directory=validation_dir,
                                                               target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                               class_mode='categorical')
sample_training_images, _ = next(train_data_gen)

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(2, activation='softmax')
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()

history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=total_val // batch_size
)

# Visualize the training results
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)

model.save("./model/timo_classification_128_maxPool2D_dense256.h5")
import torch
import torchvision
import torchvision.transforms as transforms
import os
import numpy as np
import matplotlib.pyplot as plt

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

base_dir = 'E:/direction/datasetsall/'
train_dir = os.path.join(base_dir, 'train_img/')
validation_dir = os.path.join(base_dir, 'val_img/')
train_cats_dir = os.path.join(train_dir, 'down')
train_dogs_dir = os.path.join(train_dir, 'up')
validation_cats_dir = os.path.join(validation_dir, 'down')
validation_dogs_dir = os.path.join(validation_dir, 'up')

batch_size = 64
epochs = 50
IMG_HEIGHT = 128
IMG_WIDTH = 128

num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val

# ImageFolder infers labels from the sub-folder names ('down', 'up'), replacing
# flow_from_directory; the DataLoaders take the place of the Keras generators.
transform = transforms.Compose([
    transforms.Resize((IMG_HEIGHT, IMG_WIDTH)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
train_image_generator = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(train_dir, transform=transform),
    batch_size=batch_size, shuffle=True)
validation_image_generator = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(validation_dir, transform=transform),
    batch_size=batch_size)

# The conv blocks mirror the Keras model; after three 2x2 poolings a 128x128
# input becomes 16x16, hence the 64*16*16 input size of the first Linear layer.
model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(2),
    torch.nn.Conv2d(16, 32, kernel_size=3, padding=1),
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(2),
    torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(2),
    torch.nn.Flatten(),
    torch.nn.Linear(64 * 16 * 16, 256),
    torch.nn.ReLU(),
    torch.nn.Linear(256, 2)
    # No Softmax layer here: CrossEntropyLoss expects raw logits and applies
    # log-softmax internally, so an explicit Softmax would be redundant.
)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

for epoch in range(epochs):
    running_loss = 0.0
    for i, data in enumerate(train_image_generator, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # len(train_image_generator) is the number of batches per epoch.
    epoch_loss = running_loss / len(train_image_generator)
    print('Epoch: %d, Loss: %.3f' % (epoch + 1, epoch_loss))

# Evaluate on the validation set.
correct = 0
total = 0
with torch.no_grad():
    for data in validation_image_generator:
        images, labels = data
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Validation Accuracy: %.2f%%' % (100 * correct / total))
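Two things the converted script does not carry over from the TensorFlow version: it never moves the model to the GPU even though CUDA_VISIBLE_DEVICES is set, and it omits the final model.save(...) call. A minimal, hedged sketch of both, assuming the model and data loaders defined above:

# Move the model to the GPU when one is available; the input and label tensors
# inside the training and validation loops would then also need .to(device).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)

# Rough PyTorch counterpart of the Keras model.save(...) call: store the weights.
torch.save(model.state_dict(), './model/timo_classification_128_maxPool2D_dense256.pth')

# To load later, rebuild the same Sequential architecture and then:
# model.load_state_dict(torch.load('./model/timo_classification_128_maxPool2D_dense256.pth'))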