The following code raises an error:

```python
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, Dropout, Flatten, Dense
from tensorflow.keras.utils import plot_model
np.set_printoptions(threshold=np.inf)

# Build the model
model = Sequential()
model.add(Conv2D(6, (3, 3), activation='relu', input_shape=(3, 1024, 1)))
model.add(Conv2D(16, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(225, activation='sigmoid'))

# Plot the model architecture
plot_model(model, show_shapes=True)
```

Running it fails with:

```
ValueError: Negative dimension size caused by subtracting 3 from 1 for '{{node conv2d_5/Conv2D}} = Conv2D[T=DT_FLOAT, data_format="NHWC", dilations=[1, 1, 1, 1], explicit_paddings=[], padding="VALID", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true](Placeholder, conv2d_5/Conv2D/ReadVariableOp)' with input shapes: [?,1,1022,6], [3,3,6,16].
```
This error occurs because the input shape is (3, 1024, 1) while the first convolution uses a (3, 3) kernel with stride (1, 1) and no padding ('valid'), so each convolution shrinks the spatial dimensions. After the first Conv2D the output shape is (1, 1022, 6); the second (3, 3) kernel would then have to subtract 3 from a height of only 1, producing a negative dimension, which is exactly what the error reports.
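To see where the numbers come from: with 'valid' padding the output size along each axis is floor((input - kernel) / stride) + 1. A minimal sketch of the arithmetic:

```python
def conv_out(size, kernel, stride=1):
    # Output size along one axis for a 'valid' (no padding) convolution.
    return (size - kernel) // stride + 1

h, w = 3, 1024
print(conv_out(h, 3), conv_out(w, 3))  # 1 1022 -> spatial shape after the first Conv2D
# The second Conv2D needs a height of at least 3, but only 1 is left:
# (1 - 3) // 1 + 1 = -1, the "negative dimension" the error reports.
```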
To fix this, add padding to the convolutions, or use a smaller kernel so the feature map never shrinks below the kernel size. If you want the spatial dimensions to stay unchanged after each convolution, use padding='same'. For example:
```python
model.add(Conv2D(6, (3, 3), activation='relu', padding='same', input_shape=(3, 1024, 1)))
```
Here padding='same' pads the input so that the convolution output has the same height and width as its input.
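As a quick sanity check, here is a minimal sketch (assuming TensorFlow 2.x) that builds just the two convolution layers with padding='same' and confirms the shapes no longer shrink:

```python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D

check = Sequential([
    Conv2D(6, (3, 3), activation='relu', padding='same', input_shape=(3, 1024, 1)),
    Conv2D(16, (3, 3), activation='relu', padding='same'),
])
check.summary()  # both Conv2D layers keep the (3, 1024) spatial size
```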
Related questions
After adding padding='same', the code now fails with a different error:

```python
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, Dropout, Flatten, Dense
from tensorflow.keras.utils import plot_model
np.set_printoptions(threshold=np.inf)

# Build the model
model = Sequential()
model.add(Conv2D(6, (3, 3), activation='relu', padding='same', input_shape=(3, 1024, 1)))
model.add(Conv2D(16, (3, 3), activation='relu'), padding='same')
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(225, activation='sigmoid'))

# Plot the model architecture
plot_model(model, show_shapes=True)
```

Running it raises:

```
TypeError: add() got an unexpected keyword argument 'padding'
```
There is a syntax error in the second Conv2D line: the closing parenthesis of Conv2D() comes too early, so padding='same' is passed as a keyword argument to model.add() instead of to Conv2D(). That is why add() complains about an unexpected keyword argument 'padding'. Move padding='same' inside the Conv2D() call:
```python
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, Dropout, Flatten, Dense
from tensorflow.keras.utils import plot_model
np.set_printoptions(threshold=np.inf)

# Build the model
model = Sequential()
model.add(Conv2D(6, (3, 3), activation='relu', padding='same', input_shape=(3, 1024, 1)))
model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(225, activation='sigmoid'))
# Plot the model architecture
plot_model(model, show_shapes=True)
```
With the parenthesis fixed, padding='same' is now a keyword argument of Conv2D() rather than of model.add(). The code should now run and plot the model structure.
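As a usage note, plot_model renders the diagram via pydot and Graphviz, so both must be installed; the file name below is just an example, and to_file controls where the image is written:

```python
# Requires: pip install pydot, plus a system Graphviz install.
plot_model(model, to_file='model_structure.png', show_shapes=True, show_layer_names=True)
```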
Help me convert the following code from TensorFlow to PyTorch:

```python
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

base_dir = 'E:/direction/datasetsall/'
train_dir = os.path.join(base_dir, 'train_img/')
validation_dir = os.path.join(base_dir, 'val_img/')
train_cats_dir = os.path.join(train_dir, 'down')
train_dogs_dir = os.path.join(train_dir, 'up')
validation_cats_dir = os.path.join(validation_dir, 'down')
validation_dogs_dir = os.path.join(validation_dir, 'up')

batch_size = 64
epochs = 50
IMG_HEIGHT = 128
IMG_WIDTH = 128

num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val

train_image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
validation_image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                           directory=train_dir,
                                                           shuffle=True,
                                                           target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                           class_mode='categorical')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
                                                              directory=validation_dir,
                                                              target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                              class_mode='categorical')
sample_training_images, _ = next(train_data_gen)

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(2, activation='softmax')
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()

history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=total_val // batch_size
)

# Visualize training results
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)

model.save("./model/timo_classification_128_maxPool2D_dense256.h5")
```
Here is the equivalent PyTorch version:

```python
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
from torchvision import transforms

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

base_dir = 'E:/direction/datasetsall/'
train_dir = os.path.join(base_dir, 'train_img/')
validation_dir = os.path.join(base_dir, 'val_img/')
train_cats_dir = os.path.join(train_dir, 'down')
train_dogs_dir = os.path.join(train_dir, 'up')
validation_cats_dir = os.path.join(validation_dir, 'down')
validation_dogs_dir = os.path.join(validation_dir, 'up')

batch_size = 64
epochs = 50
IMG_HEIGHT = 128
IMG_WIDTH = 128

num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val

# ImageFolder + DataLoader replace Keras' ImageDataGenerator / flow_from_directory.
transform = transforms.Compose([
    transforms.Resize((IMG_HEIGHT, IMG_WIDTH)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
train_image_generator = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(train_dir, transform=transform),
    batch_size=batch_size, shuffle=True)
validation_image_generator = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(validation_dir, transform=transform),
    batch_size=batch_size)

# No Softmax at the end: CrossEntropyLoss expects raw logits and
# applies log-softmax internally.
model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(2),
    torch.nn.Conv2d(16, 32, kernel_size=3, padding=1),
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(2),
    torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(2),
    torch.nn.Flatten(),
    torch.nn.Linear(64 * 16 * 16, 256),  # 128 -> 64 -> 32 -> 16 after three pools
    torch.nn.ReLU(),
    torch.nn.Linear(256, 2),
)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

for epoch in range(epochs):
    running_loss = 0.0
    for inputs, labels in train_image_generator:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # len(DataLoader) is the number of batches per epoch.
    epoch_loss = running_loss / len(train_image_generator)
    print('Epoch: %d, Loss: %.3f' % (epoch + 1, epoch_loss))

    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in validation_image_generator:
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Validation Accuracy: %.2f%%' % (100 * correct / total))
```
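The original TensorFlow script ended with model.save(...); a minimal PyTorch counterpart (the .pt path below simply mirrors the original .h5 name) is:

```python
# Save only the weights; restore later with model.load_state_dict(torch.load(path)).
torch.save(model.state_dict(), './model/timo_classification_128_maxPool2D_dense256.pt')
```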