tensorflowAlexNet进行官方CIFAR-10数据集分类并对结果进行可视化
时间: 2023-12-22 08:02:58 浏览: 39
以下是一个简单的示例代码：使用 TensorFlow（tf.keras）构建一个 AlexNet 风格的卷积网络对 CIFAR-10 数据集进行分类，并可视化训练曲线以及若干测试样本的真实/预测标签。
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# CIFAR-10 has ten target classes.
num_classes = 10

# Load the official CIFAR-10 dataset (50k train / 10k test 32x32 RGB images).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

# Scale pixel intensities from [0, 255] down to [0, 1] as float32.
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255

# One-hot encode the integer labels for categorical cross-entropy.
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
# AlexNet-style CNN adapted for CIFAR-10.
#
# BUG FIX: the original used valid padding on the stem conv and every
# MaxPooling2D layer. With 32x32 inputs the 11x11/stride-4 stem leaves a
# 6x6 feature map; two valid 3x3/stride-2 pools shrink it to 2x2, and the
# third pool would then need a negative output size, so building the model
# raises an error. Using padding="same" on the stem conv and the pooling
# layers keeps the spatial dims positive (8 -> 4 -> 2 -> 1) while
# preserving the original layer topology.
model = tf.keras.models.Sequential([
    # Stem: large receptive field with aggressive downsampling, as in AlexNet.
    tf.keras.layers.Conv2D(filters=96, kernel_size=(11, 11), strides=(4, 4),
                           activation='relu', padding="same",
                           input_shape=(32, 32, 3)),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same"),
    tf.keras.layers.Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1),
                           activation='relu', padding="same"),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same"),
    tf.keras.layers.Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1),
                           activation='relu', padding="same"),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1),
                           activation='relu', padding="same"),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),
                           activation='relu', padding="same"),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same"),
    # Classifier head: two large FC layers with dropout, then softmax.
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(4096, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(4096, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(num_classes, activation='softmax')
])
# Compile with SGD + Nesterov momentum.
# FIX: modern Keras optimizers take `learning_rate` (the `lr` alias is
# deprecated) and no longer accept a `decay` argument; use a
# tf.keras.optimizers.schedules.LearningRateSchedule if decay is needed.
model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9,
                                      nesterov=True),
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
# Train for 10 epochs, evaluating on the held-out test set after each epoch.
history = model.fit(x_train, y_train, batch_size=128, epochs=10,
                    validation_data=(x_test, y_test))
# Plot training/validation loss and accuracy curves recorded during fit.
for metric_key, curve_label in (
    ("loss", "train_loss"),
    ("val_loss", "val_loss"),
    ("accuracy", "train_acc"),
    ("val_accuracy", "val_acc"),
):
    plt.plot(history.history[metric_key], label=curve_label)
plt.legend()
plt.show()
# Predict class probabilities on the test set, then reduce both the
# predictions and the one-hot ground truth back to integer class labels.
y_pred = np.argmax(model.predict(x_test), axis=1)
y_test = np.argmax(y_test, axis=1)
# Overall top-1 accuracy on the test set.
accuracy = (y_pred == y_test).mean()
print("Accuracy:", accuracy)
# Visualize ten random test samples with their true vs. predicted labels.
# FIX: sample without replacement so the same image cannot appear twice
# (np.random.choice defaults to replace=True), and call plt.show() so this
# second figure is actually rendered when run as a script (the first figure
# was shown, this one never was).
indices = np.random.choice(len(x_test), size=10, replace=False)
images = x_test[indices]
labels = y_test[indices]
predictions = y_pred[indices]

fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(10, 5))
axes = axes.ravel()
for i, ax in enumerate(axes):
    ax.imshow(images[i])
    ax.set_title("True: %s \nPredict: %s" % (labels[i], predictions[i]))
    ax.axis('off')
plt.subplots_adjust(wspace=1)
plt.show()