```python
# 实例化模型
model = vgg("vgg16", 401, 512, 10)
model.summary()

# using keras low level api for training
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')
```
时间: 2024-02-19 22:59:35 浏览: 115
这段代码是在使用 TensorFlow 2.0+ 的 Keras 低阶(自定义训练循环)API 训练 VGG-16 模型。使用的优化器是 Adam,学习率为 0.0001。同时,还定义了训练和测试过程中的损失函数和准确率的度量指标:训练时使用分类交叉熵损失函数,准确率指标使用分类准确率;测试时同样使用这两个指标。
相关问题
引入VGG16的训练模型
要在现有代码中引入VGG16模型进行训练,可以按照以下步骤进行修改:
1. **导入VGG16模型**:使用Keras中的`applications`模块来加载预训练的VGG16模型。
2. **调整模型结构**:将VGG16模型作为基础模型,并在其顶部添加自定义的分类层。
3. **冻结部分层**:为了防止在训练过程中破坏预训练权重,通常会冻结VGG16的部分或全部层。
4. **编译和训练模型**:与之前类似,编译模型并使用图像增强进行训练。
以下是修改后的代码示例:
```python
import os
import json
import numpy as np
import pandas as pd
from PIL import Image
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import f1_score, accuracy_score
# 定义路径常量
TRAIN_DATA_DIR = "C:\\Users\\24067\\Desktop\\peach_split\\train"
VAL_DATA_DIR = "C:\\Users\\24067\\Desktop\\peach_split\\val"
TEST_DATA_DIR = "C:\\Users\\24067\\Desktop\\peach_split\\test"
TRAIN_LABEL_PATH = "C:\\Users\\24067\\Desktop\\train_label.json"
VAL_LABEL_PATH = "C:\\Users\\24067\\Desktop\\val_label.json"
TEST_LABEL_PATH = "C:\\Users\\24067\\Desktop\\train_label.json"
OUTPUT_PATH = "C:\\Users\\24067\\Desktop\\data\\submission.csv"
# 加载数据集
def load_data(data_dir, label_path):
with open(label_path, 'r') as f:
labels_list = json.load(f)
labels = {item['文件名']: item['标签'] for item in labels_list if '文件名' in item and '标签' in item}
images = []
targets = []
for file_name, label in labels.items():
img_path = os.path.join(data_dir, file_name)
if not os.path.exists(img_path):
continue
img = Image.open(img_path).resize((224, 224)) # VGG16需要224x224的输入
img_array = np.array(img) / 255.0
images.append(img_array)
targets.append(label)
if len(images) == 0:
raise ValueError("No valid images found.")
return np.array(images), np.array(targets)
# 加载训练集和验证集
X_train, y_train = load_data(TRAIN_DATA_DIR, TRAIN_LABEL_PATH)
X_val, y_val = load_data(VAL_DATA_DIR, VAL_LABEL_PATH)
# 标签映射
label_map = {'特级': 3, '一级': 2, '二级': 1, '三级': 0}
y_train = np.array([label_map[label] for label in y_train])
y_val = np.array([label_map[label] for label in y_val])
# 创建模型
def create_model(input_shape=(224, 224, 3)):
base_model = VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
# 冻结VGG16的基础层
for layer in base_model.layers:
layer.trainable = False
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(4, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
return model
# 实例化模型
model = create_model()
# 编译模型
model.compile(optimizer=Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# 图像增强
datagen = ImageDataGenerator(
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True
)
# 训练模型
history = model.fit(datagen.flow(X_train, y_train, batch_size=32), epochs=1, validation_data=(X_val, y_val))
# 评估模型
def evaluate_model(model, X, y):
predictions = model.predict(X)
predicted_labels = np.argmax(predictions, axis=1)
accuracy = accuracy_score(y, predicted_labels)
f1 = f1_score(y, predicted_labels, average='weighted')
print(f'Accuracy: {accuracy:.4f}')
print(f'F1 Score: {f1:.4f}')
return accuracy, f1
evaluate_model(model, X_val, y_val)
# 预测测试集并保存结果
def predict_and_save(test_data_dir, test_label_path, output_path):
test_images = []
test_file_names = []
with open(test_label_path, 'r') as f:
test_labels_list = json.load(f)
test_labels = {item['文件名']: item['标签'] for item in test_labels_list if '文件名' in item and '标签' in item}
for file_name in test_labels.keys():
img_path = os.path.join(test_data_dir, file_name)
if not os.path.exists(img_path):
continue
img = Image.open(img_path).resize((224, 224))
img_array = np.array(img) / 255.0
test_images.append(img_array)
test_file_names.append(file_name)
test_images = np.array(test_images)
predictions = model.predict(test_images)
predicted_labels = np.argmax(predictions, axis=1)
label_map_inv = {v: k for k, v in label_map.items()}
predicted_labels = [label_map_inv[label] for label in predicted_labels]
submission_df = pd.DataFrame({'文件名': test_file_names, '标签': predicted_labels})
submission_df.to_csv(output_path, index=False)
# 进行预测并保存结果
predict_and_save(TEST_DATA_DIR, TEST_LABEL_PATH, OUTPUT_PATH)
```
### 主要改动点:
1. **图像大小**:VGG16要求输入图像大小为224x224,因此在加载数据时将图像大小调整为224x224。
2. **模型创建**:使用预训练的VGG16模型作为基础模型,并在其顶部添加全局平均池化层、全连接层和Dropout层。
3. **冻结层**:冻结VGG16的基础层以避免破坏预训练权重。
4. **编译和训练**:保持原有的编译和训练流程不变。
这样,你就可以利用VGG16模型进行桃子(peach)分级分类任务了。
将代码中的训练模型改为vgg
要将代码中的训练模型从ResNet50更改为VGG16,可以按照以下步骤进行修改:
1. **导入VGG16模型**:首先需要导入VGG16模型。
2. **创建VGG16模型**:使用VGG16作为基础模型,并添加自定义的全连接层。
3. **编译模型**:保持编译部分不变。
4. **训练模型**:保持训练部分不变。
以下是修改后的代码:
```python
import os
import json
import numpy as np
import pandas as pd
from PIL import Image
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import f1_score, accuracy_score
# 定义路径常量
TRAIN_DATA_DIR = "C:\\Users\\24067\\Desktop\\peach_split\\train"
VAL_DATA_DIR = "C:\\Users\\24067\\Desktop\\peach_split\\val"
TEST_DATA_DIR = "C:\\Users\\24067\\Desktop\\peach_split\\test"
TRAIN_LABEL_PATH = "C:\\Users\\24067\\Desktop\\train_label.json"
VAL_LABEL_PATH = "C:\\Users\\24067\\Desktop\\val_label.json"
OUTPUT_PATH = "C:\\Users\\24067\\Desktop\\data\\submission.csv"
MODEL_SAVE_PATH = "C:\\Users\\24067\\Desktop\\data\\model.h5"
# 加载数据集
def load_data(data_dir, label_path):
with open(label_path, 'r') as f:
labels_list = json.load(f)
labels = {item['文件名']: item['标签'] for item in labels_list if '文件名' in item and '标签' in item}
images = []
targets = []
for file_name, label in labels.items():
img_path = os.path.join(data_dir, file_name)
if not os.path.exists(img_path):
continue
img = Image.open(img_path).resize((128, 128))
img_array = np.array(img) / 255.0
images.append(img_array)
targets.append(label)
if len(images) == 0:
raise ValueError("No valid images found.")
return np.array(images), np.array(targets)
# 加载训练集和验证集
X_train, y_train = load_data(TRAIN_DATA_DIR, TRAIN_LABEL_PATH)
X_val, y_val = load_data(VAL_DATA_DIR, VAL_LABEL_PATH)
# 标签映射
label_map = {'特级': 3, '一级': 2, '二级': 1, '三级': 0}
y_train = np.array([label_map[label] for label in y_train])
y_val = np.array([label_map[label] for label in y_val])
# 创建模型
def create_vgg_model(input_shape=(128, 128, 3)):
base_model = VGG16(weights=None, include_top=False, input_shape=input_shape)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(4, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
return model
# 实例化模型
model = create_vgg_model()
# 编译模型
model.compile(optimizer=Adam(learning_rate=0.0001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# 图像增强
datagen = ImageDataGenerator(
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.15,
zoom_range=0.15,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest'
)
# 训练模型
history = model.fit(datagen.flow(X_train, y_train, batch_size=32), epochs=50, validation_data=(X_val, y_val))
# 保存模型
model.save(MODEL_SAVE_PATH)
# 评估模型
def evaluate_model(model, X, y):
predictions = model.predict(X)
predicted_labels = np.argmax(predictions, axis=1)
accuracy = accuracy_score(y, predicted_labels)
f1 = f1_score(y, predicted_labels, average='weighted')
print(f'Accuracy: {accuracy:.4f}')
print(f'F1 Score: {f1:.4f}')
return accuracy, f1
evaluate_model(model, X_val, y_val)
# 预测测试集并保存结果
def predict_and_save(test_data_dir, output_path):
test_images = []
test_file_names = []
# 遍历测试集目录中的所有图像文件
for file_name in os.listdir(test_data_dir):
img_path = os.path.join(test_data_dir, file_name)
if not os.path.exists(img_path) or not file_name.lower().endswith(('.png', '.jpg', '.jpeg')):
continue
img = Image.open(img_path).resize((128, 128))
img_array = np.array(img) / 255.0
test_images.append(img_array)
test_file_names.append(file_name)
test_images = np.array(test_images)
predictions = model.predict(test_images)
predicted_labels = np.argmax(predictions, axis=1)
label_map_inv = {v: k for k, v in label_map.items()}
predicted_labels = [label_map_inv[label] for label in predicted_labels]
submission_df = pd.DataFrame({'文件名': test_file_names, '标签': predicted_labels})
submission_df.to_csv(output_path, index=False)
# 进行预测并保存结果
predict_and_save(TEST_DATA_DIR, OUTPUT_PATH)
```
### 主要修改点:
1. **导入VGG16**:`from tensorflow.keras.applications.vgg16 import VGG16`
2. **创建VGG16模型**:在 `create_vgg_model` 函数中使用 `VGG16` 作为基础模型。
3. **其他部分保持不变**:包括数据加载、编译、训练、评估和预测等部分。
这样,代码就成功地将模型从ResNet50更改为VGG16了。
阅读全文