Improve the code. Test set: used for the final model performance evaluation, located at "C:\Users\24067\Desktop\peach_split\test". It has no label file.
To adapt the code to a test set without labels and carry out the final model evaluation, make the following changes:
1. **Remove label loading for the test set**: the test set has no labels, so no label file is loaded for it.
2. **Predict directly on the test set**: write the predictions to the specified output path.
3. **Adjust the evaluation step**: if evaluation is still required, call the evaluation function on the validation set after training.
The improved code is as follows:
```python
import os
import json
import numpy as np
import pandas as pd
from PIL import Image
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import f1_score, accuracy_score

# Data paths
TRAIN_DATA_DIR = "C:\\Users\\24067\\Desktop\\peach_split\\train"
VAL_DATA_DIR = "C:\\Users\\24067\\Desktop\\peach_split\\val"
TEST_DATA_DIR = "C:\\Users\\24067\\Desktop\\peach_split\\test"
TRAIN_LABEL_PATH = "C:\\Users\\24067\\Desktop\\train_label.json"
VAL_LABEL_PATH = "C:\\Users\\24067\\Desktop\\val_label.json"
OUTPUT_PATH = "C:\\Users\\24067\\Desktop\\data\\submission.csv"

# Load a labelled split (train/validation); the label JSON maps file names to grade labels
def load_data(data_dir, label_path):
    with open(label_path, 'r', encoding='utf-8') as f:
        labels_list = json.load(f)
    labels = {item['文件名']: item['标签'] for item in labels_list if '文件名' in item and '标签' in item}
    images = []
    targets = []
    for file_name, label in labels.items():
        img_path = os.path.join(data_dir, file_name)
        if not os.path.exists(img_path):
            continue
        # Convert to RGB so every image has 3 channels, then resize and scale to [0, 1]
        img = Image.open(img_path).convert('RGB').resize((128, 128))
        img_array = np.array(img) / 255.0
        images.append(img_array)
        targets.append(label)
    if len(images) == 0:
        raise ValueError("No valid images found.")
    return np.array(images), np.array(targets)

# Load training and validation data
train_images, train_labels = load_data(TRAIN_DATA_DIR, TRAIN_LABEL_PATH)
val_images, val_labels = load_data(VAL_DATA_DIR, VAL_LABEL_PATH)

# Map grade names to integer class indices
label_map = {'特级': 3, '一级': 2, '二级': 1, '三级': 0}
train_labels = np.array([label_map[label] for label in train_labels])
val_labels = np.array([label_map[label] for label in val_labels])

# Build a simple CNN classifier
def create_model(input_shape=(128, 128, 3)):
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Conv2D(128, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(4, activation='softmax')
    ])
    return model

# Instantiate and compile the model (integer labels, hence sparse categorical cross-entropy)
model = create_model()
model.compile(optimizer=Adam(learning_rate=0.001),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Data augmentation for training
datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    vertical_flip=True
)

# Train the model (epochs=1 is only a quick smoke test; increase it for a real run)
history = model.fit(datagen.flow(train_images, train_labels, batch_size=32),
                    epochs=1,
                    validation_data=(val_images, val_labels))

# Evaluate on a labelled split (used here for the validation set)
def evaluate_model(model, X, y):
    predictions = model.predict(X)
    predicted_labels = np.argmax(predictions, axis=1)
    accuracy = accuracy_score(y, predicted_labels)
    f1 = f1_score(y, predicted_labels, average='weighted')
    print(f'Accuracy: {accuracy:.4f}')
    print(f'F1 Score: {f1:.4f}')
    return accuracy, f1

evaluate_model(model, val_images, val_labels)

# Predict on the unlabelled test set and save the results as a CSV submission
def predict_and_save(test_data_dir, output_path):
    test_images = []
    test_file_names = []
    for file_name in os.listdir(test_data_dir):
        # Skip anything that is not an image file
        if not file_name.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp')):
            continue
        img_path = os.path.join(test_data_dir, file_name)
        img = Image.open(img_path).convert('RGB').resize((128, 128))
        img_array = np.array(img) / 255.0
        test_images.append(img_array)
        test_file_names.append(file_name)
    test_images = np.array(test_images)
    predictions = model.predict(test_images)
    predicted_labels = np.argmax(predictions, axis=1)
    # Map class indices back to grade names
    label_map_inv = {v: k for k, v in label_map.items()}
    predicted_labels = [label_map_inv[label] for label in predicted_labels]
    submission_df = pd.DataFrame({'文件名': test_file_names, '标签': predicted_labels})
    submission_df.to_csv(output_path, index=False)

# Run prediction and save the results
predict_and_save(TEST_DATA_DIR, OUTPUT_PATH)
```
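As an optional extension not in the original script, the trained model can be persisted so the test-set prediction step can be rerun later without retraining. This is a minimal sketch using the standard Keras save/load API; the file name `peach_cnn.keras` is only an example, and older TensorFlow versions may need an `.h5` path instead:

```python
# Optional: save the trained model so predictions can be rerun without retraining.
# "peach_cnn.keras" is an example file name; older TF versions may need "peach_cnn.h5".
model.save("peach_cnn.keras")

# Later, reload it and reuse predict_and_save on the test directory
# (predict_and_save and label_map must still be defined in the session).
from tensorflow.keras.models import load_model

model = load_model("peach_cnn.keras")
predict_and_save(TEST_DATA_DIR, OUTPUT_PATH)
```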
### Key changes:
1. **Removed test-set label loading**: `TEST_LABEL_PATH` and the related loading logic are no longer needed.
2. **Predict directly on the test set**: `predict_and_save` iterates over all image files in the test directory, preprocesses them, runs prediction, and writes the results to a CSV file.
3. **Kept validation-set evaluation**: after training, model performance is still evaluated on the validation set. A quick check of the generated submission is sketched below.
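For a quick sanity check of the output, the generated submission can be read back and inspected. This is a minimal sketch, assuming it runs in the same script as the code above (it reuses `OUTPUT_PATH` and the column names written by `predict_and_save`):

```python
import pandas as pd

# Read the submission back and check its basic structure
submission = pd.read_csv(OUTPUT_PATH)
print(submission.head())                   # expected columns: 文件名, 标签
print(submission['标签'].value_counts())   # distribution of predicted grades
assert set(submission['标签']).issubset({'特级', '一级', '二级', '三级'})
```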