```python
classifier = KNeighborsClassifier(n_neighbors=20)
train_model(classifier, x_train, y_train, x_test)
show_roc_line(classifier, x_train, y_train)
```
Output:
```
=========================KNeighbors=========================
>>> Performance on the training set: 0.717852684144819
>>> Performance on the test set: 0.7
>>> Predicted Roc_auc: 0.6611
>>> Confusion matrix
```
This code trains and tests a k-nearest-neighbors classifier, reports its performance on both the training and test sets, and also computes the area under the ROC curve (AUC) as a prediction quality metric. It additionally prints a confusion matrix to analyze the classification results, but you did not include the matrix's actual contents. For a binary problem the confusion matrix is a 2x2 matrix: the first row counts samples whose true class is 0 and the second row counts samples whose true class is 1, while the first column counts samples the model predicted as class 0 and the second column counts samples predicted as class 1. So the concrete values depend on your output. From the results shown, the training-set and test-set scores are close (about 0.72 vs 0.70), so the model does not appear badly overfit, but the ROC AUC of 0.6611 is fairly weak.
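Since `train_model` and `show_roc_line` appear to be your own helper functions whose internals are not shown, here is a minimal sketch of how the missing confusion matrix could be computed and displayed with scikit-learn, assuming a `y_test` exists alongside the `x_test` in your snippet:

```python
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

# Predict on the held-out test set (classifier, x_test, y_test assumed from your snippet)
y_pred = classifier.predict(x_test)

# Rows are true classes, columns are predicted classes (scikit-learn's convention)
cm = confusion_matrix(y_test, y_pred)
print(cm)

# Optional: render the matrix as a labelled plot
ConfusionMatrixDisplay(confusion_matrix=cm).plot()
plt.show()
```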
Related question
How do I plot the ROC curves for the KNN and random-forest models in the following code?

```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier

# df is the heart-disease DataFrame, assumed loaded elsewhere

# Correlation heatmap of all features
corrmat = df.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(16, 16))
g = sns.heatmap(df[top_corr_features].corr(), annot=True, cmap="RdYlGn")
plt.show()

# Class balance of the target
sns.set_style('whitegrid')
sns.countplot(x='target', data=df, palette='RdBu_r')
plt.show()

# One-hot encode the categorical columns
dataset = pd.get_dummies(df, columns=['sex', 'cp', 'fbs', 'restecg',
                                      'exang', 'slope', 'ca', 'thal'])

# Standardize the continuous columns
standardScaler = StandardScaler()
columns_to_scale = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak']
dataset[columns_to_scale] = standardScaler.fit_transform(dataset[columns_to_scale])
dataset.head()

y = dataset['target']
X = dataset.drop(['target'], axis=1)

# 10-fold cross-validated accuracy for K = 1..20
knn_scores = []
for k in range(1, 21):
    knn_classifier = KNeighborsClassifier(n_neighbors=k)
    score = cross_val_score(knn_classifier, X, y, cv=10)
    knn_scores.append(score.mean())

plt.plot([k for k in range(1, 21)], knn_scores, color='red')
for i in range(1, 21):
    plt.text(i, knn_scores[i - 1], (i, knn_scores[i - 1]))
plt.xticks([i for i in range(1, 21)])
plt.xlabel('Number of Neighbors (K)')
plt.ylabel('Scores')
plt.title('K Neighbors Classifier scores for different K values')
plt.show()

knn_classifier = KNeighborsClassifier(n_neighbors=12)
score = cross_val_score(knn_classifier, X, y, cv=10)
score.mean()

randomforest_classifier = RandomForestClassifier(n_estimators=10)
score = cross_val_score(randomforest_classifier, X, y, cv=10)
score.mean()
```
Here is code to plot the ROC curves:
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier

cv = StratifiedKFold(n_splits=10)
mean_fpr = np.linspace(0, 1, 100)
fig, ax = plt.subplots()

# KNN Classifier ROC Curve: one curve per CV fold
knn_classifier = KNeighborsClassifier(n_neighbors=12)
knn_tprs = []
knn_aucs = []
for i, (train, test) in enumerate(cv.split(X, y)):
    knn_classifier.fit(X.iloc[train], y.iloc[train])
    knn_proba = knn_classifier.predict_proba(X.iloc[test])[:, 1]
    knn_fpr, knn_tpr, knn_thresholds = roc_curve(y.iloc[test], knn_proba)
    knn_tprs.append(np.interp(mean_fpr, knn_fpr, knn_tpr))
    knn_tprs[-1][0] = 0.0
    knn_roc_auc = auc(knn_fpr, knn_tpr)
    knn_aucs.append(knn_roc_auc)
    ax.plot(knn_fpr, knn_tpr, lw=1, alpha=0.3,
            label='KNN ROC fold %d (AUC = %0.2f)' % (i + 1, knn_roc_auc))

# Random Forest Classifier ROC Curve: one curve per CV fold
randomforest_classifier = RandomForestClassifier(n_estimators=10)
rf_tprs = []
rf_aucs = []
for i, (train, test) in enumerate(cv.split(X, y)):
    randomforest_classifier.fit(X.iloc[train], y.iloc[train])
    rf_proba = randomforest_classifier.predict_proba(X.iloc[test])[:, 1]
    rf_fpr, rf_tpr, rf_thresholds = roc_curve(y.iloc[test], rf_proba)
    rf_tprs.append(np.interp(mean_fpr, rf_fpr, rf_tpr))
    rf_tprs[-1][0] = 0.0
    rf_roc_auc = auc(rf_fpr, rf_tpr)
    rf_aucs.append(rf_roc_auc)
    ax.plot(rf_fpr, rf_tpr, lw=1, alpha=0.3,
            label='RF ROC fold %d (AUC = %0.2f)' % (i + 1, rf_roc_auc))

# Plot the chance line and the mean ROC curves
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
        label='Chance', alpha=.8)

knn_mean_tpr = np.mean(knn_tprs, axis=0)
knn_mean_tpr[-1] = 1.0
knn_mean_auc = auc(mean_fpr, knn_mean_tpr)
knn_std_auc = np.std(knn_aucs)
ax.plot(mean_fpr, knn_mean_tpr, color='b',
        label=r'KNN Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (knn_mean_auc, knn_std_auc),
        lw=2, alpha=.8)

rf_mean_tpr = np.mean(rf_tprs, axis=0)
rf_mean_tpr[-1] = 1.0
rf_mean_auc = auc(mean_fpr, rf_mean_tpr)
rf_std_auc = np.std(rf_aucs)
ax.plot(mean_fpr, rf_mean_tpr, color='g',
        label=r'RF Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (rf_mean_auc, rf_std_auc),
        lw=2, alpha=.8)

# Set the plot parameters
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
       title="Receiver operating characteristic example")
ax.legend(loc="lower right")
plt.show()
```
This code draws per-fold ROC curves for the KNN and random-forest classifiers, along with their mean curves and AUC values. Run it with your own `X`, `y`, and classifier parameters.
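If you only need a single ROC curve per model rather than one per fold, a shorter alternative (a sketch, assuming the same `X` and `y` as above) is to collect out-of-fold probabilities with `cross_val_predict` and draw one curve from them:

```python
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import roc_curve, auc
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier

# Out-of-fold predicted probabilities for the positive class
knn_oof = cross_val_predict(KNeighborsClassifier(n_neighbors=12), X, y,
                            cv=10, method='predict_proba')[:, 1]
rf_oof = cross_val_predict(RandomForestClassifier(n_estimators=10), X, y,
                           cv=10, method='predict_proba')[:, 1]

# One aggregate ROC curve per model
for name, proba in [('KNN', knn_oof), ('Random Forest', rf_oof)]:
    fpr, tpr, _ = roc_curve(y, proba)
    plt.plot(fpr, tpr, label='%s (AUC = %0.2f)' % (name, auc(fpr, tpr)))

plt.plot([0, 1], [0, 1], linestyle='--', color='r', label='Chance')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
plt.show()
```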
My code raises the error: Expected 'estimator' to be a binary classifier, but got GridSearchCV fit on multiclass (3 classes) data
This error occurs because ROC plotting utilities such as `plot_roc_curve` expect a binary classifier: in the binary case the labels take only the two values 0 and 1, whereas in a multiclass problem there are several possible labels, so you need different evaluation metrics and plotting methods (for example, one-vs-rest curves per class).
First, you can keep `weights='uniform'` in `KNeighborsClassifier` (this is the default and weights every neighbor equally). Then, for multiclass evaluation, use the functions in `sklearn.metrics` directly: `confusion_matrix` and `classification_report` both accept multiclass labels as-is. Finally, instead of the binary-only plotting helpers, compute one-vs-rest ROC and precision-recall curves per class from the model's `predict_proba` output.
Here is a revised code example:
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import (confusion_matrix, classification_report,
                             roc_curve, auc, precision_recall_curve)
from sklearn.preprocessing import label_binarize

# Load the dataset
iris = load_iris()
X = iris.data
y = iris.target

# Split into training and test sets (keep the original multiclass labels)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Define the KNN model (weights='uniform' weights every neighbor equally)
knn = KNeighborsClassifier(weights='uniform')

# Parameter space to search
param_grid = {'n_neighbors': np.arange(1, 21, 2)}

# Grid search over k
grid_search = GridSearchCV(knn, param_grid=param_grid, cv=5)
grid_search.fit(X_train, y_train)

# Random search over k
random_search = RandomizedSearchCV(knn, param_distributions=param_grid, cv=5, n_iter=10)
random_search.fit(X_train, y_train)

# Report the best model and the best k
print("Grid Search: Best Model -", grid_search.best_estimator_)
print("Grid Search: Best k -", grid_search.best_params_['n_neighbors'])
print("Random Search: Best Model -", random_search.best_estimator_)
print("Random Search: Best k -", random_search.best_params_['n_neighbors'])

# Evaluate on the test set
y_pred_grid = grid_search.predict(X_test)
y_pred_random = random_search.predict(X_test)

# Confusion matrices and classification reports handle multiclass labels directly
print("Grid Search: Confusion Matrix\n", confusion_matrix(y_test, y_pred_grid))
print("Random Search: Confusion Matrix\n", confusion_matrix(y_test, y_pred_random))
print("Grid Search: Classification Report\n", classification_report(y_test, y_pred_grid))
print("Random Search: Classification Report\n", classification_report(y_test, y_pred_random))

# One-vs-rest PR and ROC curves: binarize only the test labels,
# then draw one curve per class from the predicted probabilities
y_test_bin = label_binarize(y_test, classes=[0, 1, 2])
y_score = grid_search.predict_proba(X_test)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
for i in range(3):
    precision, recall, _ = precision_recall_curve(y_test_bin[:, i], y_score[:, i])
    ax1.plot(recall, precision, label='class %d' % i)
    fpr, tpr, _ = roc_curve(y_test_bin[:, i], y_score[:, i])
    ax2.plot(fpr, tpr, label='class %d (AUC = %0.2f)' % (i, auc(fpr, tpr)))
ax1.set(xlabel='Recall', ylabel='Precision', title='Precision-Recall (one-vs-rest)')
ax2.set(xlabel='False Positive Rate', ylabel='True Positive Rate', title='ROC (one-vs-rest)')
ax1.legend(loc='lower left')
ax2.legend(loc='lower right')
plt.show()
```
In this revised code, the model is fit on the original multiclass labels, and `confusion_matrix` and `classification_report` from `sklearn.metrics` are applied directly to the integer class labels, since both support multiclass problems. For the curves, only the test labels are binarized with `label_binarize`, and one-vs-rest ROC and precision-recall curves are drawn per class from the `predict_proba` scores, which avoids the binary-only plotting utilities that raised the error.
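If you also want a single summary number instead of three per-class curves, a minimal sketch (reusing `grid_search`, `X_test`, and `y_test` from the example above) is `roc_auc_score` with its built-in one-vs-rest averaging:

```python
from sklearn.metrics import roc_auc_score

# Macro-averaged one-vs-rest AUC over the three classes
y_score = grid_search.predict_proba(X_test)
print("OvR ROC AUC:", roc_auc_score(y_test, y_score, multi_class='ovr'))
```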