from sklearn.neighbors import KNeighborsClassifier k_score = [] k_range = range(1,228) for k in range(1,228): knn = KNeighborsClassifier(n_neighbors=k) knn.fit(X_train,y_train) k_score.append([k,knn.score(X_test,y_test)]) print(k, k_score) k_score = np.array(k_score) k_score = pd.DataFrame(k_score,columns=['Value of K for KNN','Score']) ax = sns.lineplot(data=k_score,x="Value of K for KNN", y="Score",)。改写这段代码
时间: 2024-02-06 08:10:52 浏览: 162
以下是改写后的代码,主要对输出进行了优化,使用了f-string格式化字符串,并将k_score转换为pandas DataFrame后使用了seaborn绘制折线图:
```
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier

# Sweep the neighborhood size K over [1, 227], recording test-set accuracy
# for each fitted KNN model, then visualize accuracy as a function of K.
results = []
for k in range(1, 228):
    classifier = KNeighborsClassifier(n_neighbors=k)
    classifier.fit(X_train, y_train)
    score = classifier.score(X_test, y_test)
    results.append((k, score))
    print(f"K={k}, Score={score:.4f}")

# Tabulate the sweep and draw accuracy vs. K as a line plot.
k_score = pd.DataFrame(results, columns=['Value of K for KNN', 'Score'])
sns.lineplot(data=k_score, x="Value of K for KNN", y="Score")
```
相关问题
import seaborn as sns corrmat = df.corr() top_corr_features = corrmat.index plt.figure(figsize=(16,16)) #plot heat map g=sns.heatmap(df[top_corr_features].corr(),annot=True,cmap="RdYlGn") plt.show() sns.set_style('whitegrid') sns.countplot(x='target',data=df,palette='RdBu_r') plt.show() dataset = pd.get_dummies(df, columns = ['sex', 'cp', 'fbs','restecg', 'exang', 'slope', 'ca', 'thal']) from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler standardScaler = StandardScaler() columns_to_scale = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak'] dataset[columns_to_scale] = standardScaler.fit_transform(dataset[columns_to_scale]) dataset.head() y = dataset['target'] X = dataset.drop(['target'], axis=1) from sklearn.model_selection import cross_val_score knn_scores = [] for k in range(1, 21): knn_classifier = KNeighborsClassifier(n_neighbors=k) score = cross_val_score(knn_classifier, X, y, cv=10) knn_scores.append(score.mean()) plt.plot([k for k in range(1, 21)], knn_scores, color='red') for i in range(1, 21): plt.text(i, knn_scores[i - 1], (i, knn_scores[i - 1])) plt.xticks([i for i in range(1, 21)]) plt.xlabel('Number of Neighbors (K)') plt.ylabel('Scores') plt.title('K Neighbors Classifier scores for different K values') plt.show() knn_classifier = KNeighborsClassifier(n_neighbors = 12) score=cross_val_score(knn_classifier,X,y,cv=10) score.mean() from sklearn.ensemble import RandomForestClassifier randomforest_classifier= RandomForestClassifier(n_estimators=10) score=cross_val_score(randomforest_classifier,X,y,cv=10) score.mean()的roc曲线的代码
以下是绘制ROC曲线的代码:
```
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold

# Cross-validated ROC comparison of a KNN and a Random Forest classifier.
# NOTE(review): assumes X (DataFrame), y (Series), np, plt, KNeighborsClassifier
# and RandomForestClassifier are already defined/imported by the caller — confirm.

# Stratified 10-fold CV keeps the class ratio consistent in every fold.
cv = StratifiedKFold(n_splits=10)

# KNN Classifier ROC Curve
knn_classifier = KNeighborsClassifier(n_neighbors=12)
knn_tprs = []  # per-fold TPR curves, interpolated onto the common FPR grid
knn_aucs = []  # per-fold AUC values
mean_fpr = np.linspace(0, 1, 100)  # common FPR grid shared by both models
fig, ax = plt.subplots()
for i, (train, test) in enumerate(cv.split(X, y)):
    knn_classifier.fit(X.iloc[train], y.iloc[train])
    # Probability of the positive class (column 1) for this fold's test rows.
    knn_proba = knn_classifier.predict_proba(X.iloc[test])[:, 1]
    knn_fpr, knn_tpr, knn_thresholds = roc_curve(y.iloc[test], knn_proba)
    # Interpolate onto mean_fpr so fold curves can later be averaged point-wise.
    knn_tprs.append(np.interp(mean_fpr, knn_fpr, knn_tpr))
    knn_tprs[-1][0] = 0.0  # force each fold curve to start at (0, 0)
    knn_roc_auc = auc(knn_fpr, knn_tpr)
    knn_aucs.append(knn_roc_auc)
    ax.plot(knn_fpr, knn_tpr, lw=1, alpha=0.3,
            label='ROC fold %d (AUC = %0.2f)' % (i+1, knn_roc_auc))

# Random Forest Classifier ROC Curve (same folds, same FPR grid)
randomforest_classifier = RandomForestClassifier(n_estimators=10)
rf_tprs = []
rf_aucs = []
for i, (train, test) in enumerate(cv.split(X, y)):
    randomforest_classifier.fit(X.iloc[train], y.iloc[train])
    rf_proba = randomforest_classifier.predict_proba(X.iloc[test])[:, 1]
    rf_fpr, rf_tpr, rf_thresholds = roc_curve(y.iloc[test], rf_proba)
    rf_tprs.append(np.interp(mean_fpr, rf_fpr, rf_tpr))
    rf_tprs[-1][0] = 0.0
    rf_roc_auc = auc(rf_fpr, rf_tpr)
    rf_aucs.append(rf_roc_auc)
    ax.plot(rf_fpr, rf_tpr, lw=1, alpha=0.3,
            label='ROC fold %d (AUC = %0.2f)' % (i+1, rf_roc_auc))

# Plot the mean ROC curves
# Diagonal reference line = performance of random guessing.
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
        label='Chance', alpha=.8)
knn_mean_tpr = np.mean(knn_tprs, axis=0)
knn_mean_tpr[-1] = 1.0  # force the mean curve to end at (1, 1)
knn_mean_auc = auc(mean_fpr, knn_mean_tpr)
std_auc = np.std(knn_aucs)
ax.plot(mean_fpr, knn_mean_tpr, color='b',
        label=r'KNN Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (knn_mean_auc, std_auc),
        lw=2, alpha=.8)
rf_mean_tpr = np.mean(rf_tprs, axis=0)
rf_mean_tpr[-1] = 1.0
rf_mean_auc = auc(mean_fpr, rf_mean_tpr)
std_auc = np.std(rf_aucs)  # deliberately reused: the KNN value is no longer needed
ax.plot(mean_fpr, rf_mean_tpr, color='g',
        label=r'RF Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (rf_mean_auc, std_auc),
        lw=2, alpha=.8)
# Set the plot parameters
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
       title="Receiver operating characteristic example")
ax.legend(loc="lower right")
plt.show()
```
这段代码将绘制KNN分类器和随机森林分类器的ROC曲线,以及它们的平均曲线和AUC值。运行前请确保已经导入 `numpy`(`np`)和 `matplotlib.pyplot`(`plt`),并已定义好特征矩阵 `X` 和标签 `y`;您需要将其与您的数据集和分类器参数一起使用。
from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_digits from sklearn import svm, metrics from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix from sklearn.metrics import plot_confusion_matrix import numpy as np #使绘图支持中文字符 from matplotlib import rcParams rcParams['font.family'] = 'SimHei' plt.rcParams['axes.unicode_minus'] = False # In[2]: digits = load_digits() data = digits.data print(data[0]) print(digits.images[0]) print(digits.target[0]) plt.imshow(digits.images[0]) plt.show() train_x, test_x, train_y, test_y = train_test_split(data, digits.target, test_size=0.3, random_state=93) print(train_x) knn = KNeighborsClassifier(n_neighbors=5) knn.fit(train_x, train_y) print("KNN训练集得分: %.4lf" % knn.score(train_x, train_y)) print("KNN测试集得分: %.4lf" % knn.score(test_x, test_y)) print(knn.predict(data)) plot_confusion_matrix(knn, test_x, test_y) plt.show() # In[5]: ################################################################# fig = plt.figure(figsize=(6,13)) for i in range(40): y_pred = knn.predict([data[i]]) plt.subplot(8,5,i+1) plt.imshow(digits.images[i], interpolation='none') plt.title("%d---->%d"%(digits.target[i],y_pred)) plt.show() 帮我修改一下代码,让测试集结果更准确
为了提高测试集结果的准确性,你可以尝试以下几个方法:
1. 标准化数据:使用`StandardScaler`对训练集和测试集的数据进行标准化处理。标准化可以使特征的均值为0,方差为1,有助于提高模型的性能。
```python
from sklearn.preprocessing import StandardScaler

# Standardize features to zero mean / unit variance.
# The scaler is fitted on the training split only; the test split is
# transformed with the training statistics to avoid data leakage.
scaler = StandardScaler().fit(train_x)
train_x_scaled = scaler.transform(train_x)
test_x_scaled = scaler.transform(test_x)

# Fit a 5-nearest-neighbors classifier on the standardized features.
knn = KNeighborsClassifier(n_neighbors=5).fit(train_x_scaled, train_y)
print("KNN训练集得分: %.4lf" % knn.score(train_x_scaled, train_y))
print("KNN测试集得分: %.4lf" % knn.score(test_x_scaled, test_y))
```
2. 调整超参数:尝试不同的`n_neighbors`值,找到最佳的超参数。可以使用交叉验证进行网格搜索来寻找最佳的超参数。
```python
from sklearn.model_selection import GridSearchCV

# 5-fold cross-validated grid search over the neighborhood size K.
candidate_params = {'n_neighbors': [3, 5, 7, 9, 11]}
grid_search = GridSearchCV(KNeighborsClassifier(), candidate_params, cv=5)
grid_search.fit(train_x_scaled, train_y)

# best_estimator_ is already refitted on the full training data
# with the winning hyper-parameters.
best_knn = grid_search.best_estimator_
print("最佳KNN模型参数: ", grid_search.best_params_)
print("最佳KNN训练集得分: %.4lf" % best_knn.score(train_x_scaled, train_y))
print("最佳KNN测试集得分: %.4lf" % best_knn.score(test_x_scaled, test_y))
```
3. 增加训练样本数量:如果训练样本数量较少,可以考虑增加训练样本的数量,以提高模型的泛化能力。
```python
from sklearn.preprocessing import StandardScaler

# Enlarge the training set with extra samples, then retrain KNN.
# NOTE: load_additional_data() is a placeholder — replace it with your
# actual data source; it must return features/labels matching train_x/train_y.
additional_train_x, additional_train_y = load_additional_data()  # load extra training data
train_x_combined = np.concatenate((train_x, additional_train_x))
train_y_combined = np.concatenate((train_y, additional_train_y))

# BUG FIX: the original fitted the model on *unscaled* combined features but
# evaluated it on the *scaled* test set (test_x_scaled), so training and test
# data lived in different feature spaces and the reported score was meaningless.
# Re-fit the scaler on the enlarged training set and apply it to both splits.
scaler = StandardScaler()
train_x_combined_scaled = scaler.fit_transform(train_x_combined)
test_x_rescaled = scaler.transform(test_x)

knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(train_x_combined_scaled, train_y_combined)
print("KNN训练集得分: %.4lf" % knn.score(train_x_combined_scaled, train_y_combined))
print("KNN测试集得分: %.4lf" % knn.score(test_x_rescaled, test_y))
```
尝试以上方法后,可以比较不同模型的性能,并选择表现最好的模型作为最终的选择。
阅读全文