Complete the following code. The skeleton imports the required libraries, loads and explores data.csv, converts the string target labels (9 classes) to integer indices for the cross-entropy computation, provides optional one-hot (convert_to_vectors) and feature-scaling (process_features) helpers, splits the data into training and test sets, and finally computes weighted accuracy, precision, recall, and F1 from c_pred. The model training and prediction step that produces c_pred is left blank and needs to be filled in.
Here is the completed code:
```python
# Import the required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
# First, take a look at an overall description of the data
data = pd.read_csv('data.csv')
data.describe(include='all')
# Look at five random rows of the data
data.sample(5)
sns.countplot(x="target", data=data)   # class distribution of the target
plt.show()
# target has 9 classes. Since the labels are strings, define a function that maps each
# label to an integer index, which makes the cross-entropy computation easier later.
def target2idx(targets):
    target_idx = []
    target_labels = ['Class_1', 'Class_2', 'Class_3', 'Class_4', 'Class_5',
                     'Class_6', 'Class_7', 'Class_8', 'Class_9', 'Class_10']
    for target in targets:
        target_idx.append(target_labels.index(target))
    return target_idx
# One-hot conversion function (provided for reference; use it only if you need it)
def convert_to_vectors(c):
    m = len(c)
    k = np.max(c) + 1          # number of classes
    y = np.zeros((m, k))
    for i in range(m):
        y[i][c[i]] = 1
    return y
# Feature-processing function (provided for reference; use it only if you need it)
def process_features(X):
    scaler = MinMaxScaler(feature_range=(0, 1))
    X = scaler.fit_transform(1.0 * X)   # scale each feature to [0, 1]
    m, n = X.shape
    X = np.c_[np.ones((m, 1)), X]       # prepend a bias column of ones
    return X
# Sample data extraction; adapt as needed
X = np.array(data)[:, 1:-1].astype(float)   # keep only the feature columns (drop the first and last columns)
c = target2idx(data['target'])              # integer class indices
y = convert_to_vectors(c)                   # one-hot targets
# Split into training and test sets (the test proportion may be anywhere between 0.1 and 0.9)
X_train, X_test, y_train, y_test, c_train, c_test = train_test_split(X, y, c, random_state=0, test_size=0.2)
# Model training and prediction
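# NOTE: this is the step the exercise asks you to fill in (it was left blank, so c_pred
# below would otherwise be undefined). What follows is one possible minimal sketch using
# scikit-learn's LogisticRegression as a multinomial (softmax) classifier; any model that
# produces integer class predictions c_pred would work here. max_iter=1000 is an assumption
# to help convergence on the unscaled features; process_features() could be applied first.
from sklearn.linear_model import LogisticRegression

model = LogisticRegression(max_iter=1000)
model.fit(X_train, c_train)      # c_train holds integer class indices; the one-hot y_train is
                                 # only needed for a hand-written softmax/cross-entropy version
c_pred = model.predict(X_test)   # predicted class indices for the test set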
# Compute the metrics. Weighted averaging is used because this is a multi-class problem;
# accuracy and the weighted recall come out equal (explain why in your report; a short
# derivation is given after the code block).
accuracy = accuracy_score(c_test, c_pred)
precision = precision_score(c_test, c_pred, average='weighted')
recall = recall_score(c_test, c_pred, average='weighted')
f1 = f1_score(c_test, c_pred, average='weighted')
print("accuracy = {}".format(accuracy))
print("precision = {}".format(precision))
print("recall = {}".format(recall))
print("f1 = {}".format(f1))
```
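A note on the metrics above: with weighted averaging, recall = Σ_k (n_k / N) · (TP_k / n_k) = (Σ_k TP_k) / N, where n_k is the number of test samples of class k, N is the total number of test samples, and TP_k is the number of class-k samples predicted correctly. Since Σ_k TP_k is exactly the number of correctly classified samples, the weighted recall reduces to the accuracy, which is why the two printed values are equal.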