Heart disease prediction with a decision tree algorithm, without the sklearn library
How can I build the decision tree itself without using sklearn?
Sure. Below is a Python code example that predicts heart disease with a decision tree implemented from scratch:
```python
import numpy as np
import pandas as pd


class Node:
    """A single node of the decision tree."""
    def __init__(self, *, predicted_class):
        self.predicted_class = predicted_class  # majority class at this node
        self.feature_index = 0
        self.threshold = 0
        self.left = None
        self.right = None


class DecisionTreeClassifier:
    def __init__(self, max_depth=None):
        self.max_depth = max_depth

    def fit(self, X, y):
        self.n_classes_ = len(set(y))
        self.n_features_ = X.shape[1]
        self.tree_ = self._grow_tree(X, y)

    def predict(self, X):
        return [self._predict(inputs) for inputs in X]

    def _best_split(self, X, y):
        """Find the feature index and threshold that minimize the Gini impurity."""
        m = y.size
        if m <= 1:
            return None, None
        num_parent = [np.sum(y == c) for c in range(self.n_classes_)]
        best_gini = 1.0 - sum((n / m) ** 2 for n in num_parent)
        best_idx, best_thr = None, None
        for idx in range(self.n_features_):
            # Sort the samples along the current feature and sweep all split points.
            thresholds, classes = zip(*sorted(zip(X[:, idx], y)))
            num_left = [0] * self.n_classes_
            num_right = num_parent.copy()
            for i in range(1, m):
                c = classes[i - 1]
                num_left[c] += 1
                num_right[c] -= 1
                gini_left = 1.0 - sum((num_left[x] / i) ** 2 for x in range(self.n_classes_))
                gini_right = 1.0 - sum((num_right[x] / (m - i)) ** 2 for x in range(self.n_classes_))
                gini = (i * gini_left + (m - i) * gini_right) / m
                if thresholds[i] == thresholds[i - 1]:
                    continue  # cannot split between two identical feature values
                if gini < best_gini:
                    best_gini = gini
                    best_idx = idx
                    best_thr = (thresholds[i] + thresholds[i - 1]) / 2  # midpoint threshold
        return best_idx, best_thr

    def _grow_tree(self, X, y, depth=0):
        """Recursively build the tree."""
        num_samples_per_class = [np.sum(y == i) for i in range(self.n_classes_)]
        predicted_class = np.argmax(num_samples_per_class)
        node = Node(predicted_class=predicted_class)
        # Stop splitting once the maximum depth is reached (None means unlimited depth).
        if self.max_depth is None or depth < self.max_depth:
            idx, thr = self._best_split(X, y)
            if idx is not None:
                indices_left = X[:, idx] < thr
                X_left, y_left = X[indices_left], y[indices_left]
                X_right, y_right = X[~indices_left], y[~indices_left]
                node.feature_index = idx
                node.threshold = thr
                node.left = self._grow_tree(X_left, y_left, depth + 1)
                node.right = self._grow_tree(X_right, y_right, depth + 1)
        return node

    def _predict(self, inputs):
        """Walk the tree from the root until a leaf is reached."""
        node = self.tree_
        while node.left:
            if inputs[node.feature_index] < node.threshold:
                node = node.left
            else:
                node = node.right
        return node.predicted_class


# Load the dataset
df = pd.read_csv('heart.csv')
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values

# Split into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Train the model
clf = DecisionTreeClassifier(max_depth=3)
clf.fit(X_train, y_train)

# Predict and evaluate the model
from sklearn.metrics import accuracy_score
y_pred = clf.predict(X_test)
print('Accuracy:', accuracy_score(y_test, y_pred))
```
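For reference, the split criterion computed inside `_best_split` is the Gini impurity. For a node holding $m$ samples, of which $n_c$ belong to class $c$, the impurity is

$$G = 1 - \sum_{c}\left(\frac{n_c}{m}\right)^2,$$

and each candidate split is scored by the sample-weighted average of the impurities of its left and right children; the feature/threshold pair with the lowest score is chosen.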
This example implements the decision tree algorithm in a class named `DecisionTreeClassifier`. The class exposes a `fit` method for training the model and a `predict` method for classifying new samples, while the private `_grow_tree` and `_predict` methods handle recursive tree construction and tree traversal during prediction. The `_best_split` method searches every feature and threshold for the split with the lowest Gini impurity. After fitting on the training data and labels, the held-out test set is used to evaluate the model and report its accuracy. Note that the tree itself is built without sklearn; `train_test_split` and `accuracy_score` are used only as convenience utilities for splitting the data and computing the accuracy.
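If you want to drop those remaining sklearn imports as well, the split and the accuracy computation can be done directly with NumPy. The sketch below is one possible replacement (the 20% test fraction and the seed of 42 simply mirror the values used above):

```python
import numpy as np

# Shuffle the row indices with a fixed seed for reproducibility.
rng = np.random.default_rng(42)
indices = rng.permutation(len(X))

# Hold out 20% of the rows as the test set.
test_size = int(0.2 * len(X))
test_idx, train_idx = indices[:test_size], indices[test_size:]
X_train, X_test = X[train_idx], X[test_idx]
y_train, y_test = y[train_idx], y[test_idx]

clf = DecisionTreeClassifier(max_depth=3)
clf.fit(X_train, y_train)

# Accuracy is simply the fraction of correct predictions.
y_pred = np.array(clf.predict(X_test))
print('Accuracy:', np.mean(y_pred == y_test))
```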