给出python实现ID3决策树算法的代码,不能调用sklearn库,能够输出决策树的可视化图形
时间: 2024-05-01 16:17:25 浏览: 10
以下是Python实现ID3决策树算法的代码,可以输出决策树的可视化图形,不调用sklearn库:
```
import numpy as np
from collections import Counter
from math import log2
import matplotlib.pyplot as plt
class Node:
    """A single node of an ID3 decision tree.

    A node is either internal (``feature`` holds the splitting column
    index and ``branches`` maps each observed feature value to a child
    Node) or a leaf (``is_leaf`` is True and ``label`` holds the
    predicted class).
    """

    def __init__(self, feature=None, label=None, is_leaf=False, branches=None):
        """Initialize the node.

        NOTE: ``branches`` defaults to None rather than ``{}``.  A
        mutable default dict would be shared by every Node created
        without an explicit argument, so all internal nodes would end
        up writing into one and the same branches mapping.
        """
        self.feature = feature    # column index of the splitting feature (internal nodes)
        self.label = label        # class label (meaningful for leaves)
        self.is_leaf = is_leaf    # True for leaf nodes
        self.branches = {} if branches is None else branches  # value -> child Node

    def __str__(self):
        """Return a short human-readable description of the node."""
        if self.is_leaf:
            return f'[leaf,label:{self.label}]'
        return f'[internal,feature:{self.feature}]'
class DecisionTree:
    """ID3 decision tree classifier for categorical features.

    The tree is grown with ``fit`` by recursively choosing the feature
    with the largest information gain; single samples are classified
    with ``predict``; ``plot`` renders the tree with matplotlib.
    """

    def __init__(self, train_x, train_y, test_x=None, test_y=None, feature_names=None):
        """Store the data sets; nothing is computed until ``fit``.

        train_x: 2-D array of categorical feature values, shape (n_samples, n_features)
        train_y: 1-D array of class labels
        test_x, test_y: optional held-out data (kept for the caller's use)
        feature_names: optional list of column names, used when plotting
        """
        self.train_x = train_x
        self.train_y = train_y
        self.test_x = test_x
        self.test_y = test_y
        self.feature_names = feature_names
        self.root = None  # set by fit()

    def fit(self):
        """Build the decision tree from the training data."""
        all_features = list(range(self.train_x.shape[1]))
        self.root = self.build_tree(self.train_x, self.train_y, all_features)

    def predict(self, x):
        """Return the predicted label for a single sample ``x``."""
        return self.classify(x, self.root)

    def build_tree(self, x, y, feature_indices):
        """Recursively build a subtree over the samples ``x``/``y``.

        ``feature_indices`` lists the column indices still available for
        splitting; the chosen feature is removed before recursing so it
        can never be selected twice on one path (plain ID3 behaviour).
        Tracking column indices (instead of filtering a parallel list of
        names) keeps the bookkeeping and the data columns in sync.
        """
        # All samples share one label: pure leaf.
        if len(set(y)) == 1:
            return Node(label=y[0], is_leaf=True)
        # No features left to split on: majority-vote leaf.
        if not feature_indices:
            return Node(label=Counter(y).most_common(1)[0][0], is_leaf=True)
        best_feature = self.choose_best_feature(x, y, feature_indices)
        if best_feature is None:
            # Every remaining feature has zero information gain.
            return Node(label=Counter(y).most_common(1)[0][0], is_leaf=True)
        # Pass a fresh dict explicitly so no two nodes can share branches.
        current_node = Node(feature=best_feature, branches={})
        remaining = [i for i in feature_indices if i != best_feature]
        for value in set(x[:, best_feature]):
            sub_x, sub_y = self.split_dataset(x, y, best_feature, value)
            if len(sub_x) == 0:
                # Empty partition: fall back to the parent's majority label.
                current_node.branches[value] = Node(
                    label=Counter(y).most_common(1)[0][0], is_leaf=True)
            else:
                current_node.branches[value] = self.build_tree(sub_x, sub_y, remaining)
        return current_node

    def choose_best_feature(self, x, y, feature_indices=None):
        """Return the column index (among ``feature_indices``) with the
        largest information gain, or None if no feature has positive gain.
        """
        if feature_indices is None:
            feature_indices = range(x.shape[1])
        entropy = self.calc_entropy(y)
        max_ig = 0.0      # best information gain seen so far
        best_feature = None
        for i in feature_indices:
            ig = entropy - self.calc_cond_entropy(x[:, i], y)
            if ig > max_ig:
                max_ig = ig
                best_feature = i
        return best_feature

    def calc_entropy(self, y):
        """Shannon entropy H(Y) of the label array, in bits."""
        n_samples = len(y)
        counter = Counter(y)
        return -sum((count / n_samples) * log2(count / n_samples)
                    for count in counter.values())

    def calc_cond_entropy(self, x, y):
        """Conditional entropy H(Y|X) for one feature column ``x``.

        H(Y|X) = sum_v P(X=v) * H(Y | X=v).

        The original version additionally divided each term by P(X=v),
        which cancelled the weight and summed the raw subset entropies —
        that is not the ID3 conditional entropy.
        """
        n_samples = len(y)
        cond_entropy = 0.0
        for value in set(x):
            mask = x == value
            weight = mask.sum() / n_samples  # P(X = value)
            cond_entropy += weight * self.calc_entropy(y[mask])
        return cond_entropy

    def split_dataset(self, x, y, feature_idx, value):
        """Return the rows of x and y where column ``feature_idx`` equals ``value``."""
        mask = x[:, feature_idx] == value
        return x[mask], y[mask]

    def classify(self, x, node):
        """Walk the tree from ``node`` down to a leaf for sample ``x``."""
        while not node.is_leaf:
            value = x[node.feature]
            if value not in node.branches:
                # Feature value never seen during training: fall back to
                # the majority label among this subtree's leaves instead
                # of raising KeyError.
                return self._majority_leaf_label(node)
            node = node.branches[value]
        return node.label

    def _majority_leaf_label(self, node):
        """Most common label among the leaves below ``node``."""
        counter = Counter()
        stack = [node]
        while stack:
            current = stack.pop()
            if current.is_leaf:
                counter[current.label] += 1
            else:
                stack.extend(current.branches.values())
        return counter.most_common(1)[0][0]

    def plot(self):
        """Draw the fitted tree in a matplotlib window."""
        fig, ax = plt.subplots()
        ax.set_axis_off()
        self.plot_tree(ax, self.root, None)
        plt.show()

    def plot_tree(self, ax, node, parent_pos, x_left=0.0, x_right=1.0, y=1.0, edge_label=None):
        """Recursively draw ``node`` centred in the horizontal band
        [x_left, x_right] at height ``y`` (axes coordinates).

        Each child receives a slice of the band proportional to its
        number of leaves, which keeps subtrees from overlapping.  (The
        original version referenced an undefined ``parent_node`` — a
        NameError on any branching tree — and placed all siblings at
        just two fixed offsets.)
        """
        pos = ((x_left + x_right) / 2.0, y)
        if parent_pos is not None:
            # Edge from the parent, annotated with the feature value.
            ax.plot([parent_pos[0], pos[0]], [parent_pos[1], pos[1]], color='b', lw=2)
            mid_x = (parent_pos[0] + pos[0]) / 2.0
            mid_y = (parent_pos[1] + pos[1]) / 2.0
            ax.text(mid_x, mid_y, str(edge_label), fontsize=8,
                    horizontalalignment='center', verticalalignment='center')
        if node.is_leaf:
            text = str(node.label)
        elif self.feature_names is not None:
            text = self.feature_names[node.feature]
        else:
            text = f'feature {node.feature}'
        ax.text(pos[0], pos[1], text,
                horizontalalignment='center', verticalalignment='center',
                bbox=dict(boxstyle='round', fc='lightyellow'))
        if not node.is_leaf:
            total_leaves = self._count_leaves(node)
            cursor = x_left
            for value, child in node.branches.items():
                width = (x_right - x_left) * self._count_leaves(child) / total_leaves
                self.plot_tree(ax, child, pos, cursor, cursor + width, y - 0.15, value)
                cursor += width

    def _count_leaves(self, node):
        """Number of leaves in the subtree rooted at ``node``."""
        if node.is_leaf:
            return 1
        return sum(self._count_leaves(child) for child in node.branches.values())

    def depth(self, node):
        """Depth of the subtree rooted at ``node`` (a single leaf has depth 0)."""
        if node.is_leaf:
            return 0
        return 1 + max(self.depth(child) for child in node.branches.values())
if __name__ == '__main__':
    # Demo on the classic "play tennis" data set: each row lists the
    # four feature values followed by the class label.
    dataset = [
        ('sunny',    'hot',  'high',   'weak',   'no'),
        ('sunny',    'hot',  'high',   'strong', 'no'),
        ('overcast', 'hot',  'high',   'weak',   'yes'),
        ('rainy',    'mild', 'high',   'weak',   'yes'),
        ('rainy',    'cool', 'normal', 'weak',   'yes'),
        ('rainy',    'cool', 'normal', 'strong', 'no'),
        ('overcast', 'cool', 'normal', 'strong', 'yes'),
        ('sunny',    'mild', 'high',   'weak',   'no'),
        ('sunny',    'cool', 'normal', 'weak',   'yes'),
        ('rainy',    'mild', 'normal', 'weak',   'yes'),
        ('sunny',    'mild', 'normal', 'strong', 'yes'),
        ('overcast', 'mild', 'high',   'strong', 'yes'),
        ('overcast', 'hot',  'normal', 'weak',   'yes'),
        ('rainy',    'mild', 'high',   'strong', 'no'),
    ]
    train_x = np.array([row[:-1] for row in dataset])
    train_y = np.array([row[-1] for row in dataset])
    feature_names = ['outlook', 'temperature', 'humidity', 'windy']

    # Fit the tree and display it.
    tree = DecisionTree(train_x, train_y, feature_names=feature_names)
    tree.fit()
    tree.plot()
```
该决策树的可视化图形将会显示在窗口中。