Implementing the ID3 decision tree algorithm in Python
The ID3 decision tree algorithm is a classification algorithm based on information entropy. Strictly speaking, scikit-learn's `DecisionTreeClassifier` implements an optimized version of CART rather than ID3, but setting `criterion='entropy'` makes it select splits by information gain just as ID3 does, which is usually close enough in practice. The basic steps are as follows:
1. Import the required libraries and the dataset
```python
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
```
2. Create the decision tree classifier, using entropy as the split criterion
```python
clf = DecisionTreeClassifier(criterion='entropy')
```
3. Train the model
```python
clf.fit(X, y)
```
4. Predict on new data
```python
new_data = [[5.1, 3.5, 1.4, 0.2], [6.2, 3.4, 5.4, 2.3]]
print(clf.predict(new_data))
```
These are the basic steps for building an ID3-style decision tree in Python. Note that decision trees tend to overfit, so in practice the model should be tuned and pruned appropriately, as sketched below.
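As a minimal sketch of such tuning (reusing the iris data loaded above; the specific hyperparameter values are illustrative, not tuned), one can hold out a test set, limit tree growth with `max_depth`, and apply scikit-learn's cost-complexity pruning via `ccp_alpha`:
```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Restrict depth and prune to curb overfitting; these values are examples only
clf = DecisionTreeClassifier(criterion='entropy', max_depth=3, ccp_alpha=0.01)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))  # accuracy on the held-out test set
```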
Related questions
Implement the ID3 decision tree algorithm in Python
Sure, here is a code example implementing the ID3 decision tree algorithm in Python; it assumes the dataset is a pandas DataFrame whose class column is named `label`:
```python
import math
from collections import Counter

def find_entropy(data):
    """
    Compute the information entropy of a dataset.
    `data` is expected to be a pandas DataFrame with a "label" column.
    """
    # Count the number of samples in each class
    class_counts = Counter(data["label"])
    # Compute the proportion of each class
    class_probs = [class_count / len(data["label"]) for class_count in class_counts.values()]
    # Compute the entropy
    entropy = sum([-class_prob * math.log(class_prob, 2) for class_prob in class_probs])
    return entropy

def find_best_split(data, features):
    """
    Find the best split feature and feature value.
    """
    # Entropy of the dataset before splitting
    entropy = find_entropy(data)
    # Initialize the best split feature and value
    best_feature, best_value = None, None
    # Initialize the best (largest) information gain; only strictly
    # positive gains are accepted
    best_info_gain = 0.0
    # Iterate over each feature
    for feature in features:
        # Collect all values taken by this feature
        values = set(data[feature])
        # Iterate over each value
        for value in values:
            # Split the dataset into two parts
            left_data = data[data[feature] == value]
            right_data = data[data[feature] != value]
            # Only consider splits where both parts are non-empty
            if len(left_data) > 0 and len(right_data) > 0:
                # Weighted entropy after the split
                left_entropy = find_entropy(left_data)
                right_entropy = find_entropy(right_data)
                split_entropy = (len(left_data) / len(data)) * left_entropy + (len(right_data) / len(data)) * right_entropy
                # Information gain of this split
                info_gain = entropy - split_entropy
                # Keep the split with the largest information gain
                if info_gain > best_info_gain:
                    best_feature, best_value = feature, value
                    best_info_gain = info_gain
    # Return the best split feature and value
    return best_feature, best_value

def build_tree(data, features):
    """
    Recursively build the decision tree.
    """
    # If the dataset is empty, return None
    if len(data) == 0:
        return None
    # If all samples belong to the same class, return that class
    if len(set(data["label"])) == 1:
        return data["label"].iloc[0]
    # If no features are left, return the majority class
    if len(features) == 0:
        return Counter(data["label"]).most_common(1)[0][0]
    # Find the best split feature and value
    best_feature, best_value = find_best_split(data, features)
    # If no split yields positive information gain, return the majority class
    if best_feature is None or best_value is None:
        return Counter(data["label"]).most_common(1)[0][0]
    # Create an internal node
    node = {"feature": best_feature, "value": best_value, "left": None, "right": None}
    # Split the dataset into two parts
    left_data = data[data[best_feature] == best_value]
    right_data = data[data[best_feature] != best_value]
    # Recursively build the left and right subtrees
    node["left"] = build_tree(left_data, [feature for feature in features if feature != best_feature])
    node["right"] = build_tree(right_data, [feature for feature in features if feature != best_feature])
    # Return the node
    return node
```
This code implements the ID3 decision tree algorithm: `find_entropy` computes the information entropy of a dataset, `find_best_split` finds the split feature and value with the largest information gain, and `build_tree` recursively builds the tree.
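For reference, ID3 picks the split that maximizes the information gain Gain(S, A) = H(S) − Σ_v (|S_v|/|S|)·H(S_v), where H(S) = −Σ_i p_i·log2(p_i) is the entropy computed by `find_entropy`. Below is a minimal usage sketch, assuming pandas is available; the `weather` column names and values are made-up toy data:
```python
import pandas as pd

# Hypothetical toy data: the class column must be named "label"
weather = pd.DataFrame({
    "outlook": ["sunny", "sunny", "overcast", "rainy", "rainy"],
    "windy":   ["weak", "strong", "weak", "weak", "strong"],
    "label":   ["no", "no", "yes", "yes", "no"],
})

tree = build_tree(weather, ["outlook", "windy"])
# Prints a nested dict, e.g. {'feature': 'outlook', 'value': ..., 'left': ..., 'right': {...}}
print(tree)
```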
Provide Python code that implements the ID3 decision tree algorithm without calling the sklearn library and that can output a visualization of the decision tree
Below is a Python implementation of the ID3 decision tree algorithm that draws a visualization of the tree without calling sklearn:
```python
import numpy as np
from collections import Counter
from math import log2
import matplotlib.pyplot as plt

class Node:
    '''
    A node of the decision tree.
    '''
    def __init__(self, feature=None, label=None, is_leaf=False, branches=None):
        '''
        Initialize the node. `branches` defaults to None rather than a
        mutable default argument, which would be shared across instances.
        '''
        self.feature = feature  # index of the feature this node splits on
        self.label = label      # class label (for leaf nodes)
        self.is_leaf = is_leaf  # whether this is a leaf node
        self.branches = branches if branches is not None else {}  # children keyed by feature value

    def __str__(self):
        '''
        Return the node information as a string.
        '''
        if self.is_leaf:
            return f'[leaf,label:{self.label}]'
        else:
            return f'[internal,feature:{self.feature}]'

class DecisionTree:
    '''
    ID3 decision tree.
    '''
    def __init__(self, train_x, train_y, test_x=None, test_y=None, feature_names=None):
        '''
        Initialize the tree.
        '''
        self.train_x = train_x              # training features
        self.train_y = train_y              # training labels
        self.test_x = test_x                # test features (optional)
        self.test_y = test_y                # test labels (optional)
        self.feature_names = feature_names  # list of feature names
        self.root = None                    # root node of the tree

    def fit(self):
        '''
        Build the decision tree from the training data.
        '''
        self.root = self.build_tree(self.train_x, self.train_y, feature_names=self.feature_names)

    def predict(self, x):
        '''
        Predict the class of a single sample.
        '''
        return self.classify(x, self.root)

    def build_tree(self, x, y, feature_names):
        '''
        Recursively build the decision tree.
        '''
        if len(set(y)) == 1:
            # All samples share the same label: return a leaf
            return Node(label=y[0], is_leaf=True)
        elif len(feature_names) == 0:
            # No features left: return a leaf with the majority label
            return Node(label=Counter(y).most_common(1)[0][0], is_leaf=True)
        else:
            # Choose the feature with the largest information gain
            best_feature = self.choose_best_feature(x, y)
            # Create a new internal node
            current_node = Node(feature=best_feature)
            # Partition the data on the chosen feature and recurse
            for value in set(x[:, best_feature]):
                sub_x, sub_y = self.split_dataset(x, y, best_feature, value)
                if len(sub_x) == 0:
                    # Empty partition: use the majority label of the parent
                    leaf_node = Node(label=Counter(y).most_common(1)[0][0], is_leaf=True)
                    current_node.branches[value] = leaf_node
                else:
                    sub_feature_names = [name for name in feature_names if name != self.feature_names[best_feature]]
                    sub_tree = self.build_tree(sub_x, sub_y, feature_names=sub_feature_names)
                    current_node.branches[value] = sub_tree
            return current_node

    def choose_best_feature(self, x, y):
        '''
        Choose the feature with the largest information gain.
        '''
        n_features = x.shape[1]
        # Entropy of the label distribution before splitting
        entropy = self.calc_entropy(y)
        max_ig = 0         # largest information gain seen so far
        best_feature = -1  # index of the best feature
        for i in range(n_features):
            # Information gain of splitting on the i-th feature
            sub_x = x[:, i]
            ig = entropy - self.calc_cond_entropy(sub_x, y)
            if ig > max_ig:
                max_ig = ig
                best_feature = i
        return best_feature

    def calc_entropy(self, y):
        '''
        Compute the information entropy of a label array.
        '''
        n_samples = len(y)
        counter = Counter(y)
        probs = [count / n_samples for count in counter.values()]
        entropy = -sum([prob * log2(prob) for prob in probs])
        return entropy

    def calc_cond_entropy(self, x, y):
        '''
        Compute the conditional entropy H(y | x) for one feature column.
        '''
        n_samples = len(y)
        cond_entropy = 0
        for value in set(x):
            # Proportion of samples whose feature value equals `value`
            weight = np.sum(x == value) / n_samples
            sub_y = y[x == value]
            # Weighted sum of the entropy within each subset
            cond_entropy += weight * self.calc_entropy(sub_y)
        return cond_entropy

    def split_dataset(self, x, y, feature_idx, value):
        '''
        Select the samples whose feature `feature_idx` equals `value`.
        '''
        sub_x = x[x[:, feature_idx] == value]
        sub_y = y[x[:, feature_idx] == value]
        return sub_x, sub_y

    def classify(self, x, node):
        '''
        Recursively classify a sample by walking down the tree.
        '''
        if node.is_leaf:
            # Leaf node: return its label
            return node.label
        else:
            # Follow the branch matching the sample's feature value;
            # a value never seen during training raises a KeyError
            value = x[node.feature]
            sub_tree = node.branches[value]
            return self.classify(x, sub_tree)

    def plot(self):
        '''
        Visualize the decision tree with matplotlib.
        '''
        fig, ax = plt.subplots()
        ax.axis('off')
        self._next_leaf_x = 0.0  # running horizontal slot for leaf placement
        self.plot_tree(ax, self.root, 0)
        plt.show()

    def plot_tree(self, ax, node, depth):
        '''
        Recursively draw the tree. Leaves are placed at evenly spaced
        horizontal slots; each internal node is centered above its
        children. Returns the (x, y) position of `node`.
        '''
        if node.is_leaf:
            # Place the leaf at the next free horizontal slot
            self._next_leaf_x += 1.0
            pos = (self._next_leaf_x, -depth)
        else:
            # Draw the children first so the parent can be centered over them
            child_positions = {}
            for value, sub_node in node.branches.items():
                child_positions[value] = self.plot_tree(ax, sub_node, depth + 1)
            xs = [p[0] for p in child_positions.values()]
            pos = (sum(xs) / len(xs), -depth)
            # Draw an edge to each child, labelled with the branch value
            for value, child_pos in child_positions.items():
                ax.plot([pos[0], child_pos[0]], [pos[1], child_pos[1]], color='b', lw=1)
                mid_x = (pos[0] + child_pos[0]) / 2
                mid_y = (pos[1] + child_pos[1]) / 2
                ax.text(mid_x, mid_y, str(value), color='r', ha='center', fontsize=8)
        # Draw the node itself: the label for a leaf, the feature name otherwise
        text = node.label if node.is_leaf else self.feature_names[node.feature]
        ax.text(pos[0], pos[1], str(text), ha='center', va='center',
                bbox=dict(boxstyle='round', fc='lightyellow'))
        return pos

    def depth(self, node):
        '''
        Compute the depth of the subtree rooted at `node`.
        '''
        if node.is_leaf:
            return 0
        else:
            return 1 + max([self.depth(sub_node) for sub_node in node.branches.values()])

if __name__ == '__main__':
    # Example: the classic "play tennis" weather dataset
    train_x = np.array([
        ['sunny', 'hot', 'high', 'weak'],
        ['sunny', 'hot', 'high', 'strong'],
        ['overcast', 'hot', 'high', 'weak'],
        ['rainy', 'mild', 'high', 'weak'],
        ['rainy', 'cool', 'normal', 'weak'],
        ['rainy', 'cool', 'normal', 'strong'],
        ['overcast', 'cool', 'normal', 'strong'],
        ['sunny', 'mild', 'high', 'weak'],
        ['sunny', 'cool', 'normal', 'weak'],
        ['rainy', 'mild', 'normal', 'weak'],
        ['sunny', 'mild', 'normal', 'strong'],
        ['overcast', 'mild', 'high', 'strong'],
        ['overcast', 'hot', 'normal', 'weak'],
        ['rainy', 'mild', 'high', 'strong']
    ])
    train_y = np.array(['no', 'no', 'yes', 'yes', 'yes', 'no', 'yes', 'no', 'yes', 'yes', 'yes', 'yes', 'yes', 'no'])
    feature_names = ['outlook', 'temperature', 'humidity', 'windy']
    tree = DecisionTree(train_x, train_y, feature_names=feature_names)
    tree.fit()
    tree.plot()
```
Running the script fits the tree on the weather data and shows the visualization in a matplotlib window.
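Once fitted, the tree can also classify a new sample. A small sketch (the sample values below are made up; `predict` expects the feature values in the same order as `feature_names`, and a value never seen during training raises a `KeyError`):
```python
# Hypothetical new sample: (outlook, temperature, humidity, windy)
sample = np.array(['sunny', 'cool', 'high', 'strong'])
print(tree.predict(sample))  # should print 'no' on this training set
```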