Python decision tree visualization code, including information entropy calculation
Sure, here is Python decision tree visualization code that includes the information entropy calculation:
```
import math
import graphviz
import pandas as pd

# Compute the information entropy of a dataset:
# H(D) = -sum over classes k of p_k * log2(p_k)
def calc_entropy(data):
    n = len(data)
    label_counts = {}
    for feat_vec in data:
        current_label = feat_vec[-1]  # the class label is the last column
        if current_label not in label_counts:
            label_counts[current_label] = 0
        label_counts[current_label] += 1
    entropy = 0.0
    for key in label_counts:
        prob = float(label_counts[key]) / n
        entropy -= prob * math.log(prob, 2)
    return entropy

# Return the rows where feature `axis` equals `value`,
# with that feature column removed
def split_data(data, axis, value):
    ret_data = []
    for feat_vec in data:
        if feat_vec[axis] == value:
            reduced_feat_vec = feat_vec[:axis]
            reduced_feat_vec.extend(feat_vec[axis+1:])
            ret_data.append(reduced_feat_vec)
    return ret_data

# Choose the feature with the highest information gain (the ID3 criterion)
def choose_best_feature(data):
    num_features = len(data[0]) - 1
    base_entropy = calc_entropy(data)
    best_info_gain = 0.0
    best_feature = -1
    for i in range(num_features):
        feat_list = [example[i] for example in data]
        unique_vals = set(feat_list)
        new_entropy = 0.0
        for value in unique_vals:
            sub_data = split_data(data, i, value)
            prob = len(sub_data) / float(len(data))
            new_entropy += prob * calc_entropy(sub_data)
        info_gain = base_entropy - new_entropy
        if info_gain > best_info_gain:
            best_info_gain = info_gain
            best_feature = i
    return best_feature

# Return the class label that occurs most often
def majority_cnt(class_list):
    class_count = {}
    for vote in class_list:
        if vote not in class_count:
            class_count[vote] = 0
        class_count[vote] += 1
    sorted_class_count = sorted(class_count.items(), key=lambda x: x[1], reverse=True)
    return sorted_class_count[0][0]

# Recursively build an ID3 decision tree as a nested dict
def create_decision_tree(data, labels):
    class_list = [example[-1] for example in data]
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]  # all samples share one class: make a leaf
    if len(data[0]) == 1:
        return majority_cnt(class_list)  # no features left: majority vote
    best_feat = choose_best_feature(data)
    best_feat_label = labels[best_feat]
    my_tree = {best_feat_label: {}}
    del labels[best_feat]  # this feature has been consumed
    feat_values = [example[best_feat] for example in data]
    unique_vals = set(feat_values)
    for value in unique_vals:
        sub_labels = labels[:]  # copy so sibling branches don't interfere
        my_tree[best_feat_label][value] = create_decision_tree(
            split_data(data, best_feat, value), sub_labels)
    return my_tree

# Visualize the nested-dict tree. Note that sklearn's export_graphviz only
# accepts a fitted sklearn estimator, not this hand-built dict, so we walk
# the dict ourselves and emit graphviz nodes and edges directly.
def visualize_tree(tree):
    graph = graphviz.Digraph()
    counter = [0]  # mutable counter for unique node ids

    def add_node(subtree, parent_id=None, edge_label=None):
        node_id = str(counter[0])
        counter[0] += 1
        if isinstance(subtree, dict):
            feat_label = next(iter(subtree))  # the feature this node splits on
            graph.node(node_id, str(feat_label), shape='box')
            for value, child in subtree[feat_label].items():
                add_node(child, node_id, str(value))
        else:
            graph.node(node_id, str(subtree), shape='ellipse')  # leaf: class label
        if parent_id is not None:
            graph.edge(parent_id, node_id, label=edge_label)

    add_node(tree)
    graph.render('decision_tree', format='png', cleanup=True)

# Test code
if __name__ == '__main__':
    data = pd.read_csv('data.csv')  # the last column is assumed to hold the class label
    feature_names = list(data.columns[:-1])
    labels = feature_names[:]  # copy: create_decision_tree mutates its labels argument
    tree = create_decision_tree(data.values.tolist(), labels)
    visualize_tree(tree)
```
Here, `calc_entropy` computes a dataset's information entropy H(D) = -Σ p_k * log2(p_k), `split_data` extracts the subset of rows matching one value of a feature (dropping that column), `choose_best_feature` picks the split with the highest information gain, `majority_cnt` returns the most frequent class label, `create_decision_tree` recursively builds the tree as a nested dict, and `visualize_tree` walks that dict and renders it with graphviz (sklearn's `export_graphviz` cannot be used here, since it only accepts a fitted sklearn estimator). The test code reads the dataset, builds the tree, and writes the visualization to `decision_tree.png`.
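To make the entropy calculation concrete, here is a quick check on a hand-made label distribution (the rows below are hypothetical, chosen only for illustration): a set with three 'no' samples and two 'yes' samples has entropy -(3/5)log2(3/5) - (2/5)log2(2/5) ≈ 0.971 bits.
```
# Quick check of calc_entropy on a hypothetical 5-row dataset;
# only the last column (the class label) matters here.
rows = [['x', 'no'], ['x', 'no'], ['x', 'no'], ['x', 'yes'], ['x', 'yes']]
print(calc_entropy(rows))  # -(3/5)*log2(3/5) - (2/5)*log2(2/5) ≈ 0.971
```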
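And here is a minimal end-to-end sketch using a small hypothetical dataset in place of `data.csv` (the feature names and values below are invented for illustration):
```
# Hypothetical toy dataset: two features ('outlook', 'windy')
# plus a 'play' class label in the last column.
toy_data = [
    ['sunny',    'yes', 'no'],
    ['sunny',    'no',  'no'],
    ['rainy',    'yes', 'no'],
    ['rainy',    'no',  'yes'],
    ['overcast', 'no',  'yes'],
]
toy_labels = ['outlook', 'windy']

toy_tree = create_decision_tree(toy_data, toy_labels[:])  # pass a copy: the list is mutated
print(toy_tree)  # e.g. {'outlook': {'sunny': 'no', 'overcast': 'yes', 'rainy': {'windy': ...}}}
visualize_tree(toy_tree)  # writes decision_tree.png
```
Note that `graph.render` requires the Graphviz system binaries to be on the PATH; installing the `graphviz` Python package alone is not enough.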