Using the ID3 algorithm in Python to classify the wine dataset
Sure, here is Python code that uses the ID3 algorithm to classify the wine dataset:
```python
import pandas as pd
import numpy as np
from math import log2
# Compute the information entropy of a dataset (the class label is in the last column)
def calc_entropy(data):
    n = len(data)
    label_counts = {}
    for feat_vec in data:
        current_label = feat_vec[-1]
        if current_label not in label_counts:
            label_counts[current_label] = 0
        label_counts[current_label] += 1
    entropy = 0.0
    for key in label_counts:
        prob = float(label_counts[key]) / n
        entropy -= prob * log2(prob)
    return entropy
# Split the dataset: keep rows whose feature `axis` equals `value`, with that feature removed
def split_data(data, axis, value):
    ret_data = []
    for feat_vec in data:
        if feat_vec[axis] == value:
            reduced_feat_vec = feat_vec[:axis]
            reduced_feat_vec.extend(feat_vec[axis + 1:])
            ret_data.append(reduced_feat_vec)
    return ret_data
# Choose the feature with the largest information gain
def choose_best_feature_to_split(data):
    num_features = len(data[0]) - 1
    base_entropy = calc_entropy(data)
    best_info_gain = 0.0
    best_feature = -1
    for i in range(num_features):
        feat_list = [example[i] for example in data]
        unique_vals = set(feat_list)
        new_entropy = 0.0
        for value in unique_vals:
            sub_data = split_data(data, i, value)
            prob = len(sub_data) / float(len(data))
            new_entropy += prob * calc_entropy(sub_data)
        info_gain = base_entropy - new_entropy
        if info_gain > best_info_gain:
            best_info_gain = info_gain
            best_feature = i
    return best_feature
# Majority vote for a leaf node when no features are left to split on
def majority_cnt(class_list):
    class_count = {}
    for vote in class_list:
        if vote not in class_count:
            class_count[vote] = 0
        class_count[vote] += 1
    sorted_class_count = sorted(class_count.items(), key=lambda x: x[1], reverse=True)
    return sorted_class_count[0][0]
# Recursively build the decision tree
def create_tree(data, labels):
    class_list = [example[-1] for example in data]
    # All samples belong to the same class: return a leaf
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]
    # No features left: return the majority class
    if len(data[0]) == 1:
        return majority_cnt(class_list)
    best_feat = choose_best_feature_to_split(data)
    best_feat_label = labels[best_feat]
    my_tree = {best_feat_label: {}}
    del labels[best_feat]
    feat_values = [example[best_feat] for example in data]
    unique_vals = set(feat_values)
    for value in unique_vals:
        sub_labels = labels[:]
        my_tree[best_feat_label][value] = create_tree(split_data(data, best_feat, value), sub_labels)
    return my_tree
# Classify a single sample by walking down the tree
def classify(input_tree, feat_labels, test_vec):
    first_str = list(input_tree.keys())[0]
    second_dict = input_tree[first_str]
    feat_index = feat_labels.index(first_str)
    class_label = None  # stays None if the test value matches no branch of the tree
    for key in second_dict.keys():
        if test_vec[feat_index] == key:
            if isinstance(second_dict[key], dict):
                class_label = classify(second_dict[key], feat_labels, test_vec)
            else:
                class_label = second_dict[key]
    return class_label
# Load the dataset (the class label is assumed to be the last column of wine.csv)
def load_dataset():
    df = pd.read_csv('wine.csv')
    data = np.array(df)
    labels = df.columns.tolist()
    return data, labels
# Main routine
if __name__ == '__main__':
    data, labels = load_dataset()
    # Pass a copy of labels: create_tree deletes used feature names from the list it receives
    my_tree = create_tree(data.tolist(), labels[:])
    print(my_tree)
    # One value per feature column, in the same order as the training columns (class column excluded)
    test_vec = [1, 13.05, 3.86, 2.32, 22.5, 85, 1.65, 1.59, 0.61, 2.87, 580]
    class_label = classify(my_tree, labels, test_vec)
    print(class_label)
```
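As a quick sanity check, the entropy and information-gain helpers above can be exercised on a tiny hand-made dataset before running them on wine.csv; the toy rows below are purely illustrative and are not part of the wine data:
```python
# Toy dataset: two discrete features plus a class label in the last column
toy_data = [
    ['sunny', 'high', 'no'],
    ['sunny', 'low', 'yes'],
    ['rainy', 'high', 'no'],
    ['rainy', 'low', 'no'],
]
print(calc_entropy(toy_data))                  # entropy of the class column, about 0.811
print(choose_best_feature_to_split(toy_data))  # index of the feature with the largest information gain
```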
In this example we use the wine dataset, which contains 13 features and one class label. We build a decision tree with the ID3 algorithm and then classify a single test sample. Because ID3 splits on discrete values, each distinct value of a continuous wine feature is treated as a separate branch here.
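The code also assumes that the class label sits in the last column of wine.csv, since calc_entropy and create_tree read the label from feat_vec[-1]. If your copy of the wine data keeps the class in the first column (as in the UCI original), a minimal sketch of a fix is to move that column to the end before building the tree; the column name 'class' below is only a placeholder for whatever your file actually uses:
```python
import pandas as pd

df = pd.read_csv('wine.csv')
label_col = 'class'  # placeholder: replace with the actual label column name in your file
if df.columns[-1] != label_col:
    # Move the label column to the last position so that feat_vec[-1] is the class label
    df = df[[c for c in df.columns if c != label_col] + [label_col]]
```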