Explain this code:

```python
def KMeans(dataSet, k):
    m = np.shape(dataSet)[0]
    clusterAssment = np.mat(np.zeros((m, 2)))
    clusterChange = True
```
This is the beginning of a K-means clustering algorithm in Python.
First, it defines a function named KMeans that takes two parameters, dataSet and k: the data set and the number of clusters.
Next, it computes the number of rows m in the data set and creates an (m, 2) zero matrix clusterAssment, used to record which cluster each sample belongs to and its distance to that cluster's centroid.
Finally, it initializes a variable clusterChange to True, which tracks whether any cluster assignment changed during an iteration.
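The snippet stops right after this setup. For orientation, here is a minimal sketch of how such a function commonly continues, modeled on the classic *Machine Learning in Action* style that this snippet follows; the helpers distEclud and randCent are assumptions, not part of the original code:

```python
import numpy as np

def distEclud(vecA, vecB):
    # Euclidean distance between two row vectors (assumed helper)
    return np.sqrt(np.sum(np.power(vecA - vecB, 2)))

def randCent(dataSet, k):
    # draw k random centroids within each column's value range (assumed helper)
    n = np.shape(dataSet)[1]
    centroids = np.mat(np.zeros((k, n)))
    for j in range(n):
        minJ = np.min(dataSet[:, j])
        rangeJ = float(np.max(dataSet[:, j]) - minJ)
        centroids[:, j] = minJ + rangeJ * np.random.rand(k, 1)
    return centroids

def KMeans(dataSet, k):
    m = np.shape(dataSet)[0]
    clusterAssment = np.mat(np.zeros((m, 2)))  # [cluster index, squared distance]
    clusterChange = True
    centroids = randCent(dataSet, k)
    while clusterChange:
        clusterChange = False
        # assignment step: attach every sample to its nearest centroid
        for i in range(m):
            minDist, minIndex = np.inf, -1
            for j in range(k):
                distJI = distEclud(centroids[j, :], dataSet[i, :])
                if distJI < minDist:
                    minDist, minIndex = distJI, j
            if clusterAssment[i, 0] != minIndex:
                clusterChange = True  # some sample switched cluster
            clusterAssment[i, :] = minIndex, minDist ** 2
        # update step: move each centroid to the mean of its samples
        for cent in range(k):
            ptsInClust = dataSet[np.nonzero(clusterAssment[:, 0].A == cent)[0]]
            if len(ptsInClust) > 0:
                centroids[cent, :] = np.mean(ptsInClust, axis=0)
    return centroids, clusterAssment
```

The loop repeats until no sample switches cluster, which is exactly what the clusterChange flag set up above is for; call it as, e.g., `KMeans(np.mat(data), 3)`.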
Related questions
Improve this code:

```python
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs

def distEclud(arrA, arrB):
    # Euclidean distance: sum of the squared differences
    d = arrA - arrB
    dist = np.sum(np.power(d, 2), axis=1)
    return dist

def randCent(dataSet, k):
    # pick centroids: k rows by n columns, uniform between data_min and data_max
    n = dataSet.shape[1]  # number of columns
    data_min = dataSet.min()
    data_max = dataSet.max()
    data_cent = np.random.uniform(data_min, data_max, (k, n))
    return data_cent

def kMeans(dataSet, k, distMeans=distEclud, createCent=randCent):
    x, y = make_blobs(centers=100)  # generate blob data
    x = pd.DataFrame(x)
    m, n = dataSet.shape
    centroids = createCent(dataSet, k)  # initialize the k centroids
    clusterAssment = np.zeros((m, 3))  # container for the assignments
    clusterAssment[:, 0] = np.inf  # column 0: distance, start at infinity
    clusterAssment[:, 1:3] = -1  # column 1: current cluster id, column 2: previous cluster id
    # append the container to the right of the data set
    result_set = pd.concat([pd.DataFrame(dataSet), pd.DataFrame(clusterAssment)],
                           axis=1, ignore_index=True)
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        for i in range(m):
            # distance from point i to every centroid (sum of squared differences)
            dist = distMeans(dataSet.iloc[i, :n].values, centroids)
            result_set.iloc[i, n] = dist.min()  # smallest distance
            result_set.iloc[i, n + 1] = np.where(dist == dist.min())[0]  # index of the nearest centroid
        clusterChanged = not (result_set.iloc[:, -1] == result_set.iloc[:, -2]).all()
        if clusterChanged:
            # per-cluster mean of every attribute, grouped by the current assignment
            cent_df = result_set.groupby(n + 1).mean()
            centroids = cent_df.iloc[:, :n].values  # updated centroids
            result_set.iloc[:, -1] = result_set.iloc[:, -2]  # copy current labels into the "previous" column
    return centroids, result_set

x = np.random.randint(0, 100, size=100)
y = np.random.randint(0, 100, size=100)
randintnum = pd.concat([pd.DataFrame(x), pd.DataFrame(y)], axis=1, ignore_index=True)
# randintnum_test, randintnum_test = kMeans(randintnum, 3)
# plt.scatter(randintnum_test.iloc[:, 0], randintnum_test.iloc[:, 1], c=randintnum_test.iloc[:, -1])
# result_test, cent_test = kMeans(data, 4)
cent_test, result_test = kMeans(randintnum, 3)
plt.scatter(result_test.iloc[:, 0], result_test.iloc[:, 1], c=result_test.iloc[:, -1])
plt.scatter(cent_test[:, 0], cent_test[:, 1], color='red', marker='x', s=100)
```
This code implements the k-means clustering algorithm, but a few things can be improved:
1. The createCent parameter of kMeans could be changed to accept a set of centroids directly, making the function more flexible.
2. The distMeans parameter could be replaced with a distance function from SciPy, such as scipy.spatial.distance.cdist, to speed up the distance computation.
3. clusterAssment could be initialized as a pd.DataFrame, avoiding the manual np.zeros and np.inf bookkeeping.
The improved code:
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
def randCent(dataSet, k):
    """
    Randomly generate k centroids.
    """
    n = dataSet.shape[1]  # number of columns
    data_min = dataSet.min()
    data_max = dataSet.max()
    # k rows by n columns, drawn uniformly between each column's min and max
    data_cent = np.random.uniform(data_min, data_max, (k, n))
    return data_cent
def kMeans(dataSet, k, createCent=randCent, distMeans=cdist):
    """
    k-means clustering.
    """
    m, n = dataSet.shape
    centroids = createCent(dataSet, k)  # initialize the k centroids
    # append one label column (initialized to -1) to the right of the data
    result_set = pd.concat([pd.DataFrame(dataSet.values),
                            pd.DataFrame(-np.ones((m, 1)))],
                           axis=1, ignore_index=True)
    clusterChanged = True
    while clusterChanged:
        # distances from every point to every centroid in one vectorized call
        dist = distMeans(dataSet.values, centroids, metric='euclidean')
        clusterAssment = np.argmin(dist, axis=1)  # nearest centroid per point
        # stop once no point changes cluster
        clusterChanged = not np.array_equal(result_set.iloc[:, -1].values, clusterAssment)
        result_set.iloc[:, -1] = clusterAssment
        for i in range(k):
            # move each centroid to the mean of the points assigned to it
            members = result_set[result_set.iloc[:, -1] == i]
            if not members.empty:
                centroids[i] = members.iloc[:, :n].mean().values
    return centroids, result_set
x = np.random.randint(0, 100, size=100)
y = np.random.randint(0, 100, size=100)
randintnum = pd.concat([pd.DataFrame(x), pd.DataFrame(y)], axis=1, ignore_index=True)
cent_test, result_test = kMeans(randintnum, 3)
plt.scatter(result_test.iloc[:, 0], result_test.iloc[:, 1], c=result_test.iloc[:, -1])
plt.scatter(cent_test[:, 0], cent_test[:, 1], color='red', marker='x', s=100)
plt.show()
```
```python
import random
import numpy as np
import matplotlib.pyplot as plt

# Generate random coordinate points
def generate_points(num_points):
    points = []
    for i in range(num_points):
        x = random.uniform(-10, 10)
        y = random.uniform(-10, 10)
        points.append([x, y])
    return points

# Euclidean distance
def euclidean_distance(point1, point2):
    return np.sqrt(np.sum(np.square(np.array(point1) - np.array(point2))))

# K-means implementation
def kmeans(points, k, num_iterations=100):
    num_points = len(points)
    # randomly pick k points as the initial cluster centers
    centroids = random.sample(points, k)
    # initialize the labels and the distance matrix
    labels = np.zeros(num_points)
    distances = np.zeros((num_points, k))
    for i in range(num_iterations):
        # distance from every point to every cluster center
        for j in range(num_points):
            for l in range(k):
                distances[j][l] = euclidean_distance(points[j], centroids[l])
        # assign each point to its nearest center
        for j in range(num_points):
            labels[j] = np.argmin(distances[j])
        # update the cluster centers
        for l in range(k):
            centroids[l] = np.mean([points[j] for j in range(num_points) if labels[j] == l], axis=0)
    return labels, centroids

# Generate the points
points = generate_points(100)

# Run K-means for several values of k
k_values = [2, 3, 4]
for k in k_values:
    labels, centroids = kmeans(points, k)
    # plot the clustering result
    colors = ['r', 'g', 'b', 'y', 'c', 'm']
    for i in range(k):
        plt.scatter([points[j][0] for j in range(len(points)) if labels[j] == i],
                    [points[j][1] for j in range(len(points)) if labels[j] == i],
                    color=colors[i])
    plt.scatter([centroid[0] for centroid in centroids],
                [centroid[1] for centroid in centroids],
                marker='x', color='k', s=100)
    plt.title('K-means clustering with k={}'.format(k))
    plt.show()
```

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris

# Load the data set
iris = load_iris()
X = iris.data
y = iris.target

# K-means clustering
kmeans = KMeans(n_clusters=3, random_state=0).fit(X)

# Visualize the result
plt.scatter(X[:, 0], X[:, 1], c=kmeans.labels_)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('K-means clustering on iris dataset')
plt.show()
```

Analyze the results in terms of clustering evaluation metrics.
Commonly used clustering evaluation metrics include:
1. SSE (Sum of Squared Errors): the sum of squared distances from each point to its cluster center; a smaller SSE means tighter clusters.
2. Silhouette Coefficient: measures how well each sample fits its own cluster relative to other clusters; it ranges over [-1, 1], and values closer to 1 indicate better clustering.
3. Calinski-Harabasz Index (variance ratio criterion): the ratio of between-cluster dispersion to within-cluster dispersion; a larger CH index indicates better clustering.
4. Davies-Bouldin Index (DBI): compares average within-cluster distances to between-cluster distances; a smaller DBI indicates better clustering.
Which of these metrics is appropriate depends on the business scenario and requirements. In practice, techniques such as cross-validation can also be used to validate the clustering results; scikit-learn ships implementations of these metrics, as sketched below.
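As a minimal sketch, the four metrics can be computed for the iris clustering above as follows. SSE is exposed as the fitted model's inertia_ attribute; the other three live in sklearn.metrics:

```python
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris
from sklearn.metrics import (silhouette_score,
                             calinski_harabasz_score,
                             davies_bouldin_score)

X = load_iris().data
kmeans = KMeans(n_clusters=3, random_state=0).fit(X)
labels = kmeans.labels_

print('SSE (inertia):    ', kmeans.inertia_)                    # smaller is tighter
print('Silhouette:       ', silhouette_score(X, labels))        # closer to 1 is better
print('Calinski-Harabasz:', calinski_harabasz_score(X, labels)) # larger is better
print('Davies-Bouldin:   ', davies_bouldin_score(X, labels))    # smaller is better
```

Running this for several values of n_clusters and comparing the numbers is a common way to choose k, e.g., looking for the elbow of the SSE curve.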