# Compute factor scores by projecting the data matrix onto the rotated
# loading matrix of the 5-factor model, then label and display the result.
scores = np.dot(X1, fa_5_rotate.loadings_)
factor_score = pd.DataFrame(
    scores,
    columns=[f'factor{k}' for k in range(1, 6)],
    # NOTE(review): the row index is taken from df2_corr's COLUMN names —
    # presumably X1 rows correspond to those variables; confirm upstream.
    index=df2_corr.columns,
)
print("\n因子得分:\n", factor_score)
时间: 2024-03-29 22:34:57 浏览: 88
这是一段 Python 代码,它使用了 NumPy 和 Pandas 库来计算因子得分。具体来说,它将数据矩阵 X1 与因子分析模型 fa_5_rotate 的旋转后载荷矩阵相乘,得到五个因子上的得分,然后将结果存入一个 Pandas 数据框,列名设为 factor1 至 factor5,行索引取自 df2_corr 的列名(即各变量名)。这段代码的目的是计算并打印因子得分,以便进行进一步的分析或可视化。
相关问题
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

# ---- Load data -------------------------------------------------------------
data = pd.read_csv('D:/pythonProject/venv/BostonHousing2.csv')

# Columns 5..17 hold the 13 indicator variables used for the PCA.
X = data.iloc[:, 5:18].values

# ---- Standardize (PCA is scale-sensitive) ----------------------------------
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# ---- Principal component analysis ------------------------------------------
pca = PCA()
X_pca = pca.fit_transform(X_scaled)

eigenvalues = pca.explained_variance_
eigenvectors = pca.components_.T

# ---- Scree plot ------------------------------------------------------------
# BUG FIX: the x-axis must enumerate the number of components (1..13);
# the original plotted against range(6, 19), the raw column positions.
variance_explained = np.cumsum(eigenvalues / np.sum(eigenvalues))
plt.plot(range(1, len(eigenvalues) + 1), variance_explained, marker='o')
plt.xlabel('Number of Components')
plt.ylabel('Cumulative Proportion of Variance Explained')
plt.title('Scree Plot')
plt.show()

# Smallest number of components whose cumulative variance exceeds 95%.
n_components = np.sum(variance_explained <= 0.95) + 1

# ---- Loading plot for the first two PCs ------------------------------------
# BUG FIX: the loadings belong to the 13 analysed columns (5:18);
# the original labeled them with data.columns[0:13], the wrong columns.
loadings = pd.DataFrame(eigenvectors[:, 0:2], columns=['PC1', 'PC2'],
                        index=data.columns[5:18])
plt.figure(figsize=(10, 6))
plt.scatter(loadings['PC1'], loadings['PC2'], alpha=0.7)
for i, feature in enumerate(loadings.index):
    plt.text(loadings['PC1'].iloc[i], loadings['PC2'].iloc[i], feature)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.title('Loading Plot')
plt.grid()
plt.show()

# ---- Score plot (samples annotated with their MEDV value) ------------------
scores = pd.DataFrame(X_pca[:, 0:n_components],
                      columns=['PC{}'.format(i + 1) for i in range(n_components)])
plt.figure(figsize=(10, 6))
plt.scatter(scores['PC1'], scores['PC2'], alpha=0.7)
for i, label in enumerate(data['MEDV']):
    plt.text(scores['PC1'].iloc[i], scores['PC2'].iloc[i], label)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.title('Scores Plot')
plt.grid()
plt.show()

# ---- Rank samples by the first principal component -------------------------
data['PC1_score'] = X_pca[:, 0]
sorted_data = data.sort_values(by='PC1_score')

# ---- Principal-component regression: MEDV ~ PC1 ----------------------------
from sklearn.linear_model import LinearRegression

Y = data['MEDV'].values.reshape(-1, 1)
X_pca_regression = X_pca[:, 0].reshape(-1, 1)
regression_model = LinearRegression()
regression_model.fit(X_pca_regression, Y)

intercept = regression_model.intercept_[0]
slope = regression_model.coef_[0][0]
equation = "MEDV = {:.2f} + {:.2f} * PC1".format(intercept, slope)
print("Regression Equation:", equation)

# ---- OLS summary for the same model ----------------------------------------
from statsmodels.api import OLS

# BUG FIX: derive the row count from the data instead of hard-coding 506,
# so the script survives a different dataset size.
X_const = np.concatenate((np.ones((len(Y), 1)), X_pca_regression), axis=1)
ols_model = OLS(Y, X_const).fit()
print("OLS Regression Summary:")
print(ols_model.summary())
这段代码是用 Python 对波士顿房价数据进行主成分分析(PCA)。该代码读取了一个名为 "BostonHousing2.csv" 的数据文件,并将前 13 个指标的数据提取出来,进行了数据标准化和主成分分析。其中,碎石图展示了不同主成分个数下的累计方差贡献率,用于选择主成分个数;载荷图展示了前两个主成分对原始数据各个指标的影响程度;得分图展示了每个房价样本在前两个主成分上的得分情况,用于评估房价的相对位置。最后,该代码还进行了主成分回归模型和最小二乘估计结果的计算和展示。
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import FactorAnalysis

# ---- Load data -------------------------------------------------------------
data = pd.read_csv("D:\复习资料\MVAPureData\who1.csv")
data = data.iloc[1:, :]  # drop the first (non-data) row

# BUG FIX: the original did `data = data.drop('Country', axis=1, inplace=True)`,
# which rebinds `data` to None (inplace=True returns None) and also removes the
# Country column that the ranking step below needs.  Keep the column.

# Convert every indicator column (all but Country at position 0) to numeric.
for i in range(1, data.shape[1]):
    data.iloc[:, i] = pd.to_numeric(data.iloc[:, i])

# Fill missing values with column means.  numeric_only=True keeps the string
# Country column out of the mean computation.
data = data.fillna(data.mean(numeric_only=True))

factor_names = ['Factor1', 'Factor2', 'Factor3', 'Factor4', 'Factor5']

# ---- Factor analysis -------------------------------------------------------
# BUG FIX: sklearn's FactorAnalysis has no `method` parameter; the original
# calls with method='principal' / method="ml" would raise TypeError.  sklearn
# implements a single (EM, maximum-likelihood) estimator, so one fit replaces
# the three redundant fits.
fa = FactorAnalysis(n_components=5, rotation='varimax')
fa.fit(data.iloc[:, 1:])
loadings = pd.DataFrame(fa.components_.T, columns=factor_names,
                        index=data.columns[1:])
print('\nFactor Loadings Using Principal Component Analysis:\n', loadings)

# ---- Heatmap of the loadings -----------------------------------------------
plt.figure(figsize=(15, 8))
sns.heatmap(loadings, cmap='coolwarm', xticklabels=True, yticklabels=True,
            annot=True)
plt.title('Factor Loadings')
plt.xlabel('Factors')
plt.ylabel('Variables')
plt.show()

# ---- Score and rank countries ----------------------------------------------
factors = pd.DataFrame(fa.transform(data.iloc[:, 1:]), columns=factor_names)
# BUG FIX: assign by position (.values).  The original aligned on the index,
# but `data` starts at index 1 after iloc[1:] while `factors` starts at 0,
# producing a misaligned/NaN Country column.
factors['Country'] = data.iloc[:, 0].values

# Standardize each factor so they contribute equally to the composite score.
for name in factor_names:
    factors[name] = (factors[name] - factors[name].mean()) / factors[name].std()

# BUG FIX: sum only the factor columns; the original summed every column,
# which would try to add the Country strings into the score.
factors['Score'] = factors[factor_names].sum(axis=1)
factors = factors.sort_values(by=['Score'], ascending=False).reset_index(drop=True)
print('\nRanked Countries:\n', factors[['Country', 'Score']])
这段代码是做因子分析的,将WHO(世界卫生组织)提供的数据集进行了处理和分析。首先将数据读入,并将非数值类型的数据转换为数值型数据;然后使用因子分析方法,包括主成分分析、主因子分析和最大似然估计,对数据进行因子分析,得到因子载荷量;接着绘制因子载荷量热力图,便于观察变量和因子的关系;最后对每个国家进行排名,得到评分。
阅读全文