```
a_data = df.iloc[:, number_a-1].values.tolist()
                    ~~~~~~~~^~
TypeError: unsupported operand type(s) for -: 'str' and 'int'
```
This error means that `number_a` is a string, so the expression `number_a - 1` fails: Python cannot subtract an integer from a string. Check where `number_a` is assigned and make sure it holds an integer column position; printing `df`'s column names can help you confirm which position you actually want. If `number_a` comes from user input or a config file, convert it with `int(number_a)` before doing arithmetic. If it is actually a column *name* rather than a position, switch to label-based indexing (`df[number_a]` or `df.loc[:, number_a]`) instead of `df.iloc`.
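A minimal sketch of both fixes, assuming `number_a` arrived as a string such as `'3'` (a positional index stored as text) or a real column name; the sample DataFrame and values here are hypothetical:

```python
import pandas as pd

df = pd.DataFrame({'id': [1, 2], 'age': [30, 40], 'score': [0.5, 0.9]})

number_a = '3'  # hypothetical value read from input/config as a string

if number_a.isdigit():
    # Positional index stored as a string: convert before arithmetic
    a_data = df.iloc[:, int(number_a) - 1].values.tolist()
else:
    # Otherwise treat it as a column name and use label-based indexing
    a_data = df.loc[:, number_a].values.tolist()

print(df.columns.tolist())  # confirm the column names/positions
print(a_data)
```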
Related questions
I have put the code above into a Recommenders.py file as a custom utility package. In the code below, replace the part that calls svd from the scipy package with the svd method wrapped in the Recommenders.py package, and give the complete modified code.

```python
import pandas as pd
import math as mt
import numpy as np
from sklearn.model_selection import train_test_split
from Recommenders import *
from scipy.sparse.linalg import svds
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix

# Load and preprocess data
triplet_dataset_sub_song_merged = triplet_dataset_sub_song_mergedpd  # load dataset
triplet_dataset_sub_song_merged_sum_df = triplet_dataset_sub_song_merged[['user','listen_count']].groupby('user').sum().reset_index()
triplet_dataset_sub_song_merged_sum_df.rename(columns={'listen_count':'total_listen_count'},inplace=True)
triplet_dataset_sub_song_merged = pd.merge(triplet_dataset_sub_song_merged,triplet_dataset_sub_song_merged_sum_df)
triplet_dataset_sub_song_merged['fractional_play_count'] = triplet_dataset_sub_song_merged['listen_count']/triplet_dataset_sub_song_merged['total_listen_count']

# Convert data to sparse matrix format
small_set = triplet_dataset_sub_song_merged
user_codes = small_set.user.drop_duplicates().reset_index()
song_codes = small_set.song.drop_duplicates().reset_index()
user_codes.rename(columns={'index':'user_index'}, inplace=True)
song_codes.rename(columns={'index':'song_index'}, inplace=True)
song_codes['so_index_value'] = list(song_codes.index)
user_codes['us_index_value'] = list(user_codes.index)
small_set = pd.merge(small_set,song_codes,how='left')
small_set = pd.merge(small_set,user_codes,how='left')
mat_candidate = small_set[['us_index_value','so_index_value','fractional_play_count']]
data_array = mat_candidate.fractional_play_count.values
row_array = mat_candidate.us_index_value.values
col_array = mat_candidate.so_index_value.values
data_sparse = coo_matrix((data_array, (row_array, col_array)), dtype=float)

# Compute SVD
def compute_svd(urm, K):
    U, s, Vt = svds(urm, K)
    dim = (len(s), len(s))
    S = np.zeros(dim, dtype=np.float32)
    for i in range(0, len(s)):
        S[i,i] = mt.sqrt(s[i])
    U = csc_matrix(U, dtype=np.float32)
    S = csc_matrix(S, dtype=np.float32)
    Vt = csc_matrix(Vt, dtype=np.float32)
    return U, S, Vt

def compute_estimated_matrix(urm, U, S, Vt, uTest, K, test):
    rightTerm = S*Vt
    max_recommendation = 10
    estimatedRatings = np.zeros(shape=(MAX_UID, MAX_PID), dtype=np.float16)
    recomendRatings = np.zeros(shape=(MAX_UID, max_recommendation), dtype=np.float16)
    for userTest in uTest:
        prod = U[userTest, :]*rightTerm
        estimatedRatings[userTest, :] = prod.todense()
        recomendRatings[userTest, :] = (-estimatedRatings[userTest, :]).argsort()[:max_recommendation]
    return recomendRatings

K = 50  # number of factors
urm = data_sparse
MAX_PID = urm.shape[1]
MAX_UID = urm.shape[0]
U, S, Vt = compute_svd(urm, K)

# Compute recommendations for test users
uTest = [1,6,7,8,23]
uTest_recommended_items = compute_estimated_matrix(urm, U, S, Vt, uTest, K, True)

# Output recommended songs in a dataframe
recommendations = pd.DataFrame(columns=['user','song', 'score','rank'])
for user in uTest:
    rank = 1
    for song_index in uTest_recommended_items[user, 0:10]:
        song = small_set.loc[small_set['so_index_value'] == song_index].iloc[0]  # Get song details
        recommendations = recommendations.append({'user': user, 'song': song['title'], 'score': song['fractional_play_count'], 'rank': rank}, ignore_index=True)
        rank += 1
display(recommendations)
```
Here is the complete modified code, using the `SVDRecommender` wrapper from Recommenders.py:

```python
import pandas as pd
import math as mt
import numpy as np
from sklearn.model_selection import train_test_split
from scipy.sparse import coo_matrix  # still needed to build the sparse matrix
from Recommenders import SVDRecommender  # import the SVDRecommender class from our custom package

# Load and preprocess data
triplet_dataset_sub_song_merged = triplet_dataset_sub_song_mergedpd  # load dataset (assumes this DataFrame is already defined elsewhere)
triplet_dataset_sub_song_merged_sum_df = triplet_dataset_sub_song_merged[['user','listen_count']].groupby('user').sum().reset_index()
triplet_dataset_sub_song_merged_sum_df.rename(columns={'listen_count':'total_listen_count'},inplace=True)
triplet_dataset_sub_song_merged = pd.merge(triplet_dataset_sub_song_merged,triplet_dataset_sub_song_merged_sum_df)
triplet_dataset_sub_song_merged['fractional_play_count'] = triplet_dataset_sub_song_merged['listen_count']/triplet_dataset_sub_song_merged['total_listen_count']
# Convert data to sparse matrix format
small_set = triplet_dataset_sub_song_merged
user_codes = small_set.user.drop_duplicates().reset_index()
song_codes = small_set.song.drop_duplicates().reset_index()
user_codes.rename(columns={'index':'user_index'}, inplace=True)
song_codes.rename(columns={'index':'song_index'}, inplace=True)
song_codes['so_index_value'] = list(song_codes.index)
user_codes['us_index_value'] = list(user_codes.index)
small_set = pd.merge(small_set,song_codes,how='left')
small_set = pd.merge(small_set,user_codes,how='left')
mat_candidate = small_set[['us_index_value','so_index_value','fractional_play_count']]
data_array = mat_candidate.fractional_play_count.values
row_array = mat_candidate.us_index_value.values
col_array = mat_candidate.so_index_value.values
data_sparse = coo_matrix((data_array, (row_array, col_array)),dtype=float)
# Compute SVD using our custom package
K=50 # number of factors
urm = data_sparse
MAX_PID = urm.shape[1]
MAX_UID = urm.shape[0]
recommender = SVDRecommender(K)
U, S, Vt = recommender.fit(urm)
# Compute recommendations for test users
uTest = [1,6,7,8,23]
uTest_recommended_items = recommender.recommend(uTest, urm, 10)
# Output recommended songs in a dataframe
rows = []
for user in uTest:
    rank = 1
    for song_index in uTest_recommended_items[user, 0:10]:
        # Look up song details for this item index
        song = small_set.loc[small_set['so_index_value'] == int(song_index)].iloc[0]
        rows.append({'user': user, 'song': song['title'],
                     'score': song['fractional_play_count'], 'rank': rank})
        rank += 1
# Build the DataFrame in one step (DataFrame.append was removed in pandas 2.0)
recommendations = pd.DataFrame(rows, columns=['user', 'song', 'score', 'rank'])
display(recommendations)
```
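The answer above assumes Recommenders.py exposes an `SVDRecommender` class with `fit` and `recommend` methods; the question only showed free functions, so this exact interface is an assumption. A minimal sketch of what such a class could look like, built directly from the `compute_svd` and `compute_estimated_matrix` functions in the question:

```python
import math as mt
import numpy as np
from scipy.sparse.linalg import svds
from scipy.sparse import csc_matrix

class SVDRecommender:
    """Hypothetical wrapper around scipy's svds, mirroring the question's functions."""

    def __init__(self, K):
        self.K = K  # number of latent factors

    def fit(self, urm):
        # Truncated SVD of the user-rating matrix; store sqrt of singular values
        U, s, Vt = svds(urm, self.K)
        S = np.zeros((len(s), len(s)), dtype=np.float32)
        for i in range(len(s)):
            S[i, i] = mt.sqrt(s[i])
        self.U = csc_matrix(U, dtype=np.float32)
        self.S = csc_matrix(S, dtype=np.float32)
        self.Vt = csc_matrix(Vt, dtype=np.float32)
        return self.U, self.S, self.Vt

    def recommend(self, uTest, urm, max_recommendation):
        # Reconstruct each test user's ratings row and rank items by estimated rating
        MAX_UID, MAX_PID = urm.shape
        rightTerm = self.S * self.Vt
        recommendations = np.zeros((MAX_UID, max_recommendation), dtype=np.int64)
        for userTest in uTest:
            prod = self.U[userTest, :] * rightTerm
            estimated = np.asarray(prod.todense()).ravel()
            recommendations[userTest, :] = (-estimated).argsort()[:max_recommendation]
        return recommendations
```

Using integer dtype for the returned indices avoids the precision loss of the original `float16` array when item indices grow large.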
Python code: a genotype dataset in a CSV file with 83 columns, consisting of 1 class label (5 classes) and 82 features, with feature names in the first row. Perform embedded feature selection with a random-forest model (`SelectFromModel`, keeping feature names) to pick candidate features and write the result to a CSV file. Output a plot and a CSV file of the candidate features ranked by importance. Then, stepping up the number of importance-ranked candidate features, compute classification accuracy with 10-fold cross-validation and produce a learning-curve plot and CSV file.
Here is a Python code example that fits your requirements:
```python
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
# 1. Load the dataset
data = pd.read_csv('genotype_dataset.csv')
X = data.iloc[:, 1:]  # features (assumes the label is in the first column)
y = data.iloc[:, 0]   # class label (5 classes)
feature_names = X.columns.tolist()  # feature names
# 2. Data preprocessing (if needed)
# 3. Embedded feature selection with a random forest
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(X, y)
feature_importances = rf.feature_importances_
selector = SelectFromModel(rf, threshold='median', prefit=True)
X_selected = selector.transform(X)
selected_features = np.array(feature_names)[selector.get_support()]
# 4. Write the candidate features to a CSV file
selected_data = pd.concat([y, pd.DataFrame(X_selected, columns=selected_features)], axis=1)
selected_data.to_csv('selected_features.csv', index=False)
# 5. Plot and save the candidate features ranked by importance
feature_importances_df = pd.DataFrame({'Feature': selected_features, 'Importance': feature_importances[selector.get_support()]})
feature_importances_df = feature_importances_df.sort_values(by='Importance', ascending=False)
feature_importances_df.to_csv('feature_importances.csv', index=False)
plt.figure(figsize=(10, 6))
plt.barh(feature_importances_df['Feature'], feature_importances_df['Importance'])
plt.xlabel('Importance')
plt.ylabel('Feature')
plt.title('Feature Importance')
plt.savefig('feature_importances.png')
# 6. Step up the number of importance-ranked features, compute 10-fold CV accuracy,
#    and save the learning curve as a plot and a CSV file
ranked_features = feature_importances_df['Feature'].tolist()  # most important first
cv_scores = []
num_features_range = range(1, len(ranked_features) + 1)
for num_features in num_features_range:
    X_subset = selected_data[ranked_features[:num_features]]  # top-k features by importance
    scores = cross_val_score(rf, X_subset, y, cv=10)
    cv_scores.append(scores.mean())
learning_curve_df = pd.DataFrame({'Number of Features': num_features_range, 'Cross-validated Accuracy': cv_scores})
learning_curve_df.to_csv('learning_curve.csv', index=False)
plt.figure(figsize=(10, 6))  # start a new figure so the bar chart is not overdrawn
plt.plot(num_features_range, cv_scores)
plt.xlabel('Number of Features')
plt.ylabel('Cross-validated Accuracy')
plt.title('Learning Curve')
plt.savefig('learning_curve.png')
```
In the code above, we use `SelectFromModel` (keeping the feature names) to select candidate features and sort them by importance. The results are written to CSV files, together with a bar chart of the importance-ranked features. We then add features one at a time in importance order, compute classification accuracy with 10-fold cross-validation, and save both the learning-curve plot and its underlying data as CSV. Make sure the label column is the first column of your CSV, adapt the file name `genotype_dataset.csv` to your dataset, and install the required Python libraries (pandas, numpy, scikit-learn, matplotlib).
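As a small follow-up, a minimal sketch (using only the `cv_scores`, `num_features_range`, and `ranked_features` variables defined above) of how to read off the feature count with the best cross-validated accuracy:

```python
# Pick the feature count with the highest mean CV accuracy
best_idx = int(np.argmax(cv_scores))
best_num_features = num_features_range[best_idx]
print(f"Best CV accuracy {cv_scores[best_idx]:.3f} with {best_num_features} features:")
print(ranked_features[:best_num_features])
```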