np.array(df.iloc[max_index, 0:14])
时间: 2024-01-16 10:02:16 浏览: 24
这段代码是用来提取DataFrame中第max_index行的前14个元素,转化为一个numpy数组。
具体来说,df是一个包含多行多列数据的DataFrame对象,每行代表一个投资组合,包含14个资产的权重。iloc方法可以按照行列的索引来提取DataFrame中的数据,它的参数表示要提取的行和列的索引号。
这里,df.iloc[max_index, 0:14]表示提取df中第max_index行的前14个元素,即该投资组合的权重。提取出来的数据是一个Series对象,使用np.array()方法可以将其转化为numpy数组。因此,np.array(df.iloc[max_index, 0:14])返回一个大小为(14,)的numpy数组,其中包含了该投资组合的14个资产权重。
相关问题
将上述代码放入了Recommenders.py文件中,作为一个自定义工具包。将下列代码中调用scipy包中svd的部分。转为使用Recommenders.py工具包中封装的svd方法。给出修改后的完整代码。import pandas as pd import math as mt import numpy as np from sklearn.model_selection import train_test_split from Recommenders import * from scipy.sparse.linalg import svds from scipy.sparse import coo_matrix from scipy.sparse import csc_matrix # Load and preprocess data triplet_dataset_sub_song_merged = triplet_dataset_sub_song_mergedpd # load dataset triplet_dataset_sub_song_merged_sum_df = triplet_dataset_sub_song_merged[['user','listen_count']].groupby('user').sum().reset_index() triplet_dataset_sub_song_merged_sum_df.rename(columns={'listen_count':'total_listen_count'},inplace=True) triplet_dataset_sub_song_merged = pd.merge(triplet_dataset_sub_song_merged,triplet_dataset_sub_song_merged_sum_df) triplet_dataset_sub_song_merged['fractional_play_count'] = triplet_dataset_sub_song_merged['listen_count']/triplet_dataset_sub_song_merged['total_listen_count'] # Convert data to sparse matrix format small_set = triplet_dataset_sub_song_merged user_codes = small_set.user.drop_duplicates().reset_index() song_codes = small_set.song.drop_duplicates().reset_index() user_codes.rename(columns={'index':'user_index'}, inplace=True) song_codes.rename(columns={'index':'song_index'}, inplace=True) song_codes['so_index_value'] = list(song_codes.index) user_codes['us_index_value'] = list(user_codes.index) small_set = pd.merge(small_set,song_codes,how='left') small_set = pd.merge(small_set,user_codes,how='left') mat_candidate = small_set[['us_index_value','so_index_value','fractional_play_count']] data_array = mat_candidate.fractional_play_count.values row_array = mat_candidate.us_index_value.values col_array = mat_candidate.so_index_value.values data_sparse = coo_matrix((data_array, (row_array, col_array)),dtype=float) # Compute SVD def compute_svd(urm, K): U, s, Vt = svds(urm, K) dim = (len(s), len(s)) S = np.zeros(dim, dtype=np.float32) for i in range(0, len(s)): S[i,i] = 
mt.sqrt(s[i]) U = csc_matrix(U, dtype=np.float32) S = csc_matrix(S, dtype=np.float32) Vt = csc_matrix(Vt, dtype=np.float32) return U, S, Vt def compute_estimated_matrix(urm, U, S, Vt, uTest, K, test): rightTerm = S*Vt max_recommendation = 10 estimatedRatings = np.zeros(shape=(MAX_UID, MAX_PID), dtype=np.float16) recomendRatings = np.zeros(shape=(MAX_UID,max_recommendation ), dtype=np.float16) for userTest in uTest: prod = U[userTest, :]*rightTerm estimatedRatings[userTest, :] = prod.todense() recomendRatings[userTest, :] = (-estimatedRatings[userTest, :]).argsort()[:max_recommendation] return recomendRatings K=50 # number of factors urm = data_sparse MAX_PID = urm.shape[1] MAX_UID = urm.shape[0] U, S, Vt = compute_svd(urm, K) # Compute recommendations for test users # Compute recommendations for test users uTest = [1,6,7,8,23] uTest_recommended_items = compute_estimated_matrix(urm, U, S, Vt, uTest, K, True) # Output recommended songs in a dataframe recommendations = pd.DataFrame(columns=['user','song', 'score','rank']) for user in uTest: rank = 1 for song_index in uTest_recommended_items[user, 0:10]: song = small_set.loc[small_set['so_index_value'] == song_index].iloc[0] # Get song details recommendations = recommendations.append({'user': user, 'song': song['title'], 'score': song['fractional_play_count'], 'rank': rank}, ignore_index=True) rank += 1 display(recommendations)
import pandas as pd
import math as mt
import numpy as np
from sklearn.model_selection import train_test_split
from scipy.sparse import coo_matrix  # still required: the sparse user-item matrix is built here, not inside Recommenders
from Recommenders import SVDRecommender  # import the SVDRecommender class from our custom package

# --- Load and preprocess data ---
# NOTE(review): `triplet_dataset_sub_song_mergedpd` is an undefined placeholder
# carried over from the original snippet -- replace with the real dataset load.
triplet_dataset_sub_song_merged = triplet_dataset_sub_song_mergedpd  # load dataset

# Total listen count per user, used to normalise raw counts into fractions.
triplet_dataset_sub_song_merged_sum_df = triplet_dataset_sub_song_merged[['user', 'listen_count']].groupby('user').sum().reset_index()
triplet_dataset_sub_song_merged_sum_df.rename(columns={'listen_count': 'total_listen_count'}, inplace=True)
triplet_dataset_sub_song_merged = pd.merge(triplet_dataset_sub_song_merged, triplet_dataset_sub_song_merged_sum_df)
triplet_dataset_sub_song_merged['fractional_play_count'] = triplet_dataset_sub_song_merged['listen_count'] / triplet_dataset_sub_song_merged['total_listen_count']

# --- Convert data to sparse matrix format ---
# Assign each user and song a dense integer index so they can address
# rows/columns of the sparse matrix.
small_set = triplet_dataset_sub_song_merged
user_codes = small_set.user.drop_duplicates().reset_index()
song_codes = small_set.song.drop_duplicates().reset_index()
user_codes.rename(columns={'index': 'user_index'}, inplace=True)
song_codes.rename(columns={'index': 'song_index'}, inplace=True)
song_codes['so_index_value'] = list(song_codes.index)
user_codes['us_index_value'] = list(user_codes.index)
small_set = pd.merge(small_set, song_codes, how='left')
small_set = pd.merge(small_set, user_codes, how='left')
mat_candidate = small_set[['us_index_value', 'so_index_value', 'fractional_play_count']]
data_array = mat_candidate.fractional_play_count.values
row_array = mat_candidate.us_index_value.values
col_array = mat_candidate.so_index_value.values
# users x songs matrix of fractional play counts
data_sparse = coo_matrix((data_array, (row_array, col_array)), dtype=float)

# --- Compute SVD using our custom package ---
K = 50  # number of latent factors
urm = data_sparse
MAX_PID = urm.shape[1]
MAX_UID = urm.shape[0]
recommender = SVDRecommender(K)
U, S, Vt = recommender.fit(urm)

# --- Compute recommendations for test users ---
uTest = [1, 6, 7, 8, 23]
uTest_recommended_items = recommender.recommend(uTest, urm, 10)

# --- Output recommended songs in a dataframe ---
# DataFrame.append was removed in pandas 2.0; collect rows and build the
# frame once at the end instead of appending inside the loop.
rows = []
for user in uTest:
    for rank, song_index in enumerate(uTest_recommended_items[user, 0:10], start=1):
        song = small_set.loc[small_set['so_index_value'] == song_index].iloc[0]  # get song details
        rows.append({'user': user, 'song': song['title'],
                     'score': song['fractional_play_count'], 'rank': rank})
recommendations = pd.DataFrame(rows, columns=['user', 'song', 'score', 'rank'])
display(recommendations)
import numpy as np import pandas as pd def localmin(points, pixel_size): x_min = np.min(points[:, 0]) y_min = np.min(points[:, 1]) x_max = np.max(points[:, 0]) y_max = np.max(points[:, 1]) w = x_max - x_min h = y_max - y_min wn = w // pixel_size + 1 hn = h // pixel_size + 1 x_bins = np.array([i * pixel_size for i in range(int(wn + 1))]) y_bins = np.array([i * pixel_size for i in range(int(hn + 1))]) df = pd.DataFrame(points, columns=['x', 'y', 'z', 'classification', 'indices']) df['x_bin'] = pd.cut(df['x'], bins=x_bins) df['y_bin'] = pd.cut(df['y'], bins=y_bins) result = df.groupby(['x_bin', 'y_bin']).apply(lambda x: x.loc[x['z'].idxmin()])[['x', 'y', 'z', 'classification', 'indices']] return result a = np.random.random([100,3]) b = np.random.random([100, 1])//0.5 c = np.arange(1000).reshape([100,1]) a = np.concatenate([a,b,c],axis=-1) d = localmin(a,0.2) 如何从a中删除d中的点
可以判断a中的点是否出现在d中,然后用布尔掩码(~取反)把不包含在d中的点筛选出来。需要注意:a是numpy数组,并没有pandas的isin()方法,因此要按行比较x、y坐标来实现。具体实现代码如下:
```python
import numpy as np
import pandas as pd
def localmin(points, pixel_size):
    """Grid the XY plane into square cells of side `pixel_size` and return,
    for each non-empty cell, the point with the lowest z value.

    points: (N, 5) array-like with columns x, y, z, classification, indices.
    Returns a DataFrame indexed by (x_bin, y_bin) with those five columns.
    """
    x_min = np.min(points[:, 0])
    y_min = np.min(points[:, 1])
    x_max = np.max(points[:, 0])
    y_max = np.max(points[:, 1])
    w = x_max - x_min
    h = y_max - y_min
    # Number of cells needed to span the extent (floor-division + 1).
    wn = w // pixel_size + 1
    hn = h // pixel_size + 1
    # NOTE(review): bin edges start at 0, not at x_min/y_min -- for data whose
    # coordinates are far from the origin, points can fall outside the last
    # edge and be silently dropped (NaN bin). Confirm inputs are near-origin.
    x_bins = np.array([i * pixel_size for i in range(int(wn + 1))])
    y_bins = np.array([i * pixel_size for i in range(int(hn + 1))])
    df = pd.DataFrame(points, columns=['x', 'y', 'z', 'classification', 'indices'])
    df['x_bin'] = pd.cut(df['x'], bins=x_bins)
    df['y_bin'] = pd.cut(df['y'], bins=y_bins)
    # observed=True: visit only non-empty cells. With the categorical bins
    # produced by pd.cut, the default observed=False can hand `apply` empty
    # groups, where Series.idxmin() raises -- and empty cells carry no local
    # minimum anyway.
    result = df.groupby(['x_bin', 'y_bin'], observed=True).apply(
        lambda g: g.loc[g['z'].idxmin()])[['x', 'y', 'z', 'classification', 'indices']]
    return result
# Build a demo point cloud: columns are x, y, z | classification | indices.
a = np.random.random([100, 3])
b = np.random.random([100, 1]) // 0.5  # pseudo-random 0/1 classification column
# BUG FIX: np.arange(1000) has 1000 elements and cannot be reshaped to
# (100, 1); the per-point index column must have exactly 100 entries.
c = np.arange(100).reshape([100, 1])
a = np.concatenate([a, b, c], axis=-1)
d = localmin(a, 0.2)
# Drop from `a` every point whose (x, y) coordinates appear in `d`.
# BUG FIX: `a` is a numpy array and has no .isin() method, so the original
# `a[:, :2].isin(...)` raised AttributeError; compare coordinate tuples
# row-wise against a set built from d instead.
d_coords = set(map(tuple, d[['x', 'y']].to_numpy()))
a = a[np.array([tuple(p) not in d_coords for p in a[:, :2]])]
```
这里的目标是判断a中每个点的前两列(即x和y坐标)是否出现在d中,得到一个长度为100的布尔型数组,表示a中每个点是否在d中出现过,再用~运算符取反后对a进行筛选。需要注意的是,a是numpy数组,并没有isin()方法,直接写a[:, :2].isin(...)会抛出AttributeError;正确做法是先把d中各点的(x, y)坐标存入集合,再逐行比较a的坐标是否在集合中,或者先把a转成pandas DataFrame后再调用isin()。