# Build a lag-feature matrix for stock prediction: for each window size
# set_k (3..39), place set_k-1 lagged copies of the raw columns side by side.
base_names = ['volume', 'open', 'high', 'low', 'close', 'chg',
              'percent', 'turnoverrate', 'amount', 'sentiment_score_avg']
for j in range(3, 40):
    set_k = j
    # Lag-1 slice: rows set_k .. last-but-one, all columns except the first.
    data_x = data.iloc[set_k:-1, 1:]
    data_x.index = range(data_x.shape[0])          # reset to a 0-based index
    data_x.columns = [f'{name}_1' for name in base_names]
    # Remaining lags: each slice is shifted one more row toward the start.
    for i in range(2, set_k):
        data_x_i = data.iloc[set_k + 1 - i:-i, 1:]
        data_x_i.index = range(data_x_i.shape[0])  # align for column concat
        data_x_i.columns = [f'{name}_{i}' for name in base_names]
        data_x = pd.concat([data_x, data_x_i], axis=1)
时间: 2024-03-07 20:51:29 浏览: 48
这段代码是一个双重循环,目的是为了构建用于股票预测的特征矩阵。其中第一重循环 j 从3到39(即 range(3, 40)),用来设置滞后期窗口 set_k 的范围。第二重循环 i 从2到 set_k-1(即 range(2, set_k)),遍历其余滞后期,构建每个滞后期对应的特征矩阵。具体来说,对于每一个滞后期 i,都会取出 data 中从第 set_k+1-i 行起、到倒数第 i 行(不含最后 i 行)为止的数据,作为特征矩阵的一部分,并为这部分数据重新设置列名,命名规则为 'volume_i', 'open_i', 'high_i', 'low_i', 'close_i', 'chg_i', 'percent_i', 'turnoverrate_i', 'amount_i', 'sentiment_score_avg_i'。然后将这部分数据与之前已经构建好的特征矩阵进行列方向上的拼接,最终得到完整的特征矩阵 data_x。
相关问题
# (page request) 代码优化 — optimized version below.
def data_processing(data):
    """Fill gaps in a 15-minute time series by forward-filling missing slots.

    Column 0 of ``data`` holds '%Y/%m/%d %H:%M' timestamp strings and
    column 1 the load values. Returns a DataFrame with columns 'date'
    (a complete 15-minute grid from first to last observation) and 'load'
    (missing slots take the previous observation's value).

    Fixes over the original: removes the no-op statements
    (``code_list = code_list``), avoids O(n^2) repeated ``list.insert``
    calls and the fragile manual index walk, and drops the deprecated
    ``fillna(method='ffill')`` form. Does not mutate the input frame.
    """
    # Forward-fill any NaN values present in the raw input.
    data = data.ffill()
    # Parse the timestamp column; iloc keeps the function column-name agnostic.
    times = pd.to_datetime(data.iloc[:, 0], format='%Y/%m/%d %H:%M')
    series = pd.Series(data.iloc[:, 1].values, index=times)
    # Complete 15-minute grid spanning the observed range.
    full_index = pd.date_range(times.iloc[0], times.iloc[-1], freq='15min')
    # Missing slots inherit the previous value — same semantics as the
    # original's insert-previous-value loop, without quadratic inserts.
    series = series.reindex(full_index).ffill()
    return pd.DataFrame({'date': full_index, 'load': series.values})
1. 可以将 `date_history` 和 `data_history` 的创建合并成一行:
```
date_history, data_history = np.array(data.iloc[:, 0]), [x for item in np.array(data.iloc[:, 1]).tolist() for x in item]
```
2. 可以在遍历 `date_history` 时,直接将字符串转为 datetime 对象,并添加到 `history_time_list` 中:
```
history_time_list = [datetime.datetime.strptime(date[0], '%Y/%m/%d %H:%M') for date in date_history]
```
3. 在记录缺失位置时,可以用 `zip()` 函数将 `time_new_list` 和 `history_time_list` 同时遍历,这样会更加简洁:
```
code_list = []
for i, (new_time, history_time) in enumerate(zip(time_new_list, history_time_list)):
    while (new_time - history_time_list[i]) != datetime.timedelta(minutes=0):
        history_time_list.insert(i, new_time)
        code_list.append(i)
```
4. 可以使用 `pandas` 的 `interpolate()` 方法来进行缺失值插值,这样可以省去很多代码:
```
data = data.set_index('date').resample('15min').interpolate().reset_index()
```
综上所述,优化后的代码如下:
```
def data_processing(data):
    """Resample a 15-minute load series onto a regular grid, interpolating gaps.

    Column 0 of ``data`` holds '%Y/%m/%d %H:%M' timestamp strings and
    column 1 the load values. Returns a DataFrame with columns 'date' and
    'load' on a complete 15-minute index.

    Fixes over the original: ``int((end_time - start_time) / delta.total_seconds() / 60)``
    raised TypeError (int() of a timedelta); building the output frame from
    ``time_new_list`` and ``data_history`` crashed on length mismatch whenever
    a gap existed; the deprecated ``fillna(method=...)`` and ``'15T'`` alias
    are replaced with ``ffill()`` and ``'15min'``. Does not mutate the input.
    """
    # Forward-fill any NaN values in the raw input.
    data = data.ffill()
    # Parse timestamps straight from the column — no np.array round-trip
    # and per-row strptime on length-1 arrays.
    timestamps = pd.to_datetime(data.iloc[:, 0], format='%Y/%m/%d %H:%M')
    # Both columns come from the observed rows, so lengths always match.
    frame = pd.DataFrame({'date': timestamps, 'load': data.iloc[:, 1].values})
    # resample() creates the regular 15-minute grid; interpolate() fills gaps.
    return frame.set_index('date').resample('15min').interpolate().reset_index()
```
# (page request) add comments to the code below
class SimpleDeepForest:
    # Cascade forest: each layer's class probabilities are appended to the
    # feature matrix handed to the next layer.
    def __init__(self, n_layers):
        self.n_layers = n_layers   # number of cascaded layers
        self.forest_layers = []    # fitted classifiers, one per layer

    def fit(self, X, y):
        X_train = X
        for _ in range(self.n_layers):
            clf = RandomForestClassifier()
            clf.fit(X_train, y)
            self.forest_layers.append(clf)
            # Grow the feature matrix with this layer's probabilities.
            X_train = np.concatenate((X_train, clf.predict_proba(X_train)), axis=1)
        return self

    def predict(self, X):
        X_test = X
        for i in range(self.n_layers):
            X_test = np.concatenate((X_test, self.forest_layers[i].predict_proba(X_test)), axis=1)
        # Drop the final layer's own probability columns before predicting.
        # NOTE(review): the hard-coded 2 assumes binary classification.
        return self.forest_layers[-1].predict(X_test[:, :-2])


# 1. Extract sequence features (GC-content and sequence length).
def extract_features(fasta_file):
    features = []
    for record in SeqIO.parse(fasta_file, "fasta"):
        seq = record.seq
        gc_content = (seq.count("G") + seq.count("C")) / len(seq)
        seq_len = len(seq)
        features.append([gc_content, seq_len])
    return np.array(features)


# 2. Read the interaction labels and build the (X, y) dataset.
def create_dataset(rna_features, protein_features, label_file):
    labels = pd.read_csv(label_file, index_col=0)
    X = []
    y = []
    for i in range(labels.shape[0]):
        for j in range(labels.shape[1]):
            X.append(np.concatenate([rna_features[i], protein_features[j]]))
            y.append(labels.iloc[i, j])
    return np.array(X), np.array(y)


# 3. Train and evaluate the SimpleDeepForest classifier.
def optimize_deepforest(X, y):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    model = SimpleDeepForest(n_layers=3)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print(classification_report(y_test, y_pred))


# 4. Entry point.
def main():
    rna_fasta = "RNA.fasta"
    protein_fasta = "pro.fasta"
    label_file = "label.csv"
    rna_features = extract_features(rna_fasta)
    protein_features = extract_features(protein_fasta)
    X, y = create_dataset(rna_features, protein_features, label_file)
    optimize_deepforest(X, y)


if __name__ == "__main__":
    main()
class SimpleDeepForest:
    """A minimal cascade ("deep") forest.

    Each layer is a RandomForestClassifier whose class-probability outputs
    are appended to the feature matrix fed to the next layer.
    """

    def __init__(self, n_layers):
        # Number of cascaded random-forest layers.
        self.n_layers = n_layers
        # Fitted RandomForestClassifier instances, one per layer.
        self.forest_layers = []

    def fit(self, X, y):
        """Train the cascade layer by layer; returns self for chaining."""
        X_train = X
        for _ in range(self.n_layers):
            clf = RandomForestClassifier()
            clf.fit(X_train, y)
            self.forest_layers.append(clf)
            # Augment the features with this layer's class probabilities so
            # the next layer can learn from them.
            X_train = np.concatenate((X_train, clf.predict_proba(X_train)), axis=1)
        return self

    def predict(self, X):
        """Predict with the last layer on the features it was trained on.

        Fix: the original appended every layer's probabilities and then
        stripped exactly two columns (``X_test[:, :-2]``), silently assuming
        binary classification. Building the augmented features from all
        layers *except* the last reproduces the training-time input exactly
        for any number of classes.
        """
        features = X
        for clf in self.forest_layers[:-1]:
            features = np.concatenate((features, clf.predict_proba(features)), axis=1)
        return self.forest_layers[-1].predict(features)
def extract_features(fasta_file):
    """Return an (n_records, 2) array of [GC-content, length] per FASTA record.

    GC-content counts only uppercase 'G'/'C', matching the original code
    (FASTA sequences are conventionally uppercase) — TODO confirm inputs
    are uppercase-normalized.
    """
    features = []
    for record in SeqIO.parse(fasta_file, "fasta"):
        seq = record.seq
        seq_len = len(seq)
        # Guard against zero-length records, which previously raised
        # ZeroDivisionError; report a GC-content of 0.0 for them.
        gc_content = (seq.count("G") + seq.count("C")) / seq_len if seq_len else 0.0
        features.append([gc_content, seq_len])
    return np.array(features)
def create_dataset(rna_features, protein_features, label_file):
    """Build (X, y) for every RNA-protein pair.

    Row (i, j) of the label CSV (first column used as the index) gives the
    label for RNA i paired with protein j; the corresponding sample is the
    concatenation of the two feature vectors.
    """
    label_table = pd.read_csv(label_file, index_col=0)
    n_rna, n_protein = label_table.shape
    # Same (i outer, j inner) ordering as a nested loop would produce.
    X = [np.concatenate([rna_features[i], protein_features[j]])
         for i in range(n_rna) for j in range(n_protein)]
    y = [label_table.iloc[i, j]
         for i in range(n_rna) for j in range(n_protein)]
    return np.array(X), np.array(y)
def optimize_deepforest(X, y):
    """Train a 3-layer SimpleDeepForest on an 80/20 split and print a report."""
    # Hold out 20% of the samples for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    forest = SimpleDeepForest(n_layers=3)
    forest.fit(X_train, y_train)
    # Summarise precision/recall/F1 on the held-out set.
    print(classification_report(y_test, forest.predict(X_test)))
def main():
    """Extract sequence features, assemble the dataset, and train the model."""
    # Feature matrices for the RNA and protein sequences.
    rna_features = extract_features("RNA.fasta")
    protein_features = extract_features("pro.fasta")
    # One sample per RNA-protein pair, labelled from the CSV matrix.
    X, y = create_dataset(rna_features, protein_features, "label.csv")
    optimize_deepforest(X, y)


# Execute only when run as a script, not on import.
if __name__ == "__main__":
    main()
阅读全文