Borderline-SMOTE Source Code
Below is a Python implementation of the Borderline-SMOTE algorithm (the borderline-1 variant), with comments to aid understanding:
```python
"""
Python implementation of the Borderline-SMOTE (borderline-1) algorithm.
"""
import numpy as np
from sklearn.neighbors import NearestNeighbors


def borderline_smote(X, y, k_neighbors=5, sampling_strategy='auto', random_state=None):
    """
    Oversample minority classes with Borderline-SMOTE.

    :param X: feature matrix, shape=(n_samples, n_features)
    :param y: label vector of non-negative integers, shape=(n_samples,)
    :param k_neighbors: number of nearest neighbors used by the KNN searches
    :param sampling_strategy: 'auto' brings every class up to the size of the
        largest class; alternatively, a dict mapping a class label to the
        desired total number of samples for that class
    :param random_state: seed for the random number generator
    :return: the oversampled feature matrix and label vector
    """
    rng = np.random.RandomState(random_state)

    # With 'auto', every class is topped up to the size of the largest class.
    if sampling_strategy == 'auto':
        class_count = np.bincount(y)
        sampling_strategy = {cls: class_count.max() for cls in np.unique(y)}

    # KNN over the *whole* data set: the borderline test needs neighbors from
    # all classes, not just from within the class being oversampled.
    knn_all = NearestNeighbors(n_neighbors=k_neighbors + 1).fit(X)
    _, knn_indices_all = knn_all.kneighbors(X)

    X_parts, y_parts = [X], [y]
    for class_idx, n_target in sampling_strategy.items():
        mask = y == class_idx
        class_samples = X[mask]
        class_indices = np.flatnonzero(mask)

        # Number of synthetic samples needed to reach the target size.
        n_synthetic = n_target - len(class_samples)
        if n_synthetic <= 0 or len(class_samples) < 2:
            continue

        # A minority sample is "in danger" (borderline) when at least half,
        # but not all, of its k neighbors belong to another class. Samples
        # whose neighbors all belong to other classes are treated as noise.
        danger = []
        for i, idx in enumerate(class_indices):
            nn_labels = y[knn_indices_all[idx, 1:]]  # skip the sample itself
            n_diff = np.sum(nn_labels != class_idx)
            if k_neighbors / 2 <= n_diff < k_neighbors:
                danger.append(i)
        if not danger:
            continue

        # KNN restricted to the minority class: borderline-1 interpolates
        # between a danger sample and one of its minority-class neighbors.
        n_within = min(k_neighbors + 1, len(class_samples))
        knn_min = NearestNeighbors(n_neighbors=n_within).fit(class_samples)
        _, knn_indices_min = knn_min.kneighbors(class_samples[danger])

        synthetic = np.empty((n_synthetic, X.shape[1]))
        for s in range(n_synthetic):
            d = rng.randint(len(danger))              # random danger sample
            base = class_samples[danger[d]]
            # Random minority neighbor of the danger sample (neighbor 0 is
            # the sample itself, so draw from positions 1..n_within-1).
            nn = knn_indices_min[d, rng.randint(1, n_within)]
            alpha = rng.uniform(0, 1)                 # interpolation weight
            synthetic[s] = base + alpha * (class_samples[nn] - base)

        X_parts.append(synthetic)
        y_parts.append(np.full(n_synthetic, class_idx, dtype=y.dtype))

    return np.concatenate(X_parts, axis=0), np.concatenate(y_parts)
```
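For production use, the imbalanced-learn library ships a maintained `BorderlineSMOTE` that implements the same idea and also supports the borderline-2 variant (which additionally interpolates toward majority-class neighbors with a smaller step). A minimal sketch of the equivalent call, assuming imbalanced-learn is installed and `X`, `y` are a feature matrix and label vector:

```python
# Equivalent oversampling via imbalanced-learn (pip install imbalanced-learn).
from imblearn.over_sampling import BorderlineSMOTE

sampler = BorderlineSMOTE(
    sampling_strategy='auto',   # balance all classes up to the majority size
    k_neighbors=5,              # neighbors used to synthesize new samples
    m_neighbors=10,             # neighbors used for the danger/noise test
    kind='borderline-1',        # or 'borderline-2'
    random_state=42,
)
X_resampled, y_resampled = sampler.fit_resample(X, y)
```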
Usage example:
```python
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# Assumes the function above is saved as borderline_smote.py.
from borderline_smote import borderline_smote

# Generate an imbalanced binary classification data set (roughly 9:1);
# with balanced classes, oversampling would have nothing to do.
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5,
                           n_redundant=0, n_classes=2, weights=[0.9, 0.1],
                           random_state=42)
# Split into training and test sets, preserving the class ratio.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42)
# Oversample the training set with Borderline-SMOTE.
X_train_resampled, y_train_resampled = borderline_smote(
    X_train, y_train, k_neighbors=5, sampling_strategy='auto', random_state=42)
# Train a logistic regression model and predict on the test set.
lr = LogisticRegression(random_state=42)
lr.fit(X_train_resampled, y_train_resampled)
y_pred = lr.predict(X_test)
# Compute accuracy (see below for metrics better suited to imbalanced data).
acc = accuracy_score(y_test, y_pred)
print(f"Accuracy: {acc:.4f}")
```
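Note that accuracy can look deceptively high on imbalanced data: a classifier that always predicts the majority class already scores about 90% on the 9:1 split above. A short sketch of a more informative evaluation, using standard scikit-learn metrics and the `y_test`/`y_pred` variables from the example:

```python
from sklearn.metrics import classification_report, f1_score

# Per-class precision/recall/F1 makes minority-class performance visible.
print(classification_report(y_test, y_pred, digits=4))
# F1 on the minority class (label 1 in the example above).
print(f"Minority-class F1: {f1_score(y_test, y_pred, pos_label=1):.4f}")
```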