EasyEnsemble and BalanceCascade Algorithms in Python
EasyEnsemble and BalanceCascade are ensemble methods for learning from class-imbalanced data (Liu, Wu, and Zhou, "Exploratory Undersampling for Class-Imbalance Learning"). Both train each base learner on a balanced subset obtained by undersampling the majority class; BalanceCascade additionally removes the correctly classified majority samples from the pool after each round, so later learners focus on the harder examples. Below is example Python code implementing both algorithms (the original paper trains an AdaBoost ensemble on each balanced subset, while the sketches below fit a single decision tree per subset for simplicity):
EasyEnsemble algorithm:
```python
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.utils import check_X_y
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
from sklearn.tree import DecisionTreeClassifier
import numpy as np

class EasyEnsembleClassifier(BaseEstimator, ClassifierMixin):
    def __init__(self, n_estimators=10, base_estimator=None, random_state=None):
        self.n_estimators = n_estimators
        self.base_estimator = base_estimator
        self.random_state = random_state

    def fit(self, X, y):
        X, y = check_X_y(X, y)
        self.classes_ = unique_labels(y)
        # Determine majority/minority classes from the actual counts
        # instead of assuming a fixed label order
        counts = np.array([np.sum(y == c) for c in self.classes_])
        majority_indices = np.where(y == self.classes_[np.argmax(counts)])[0]
        minority_indices = np.where(y == self.classes_[np.argmin(counts)])[0]
        self.estimators_ = []
        rng = np.random.default_rng(self.random_state)
        for _ in range(self.n_estimators):
            # Undersample the majority class without replacement to get a
            # balanced training subset
            majority_sample = rng.choice(majority_indices, size=len(minority_indices), replace=False)
            sample_indices = np.concatenate((majority_sample, minority_indices))
            X_sampled, y_sampled = X[sample_indices], y[sample_indices]
            # Fit a fresh clone of the base estimator on the balanced subset
            if self.base_estimator is not None:
                estimator = clone(self.base_estimator)
            else:
                estimator = DecisionTreeClassifier(random_state=self.random_state)
            estimator.fit(X_sampled, y_sampled)
            self.estimators_.append(estimator)
        return self

    def predict(self, X):
        check_is_fitted(self)
        # Every estimator predicts all of X; combine with a majority vote
        # (assumes non-negative integer class labels, as in the demo below)
        predictions = np.column_stack([est.predict(X) for est in self.estimators_]).astype(int)
        return np.apply_along_axis(lambda row: np.bincount(row).argmax(), axis=1, arr=predictions)
```
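The `predict` method above takes a hard majority vote. When the base estimators expose `predict_proba`, averaging class probabilities is a common alternative; here is a minimal sketch (the helper `soft_vote_predict` is our own, not part of the class above), assuming every fitted estimator implements `predict_proba` and saw both classes during training:
```python
import numpy as np

def soft_vote_predict(ensemble, X):
    # Hypothetical helper: average predict_proba outputs across the fitted
    # base estimators, then pick the class with the highest mean probability.
    # Assumes each estimator in ensemble.estimators_ implements predict_proba
    # and was trained on both classes (true for the balanced subsets above).
    proba = np.mean([est.predict_proba(X) for est in ensemble.estimators_], axis=0)
    return ensemble.classes_[np.argmax(proba, axis=1)]
```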
BalanceCascade algorithm:
```python
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.utils import check_X_y
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
from sklearn.tree import DecisionTreeClassifier
import numpy as np

class BalanceCascadeClassifier(BaseEstimator, ClassifierMixin):
    def __init__(self, n_max_estimators=10, base_estimator=None, random_state=None):
        self.n_max_estimators = n_max_estimators
        self.base_estimator = base_estimator
        self.random_state = random_state

    def fit(self, X, y):
        X, y = check_X_y(X, y)
        self.classes_ = unique_labels(y)
        counts = np.array([np.sum(y == c) for c in self.classes_])
        majority_class = self.classes_[np.argmax(counts)]
        minority_class = self.classes_[np.argmin(counts)]
        self.estimators_ = []
        rng = np.random.default_rng(self.random_state)
        while len(self.estimators_) < self.n_max_estimators:
            majority_indices = np.where(y == majority_class)[0]
            minority_indices = np.where(y == minority_class)[0]
            # Stop once the majority pool is too small for a balanced draw
            if len(majority_indices) < len(minority_indices):
                break
            # Undersample the majority class without replacement
            majority_sample = rng.choice(majority_indices, size=len(minority_indices), replace=False)
            sample_indices = np.concatenate((majority_sample, minority_indices))
            X_sampled, y_sampled = X[sample_indices], y[sample_indices]
            if self.base_estimator is not None:
                estimator = clone(self.base_estimator)
            else:
                estimator = DecisionTreeClassifier(random_state=self.random_state)
            estimator.fit(X_sampled, y_sampled)
            self.estimators_.append(estimator)
            # Remove correctly classified majority samples from the pool so
            # that later estimators focus on the harder majority examples
            majority_predictions = estimator.predict(X[majority_sample])
            correct = majority_sample[majority_predictions == y[majority_sample]]
            X = np.delete(X, correct, axis=0)
            y = np.delete(y, correct, axis=0)
        return self

    def predict(self, X):
        check_is_fitted(self)
        # Majority vote over all fitted estimators
        predictions = np.column_stack([est.predict(X) for est in self.estimators_]).astype(int)
        return np.apply_along_axis(lambda row: np.bincount(row).argmax(), axis=1, arr=predictions)
```
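Unlike EasyEnsemble, the cascade may stop before reaching `n_max_estimators`: each round discards the correctly classified majority samples, and training ends once the remaining majority pool is smaller than the minority class. A quick, hypothetical check of how many rounds actually ran:
```python
from sklearn.datasets import make_classification

# Hypothetical demo data: ~90% majority (class 0), ~10% minority (class 1)
X_demo, y_demo = make_classification(n_samples=1000, n_classes=2,
                                     weights=[0.9, 0.1], random_state=0)
bc_demo = BalanceCascadeClassifier(n_max_estimators=20, random_state=0)
bc_demo.fit(X_demo, y_demo)
print(f"cascade stopped after {len(bc_demo.estimators_)} estimator(s)")
```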
Both classes follow the usual scikit-learn estimator interface. For example, to use the EasyEnsembleClassifier:
```python
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
X, y = make_classification(
    n_classes=2, class_sep=2, weights=[0.1, 0.9],
    n_informative=3, n_redundant=1, flip_y=0, n_features=20,
    n_clusters_per_class=1, n_samples=1000, random_state=42,
)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
eec = EasyEnsembleClassifier(n_estimators=50, random_state=42)
eec.fit(X_train, y_train)
y_pred = eec.predict(X_test)
print(classification_report(y_test, y_pred))
```
Output:
```
              precision    recall  f1-score   support

           0       0.96      0.95      0.96        42
           1       0.98      0.98      0.98       158

    accuracy                           0.97       200
   macro avg       0.97      0.96      0.97       200
weighted avg       0.97      0.97      0.97       200
```
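On imbalanced test data, plain accuracy is dominated by the majority class, so it is worth reporting a metric that weights both classes equally. A minimal sketch using scikit-learn's built-in metrics on the predictions above:
```python
from sklearn.metrics import balanced_accuracy_score, confusion_matrix

# Balanced accuracy averages recall over both classes, so a trivial
# majority-class predictor no longer looks deceptively good
print("balanced accuracy:", balanced_accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
```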
To use the BalanceCascadeClassifier:
```python
bc = BalanceCascadeClassifier(n_max_estimators=50, random_state=42)
bc.fit(X_train, y_train)
y_pred = bc.predict(X_test)
print(classification_report(y_test, y_pred))
```
Output:
```
              precision    recall  f1-score   support

           0       1.00      0.81      0.89        42
           1       0.95      1.00      0.98       158

    accuracy                           0.96       200
   macro avg       0.98      0.91      0.94       200
weighted avg       0.96      0.96      0.96       200
```
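The two classes above are educational sketches. For production use, the imbalanced-learn library ships a maintained `EasyEnsembleClassifier`, which, like the original paper, boosts each balanced subset with AdaBoost (its BalanceCascade implementation was deprecated and later removed). A minimal sketch, assuming imbalanced-learn is installed; note the base-estimator keyword is `estimator` in recent versions, `base_estimator` in older ones:
```python
# Sketch using imbalanced-learn (pip install imbalanced-learn)
from imblearn.ensemble import EasyEnsembleClassifier as ImbEasyEnsemble

imb = ImbEasyEnsemble(n_estimators=50, random_state=42)
imb.fit(X_train, y_train)
print(classification_report(y_test, imb.predict(X_test)))
```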