牛顿法实现逻辑回归(Logistic Regression)——Python 实现
本文采用的训练方法是牛顿法(Newton Method)。
代码
import numpy as np
class LogisticRegression(object):
"""
Logistic Regression Classifier training by Newton Method
"""
def __init__(self, error: float = 0.7, max_epoch: int = 100):
"""
:param error: float, if the distance between new weight and
old weight is less than error, the process
of traing will break.
:param max_epoch: if training epoch >= max_epoch the process
of traing will break.
"""
self.error = error
self.max_epoch = max_epoch
self.weight = None
self.sign = np.vectorize(lambda x: 1 if x >= 0.5 else 0)
def p_func(self, X_):
"""Get P(y=1 | x)
:param X_: shape = (n_samples + 1, n_features)
:return: shape = (n_samples)
"""
tmp = np.exp(self.weight @ X_.T)
return tmp / (1 + tmp)
def diff(self, X_, y, p):
"""Get derivative
:param X_: shape = (n_samples, n_features + 1)
:param y: shape = (n_samples)
:param p: shape = (n_samples) P(y=1 | x)
:return: shape = (n_features + 1) first derivative
"""
return -(y - p) @ X_
def hess_mat(self, X_, p):
"""Get Hessian Matrix
:param p: shape = (n_samples) P(y=1 | x)
:return: shape = (n_features + 1, n_features + 1) second derivative
"""
hess = np.zeros((X_.shape[1], X_.shape[1]))
for i in range(X_.shape[0]):
hess += self.X_XT[i] * p[i] * (1 - p[i])
return hess
def newton_method(self, X_, y):
"""Newton Method to calculate weight
:param X_: shape = (n_samples + 1, n_features)
:param y: shape = (n_samples)
:return: None
"""
self.weight = np.ones(X_.shape[1])
self.X_XT = [] for i in range(X_.shape[0]):
t = X_[i, :].reshape((-1, 1))
self.X_XT.append(t @ t.T)
for _ in range(self.max_epoch):
p = self.p_func(X_)
diff = self.diff(X_, y, p)
hess = self.hess_mat(X_, p)
评论0