Implementing Logistic Regression with the Newton-Raphson Algorithm in Scala
The following is sample code implementing logistic regression with the Newton-Raphson algorithm in Scala, using the Breeze numerical library:
```scala
import breeze.linalg.{DenseMatrix, DenseVector, diag}
import breeze.numerics.{exp, log}
import scala.annotation.tailrec
object LogisticRegression {

  /**
   * Compute the sigmoid function for a scalar input.
   *
   * @param z input value
   * @return sigmoid value
   */
  def sigmoid(z: Double): Double = 1.0 / (1.0 + exp(-z))

  /**
   * Apply the sigmoid function element-wise to a vector.
   *
   * @param z input vector
   * @return vector of sigmoid values
   */
  def sigmoid(z: DenseVector[Double]): DenseVector[Double] = z.map(v => sigmoid(v))

  /**
   * Compute the gradient of the negative log-likelihood,
   * X^T (sigmoid(Xw) - y).
   *
   * @param X design matrix (one row per observation)
   * @param y target variable (0/1 labels)
   * @param weights current weights
   * @return gradient vector
   */
  def gradient(X: DenseMatrix[Double], y: DenseVector[Double], weights: DenseVector[Double]): DenseVector[Double] = {
    val activation = sigmoid(X * weights)
    X.t * (activation - y)
  }

  /**
   * Compute the Hessian of the negative log-likelihood, X^T W X,
   * where W is diagonal with entries p_i * (1 - p_i).
   *
   * @param X design matrix
   * @param weights current weights
   * @return Hessian matrix
   */
  def hessian(X: DenseMatrix[Double], weights: DenseVector[Double]): DenseMatrix[Double] = {
    val activation = sigmoid(X * weights)
    val w = activation *:* (-activation + 1.0)
    X.t * diag(w) * X
  }

  /**
   * Compute the negative log-likelihood, the objective minimized by training.
   * Probabilities are clipped away from 0 and 1 for numerical stability.
   *
   * @param X design matrix
   * @param y target variable
   * @param weights current weights
   * @return negative log-likelihood value
   */
  def negativeLogLikelihood(X: DenseMatrix[Double], y: DenseVector[Double], weights: DenseVector[Double]): Double = {
    val activation = sigmoid(X * weights)
    val epsilon = 1e-16
    val clipped = activation.map(a => math.min(math.max(a, epsilon), 1.0 - epsilon))
    val ll = (y dot log(clipped)) + ((-y + 1.0) dot log(-clipped + 1.0))
    -ll
  }

  /**
   * Train a logistic regression model with the Newton-Raphson algorithm.
   * Each iteration solves H * delta = g and updates weights to weights - delta,
   * stopping at the iteration limit or when the improvement in the
   * objective falls below the tolerance.
   *
   * @param X design matrix
   * @param y target variable
   * @param maxIterations maximum number of iterations
   * @param tolerance convergence tolerance on the objective improvement
   * @return fitted weights vector
   */
  def train(X: DenseMatrix[Double], y: DenseVector[Double], maxIterations: Int = 100, tolerance: Double = 1e-6): DenseVector[Double] = {
    val numFeatures = X.cols

    @tailrec
    def loop(weights: DenseVector[Double], iteration: Int): DenseVector[Double] = {
      val grad = gradient(X, y, weights)
      val hess = hessian(X, weights)
      val delta = hess \ grad // Newton step: solve H * delta = g
      val newWeights = weights - delta
      val improvement = negativeLogLikelihood(X, y, weights) - negativeLogLikelihood(X, y, newWeights)
      if (iteration >= maxIterations || math.abs(improvement) < tolerance) newWeights
      else loop(newWeights, iteration + 1)
    }

    loop(DenseVector.zeros[Double](numFeatures), 1)
  }
}
```
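The update implemented in `train` is the standard Newton-Raphson step for minimizing the negative log-likelihood $\ell(w)$:

$$
\nabla \ell(w) = X^\top\big(\sigma(Xw) - y\big), \qquad
H(w) = X^\top \mathrm{diag}\big(\sigma(Xw)\,(1 - \sigma(Xw))\big)\,X, \qquad
w_{t+1} = w_t - H(w_t)^{-1}\,\nabla \ell(w_t)
$$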
The example code defines the sigmoid function, the gradient and Hessian of the negative log-likelihood, the negative log-likelihood itself, and a training function. The training function iterates via tail recursion, solving H · delta = g and updating the weights at each step, until the maximum number of iterations is reached or the improvement in the objective falls below the convergence tolerance; it then returns the weight vector as the model's output.
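As a minimal usage sketch, the following drives the trainer on synthetic data. Everything beyond the `LogisticRegression` object itself is an illustrative assumption: the `LogisticRegressionDemo` object, the sample size, and the `trueWeights` used to generate labels are hypothetical.

```scala
import breeze.linalg.{DenseMatrix, DenseVector}
import scala.util.Random

// Hypothetical demo driver; not part of the original example.
object LogisticRegressionDemo {
  def main(args: Array[String]): Unit = {
    val rng = new Random(42)
    val n = 200 // assumed sample size

    // Design matrix: an intercept column of ones plus two Gaussian features.
    val X = DenseMatrix.tabulate[Double](n, 3) { (_, j) =>
      if (j == 0) 1.0 else rng.nextGaussian()
    }

    // Assumed "true" weights, used only to generate labels for this demo.
    val trueWeights = DenseVector(-0.5, 2.0, -1.0)

    // Draw 0/1 labels from the logistic model p = sigmoid(x . w).
    val y = DenseVector.tabulate[Double](n) { i =>
      val p = LogisticRegression.sigmoid(X(i, ::).t dot trueWeights)
      if (rng.nextDouble() < p) 1.0 else 0.0
    }

    val fitted = LogisticRegression.train(X, y)
    println(s"fitted weights: $fitted")
  }
}
```

Because the labels are drawn from a logistic model, the fitted weights should land close to `trueWeights` when the sample is large enough.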