逻辑回归梯度下降代码实现
时间: 2023-11-21 20:56:02 浏览: 30
以下是逻辑回归梯度下降的Python代码实现:
```python
import numpy as np
def sigmoid(z):
    """Logistic sigmoid: squashes any real-valued input into (0, 1).

    Works elementwise on NumPy arrays as well as on scalars.
    """
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
def gradient_descent(X, y, theta, alpha, num_iters):
    """Run full-batch gradient descent for logistic regression.

    X      : (m, n) design matrix.
    y      : (m, 1) column vector of 0/1 labels.
    theta  : (n, 1) initial parameter vector (not mutated in place).
    alpha  : learning rate.
    num_iters : number of gradient steps to take.

    Returns the updated (n, 1) parameter vector.
    """
    m = len(y)
    for _ in range(num_iters):
        # Predicted probabilities for the current parameters.
        h = sigmoid(np.dot(X, theta))
        # Average log-loss gradient over all m samples, scaled by alpha.
        theta = theta - (alpha / m) * np.dot(X.T, h - y)
    return theta
# Demo: fit a 3-parameter logistic model on four hand-crafted samples.
# The first column of X is a constant 1, so theta[0] acts as the intercept.
X = np.array([[1, 2, 3], [1, 3, 4], [1, 4, 5], [1, 5, 6]])
y = np.array([[0], [0], [1], [1]])  # (4, 1) column vector of binary labels
theta = np.zeros((3, 1))  # start from all-zero parameters
alpha = 0.1  # learning rate
num_iters = 1000  # number of gradient steps
theta = gradient_descent(X, y, theta, alpha, num_iters)
print(theta)
```
相关问题
逻辑回归梯度下降代码
逻辑回归是一种常用的分类算法,梯度下降是逻辑回归中常用的优化算法。下面是逻辑回归梯度下降的代码示例:
```python
import numpy as np
class LogisticRegression:
    """Binary classifier trained with full-batch gradient descent on log-loss.

    Weights and bias are kept separate (no intercept column is added to X).
    """

    def __init__(self, learning_rate=0.01, num_iterations=1000):
        # Step size and number of full-batch update passes.
        self.learning_rate = learning_rate
        self.num_iterations = num_iterations
        # Parameters are created lazily when fit() is called.
        self.weights = None
        self.bias = None

    def sigmoid(self, z):
        """Map raw linear scores to probabilities in (0, 1)."""
        return 1 / (1 + np.exp(-z))

    def initialize_parameters(self, n_features):
        """Reset to all-zero weights and a zero bias."""
        self.weights = np.zeros(n_features)
        self.bias = 0

    def fit(self, X, y):
        """Fit weights and bias to (X, y); X is (m, n), y is length-m 0/1."""
        n_samples, n_features = X.shape
        self.initialize_parameters(n_features)
        for _ in range(self.num_iterations):
            scores = np.dot(X, self.weights) + self.bias
            probs = self.sigmoid(scores)
            error = probs - y
            # Average log-loss gradients over the whole batch.
            grad_w = (1 / n_samples) * np.dot(X.T, error)
            grad_b = (1 / n_samples) * np.sum(error)
            self.weights = self.weights - self.learning_rate * grad_w
            self.bias = self.bias - self.learning_rate * grad_b

    def predict(self, X):
        """Return hard 0/1 labels using a 0.5 probability cutoff."""
        probs = self.sigmoid(np.dot(X, self.weights) + self.bias)
        return [1 if p > 0.5 else 0 for p in probs]
```
上述代码实现了一个简单的逻辑回归类,其中包括了sigmoid函数、参数初始化、模型训练和预测等功能。你可以根据自己的需求进行调整和扩展。
逻辑回归梯度下降的Python代码
以下是逻辑回归梯度下降的Python代码示例:
```python
import numpy as np
class LogisticRegression:
    """Logistic regression fitted by full-batch gradient descent.

    Parameters
    ----------
    learning_rate : float
        Step size for each gradient update.
    num_iterations : int
        Number of full-batch gradient steps.
    fit_intercept : bool
        If True, a constant-1 column is prepended to X so theta[0]
        serves as the bias term.
    verbose : bool
        If True, print the training loss every 10000 iterations.
    """

    def __init__(self, learning_rate=0.01, num_iterations=100000, fit_intercept=True, verbose=False):
        self.learning_rate = learning_rate
        self.num_iterations = num_iterations
        self.fit_intercept = fit_intercept
        self.verbose = verbose

    def __add_intercept(self, X):
        # Prepend a column of ones so theta[0] acts as the intercept.
        intercept = np.ones((X.shape[0], 1))
        return np.concatenate((intercept, X), axis=1)

    def __sigmoid(self, z):
        # Logistic function: maps scores to probabilities in (0, 1).
        return 1 / (1 + np.exp(-z))

    def __loss(self, h, y):
        # Mean binary cross-entropy. NOTE(review): h must lie strictly in
        # (0, 1) or np.log emits -inf / runtime warnings at saturation.
        return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()

    def fit(self, X, y):
        """Fit self.theta to (X, y) with batch gradient descent.

        X is (m, n); y is a length-m array of 0/1 labels.
        """
        if self.fit_intercept:
            X = self.__add_intercept(X)
        # Start from all-zero weights.
        self.theta = np.zeros(X.shape[1])
        for i in range(self.num_iterations):
            z = np.dot(X, self.theta)
            h = self.__sigmoid(z)
            # Average log-loss gradient over all samples.
            gradient = np.dot(X.T, (h - y)) / y.size
            self.theta -= self.learning_rate * gradient
            if self.verbose and i % 10000 == 0:
                z = np.dot(X, self.theta)
                h = self.__sigmoid(z)
                print(f'loss: {self.__loss(h, y)} \t')

    def predict_prob(self, X):
        """Return P(y=1 | x) for each row of X."""
        if self.fit_intercept:
            X = self.__add_intercept(X)
        return self.__sigmoid(np.dot(X, self.theta))

    def predict(self, X, threshold=0.5):
        """Return a boolean array: predicted probability >= threshold.

        threshold now defaults to the conventional 0.5; passing it
        explicitly still works, so existing callers are unaffected.
        """
        return self.predict_prob(X) >= threshold
```
其中,`fit`函数用于拟合模型,`predict_prob`函数用于预测概率,`predict`函数用于根据设定的阈值进行预测。在使用时,需要先初始化一个`LogisticRegression`对象,然后调用其`fit`函数拟合模型,并根据需要调用`predict_prob`或`predict`函数进行预测。