逻辑回归python代码实现 迪哥
时间: 2023-11-03 21:59:00 浏览: 116
逻辑回归是一种机器学习算法,常用于二分类问题。下面是一个逻辑回归的Python代码实现的例子:
```python
import numpy as np
import pandas as pd
def sigmoid(z):
    """Logistic function: squash a raw score z into the (0, 1) probability range.

    Works elementwise on scalars and numpy arrays/matrices alike.
    """
    denom = 1 + np.exp(-z)
    return 1 / denom
def cost(theta, X, y):
    """Mean binary cross-entropy loss of the logistic model on (X, y).

    theta : (1, n) parameter row vector
    X     : (m, n) feature matrix (first column assumed to be the bias 1s)
    y     : (m, 1) labels in {0, 1}
    Returns a scalar: -mean(y*log(h) + (1-y)*log(1-h)) where h = sigmoid(X theta^T).
    """
    theta_m = np.matrix(theta)
    X_m = np.matrix(X)
    y_m = np.matrix(y)
    h = sigmoid(X_m * theta_m.T)  # predicted probabilities, shape (m, 1)
    pos_term = np.multiply(-y_m, np.log(h))
    neg_term = np.multiply(1 - y_m, np.log(1 - h))
    return np.sum(pos_term - neg_term) / len(X_m)
# Batch gradient descent: minimize the loss and return the fitted parameters.
def gradientDescent(X, y, theta, alpha, iters):
    """Run batch gradient descent for logistic regression.

    Parameters
    ----------
    X     : (m, n) feature matrix
    y     : (m, 1) labels in {0, 1}
    theta : (1, n) initial parameter row vector
    alpha : learning rate
    iters : number of iterations

    Returns
    -------
    (theta, cost_history) where cost_history is a length-`iters` array of the
    loss after each iteration.

    Bug fix: the original named its history array `cost`, shadowing the
    cost() function, so `cost[i] = cost(theta, X, y)` raised
    "TypeError: 'numpy.ndarray' object is not callable" on iteration 0.
    """
    temp = np.matrix(np.zeros(theta.shape))
    parameters = int(theta.ravel().shape[1])
    cost_history = np.zeros(iters)  # renamed from `cost` to avoid shadowing cost()
    for i in range(iters):
        # Residuals with the current parameters; shape (m, 1).
        error = sigmoid(X * theta.T) - y
        # Update each parameter j with the mean gradient over all samples.
        for j in range(parameters):
            term = np.multiply(error, X[:, j])
            temp[0, j] = theta[0, j] - (alpha / len(X)) * np.sum(term)
        theta = temp
        cost_history[i] = cost(theta, X, y)
    return theta, cost_history
# Load the training data from disk.
data = pd.read_csv('data.csv')
# Prepend a column of ones so theta[0] acts as the intercept term.
data.insert(0, 'Ones', 1)
# Split into feature matrix X and label column y.
cols = data.shape[1]
X = data.iloc[:, 0:cols-1]
y = data.iloc[:, cols-1:cols]
X = np.matrix(X.values)
y = np.matrix(y.values)
# Fix: size theta from the data (cols-1 features incl. bias) instead of
# hard-coding np.zeros([1, 3]), which only worked for exactly 2 features.
theta = np.zeros([1, cols - 1])
# Learning rate and iteration count.
alpha = 0.01
iters = 1000
# Run gradient descent to obtain the fitted parameters and loss history.
theta, cost = gradientDescent(X, y, theta, alpha, iters)
# Report the result (final element of the loss history).
print("最优参数:", theta)
print("损失函数值:", cost[-1])
```
阅读全文