Three Perceptron Training Algorithms in Python
The perceptron is a binary linear classifier: it predicts ŷ = sign(wᵀx + b) and learns the weights w and bias b from data. The following three algorithms can be used to train and optimize the model:
1. Perceptron trained with stochastic gradient descent (SGD)
```python
import numpy as np

class PerceptronSGD(object):
    """Perceptron classifier trained with stochastic (per-sample) updates."""

    def __init__(self, learning_rate=0.01, n_epochs=100):
        self.learning_rate = learning_rate
        self.n_epochs = n_epochs

    def fit(self, X, y):
        # weights[0] is the bias term; one weight per feature follows
        self.weights = np.zeros(1 + X.shape[1])
        self.errors = []
        for epoch in range(self.n_epochs):
            error = 0
            # update the weights one sample at a time
            for x_i, y_i in zip(X, y):
                update = self.learning_rate * (y_i - self.predict(x_i))
                self.weights[1:] += update * x_i
                self.weights[0] += update
                error += int(update != 0.0)
            # track misclassifications per epoch to monitor convergence
            self.errors.append(error)
        return self

    def net_input(self, X):
        return np.dot(X, self.weights[1:]) + self.weights[0]

    def predict(self, X):
        # threshold the net input at 0: class +1 or -1
        return np.where(self.net_input(X) >= 0.0, 1, -1)
```
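A minimal usage sketch follows; the toy dataset and hyperparameter values here are made up for illustration and are not part of the original:

```python
import numpy as np

# hypothetical toy data: two linearly separable clusters, labels in {-1, 1}
X = np.array([[2.0, 3.0], [1.0, 1.5], [2.5, 2.0],
              [7.0, 8.0], [8.0, 7.5], [6.5, 9.0]])
y = np.array([-1, -1, -1, 1, 1, 1])

model = PerceptronSGD(learning_rate=0.1, n_epochs=20).fit(X, y)
print(model.errors)      # misclassifications per epoch; reaches 0 once separated
print(model.predict(X))  # should reproduce y on this separable toy set
```

Because the data above are linearly separable, the perceptron convergence theorem guarantees the per-epoch error count eventually drops to zero.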
2. Adaline (ADAptive LInear NEuron), trained with batch gradient descent
```python
import numpy as np

class Adaline(object):
    """Adaptive linear neuron trained with full-batch gradient descent."""

    def __init__(self, learning_rate=0.01, n_epochs=50):
        self.learning_rate = learning_rate
        self.n_epochs = n_epochs

    def fit(self, X, y):
        self.weights = np.zeros(1 + X.shape[1])
        self.costs = []
        for epoch in range(self.n_epochs):
            # one gradient step on the whole batch per epoch
            net_input = self.net_input(X)
            errors = y - net_input
            self.weights[1:] += self.learning_rate * X.T.dot(errors)
            self.weights[0] += self.learning_rate * errors.sum()
            # sum-of-squared-errors cost, tracked per epoch
            cost = (errors ** 2).sum() / 2.0
            self.costs.append(cost)
        return self

    def net_input(self, X):
        return np.dot(X, self.weights[1:]) + self.weights[0]

    def activation(self, X):
        # identity activation: Adaline learns on the raw net input
        return self.net_input(X)

    def predict(self, X):
        return np.where(self.activation(X) >= 0.0, 1, -1)
```
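A hedged usage sketch, again with made-up toy data: standardizing the features first is a common practice that helps this fixed-learning-rate gradient descent converge.

```python
import numpy as np

# hypothetical toy data; standardize features before batch gradient descent
X = np.array([[2.0, 3.0], [1.0, 1.5], [2.5, 2.0],
              [7.0, 8.0], [8.0, 7.5], [6.5, 9.0]])
y = np.array([-1, -1, -1, 1, 1, 1])
X_std = (X - X.mean(axis=0)) / X.std(axis=0)

model = Adaline(learning_rate=0.01, n_epochs=50).fit(X_std, y)
print(model.costs[0], model.costs[-1])  # cost should decrease across epochs
print(model.predict(X_std))
```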
3. L2-regularized Adaline (RAdaline)
```python
import numpy as np

class RAdaline(object):
    """Adaline with an L2 (ridge) penalty on the feature weights."""

    def __init__(self, learning_rate=0.01, n_epochs=50, lambda_reg=0.01):
        self.learning_rate = learning_rate
        self.n_epochs = n_epochs
        self.lambda_reg = lambda_reg

    def fit(self, X, y):
        self.weights = np.zeros(1 + X.shape[1])
        self.costs = []
        for epoch in range(self.n_epochs):
            net_input = self.net_input(X)
            errors = y - net_input
            # the L2 term shrinks the feature weights; the bias is not penalized
            self.weights[1:] += self.learning_rate * (X.T.dot(errors) - self.lambda_reg * self.weights[1:])
            self.weights[0] += self.learning_rate * errors.sum()
            # lambda/2 factor keeps the cost consistent with the gradient above
            cost = (errors ** 2).sum() / 2.0 + (self.lambda_reg / 2.0) * np.sum(self.weights[1:] ** 2)
            self.costs.append(cost)
        return self

    def net_input(self, X):
        return np.dot(X, self.weights[1:]) + self.weights[0]

    def activation(self, X):
        return self.net_input(X)

    def predict(self, X):
        return np.where(self.activation(X) >= 0.0, 1, -1)
```
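As a rough illustration (the toy data and the lambda_reg values are assumptions, not from the original), sweeping lambda_reg shows the L2 penalty shrinking the learned weight norm:

```python
import numpy as np

# hypothetical comparison: larger lambda_reg should shrink the weight norm
X = np.array([[2.0, 3.0], [1.0, 1.5], [2.5, 2.0],
              [7.0, 8.0], [8.0, 7.5], [6.5, 9.0]])
y = np.array([-1, -1, -1, 1, 1, 1])
X_std = (X - X.mean(axis=0)) / X.std(axis=0)

for lam in (0.0, 0.1, 1.0):
    model = RAdaline(learning_rate=0.01, n_epochs=50, lambda_reg=lam).fit(X_std, y)
    print(lam, np.linalg.norm(model.weights[1:]), model.predict(X_std))
```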