Implementing the Three Forms of the Perceptron Algorithm in Python
The three forms of the perceptron algorithm are the primal form, the dual form, and the symmetric form. Below is example Python code implementing each of them:
1. Primal form
```python
import numpy as np

class Perceptron():
    def __init__(self, learning_rate=0.01, n_iterations=100):
        self.learning_rate = learning_rate
        self.n_iterations = n_iterations

    def fit(self, X, y):
        n_samples, n_features = X.shape
        # Initialize weights and bias
        self.weights = np.zeros(n_features)
        self.bias = 0
        for _ in range(self.n_iterations):
            for idx, sample in enumerate(X):
                linear_output = np.dot(sample, self.weights) + self.bias
                predicted_output = np.where(linear_output >= 0, 1, -1)
                # Update weights and bias (the update is zero when the
                # sample is already classified correctly)
                update = self.learning_rate * (y[idx] - predicted_output)
                self.weights += update * sample
                self.bias += update

    def predict(self, X):
        linear_output = np.dot(X, self.weights) + self.bias
        predicted_output = np.where(linear_output >= 0, 1, -1)
        return predicted_output
```
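As a quick sanity check, here is a minimal usage sketch of the `Perceptron` class above on a small, made-up linearly separable dataset. The data, labels and hyperparameter values are illustrative assumptions, not part of the original; the labels are expected to be in {-1, +1} to match the update rule.

```python
import numpy as np

# Hypothetical toy data: two linearly separable clusters labelled -1 / +1
X_train = np.array([[2.0, 3.0], [3.0, 3.0], [1.0, 1.0], [0.0, 0.5]])
y_train = np.array([1, 1, -1, -1])

clf = Perceptron(learning_rate=0.1, n_iterations=100)  # uses the class defined above
clf.fit(X_train, y_train)
print(clf.predict(np.array([[2.5, 3.5], [0.5, 0.5]])))  # should print [ 1 -1]
```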
2. Dual form
```python
import numpy as np

class DualPerceptron():
    def __init__(self, learning_rate=0.01, n_iterations=100):
        self.learning_rate = learning_rate
        self.n_iterations = n_iterations

    def fit(self, X, y):
        n_samples, n_features = X.shape
        # Keep the training data: the dual model is expressed through it
        self.X_train = X
        self.y_train = y
        # Initialize alpha, bias and the Gram matrix of the training set
        self.alpha = np.zeros(n_samples)
        self.bias = 0
        gram_matrix = np.dot(X, X.T)
        for _ in range(self.n_iterations):
            for idx in range(n_samples):
                linear_output = np.sum(self.alpha * y * gram_matrix[:, idx]) + self.bias
                # Update alpha and bias only when the sample is misclassified
                # (or lies on the boundary)
                if y[idx] * linear_output <= 0:
                    self.alpha[idx] += self.learning_rate
                    self.bias += self.learning_rate * y[idx]

    def predict(self, X):
        # Inner products between the test samples and the training samples
        gram_matrix = np.dot(X, self.X_train.T)
        linear_output = np.dot(gram_matrix, self.alpha * self.y_train) + self.bias
        predicted_output = np.where(linear_output >= 0, 1, -1)
        return predicted_output
```
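Usage of `DualPerceptron` is analogous; the sketch below reuses the same illustrative toy data (again an assumption for demonstration). It also recovers the equivalent primal weight vector from the dual coefficients via w = Σᵢ αᵢ yᵢ xᵢ, which is the point of the dual representation: the model depends on the data only through inner products, so the Gram matrix can be precomputed or replaced by a kernel.

```python
import numpy as np

# Hypothetical toy data, labels in {-1, +1}
X_train = np.array([[2.0, 3.0], [3.0, 3.0], [1.0, 1.0], [0.0, 0.5]])
y_train = np.array([1, 1, -1, -1])

dual = DualPerceptron(learning_rate=0.1, n_iterations=100)  # class defined above
dual.fit(X_train, y_train)
print(dual.predict(np.array([[2.5, 3.5], [0.5, 0.5]])))  # should print [ 1 -1]

# Recover the equivalent primal parameters: w = sum_i alpha_i * y_i * x_i
w = np.dot(dual.alpha * dual.y_train, X_train)
print(w, dual.bias)
```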
3. Symmetric form
```python
import numpy as np

class SymmetricPerceptron():
    def __init__(self, learning_rate=0.01, n_iterations=100):
        self.learning_rate = learning_rate
        self.n_iterations = n_iterations

    def fit(self, X, y):
        n_samples, n_features = X.shape
        # Initialize weights, bias and v (v holds the previous weight update)
        self.weights = np.zeros(n_features)
        self.bias = 0
        self.v = np.zeros(n_features)
        for _ in range(self.n_iterations):
            for idx, sample in enumerate(X):
                linear_output = np.dot(sample, self.weights) + self.bias
                predicted_output = np.where(linear_output >= 0, 1, -1)
                # On a misclassified sample, update weights, bias and v;
                # the previous update v is added back in as an extra term
                if y[idx] * predicted_output <= 0:
                    self.weights += self.learning_rate * (y[idx] - predicted_output) * sample + self.v
                    self.bias += self.learning_rate * (y[idx] - predicted_output)
                    self.v = self.learning_rate * (y[idx] - predicted_output) * sample

    def predict(self, X):
        linear_output = np.dot(X, self.weights) + self.bias
        predicted_output = np.where(linear_output >= 0, 1, -1)
        return predicted_output
```
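`SymmetricPerceptron` is used the same way. As written, `v` caches the most recent scaled update and adds it back in on the next mistake, giving the step a momentum-like flavour; the sketch below simply exercises the class on the same illustrative toy data as before.

```python
import numpy as np

# Hypothetical toy data, labels in {-1, +1}
X_train = np.array([[2.0, 3.0], [3.0, 3.0], [1.0, 1.0], [0.0, 0.5]])
y_train = np.array([1, 1, -1, -1])

sym = SymmetricPerceptron(learning_rate=0.1, n_iterations=100)  # class defined above
sym.fit(X_train, y_train)
print(sym.predict(np.array([[2.5, 3.5], [0.5, 0.5]])))  # should print [ 1 -1]
```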