A Wrapped BP Algorithm Implementation in Python
The BP algorithm, i.e. backpropagation, is the standard method for training neural networks in deep learning. Below is a wrapped (encapsulated) Python implementation of the BP algorithm.
```python
import numpy as np
class MLP:
    def __init__(self, hidden_layers):
        # hidden_layers is stored for reference; layers are actually added via add_layer
        self.hidden_layers = hidden_layers
        self.weights = []
        self.biases = []

    def add_layer(self, input_size, output_size):
        # Randomly initialise the weight matrix and bias vector of a new layer
        weight = np.random.randn(output_size, input_size)
        bias = np.random.randn(output_size, 1)
        self.weights.append(weight)
        self.biases.append(bias)

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        s = self.sigmoid(x)
        return s * (1 - s)

    def feedforward(self, X):
        # X has shape (input_size, num_samples); cache pre-activations and activations
        self.activations = [X]
        self.sums = []
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, self.activations[-1]) + b
            self.sums.append(z)
            self.activations.append(self.sigmoid(z))

    def backpropagation(self, y):
        num_layers = len(self.weights)
        d_weights = [np.zeros(w.shape) for w in self.weights]
        d_biases = [np.zeros(b.shape) for b in self.biases]
        # Output-layer error term for a squared-error loss with a sigmoid output
        delta = (self.activations[-1] - y) * self.sigmoid_derivative(self.sums[-1])
        d_weights[-1] = np.dot(delta, self.activations[-2].T)
        d_biases[-1] = np.sum(delta, axis=1, keepdims=True)
        # Propagate the error backwards through the hidden layers
        for l in range(2, num_layers + 1):
            delta = np.dot(self.weights[-l + 1].T, delta) * self.sigmoid_derivative(self.sums[-l])
            d_weights[-l] = np.dot(delta, self.activations[-l - 1].T)
            d_biases[-l] = np.sum(delta, axis=1, keepdims=True)
        return d_weights, d_biases

    def update_weights(self, d_weights, d_biases, learning_rate):
        # Plain gradient-descent step on every weight matrix and bias vector
        self.weights = [w - learning_rate * dw for w, dw in zip(self.weights, d_weights)]
        self.biases = [b - learning_rate * db for b, db in zip(self.biases, d_biases)]

    def train(self, X, y, epochs, learning_rate):
        # Full-batch training: forward pass, backward pass, parameter update each epoch
        for epoch in range(epochs):
            self.feedforward(X)
            d_weights, d_biases = self.backpropagation(y)
            self.update_weights(d_weights, d_biases, learning_rate)

    def predict(self, X):
        self.feedforward(X)
        return self.activations[-1]
```
This wrapped BP algorithm trains and predicts with a multilayer perceptron (MLP). Layers are added with the add_layer method, the model is trained with the train method, and predictions are made with the predict method, as in the sketch below.
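For reference, here is a minimal usage sketch; the layer sizes, learning rate, epoch count, and the XOR-style toy data are illustrative assumptions, not part of the original post:
```python
import numpy as np

# Toy XOR-style data: inputs are columns, shape (input_size, num_samples)
X = np.array([[0, 0, 1, 1],
              [0, 1, 0, 1]], dtype=float)
y = np.array([[0, 1, 1, 0]], dtype=float)

mlp = MLP(hidden_layers=[4])                  # argument is only stored for reference
mlp.add_layer(input_size=2, output_size=4)    # hidden layer
mlp.add_layer(input_size=4, output_size=1)    # output layer

mlp.train(X, y, epochs=5000, learning_rate=0.5)
print(mlp.predict(X))                         # predictions for the four training inputs
```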
The wrapped BP algorithm uses sigmoid as the activation function and plain gradient descent to update the parameters. Each training epoch first runs a forward pass to compute every layer's output, then a backward pass to compute the gradients, and finally updates the weights and biases.
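Concretely, the three formulas behind the code are the sigmoid activation, its derivative (used when propagating the error backwards), and the gradient-descent update, where η is the learning rate:
```latex
\sigma(x) = \frac{1}{1 + e^{-x}}, \qquad
\sigma'(x) = \sigma(x)\bigl(1 - \sigma(x)\bigr), \qquad
W \leftarrow W - \eta \,\frac{\partial L}{\partial W}, \quad
b \leftarrow b - \eta \,\frac{\partial L}{\partial b}
```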
Wrapping the BP algorithm in a class like this makes it easy to apply to learning tasks, keeps the code concise and readable, and simplifies hyperparameter tuning and extension.