bp神经网络python
时间: 2023-08-26 20:04:59 浏览: 64
实现bp神经网络的Python代码如下:
```python
import numpy as np
class NeuralNetwork:
    """Fully-connected feed-forward network trained with backpropagation.

    Uses the row-vector convention: an activation is shaped
    ``(batch, n_features)``; a single sample may be passed as a 1-D
    array and is promoted internally. Weights for layer i have shape
    ``(layers[i-1], layers[i])`` and biases shape ``(layers[i],)``.
    """

    def __init__(self, layers):
        """Initialize random weights/biases.

        layers: list of layer sizes, e.g. [2, 3, 1] for a 2-input,
        3-hidden-unit, 1-output network.
        """
        self.layers = layers
        self.weights = []
        self.biases = []
        for i in range(1, len(layers)):
            # Map layer i-1 -> layer i (row-vector convention: a @ W + b).
            self.weights.append(np.random.randn(layers[i - 1], layers[i]))
            self.biases.append(np.random.randn(layers[i]))

    def sigmoid(self, z):
        """Element-wise logistic sigmoid."""
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoid_prime(self, z):
        """Derivative of the sigmoid evaluated at z (sigmoid computed once)."""
        s = self.sigmoid(z)
        return s * (1.0 - s)

    def feedforward(self, a):
        """Propagate input a through every layer; return the output activation."""
        for w, b in zip(self.weights, self.biases):
            a = self.sigmoid(np.dot(a, w) + b)
        return a

    def backpropagation(self, x, y):
        """Return (nabla_w, nabla_b): gradients of the squared-error cost.

        x, y may be 1-D (single sample) or 2-D (batch, features);
        gradients are summed over the batch axis.
        """
        # Promote to 2-D so every product below keeps matrix shapes.
        # (With 1-D inputs the original np.dot(1-D, 1-D) collapsed
        # nabla_w entries to scalars.)
        x = np.atleast_2d(x)
        y = np.atleast_2d(y)
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        # Forward pass, caching pre-activations (zs) and activations.
        activation = x
        activations = [x]
        zs = []
        for w, b in zip(self.weights, self.biases):
            z = np.dot(activation, w) + b
            zs.append(z)
            activation = self.sigmoid(z)
            activations.append(activation)
        # Output-layer error: d(0.5*||a - y||^2)/dz through the sigmoid.
        delta = (activations[-1] - y) * self.sigmoid_prime(zs[-1])
        nabla_w[-1] = np.dot(activations[-2].T, delta)
        # Sum over the batch axis so the bias gradient matches the 1-D
        # bias shape; the original kept delta 2-D, which made the in-place
        # bias update raise "non-broadcastable output operand".
        nabla_b[-1] = delta.sum(axis=0)
        for l in range(2, len(self.layers)):
            z = zs[-l]
            sp = self.sigmoid_prime(z)
            delta = np.dot(delta, self.weights[-l + 1].T) * sp
            nabla_w[-l] = np.dot(activations[-l - 1].T, delta)
            nabla_b[-l] = delta.sum(axis=0)
        return nabla_w, nabla_b

    def update_weights(self, nabla_w, nabla_b, lr):
        """One gradient-descent step with learning rate lr (in-place)."""
        for i in range(len(self.weights)):
            self.weights[i] -= lr * nabla_w[i]
            self.biases[i] -= lr * nabla_b[i]

    def train(self, x_train, y_train, epochs, lr):
        """Stochastic gradient descent: one update per (x, y) pair per epoch."""
        for _ in range(epochs):
            for x, y in zip(x_train, y_train):
                nabla_w, nabla_b = self.backpropagation(x, y)
                self.update_weights(nabla_w, nabla_b, lr)

    def predict(self, x):
        """Run a forward pass on x and return the network output."""
        return self.feedforward(x)
```
这段代码实现了一个具有任意层数的神经网络,其中 `layers` 是一个列表,包含网络的各个层的神经元数量。例如,如果 `layers=[2, 3, 1]`,则表示一个具有 2 个输入神经元、1 个输出神经元和 1 个隐藏层(包含 3 个神经元)的神经网络。
代码实现了 `sigmoid` 和 `sigmoid_prime` 函数,分别计算 Sigmoid 函数和其导数。`feedforward` 函数对网络进行前向传播,`backpropagation` 函数对网络进行反向传播,`update_weights` 函数更新网络的权重和偏置,`train` 函数用于训练网络,`predict` 函数用于进行预测。