Design and implement a BP neural network in Python, with three inputs (two-dimensional data plus one bias), two hidden layers (20 and 10 neurons respectively), and tanh as the activation function
Sure. Here is a basic BP (backpropagation) neural network implementation that meets your requirements:
```python
import numpy as np

# Sigmoid and its derivative (kept for reference; the network below uses tanh only)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# x here is the sigmoid output, so sigmoid'(z) = x * (1 - x)
def sigmoid_derivative(x):
    return x * (1 - x)

# tanh activation
def tanh(x):
    return np.tanh(x)

# x here is the tanh output (the activation), so tanh'(z) = 1 - tanh(z)^2 = 1 - x^2
def tanh_derivative(x):
    return 1 - x ** 2

class NeuralNetwork:
    def __init__(self, num_inputs, hidden1_size, hidden2_size, num_outputs):
        # Initialize the weight matrices with Gaussian random values
        self.weights1 = np.random.randn(num_inputs, hidden1_size)
        self.weights2 = np.random.randn(hidden1_size, hidden2_size)
        self.weights3 = np.random.randn(hidden2_size, num_outputs)
        # Initialize the bias terms to zero
        self.bias1 = np.zeros((1, hidden1_size))
        self.bias2 = np.zeros((1, hidden2_size))
        self.bias3 = np.zeros((1, num_outputs))

    def feedforward(self, X):
        # First hidden layer (20 neurons)
        self.layer1 = tanh(np.dot(X, self.weights1) + self.bias1)
        # Second hidden layer (10 neurons)
        self.layer2 = tanh(np.dot(self.layer1, self.weights2) + self.bias2)
        # Output layer
        self.layer3 = tanh(np.dot(self.layer2, self.weights3) + self.bias3)
        return self.layer3

    def backpropagation(self, X, y, learning_rate):
        # Output-layer error and delta
        output_error = y - self.layer3
        output_delta = output_error * tanh_derivative(self.layer3)
        # Second hidden layer error and delta
        layer2_error = np.dot(output_delta, self.weights3.T)
        layer2_delta = layer2_error * tanh_derivative(self.layer2)
        # First hidden layer error and delta
        layer1_error = np.dot(layer2_delta, self.weights2.T)
        layer1_delta = layer1_error * tanh_derivative(self.layer1)
        # Update the weight matrices and bias terms
        self.weights3 += learning_rate * np.dot(self.layer2.T, output_delta)
        self.weights2 += learning_rate * np.dot(self.layer1.T, layer2_delta)
        self.weights1 += learning_rate * np.dot(X.T, layer1_delta)
        self.bias3 += learning_rate * np.sum(output_delta, axis=0, keepdims=True)
        self.bias2 += learning_rate * np.sum(layer2_delta, axis=0, keepdims=True)
        self.bias1 += learning_rate * np.sum(layer1_delta, axis=0, keepdims=True)

    def train(self, X, y, learning_rate, epochs):
        for i in range(epochs):
            # The forward pass caches the layer activations that backpropagation needs
            self.feedforward(X)
            self.backpropagation(X, y, learning_rate)

    def predict(self, X):
        return self.feedforward(X)
```
Here tanh is used as the activation function, wrapped in a custom NeuralNetwork class that provides weight initialization, a forward pass (feedforward), backpropagation, and training methods. Note that rather than feeding a constant-1 bias as a third input column, the code keeps explicit bias vectors (bias1, bias2, bias3), which is mathematically equivalent. Training uses full-batch gradient descent: each epoch runs one forward pass over all of X followed by one update of the weights and biases.
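As a minimal usage sketch (the XOR-style toy data, learning rate, and epoch count below are illustrative assumptions, not part of the original answer), the class can be exercised like this:

```python
# Hypothetical toy data: XOR with targets mapped to {-1, 1} to match tanh's output range
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[-1], [1], [1], [-1]], dtype=float)

# Two data inputs, hidden layers of 20 and 10 neurons, one output
nn = NeuralNetwork(num_inputs=2, hidden1_size=20, hidden2_size=10, num_outputs=1)
nn.train(X, y, learning_rate=0.01, epochs=5000)

print(nn.predict(X))  # each row should approach the corresponding target in y
```

Mapping the targets to {-1, 1} rather than {0, 1} suits tanh, whose output range is (-1, 1); with {0, 1} targets, a sigmoid output layer would be the more natural pairing.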