bp神经网络python
时间: 2023-08-27 16:05:16 浏览: 61
Python是一种非常流行的编程语言,也可以用于实现BP神经网络。以下是一个简单的BP神经网络的Python实现示例:
```python
import numpy as np
class NeuralNetwork:
    """A minimal fully-connected feed-forward network with one hidden layer,
    trained by backpropagation (gradient descent on mean-squared error).

    Architecture: input_size -> hidden_size (sigmoid) -> output_size (sigmoid).
    """

    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Zero-centered weight initialization. The original used
        # np.random.rand (uniform in [0, 1), all-positive), which often
        # stalls training on XOR-like problems because every hidden unit
        # starts pushed to the same side of the sigmoid; small centered
        # values break that symmetry.
        self.weights1 = np.random.randn(self.input_size, self.hidden_size) * 0.5
        self.weights2 = np.random.randn(self.hidden_size, self.output_size) * 0.5
        # Biases start at zero (standard practice).
        self.bias1 = np.zeros((1, self.hidden_size))
        self.bias2 = np.zeros((1, self.output_size))

    def sigmoid(self, x):
        """Logistic activation: 1 / (1 + e^-x), applied element-wise."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        """Derivative of the sigmoid expressed in terms of its OUTPUT.

        NOTE: x must already be a sigmoid activation (i.e. sigmoid(z)),
        not the pre-activation z; then sigmoid'(z) == x * (1 - x).
        """
        return x * (1 - x)

    def forward(self, X):
        """Forward pass. X has shape (batch, input_size); returns (batch, output_size).

        Caches z1/a1/z2 on self for use by backward().
        """
        self.z1 = np.dot(X, self.weights1) + self.bias1
        self.a1 = self.sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.weights2) + self.bias2
        y_hat = self.sigmoid(self.z2)
        return y_hat

    def backward(self, X, y, y_hat, learning_rate):
        """One gradient step on 0.5*(y - y_hat)^2, using the cache from forward().

        The update direction is (y - y_hat), so parameters are ADDED the
        scaled gradient (equivalent to descending the squared-error loss).
        """
        # Output-layer error term; y_hat is an activation, so the
        # output-form derivative applies.
        delta2 = (y - y_hat) * self.sigmoid_derivative(y_hat)
        d_weights2 = np.dot(self.a1.T, delta2)
        d_bias2 = np.sum(delta2, axis=0, keepdims=True)
        # Backpropagate through weights2 (read BEFORE it is updated below).
        delta1 = np.dot(delta2, self.weights2.T) * self.sigmoid_derivative(self.a1)
        d_weights1 = np.dot(X.T, delta1)
        d_bias1 = np.sum(delta1, axis=0)
        # Apply the updates.
        self.weights1 += learning_rate * d_weights1
        self.bias1 += learning_rate * d_bias1
        self.weights2 += learning_rate * d_weights2
        self.bias2 += learning_rate * d_bias2

    def train(self, X, y, num_epochs, learning_rate):
        """Full-batch training loop; prints the MSE every 1000 epochs."""
        for epoch in range(num_epochs):
            y_hat = self.forward(X)
            self.backward(X, y, y_hat, learning_rate)
            # Loss is computed from the pre-update predictions.
            loss = np.mean(np.square(y - y_hat))
            if epoch % 1000 == 0:
                print(f"Epoch {epoch}: Loss = {loss}")
if __name__ == '__main__':
    # XOR truth table as the training set.
    inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    targets = np.array([[0], [1], [1], [0]])

    # 2 inputs, 3 hidden units, 1 output; 10000 epochs at learning rate 0.1.
    network = NeuralNetwork(2, 3, 1)
    network.train(inputs, targets, 10000, 0.1)

    print("Predictions:")
    for sample in inputs:
        print(f"{sample} -> {network.forward(sample)}")
```
这个示例实现了一个具有2个输入,3个隐藏神经元和1个输出的BP神经网络。使用XOR问题作为训练数据,通过指定10000个epochs和0.1的学习速率进行训练。最后,使用训练好的模型进行预测,并打印结果。
阅读全文