Write example code for BP neural network prediction in Python, with comments
Sure. Below is an example of a simple BP (backpropagation) neural network written in Python with the `numpy` library for a simple regression-style prediction task. It covers weight initialization, forward propagation, back propagation, and the training loop, assuming a small dataset where the goal is to predict y from x. Note that because the output layer also uses a sigmoid activation, predictions are confined to (0, 1), so targets should be scaled into that range.
```python
import numpy as np

# Sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Initialize the two weight matrices with small random values in [-0.5, 0.5)
def initialize_weights(input_nodes, hidden_nodes, output_nodes):
    weights_ih = np.random.uniform(size=(hidden_nodes, input_nodes)) - 0.5  # input -> hidden
    weights_ho = np.random.uniform(size=(output_nodes, hidden_nodes)) - 0.5  # hidden -> output
    return weights_ih, weights_ho

# Forward propagation; inputs has shape (input_nodes, n_samples)
def forward_propagation(weights_ih, weights_ho, inputs):
    hidden_layer = sigmoid(np.dot(weights_ih, inputs))   # (hidden_nodes, n_samples)
    outputs = sigmoid(np.dot(weights_ho, hidden_layer))  # (output_nodes, n_samples)
    return hidden_layer, outputs

# Back propagation: gradients of the squared error w.r.t. both weight matrices
def back_propagation(inputs, targets, outputs, hidden_layer, weights_ho):
    # Error signal at the output layer (includes the sigmoid derivative)
    delta_output = (outputs - targets) * outputs * (1 - outputs)
    d_weights_ho = np.dot(delta_output, hidden_layer.T)
    # Error signal propagated back through weights_ho to the hidden layer
    delta_hidden = np.dot(weights_ho.T, delta_output) * hidden_layer * (1 - hidden_layer)
    d_weights_ih = np.dot(delta_hidden, inputs.T)
    return d_weights_ih, d_weights_ho

# Train the network with plain gradient descent
def train_network(data, targets, epochs, learning_rate, hidden_nodes):
    # Initialize weights; data is (n_samples, n_features), targets is (n_samples, 1)
    weights_ih, weights_ho = initialize_weights(len(data[0]), hidden_nodes, 1)
    inputs = data.T        # reshape to (input_nodes, n_samples)
    targets_t = targets.T  # reshape to (output_nodes, n_samples)
    for epoch in range(epochs):
        # Forward propagation
        hidden_layer, outputs = forward_propagation(weights_ih, weights_ho, inputs)
        # Mean squared error, printed periodically to monitor training
        error = np.mean((outputs - targets_t) ** 2)
        if epoch % 200 == 0:
            print(f"epoch {epoch}, MSE: {error:.6f}")
        # Back propagation: compute the weight gradients
        d_weights_ih, d_weights_ho = back_propagation(inputs, targets_t, outputs, hidden_layer, weights_ho)
        # Update the weights
        weights_ih -= learning_rate * d_weights_ih
        weights_ho -= learning_rate * d_weights_ho
    return weights_ih, weights_ho

# Example data (replace with your real dataset); rows are samples, columns are features
data = np.array([[0], [1]], dtype=float)
targets = np.array([[0], [1]], dtype=float)

# Train the model
hidden_nodes = 2     # number of hidden nodes
epochs = 1000        # number of training iterations
learning_rate = 0.1  # learning rate
weights_ih, weights_ho = train_network(data, targets, epochs, learning_rate, hidden_nodes)

# Test the model
test_input = np.array([[2]])  # one test sample, shape (n_samples, n_features)
hidden_layer_test, output_test = forward_propagation(weights_ih, weights_ho, test_input.T)
print(f"Predicted value: {output_test[0][0]}")
```
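For reference, the gradients computed in `back_propagation` above correspond to a squared-error loss $E = \tfrac{1}{2}\lVert \hat{y} - y \rVert^2$ with sigmoid activations in both layers ($\odot$ is element-wise multiplication, $x$ the input columns, $h$ the hidden activations, $\hat{y}$ the outputs):

$$
\delta_o = (\hat{y} - y)\odot \hat{y}\odot(1-\hat{y}), \qquad
\frac{\partial E}{\partial W_{ho}} = \delta_o\, h^{\top}
$$

$$
\delta_h = \left(W_{ho}^{\top}\,\delta_o\right)\odot h\odot(1-h), \qquad
\frac{\partial E}{\partial W_{ih}} = \delta_h\, x^{\top}
$$

and the update in `train_network` is plain gradient descent, $W \leftarrow W - \eta\,\partial E/\partial W$, with learning rate $\eta$.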
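Two practical notes on this sketch: because the output neuron uses a sigmoid, predictions always fall in (0, 1); for targets outside that range, rescale them into (0, 1) first, or switch the output layer to a linear activation and drop the corresponding `outputs * (1 - outputs)` factor in `back_propagation`. Also, with the shape convention used above (rows of `data` are samples), several test points can be evaluated in one call; the array below is just an illustrative batch:

```python
# Evaluate a batch of test points at once (rows are samples, columns are features)
test_batch = np.array([[0.0], [0.5], [1.0], [2.0]])
_, predictions = forward_propagation(weights_ih, weights_ho, test_batch.T)
print(predictions.ravel())  # one predicted value per test point
```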