归一化bp神经网络python代码
时间: 2024-05-15 20:10:40 浏览: 18
归一化(Normalization)是指将数据映射到一个特定的范围,通常将数据映射到[0,1]或[-1,1]区间内,以便提高训练的收敛速度和准确度。BP神经网络是一种常见的神经网络模型,常用于分类、回归等监督学习问题,其代码实现可以使用Python语言来完成。下面是归一化BP神经网络Python代码的基本结构:
```
import numpy as np
class BPNeuralNetwork:
    """Minimal fully-connected network with one hidden layer, trained with
    per-sample stochastic gradient descent (backpropagation).

    Convention: a single sample is a column vector of shape (input_dim, 1);
    weight matrices map right-to-left (W @ x).
    """

    def __init__(self, input_dim, hidden_dim, output_dim, lr=0.1):
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.lr = lr  # SGD learning rate
        # Parameters initialised from a standard normal distribution.
        self.weights_ih = np.random.randn(self.hidden_dim, self.input_dim)
        self.biases_ih = np.random.randn(self.hidden_dim, 1)
        self.weights_ho = np.random.randn(self.output_dim, self.hidden_dim)
        self.biases_ho = np.random.randn(self.output_dim, 1)
        # Min/max of the data last passed to normalize_data(); required so
        # denormalize_data() can actually invert the scaling.
        self._data_min = None
        self._data_max = None

    def normalize_data(self, data):
        """Min-max scale *data* into [0, 1], remembering min/max for
        later inversion by denormalize_data()."""
        data = np.asarray(data, dtype=float)
        self._data_min = np.min(data)
        self._data_max = np.max(data)
        return (data - self._data_min) / (self._data_max - self._data_min)

    def denormalize_data(self, data):
        """Invert the min-max scaling applied by normalize_data().

        BUGFIX: the original used np.min/np.max of the *already normalized*
        input, which does not reconstruct the original scale. We use the
        min/max stored during normalization instead.

        Raises:
            ValueError: if normalize_data() has not been called yet.
        """
        if self._data_min is None:
            raise ValueError("call normalize_data() before denormalize_data()")
        return np.asarray(data) * (self._data_max - self._data_min) + self._data_min

    def sigmoid(self, x):
        """Logistic activation: 1 / (1 + e^-x)."""
        return 1.0 / (1.0 + np.exp(-x))

    def sigmoid_derivative(self, x):
        """Sigmoid derivative expressed in terms of the sigmoid *output* x."""
        return x * (1 - x)

    def forward(self, x):
        """Forward pass for one (input_dim, 1) sample.

        Returns:
            (y, a1): network output and hidden activation (a1 is kept so
            backward() does not have to recompute it).
        """
        z1 = np.dot(self.weights_ih, x) + self.biases_ih
        a1 = self.sigmoid(z1)
        z2 = np.dot(self.weights_ho, a1) + self.biases_ho
        return self.sigmoid(z2), a1

    def backward(self, x, y_true, y_pred, a1):
        """Gradients of the squared error w.r.t. all parameters.

        The sign convention folds (y_true - y_pred) into delta2, so the
        returned "gradients" point in the loss-*decreasing* direction and
        are *added* to the parameters in train().
        """
        delta2 = (y_true - y_pred) * self.sigmoid_derivative(y_pred)
        delta1 = np.dot(self.weights_ho.T, delta2) * self.sigmoid_derivative(a1)
        grad_weights_ho = np.dot(delta2, a1.T)
        grad_weights_ih = np.dot(delta1, x.T)
        # Bias gradients are the deltas themselves.
        return grad_weights_ih, delta1, grad_weights_ho, delta2

    def train(self, X_train, y_train, epochs=100):
        """Train with per-sample SGD for *epochs* full passes over the data.

        BUGFIX: the original wrote `range(X_train.shape)` and
        `loss /= X_train.shape` — `.shape` is a tuple, so both raised
        TypeError. The sample count is `.shape[0]`.
        """
        n_samples = X_train.shape[0]
        for epoch in range(epochs):
            loss = 0.0
            for i in range(n_samples):
                x = X_train[i].reshape(-1, 1)
                y_true = y_train[i].reshape(-1, 1)
                y_pred, a1 = self.forward(x)
                loss += np.sum((y_true - y_pred) ** 2)
                g_w_ih, g_b_ih, g_w_ho, g_b_ho = self.backward(x, y_true, y_pred, a1)
                # '+=' is correct here: the error sign is already inside the
                # deltas returned by backward().
                self.weights_ih += self.lr * g_w_ih
                self.biases_ih += self.lr * g_b_ih
                self.weights_ho += self.lr * g_w_ho
                self.biases_ho += self.lr * g_b_ho
            loss /= n_samples
            print(f"Epoch {epoch+1} loss: {loss}")

    def predict(self, X_test):
        """Return a flat 1-D array of predictions, one per row of X_test.

        BUGFIX: the original iterated `range(X_test.shape)` (a tuple);
        the row count is `.shape[0]`.
        """
        predictions = []
        for i in range(X_test.shape[0]):
            x = X_test[i].reshape(-1, 1)
            y_pred, _ = self.forward(x)
            predictions.append(y_pred)
        return np.array(predictions).reshape(-1)
# --- Example: train on the first two iris features -----------------------
from sklearn.datasets import load_iris

iris = load_iris()
X = iris.data[:, :2]  # sepal length and sepal width only
y = iris.target

# BUGFIX: input_dim must be the feature count X.shape[1] (an int), not the
# whole shape tuple X.shape — the original raised when building the weights.
nn = BPNeuralNetwork(input_dim=X.shape[1], hidden_dim=5, output_dim=1, lr=0.01)

# Min-max scale the training data into [0, 1].
X_norm = nn.normalize_data(X)

# Train on the normalized features; targets are the raw class labels.
nn.train(X_norm, y.reshape(-1, 1), epochs=1000)

# NOTE(review): scaling the test samples by their *own* min/max (instead of
# the training statistics) and denormalizing sigmoid outputs for a class
# label is statistically questionable — kept to preserve the original
# example's behavior; confirm the intended usage.
X_test_norm = nn.normalize_data(np.array([[5.1, 3.5], [6.2, 3.4]]))
y_pred_norm = nn.predict(X_test_norm)
y_pred_denorm = nn.denormalize_data(y_pred_norm)
print(y_pred_denorm)
```
以上是一个简单的归一化BP神经网络Python代码示例,其中包括模型初始化、归一化数据处理、前向传播、反向传播、训练模型、预测结果等步骤。
相关推荐
![zip](https://img-home.csdnimg.cn/images/20210720083736.png)
![zip](https://img-home.csdnimg.cn/images/20210720083736.png)
![7z](https://img-home.csdnimg.cn/images/20210720083312.png)
![-](https://csdnimg.cn/download_wenku/file_type_column_c1.png)
![-](https://csdnimg.cn/download_wenku/file_type_column_c1.png)
![-](https://csdnimg.cn/download_wenku/file_type_column_c1.png)
![-](https://csdnimg.cn/download_wenku/file_type_column_c1.png)
![-](https://csdnimg.cn/download_wenku/file_type_column_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)