Applying the BP Algorithm to Two Datasets: Result Analysis and Python Implementation
The BP (backpropagation) algorithm is a widely used method for training neural networks on classification, regression, and similar problems. Below we take classification as an example, run experiments on two datasets, and implement the networks in Python.
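Both listings below use the same architecture: a sigmoid hidden layer, a softmax output layer, and a cross-entropy loss. For that setup the backpropagation gradients take a compact standard form, written here to match the variable names used in the code (δ2 and δ1 correspond to delta2 and delta1):
```
z_1 = \sigma(X W_1 + b_1), \qquad \hat{y} = \mathrm{softmax}(z_1 W_2 + b_2)

\delta_2 = \hat{y} - y, \qquad
\delta_1 = (\delta_2 W_2^{\top}) \odot z_1 \odot (1 - z_1)

W_2 \leftarrow W_2 - \eta\, z_1^{\top} \delta_2, \qquad
W_1 \leftarrow W_1 - \eta\, X^{\top} \delta_1
```
The cancellation of the softmax and cross-entropy derivatives is what makes the output-layer error simply the difference between the predicted and true one-hot vectors.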
Dataset 1: the Iris dataset
The Iris dataset is a classic classification benchmark. It contains three iris species with 50 samples each, 150 samples in total, and every sample has four features: sepal length, sepal width, petal length, and petal width. After normalization the data can be fed directly to a BP neural network. The Python implementation follows:
```
import numpy as np
from sklearn.datasets import load_iris
from sklearn.preprocessing import normalize
# Load the Iris dataset
iris = load_iris()
X = iris.data
y = iris.target

# Preprocessing: scale each feature column to unit norm
X = normalize(X, axis=0)

# Convert integer labels to one-hot encoding
n_classes = 3
y_one_hot = np.zeros((len(y), n_classes))
for i, label in enumerate(y):
    y_one_hot[i, label] = 1

# Build the network: one hidden layer
input_size = X.shape[1]
hidden_size = 10
output_size = n_classes
W1 = np.random.randn(input_size, hidden_size)
b1 = np.zeros(hidden_size)
W2 = np.random.randn(hidden_size, output_size)
b2 = np.zeros(output_size)

# Activation and loss functions
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def softmax(x):
    exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))  # shift for numerical stability
    return exp_x / np.sum(exp_x, axis=1, keepdims=True)

def cross_entropy_loss(y_pred, y_true):
    return -np.mean(np.sum(y_true * np.log(y_pred + 1e-12), axis=1))

# Training function: full-batch gradient descent with backpropagation
def train(X, y, W1, b1, W2, b2, lr, epochs):
    n = len(X)
    for epoch in range(epochs):
        # Forward pass
        a1 = np.dot(X, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y_pred = softmax(a2)
        # Compute the loss
        loss = cross_entropy_loss(y_pred, y)
        # Backward pass (gradients averaged over the batch)
        delta2 = (y_pred - y) / n
        delta1 = np.dot(delta2, W2.T) * z1 * (1 - z1)
        # Parameter updates
        W2 -= lr * np.dot(z1.T, delta2)
        b2 -= lr * np.sum(delta2, axis=0)
        W1 -= lr * np.dot(X.T, delta1)
        b1 -= lr * np.sum(delta1, axis=0)
        # Print the loss periodically
        if epoch % 100 == 0:
            print("Epoch %d Loss %.4f" % (epoch, loss))
    return W1, b1, W2, b2

# Train the network
W1, b1, W2, b2 = train(X, y_one_hot, W1, b1, W2, b2, lr=0.1, epochs=1000)

# Predict on the training data
a1 = np.dot(X, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
y_pred = np.argmax(a2, axis=1)
acc = np.mean(y_pred == y)
print("Accuracy %.4f" % acc)
```
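Note that the listing above reports accuracy on the same samples it was trained on, so the figure is optimistic. Below is a minimal held-out evaluation sketch, assuming the train() and sigmoid() functions and the X, y_one_hot, and size variables from the listing above are still in scope; the split variable names (X_tr, X_te, etc.) are introduced here for illustration only.
```
# Hold out 20% of the iris data for testing (illustrative sketch; reuses the
# train() and sigmoid() definitions from the listing above)
from sklearn.model_selection import train_test_split

X_tr, X_te, y_tr, y_te = train_test_split(X, y_one_hot, test_size=0.2, random_state=0)

# Re-initialize the parameters so training starts from scratch on the split
W1 = np.random.randn(input_size, hidden_size)
b1 = np.zeros(hidden_size)
W2 = np.random.randn(hidden_size, output_size)
b2 = np.zeros(output_size)

W1, b1, W2, b2 = train(X_tr, y_tr, W1, b1, W2, b2, lr=0.1, epochs=1000)

# Forward pass on the held-out samples only
z1 = sigmoid(np.dot(X_te, W1) + b1)
test_pred = np.argmax(np.dot(z1, W2) + b2, axis=1)
test_acc = np.mean(test_pred == np.argmax(y_te, axis=1))
print("Held-out accuracy %.4f" % test_acc)
```
The second dataset below follows exactly this pattern, splitting the data before training and evaluating on the test portion.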
Dataset 2: the handwritten digits dataset
The handwritten digits dataset (scikit-learn's load_digits) has 10 classes with roughly 180 samples per class, about 1,800 samples in total (1,797 exactly). Each sample is an 8x8 grayscale image, which is flattened into a 64-dimensional vector before training. The Python implementation follows:
```
import numpy as np
from sklearn.datasets import load_digits
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
# Load the handwritten digits dataset
digits = load_digits()
X = digits.images.reshape(-1, 64)  # flatten each 8x8 image into a 64-dimensional vector
y = digits.target

# Preprocessing: scale each feature column to unit norm
X = normalize(X, axis=0)

# Convert integer labels to one-hot encoding
n_classes = 10
y_one_hot = np.zeros((len(y), n_classes))
for i, label in enumerate(y):
    y_one_hot[i, label] = 1

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y_one_hot, test_size=0.2, random_state=42)

# Build the network: one hidden layer
input_size = X.shape[1]
hidden_size = 50
output_size = n_classes
W1 = np.random.randn(input_size, hidden_size)
b1 = np.zeros(hidden_size)
W2 = np.random.randn(hidden_size, output_size)
b2 = np.zeros(output_size)

# Activation and loss functions
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def softmax(x):
    exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))  # shift for numerical stability
    return exp_x / np.sum(exp_x, axis=1, keepdims=True)

def cross_entropy_loss(y_pred, y_true):
    return -np.mean(np.sum(y_true * np.log(y_pred + 1e-12), axis=1))

# Training function: full-batch gradient descent with backpropagation
def train(X, y, W1, b1, W2, b2, lr, epochs):
    n = len(X)
    for epoch in range(epochs):
        # Forward pass
        a1 = np.dot(X, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y_pred = softmax(a2)
        # Compute the loss
        loss = cross_entropy_loss(y_pred, y)
        # Backward pass (gradients averaged over the batch)
        delta2 = (y_pred - y) / n
        delta1 = np.dot(delta2, W2.T) * z1 * (1 - z1)
        # Parameter updates
        W2 -= lr * np.dot(z1.T, delta2)
        b2 -= lr * np.sum(delta2, axis=0)
        W1 -= lr * np.dot(X.T, delta1)
        b1 -= lr * np.sum(delta1, axis=0)
        # Print the loss periodically
        if epoch % 100 == 0:
            print("Epoch %d Loss %.4f" % (epoch, loss))
    return W1, b1, W2, b2

# Train the network on the training split
W1, b1, W2, b2 = train(X_train, y_train, W1, b1, W2, b2, lr=0.1, epochs=1000)

# Predict on the test split
a1 = np.dot(X_test, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
y_pred = np.argmax(a2, axis=1)
acc = np.mean(y_pred == np.argmax(y_test, axis=1))
print("Accuracy %.4f" % acc)
```
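As a sanity check, the same task can be handed to scikit-learn's built-in MLPClassifier, which is also trained by backpropagation. The sketch below reuses the X_train/X_test split and one-hot labels from the listing above and converts the labels back to class indices, since MLPClassifier expects integer labels rather than one-hot vectors.
```
# Cross-check with scikit-learn's MLPClassifier (illustrative sketch; reuses
# the X_train/X_test/y_train/y_test variables from the listing above)
from sklearn.neural_network import MLPClassifier

clf = MLPClassifier(hidden_layer_sizes=(50,), max_iter=1000, random_state=42)
clf.fit(X_train, np.argmax(y_train, axis=1))            # integer class labels
sk_acc = clf.score(X_test, np.argmax(y_test, axis=1))   # mean accuracy on the test split
print("scikit-learn MLP accuracy %.4f" % sk_acc)
```
Comparing the two accuracy numbers is a quick way to confirm that the from-scratch implementation is learning rather than merely memorizing noise.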
That covers the experiments applying the BP algorithm to the two datasets, together with the Python implementations. I hope you find it helpful.