Implementing a neural network with the BP (backpropagation) algorithm in Python to classify the MNIST test set
Sure. To answer your question concretely, here is Python code that uses a neural network trained with backpropagation to classify the MNIST test set:
```python
import numpy as np
import struct
# Read the MNIST dataset in raw IDX format
def load_mnist(path, kind='train'):
    labels_path = path + '/' + kind + '-labels-idx1-ubyte'
    images_path = path + '/' + kind + '-images-idx3-ubyte'
    with open(labels_path, 'rb') as lbpath:
        # Header: magic number and item count, big-endian unsigned ints
        magic, n = struct.unpack('>II', lbpath.read(8))
        labels = np.fromfile(lbpath, dtype=np.uint8)
    with open(images_path, 'rb') as imgpath:
        # Header: magic number, image count, rows, cols
        magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))
        images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)
    return images, labels

# Preprocessing: scale pixels to [0, 1] and keep labels as a flat vector
def preprocess_data(X, y):
    X = X.astype(np.float32) / 255.0
    # Flat shape (n,) so the elementwise comparison in test() works;
    # a (n, 1) column vector would broadcast into an (n, n) matrix there
    y = y.reshape(-1)
    return X, y

# One-hot encode integer labels
def one_hot(y, n_classes):
    return np.eye(n_classes)[y.reshape(-1)]

# Sigmoid activation function
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Derivative of the sigmoid
def sigmoid_prime(z):
    return sigmoid(z) * (1 - sigmoid(z))

# Forward pass through the two-layer network
def feedforward(X, w1, b1, w2, b2):
    z1 = np.dot(X, w1) + b1   # hidden pre-activation
    a1 = sigmoid(z1)          # hidden activation
    z2 = np.dot(a1, w2) + b2  # output pre-activation
    a2 = sigmoid(z2)          # output activation
    return z1, a1, z2, a2

# Backward pass: gradients of the cost with respect to all parameters
def backprop(X, y, w1, b1, w2, b2, z1, a1, z2, a2):
    n = X.shape[0]
    # delta2 = a2 - y is the exact output-layer error for a sigmoid output
    # with cross-entropy cost (the sigmoid' factor cancels)
    delta2 = a2 - y
    # Propagate the error back through w2 and the hidden sigmoid
    delta1 = np.dot(delta2, w2.T) * sigmoid_prime(z1)
    # Average over the batch so the learning rate is independent of batch size
    grad_w2 = np.dot(a1.T, delta2) / n
    grad_b2 = np.sum(delta2, axis=0) / n
    grad_w1 = np.dot(X.T, delta1) / n
    grad_b1 = np.sum(delta1, axis=0) / n
    return grad_w1, grad_b1, grad_w2, grad_b2

# Train the network with full-batch gradient descent
def train(X_train, y_train, n_hidden, n_epochs, eta):
    n_samples, n_features = X_train.shape
    n_classes = len(np.unique(y_train))
    y_train_encoded = one_hot(y_train, n_classes)
    # Initialize weights with small Gaussian noise, biases with zeros
    np.random.seed(0)
    w1 = np.random.normal(scale=0.1, size=(n_features, n_hidden))
    b1 = np.zeros(n_hidden)
    w2 = np.random.normal(scale=0.1, size=(n_hidden, n_classes))
    b2 = np.zeros(n_classes)
    # Gradient-descent loop: one full-batch update per epoch
    for i in range(n_epochs):
        z1, a1, z2, a2 = feedforward(X_train, w1, b1, w2, b2)
        grad_w1, grad_b1, grad_w2, grad_b2 = backprop(X_train, y_train_encoded, w1, b1, w2, b2, z1, a1, z2, a2)
        w1 -= eta * grad_w1
        b1 -= eta * grad_b1
        w2 -= eta * grad_w2
        b2 -= eta * grad_b2
        if (i + 1) % 10 == 0:
            # Mean squared error, reported here purely as a progress metric
            print("Epoch {:3d}/{:3d} | Cost {:.4f}".format(i + 1, n_epochs, np.mean((y_train_encoded - a2) ** 2)))
    return w1, b1, w2, b2

# Evaluate classification accuracy on the test set
def test(X_test, y_test, w1, b1, w2, b2):
    z1, a1, z2, a2 = feedforward(X_test, w1, b1, w2, b2)
    y_pred = np.argmax(a2, axis=1)  # predicted class = most active output unit
    accuracy = np.mean(y_test == y_pred)
    return accuracy

# Main entry point
def main():
    # Load the data (expects the raw MNIST IDX files under ./data)
    X_train, y_train = load_mnist('data', kind='train')
    X_test, y_test = load_mnist('data', kind='t10k')
    X_train, y_train = preprocess_data(X_train, y_train)
    X_test, y_test = preprocess_data(X_test, y_test)
    # Train the network
    w1, b1, w2, b2 = train(X_train, y_train, n_hidden=50, n_epochs=100, eta=0.1)
    # Evaluate on the test set
    accuracy = test(X_test, y_test, w1, b1, w2, b2)
    print("Test Accuracy: {:.2f}%".format(accuracy * 100))

if __name__ == '__main__':
    main()
```
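If you want to sanity-check the gradients that `backprop` computes, a finite-difference comparison is a quick test. Below is a minimal sketch; `cross_entropy_cost` and `check_gradient` are hypothetical helpers (not part of the code above) and assume the `feedforward` and `backprop` functions defined there. Since `delta2 = a2 - y` is the gradient of the cross-entropy cost through a sigmoid output, that is the cost checked here:
```python
def cross_entropy_cost(X, y, w1, b1, w2, b2):
    # Hypothetical helper: mean binary cross-entropy over the batch.
    # This is the cost whose gradient backprop computes via delta2 = a2 - y.
    _, _, _, a2 = feedforward(X, w1, b1, w2, b2)
    eps = 1e-12  # guard against log(0)
    return -np.sum(y * np.log(a2 + eps) + (1 - y) * np.log(1 - a2 + eps)) / X.shape[0]

def check_gradient(X, y, w1, b1, w2, b2, n_checks=5, h=1e-5):
    # Hypothetical helper: compare analytic and central-difference gradients
    # for a few randomly chosen entries of w1.
    z1, a1, z2, a2 = feedforward(X, w1, b1, w2, b2)
    grad_w1, _, _, _ = backprop(X, y, w1, b1, w2, b2, z1, a1, z2, a2)
    rng = np.random.default_rng(0)
    for _ in range(n_checks):
        i, j = rng.integers(w1.shape[0]), rng.integers(w1.shape[1])
        w1[i, j] += h
        c_plus = cross_entropy_cost(X, y, w1, b1, w2, b2)
        w1[i, j] -= 2 * h
        c_minus = cross_entropy_cost(X, y, w1, b1, w2, b2)
        w1[i, j] += h  # restore the original weight
        numeric = (c_plus - c_minus) / (2 * h)
        print("w1[{},{}] analytic {:+.6e} vs numeric {:+.6e}".format(i, j, grad_w1[i, j], numeric))
```
Running `check_gradient` on a small slice of the data (say, the first 100 training samples with their one-hot labels) should show the analytic and numeric columns agreeing to several decimal places.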
The code implements the network's forward and backward passes with numpy, using the sigmoid activation function. Training uses full-batch gradient descent, printing the value of the cost function every 10 epochs. Performance on the test set is measured by classification accuracy. You can modify and tune the code to suit your needs.
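If full-batch updates are too slow, a mini-batch (stochastic) variant usually converges in far fewer passes over the data. Below is a minimal sketch; `train_minibatch` is a hypothetical alternative to `train` that reuses the `one_hot`, `feedforward`, and `backprop` functions above:
```python
def train_minibatch(X_train, y_train, n_hidden=50, n_epochs=20, eta=0.1, batch_size=64):
    # Hypothetical mini-batch variant of train(): one update per batch
    # instead of one update per full pass over the training set.
    n_samples, n_features = X_train.shape
    n_classes = len(np.unique(y_train))
    y_enc = one_hot(y_train, n_classes)
    rng = np.random.default_rng(0)
    w1 = rng.normal(scale=0.1, size=(n_features, n_hidden))
    b1 = np.zeros(n_hidden)
    w2 = rng.normal(scale=0.1, size=(n_hidden, n_classes))
    b2 = np.zeros(n_classes)
    for epoch in range(n_epochs):
        idx = rng.permutation(n_samples)  # reshuffle every epoch
        for start in range(0, n_samples, batch_size):
            batch = idx[start:start + batch_size]
            z1, a1, z2, a2 = feedforward(X_train[batch], w1, b1, w2, b2)
            gw1, gb1, gw2, gb2 = backprop(X_train[batch], y_enc[batch],
                                          w1, b1, w2, b2, z1, a1, z2, a2)
            w1 -= eta * gw1
            b1 -= eta * gb1
            w2 -= eta * gw2
            b2 -= eta * gb2
    return w1, b1, w2, b2
```
Because `backprop` averages its gradients over the batch, the same learning rate works regardless of batch size.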