Python code for classifying the MNIST handwritten digit dataset with a multilayer perceptron (MLP)
Below is a simple Python example of a multilayer perceptron (MLP) for classifying the MNIST handwritten digit dataset. Note that it uses the TensorFlow 1.x graph/session API and the old tutorial data loader, so it requires TensorFlow 1.x to run:
```python
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the MNIST dataset (one_hot=True returns labels as one-hot vectors)
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Training hyperparameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# Network architecture: two hidden layers of 256 units, 784 inputs (28x28 pixels), 10 classes
n_hidden_1 = 256
n_hidden_2 = 256
n_input = 784
n_classes = 10
# Placeholders for the input images and one-hot labels
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])
# Weight and bias variables for each layer
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
# Define the MLP: two hidden ReLU layers followed by a linear output layer
def multilayer_perceptron(x):
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
# Build the model (forward pass producing unnormalized logits)
logits = multilayer_perceptron(X)
# Loss (softmax cross-entropy on the logits) and Adam optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Accuracy: fraction of predictions that match the true labels
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializer for all variables
init = tf.global_variables_initializer()
# Train the model
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all mini-batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run one optimization step and fetch the batch loss
            _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
            # Accumulate the average loss over the epoch
            avg_cost += c / total_batch
        # Print the loss and test accuracy once per epoch
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost={:.9f}".format(avg_cost))
            print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels}))
    print("Optimization Finished!")
    print("Final Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels}))
```
This code uses the TensorFlow library to build an MLP with two hidden layers for classifying the MNIST handwritten digit dataset. It uses softmax cross-entropy as the loss function, the Adam optimizer to update the weights and biases, and classification accuracy to evaluate the model's performance.
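The tutorial data loader (`tensorflow.examples.tutorials.mnist`) and the placeholder/session API used above were removed in TensorFlow 2.x. For reference, here is a minimal Keras sketch of the same architecture (two hidden ReLU layers of 256 units, Adam with learning rate 0.001, 15 epochs, batch size 100). It is an equivalent reimplementation under the assumption that TensorFlow 2 is installed, not part of the original answer:

```python
import tensorflow as tf

# Load MNIST via tf.keras.datasets (replaces the removed tutorial loader)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# Two hidden ReLU layers of 256 units and a 10-way softmax output
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(784,)),
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),
])

# Labels are integer class indices here, so sparse categorical cross-entropy
# plays the role of the one-hot softmax cross-entropy in the TF 1.x version
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

model.fit(x_train, y_train, batch_size=100, epochs=15,
          validation_data=(x_test, y_test))

test_loss, test_acc = model.evaluate(x_test, y_test)
print("Final Accuracy:", test_acc)
```

Keras handles batching, weight initialization, and the training loop internally, so the explicit placeholders, variable dictionaries, and session code from the TF 1.x script are no longer needed.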