卷积神经网络python代码
时间: 2023-10-14 19:30:51 浏览: 44
这里是一个简单的卷积神经网络的Python代码示例:
```python
import numpy as np
import tensorflow as tf

# Load the MNIST dataset with one-hot labels.
# NOTE(review): tensorflow.examples.tutorials is a TF 1.x-only helper and was
# removed in TF 2.x; this whole script requires TensorFlow 1.x graph mode.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Hyperparameters.
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# Input dimension: 28x28 grayscale images flattened to 784 values.
n_input = 784
# Number of output classes (digits 0-9).
n_classes = 10

# Convolutional layer parameters: 5x5 kernels, 16 then 32 feature maps.
filter_size = 5
n_filters_1 = 16
n_filters_2 = 32

# Placeholders for flattened input images and one-hot labels.
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

# Reshape flat vectors to NHWC image format: [batch, 28, 28, 1].
x_image = tf.reshape(x, [-1, 28, 28, 1])

# First convolutional layer: 1 -> 16 channels, stride 1, SAME padding, ReLU.
W_conv1 = tf.Variable(tf.truncated_normal([filter_size, filter_size, 1, n_filters_1], stddev=0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape=[n_filters_1]))
h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)

# First 2x2 max-pooling layer: 28x28 -> 14x14.
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Second convolutional layer: 16 -> 32 channels.
W_conv2 = tf.Variable(tf.truncated_normal([filter_size, filter_size, n_filters_1, n_filters_2], stddev=0.1))
b_conv2 = tf.Variable(tf.constant(0.1, shape=[n_filters_2]))
h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2)

# Second 2x2 max-pooling layer: 14x14 -> 7x7.
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Flatten the pooled feature maps to a vector of length 7*7*32.
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * n_filters_2])

# Fully connected layer with 1024 units.
n_fc = 1024
W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * n_filters_2, n_fc], stddev=0.1))
b_fc1 = tf.Variable(tf.constant(0.1, shape=[n_fc]))
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout: keep_prob is fed at run time (0.5 for training, 1.0 for eval).
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Output layer producing unnormalized class logits.
W_fc2 = tf.Variable(tf.truncated_normal([n_fc, n_classes], stddev=0.1))
b_fc2 = tf.Variable(tf.constant(0.1, shape=[n_classes]))
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

# Loss (softmax cross-entropy over logits) and Adam optimizer.
# NOTE(review): tf.nn.softmax_cross_entropy_with_logits is deprecated in
# TF 1.x in favor of softmax_cross_entropy_with_logits_v2; with constant
# one-hot labels, as here, the two compute identical losses and gradients.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y))
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Variable initializer op.
init = tf.global_variables_initializer()

# Training loop.
# BUGFIX: the original listing had all of the following lines at column 0,
# which is a SyntaxError — the bodies of the `with` block and the `for`
# loops must be indented as below.
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, c = sess.run([train_step, cross_entropy],
                            feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.5})
            avg_cost += c / total_batch
        # Report progress after every epoch (the original guarded this with
        # `if epoch % 1 == 0:`, which is always true and was removed).
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
        print("Accuracy:", accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0}))
    print("Optimization Finished!")
    print("Final Accuracy:", accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0}))
```
这个代码使用了TensorFlow来定义一个简单的卷积神经网络,用于对MNIST手写数字数据集进行分类。该网络包括两个卷积层、两个池化层、一个全连接层、一个Dropout层和一个输出层。在训练过程中,使用Adam优化器最小化交叉熵损失函数,并计算准确率作为评测指标。