利用Python实现卷积神经网络
时间: 2023-06-30 11:26:24 浏览: 64
好的,下面是一个简单的卷积神经网络的Python实现:
```python
import numpy as np
import tensorflow as tf
# Define the convolutional neural network model.
def conv_net(x, n_classes, dropout, training=True):
    """Build a small two-conv-block CNN over 28x28 grayscale images.

    Args:
        x: float32 tensor of flattened images, shape [batch, 784].
        n_classes: number of output classes (width of the final logits).
        dropout: fraction of units to *drop* after the first dense layer
            (tf.layers.dropout `rate` semantics, not keep_prob).
        training: whether dropout is active. Defaults to True because the
            original code omitted this flag, so tf.layers.dropout fell back
            to its training=False default and was silently a no-op.

    Returns:
        Unscaled logits tensor of shape [batch, n_classes].
    """
    # Un-flatten the 784-vector into an NHWC image batch for the conv layers.
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    # Conv block 1: 32 5x5 filters. padding='same' keeps spatial size 28x28
    # so the 7*7*64 flatten below is correct (the original relied on the
    # default 'valid' padding, which yields 5x5x64 and crashes the reshape).
    conv1 = tf.layers.conv2d(x, 32, 5, padding='same', activation=tf.nn.relu)
    # Pooling layer 1: 28x28 -> 14x14.
    pool1 = tf.layers.max_pooling2d(conv1, 2, 2)
    # Conv block 2: 64 3x3 filters at 14x14.
    conv2 = tf.layers.conv2d(pool1, 64, 3, padding='same', activation=tf.nn.relu)
    # Pooling layer 2: 14x14 -> 7x7.
    pool2 = tf.layers.max_pooling2d(conv2, 2, 2)
    # Flatten the 7x7x64 feature maps for the dense head.
    flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    # Fully connected layer 1.
    fc1 = tf.layers.dense(flat, 1024)
    # Dropout regularization; local renamed to avoid shadowing the parameter.
    dropped = tf.layers.dropout(fc1, rate=dropout, training=training)
    # Fully connected layer 2: the class logits.
    fc2 = tf.layers.dense(dropped, n_classes)
    return fc2
# --- Hyperparameters ---
learning_rate = 0.001
num_steps = 2000
batch_size = 128
display_step = 100  # log loss/accuracy every `display_step` minibatches

# Graph inputs: flattened 28x28 images and one-hot digit labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Build the model with 10 output classes.
# NOTE(review): 0.75 is passed as tf.layers.dropout's *drop* rate (75% of
# units dropped), not a keep probability — confirm the intended strength.
logits = conv_net(x, 10, 0.75)

# Loss and optimizer. softmax_cross_entropy_with_logits_v2 replaces the
# deprecated v1 op of the same name in TF 1.x.
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Accuracy: fraction of rows where the predicted class matches the label.
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Variable initializer op.
init = tf.global_variables_initializer()

# Load MNIST (the tutorial reader downloads to /tmp/data/ on first run).
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Train, periodically reporting metrics on the current minibatch.
with tf.Session() as sess:
    sess.run(init)
    for step in range(1, num_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        sess.run(train_op, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0 or step == 1:
            # Compute loss and accuracy on the same minibatch just trained on.
            loss, acc = sess.run([loss_op, accuracy],
                                 feed_dict={x: batch_x, y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))
    print("Training finished!")
    # Evaluate on a 256-image slice of the test set (cheap smoke check).
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
                                        y: mnist.test.labels[:256]}))
```
这个卷积神经网络模型使用了两个卷积层和两个池化层,最后连接了两个全连接层。可以通过调整参数来改变模型的结构和性能。在训练过程中,使用了交叉熵损失函数和Adam优化器。