# Adam-based optimizer op that minimizes the cost tensor.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Per-example correctness: compare the argmax class index of y against that
# of y_ (one is the network output, the other the one-hot labels — which is
# which is not visible from this chunk).
pred_class = tf.argmax(y, 1)
true_class = tf.argmax(y_, 1)
correct_prediction = tf.equal(pred_class, true_class)
# Accuracy = mean of the boolean correctness vector cast to float32.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Saver used below to write model checkpoints.
saver = tf.train.Saver()
# Global step counter, incremented once per trained batch across all epochs.
iteration = 0
# Create a TensorFlow session and run the full training loop.
# (Indentation restored: the original text had the block structure flattened.)
with tf.Session() as sess:
    # Initialize all global variables in the graph.
    sess.run(tf.global_variables_initializer())
    # Number of batches needed to cover the whole training set once,
    # rounding up so a final partial batch is still trained.
    batch_count = int(math.ceil(sample_size / float(batch_size)))
    # Iterate over the training set `epochs` times.
    for e in range(epochs):
        for batch_i in range(batch_count):
            # Fetch the next batch_size images and labels.
            batch_x, batch_y = mnist.train.next_batch(batch_size=batch_size)
            # One optimization step; also fetch the loss for logging.
            _, loss = sess.run([optimizer, cost],
                               feed_dict={x: batch_x, y: batch_y})
            # Log training progress every 20 batches.
            if batch_i % 20 == 0:
                print("Epoch: {}/{}".format(e + 1, epochs),
                      "Iteration: {}".format(iteration),
                      "Training loss: {:.5f}".format(loss))
            iteration += 1
            # Periodically evaluate on the validation set.
            # NOTE(review): the period is `batch_size` iterations, but the
            # original comment claimed "every iteration" — confirm intent.
            if iteration % batch_size == 0:
                valid_acc = sess.run(accuracy,
                                     feed_dict={x: x_valid, y: y_valid})
                # Fixed: the original printed `e` (0-based) here while the
                # training log above printed `e + 1`; use 1-based in both.
                print("Epoch: {}/{}".format(e + 1, epochs),
                      "Iteration: {}".format(iteration),
                      "Validation Accuracy: {:.5f}".format(valid_acc))
    # Save a checkpoint of the trained model once training completes.
    # (Placement after the epoch loop inferred — the flattened original
    # could also have saved once per epoch; confirm against the book text.)
    saver.save(sess, "checkpoints/mnist_cnn_tf.ckpt")
# Define the convolution-layer builder function
def create_conv2d(input_data, num_input_channels, num_filters, filter_shape,
pool_shape, name):