Write AlexNet code in TensorFlow
Here is example code that implements AlexNet with TensorFlow:
```python
import tensorflow as tf
# NOTE: this code uses the TensorFlow 1.x graph API (tf.placeholder, tf.layers,
# tf.Session); under TensorFlow 2.x these calls live in the tf.compat.v1 namespace.

# Input placeholders: 227x227 RGB images and one-hot labels over 1000 classes
x = tf.placeholder(tf.float32, [None, 227, 227, 3])
y = tf.placeholder(tf.float32, [None, 1000])

# Convolutional layer 1: 96 filters of 11x11, stride 4
conv1 = tf.layers.conv2d(
    inputs=x,
    filters=96,
    kernel_size=[11, 11],
    strides=(4, 4),
    padding='valid',
    activation=tf.nn.relu)

# Max pooling layer 1: 3x3 window, stride 2
pool1 = tf.layers.max_pooling2d(
    inputs=conv1,
    pool_size=[3, 3],
    strides=(2, 2),
    padding='valid')

# Convolutional layer 2: 256 filters of 5x5
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=256,
    kernel_size=[5, 5],
    strides=(1, 1),
    padding='same',
    activation=tf.nn.relu)

# Max pooling layer 2
pool2 = tf.layers.max_pooling2d(
    inputs=conv2,
    pool_size=[3, 3],
    strides=(2, 2),
    padding='valid')

# Convolutional layer 3: 384 filters of 3x3
conv3 = tf.layers.conv2d(
    inputs=pool2,
    filters=384,
    kernel_size=[3, 3],
    strides=(1, 1),
    padding='same',
    activation=tf.nn.relu)

# Convolutional layer 4: 384 filters of 3x3
conv4 = tf.layers.conv2d(
    inputs=conv3,
    filters=384,
    kernel_size=[3, 3],
    strides=(1, 1),
    padding='same',
    activation=tf.nn.relu)

# Convolutional layer 5: 256 filters of 3x3
conv5 = tf.layers.conv2d(
    inputs=conv4,
    filters=256,
    kernel_size=[3, 3],
    strides=(1, 1),
    padding='same',
    activation=tf.nn.relu)

# Max pooling layer 3
pool5 = tf.layers.max_pooling2d(
    inputs=conv5,
    pool_size=[3, 3],
    strides=(2, 2),
    padding='valid')

# Fully connected layer 1
flatten = tf.layers.flatten(inputs=pool5)
fc1 = tf.layers.dense(
    inputs=flatten,
    units=4096,
    activation=tf.nn.relu)

# Fully connected layer 2
fc2 = tf.layers.dense(
    inputs=fc1,
    units=4096,
    activation=tf.nn.relu)

# Output layer: raw logits over 1000 classes
logits = tf.layers.dense(
    inputs=fc2,
    units=1000)

# Loss, optimizer, and accuracy metric
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
optimizer = tf.train.AdamOptimizer().minimize(loss)
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

# Training loop; num_epochs is an example value, and train_data/train_labels
# and test_data/test_labels must be supplied by the caller (e.g. loaded from
# ImageNet or another dataset)
num_epochs = 100
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(num_epochs):
        sess.run(optimizer, feed_dict={x: train_data, y: train_labels})
        if i % 10 == 0:
            print('Epoch:', i, 'Accuracy:',
                  sess.run(accuracy, feed_dict={x: test_data, y: test_labels}))
```
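The code above targets the TensorFlow 1.x graph API. If only TensorFlow 2.x is installed, one common workaround (a sketch of my own, not part of the original answer, and assuming `tf.compat.v1.layers` is still available in your TensorFlow release) is to disable eager execution and route every 1.x call through the `tf.compat.v1` namespace:
```python
import tensorflow as tf

# Sketch: run the TF1-style graph code under TensorFlow 2.x by disabling
# eager execution and prefixing every 1.x call with tf.compat.v1.
tf.compat.v1.disable_eager_execution()

x = tf.compat.v1.placeholder(tf.float32, [None, 227, 227, 3])
conv1 = tf.compat.v1.layers.conv2d(
    inputs=x, filters=96, kernel_size=[11, 11],
    strides=(4, 4), padding='valid', activation=tf.nn.relu)
# ... the remaining layers, loss, optimizer, and Session code follow the same
# pattern: tf.layers -> tf.compat.v1.layers, tf.train -> tf.compat.v1.train,
# tf.Session -> tf.compat.v1.Session ...
```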
Note that this code is only a simplified implementation of AlexNet and differs from the original model in a few ways: for example, it omits the local response normalization (LRN) and dropout layers, and the training loop feeds the entire training set at every step instead of using mini-batches.
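For TensorFlow 2.x, the same architecture is more commonly written with `tf.keras`. The following is a rough sketch of that approach (my addition, not from the original answer); it also restores the LRN and dropout layers that the code above omits, using `tf.nn.local_response_normalization` with its default parameters as a stand-in for the paper's exact LRN constants:
```python
import tensorflow as tf

def build_alexnet(num_classes=1000):
    """Sketch of an AlexNet-style model in tf.keras (assumed hyperparameters)."""
    return tf.keras.Sequential([
        tf.keras.Input(shape=(227, 227, 3)),
        tf.keras.layers.Conv2D(96, 11, strides=4, activation='relu'),
        tf.keras.layers.Lambda(tf.nn.local_response_normalization),  # LRN 1
        tf.keras.layers.MaxPooling2D(pool_size=3, strides=2),
        tf.keras.layers.Conv2D(256, 5, padding='same', activation='relu'),
        tf.keras.layers.Lambda(tf.nn.local_response_normalization),  # LRN 2
        tf.keras.layers.MaxPooling2D(pool_size=3, strides=2),
        tf.keras.layers.Conv2D(384, 3, padding='same', activation='relu'),
        tf.keras.layers.Conv2D(384, 3, padding='same', activation='relu'),
        tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=3, strides=2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(4096, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(4096, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(num_classes),  # raw logits
    ])

model = build_alexnet()
model.compile(optimizer='adam',
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# model.fit(train_images, train_labels, epochs=..., batch_size=128) once data is loaded
```
Whether to keep the LRN layers is a design choice; many later reimplementations drop them or replace them with batch normalization.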