GAN implementation in Python, in under 100 lines of code
Sure, here is a simple Python implementation of a GAN (Generative Adversarial Network) in under 100 lines of code (it uses the TensorFlow 1.x API):
```python
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST (TF 1.x tutorial helper); labels are not needed for the GAN
mnist = input_data.read_data_sets("MNIST_data/")

# Generator: maps a 100-d noise vector to a 784-d (28x28) image in [-1, 1]
def generator(z, reuse=None):
    with tf.variable_scope('gen', reuse=reuse):
        hidden1 = tf.layers.dense(inputs=z, units=128, activation=tf.nn.leaky_relu)
        hidden2 = tf.layers.dense(inputs=hidden1, units=128, activation=tf.nn.leaky_relu)
        output = tf.layers.dense(inputs=hidden2, units=784, activation=tf.nn.tanh)
        return output
# Discriminator: outputs the probability (and the logits) that the input is a real image
def discriminator(X, reuse=None):
    with tf.variable_scope('dis', reuse=reuse):
        hidden1 = tf.layers.dense(inputs=X, units=128, activation=tf.nn.leaky_relu)
        hidden2 = tf.layers.dense(inputs=hidden1, units=128, activation=tf.nn.leaky_relu)
        logits = tf.layers.dense(inputs=hidden2, units=1)
        output = tf.sigmoid(logits)
        return output, logits
# Placeholders: flattened 28x28 real images and 100-d noise vectors
real_images = tf.placeholder(tf.float32, shape=[None, 784])
z = tf.placeholder(tf.float32, shape=[None, 100])

# Generator produces fake samples from noise
G = generator(z)

# Discriminator scores real samples and generated samples (sharing weights via reuse)
D_output_real, D_logits_real = discriminator(real_images)
D_output_fake, D_logits_fake = discriminator(G, reuse=True)
# Losses: sigmoid cross-entropy on the discriminator logits
def loss_func(logits_in, labels_in):
    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_in, labels=labels_in))

D_real_loss = loss_func(D_logits_real, tf.ones_like(D_logits_real) * 0.9)  # one-sided label smoothing
D_fake_loss = loss_func(D_logits_fake, tf.zeros_like(D_logits_fake))
D_loss = D_real_loss + D_fake_loss
G_loss = loss_func(D_logits_fake, tf.ones_like(D_logits_fake))  # generator tries to make D output "real"
# Optimizers: update the discriminator and generator variables separately
lr = 0.001
tvars = tf.trainable_variables()
D_vars = [var for var in tvars if 'dis' in var.name]
G_vars = [var for var in tvars if 'gen' in var.name]
D_trainer = tf.train.AdamOptimizer(lr).minimize(D_loss, var_list=D_vars)
G_trainer = tf.train.AdamOptimizer(lr).minimize(G_loss, var_list=G_vars)
# Training
batch_size = 100
epochs = 100
init = tf.global_variables_initializer()
samples = []
sample_G = generator(z, reuse=True)  # reuse the trained generator weights for sampling
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(epochs):
        num_batches = mnist.train.num_examples // batch_size
        for i in range(num_batches):
            batch = mnist.train.next_batch(batch_size)
            batch_images = batch[0].reshape((batch_size, 784))
            batch_images = batch_images * 2 - 1  # rescale from [0, 1] to [-1, 1] to match tanh
            batch_z = np.random.uniform(-1, 1, size=(batch_size, 100))
            _ = sess.run(D_trainer, feed_dict={real_images: batch_images, z: batch_z})
            _ = sess.run(G_trainer, feed_dict={z: batch_z})
        print("Epoch: ", epoch)
        # Generate one sample image at the end of each epoch
        sample_z = np.random.uniform(-1, 1, size=(1, 100))
        gen_sample = sess.run(sample_G, feed_dict={z: sample_z})
        samples.append(gen_sample)
# Visualize the first generated sample
plt.imshow(samples[0].reshape(28, 28), cmap='Greys')
plt.show()
```
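For reference, the `D_loss` and `G_loss` terms in the listing implement the standard (non-saturating) GAN objective. Written out, with D the discriminator, G the generator, and z drawn from the uniform noise distribution:

$$\mathcal{L}_D = -\,\mathbb{E}_{x \sim p_{\text{data}}}\!\left[\log D(x)\right] - \mathbb{E}_{z}\!\left[\log\!\left(1 - D(G(z))\right)\right], \qquad \mathcal{L}_G = -\,\mathbb{E}_{z}\!\left[\log D(G(z))\right]$$

The only difference in the code is that the real labels are set to 0.9 instead of 1 (one-sided label smoothing), a common trick to stabilize discriminator training.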
This implementation uses the TensorFlow 1.x framework. Both the generator and the discriminator are simple fully connected networks, the input is flattened 28x28 MNIST images, and the noise vectors are drawn from a uniform distribution. Training uses the Adam optimizer with a sigmoid cross-entropy loss, and the generated images can be visualized with Matplotlib.
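Note that `tf.placeholder`, `tf.layers`, and `tf.Session` are TensorFlow 1.x APIs that were removed from the top-level namespace in TensorFlow 2.x, so the listing above needs a TF 1.x environment (or the `tf.compat.v1` shim). For readers on TF 2.x, here is a minimal sketch of the same fully connected GAN written against the `tf.keras` API. It assumes TF 2.x is installed and mirrors the layer sizes, learning rate, and label smoothing of the listing above; the name `train_step` is just illustrative:

```python
import tensorflow as tf

# Load MNIST and rescale to [-1, 1] to match the tanh output of the generator
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = (x_train.reshape(-1, 784).astype("float32") / 255.0) * 2 - 1

noise_dim = 100
batch_size = 100

generator = tf.keras.Sequential([
    tf.keras.Input(shape=(noise_dim,)),
    tf.keras.layers.Dense(128, activation=tf.nn.leaky_relu),
    tf.keras.layers.Dense(128, activation=tf.nn.leaky_relu),
    tf.keras.layers.Dense(784, activation="tanh"),
])
discriminator = tf.keras.Sequential([
    tf.keras.Input(shape=(784,)),
    tf.keras.layers.Dense(128, activation=tf.nn.leaky_relu),
    tf.keras.layers.Dense(128, activation=tf.nn.leaky_relu),
    tf.keras.layers.Dense(1),  # logits; sigmoid is applied inside the loss
])

bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
d_opt = tf.keras.optimizers.Adam(1e-3)
g_opt = tf.keras.optimizers.Adam(1e-3)

@tf.function
def train_step(real_images):
    z = tf.random.uniform((batch_size, noise_dim), -1.0, 1.0)
    with tf.GradientTape() as d_tape, tf.GradientTape() as g_tape:
        fake_images = generator(z, training=True)
        real_logits = discriminator(real_images, training=True)
        fake_logits = discriminator(fake_images, training=True)
        # One-sided label smoothing (0.9) on real samples, as in the listing above
        d_loss = bce(tf.ones_like(real_logits) * 0.9, real_logits) + \
                 bce(tf.zeros_like(fake_logits), fake_logits)
        g_loss = bce(tf.ones_like(fake_logits), fake_logits)
    d_opt.apply_gradients(zip(d_tape.gradient(d_loss, discriminator.trainable_variables),
                              discriminator.trainable_variables))
    g_opt.apply_gradients(zip(g_tape.gradient(g_loss, generator.trainable_variables),
                              generator.trainable_variables))
    return d_loss, g_loss

dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(60000).batch(batch_size, drop_remainder=True)
for epoch in range(100):
    for batch in dataset:
        d_loss, g_loss = train_step(batch)
    print("Epoch:", epoch, "D loss:", float(d_loss), "G loss:", float(g_loss))
```

After training, a sample can be drawn with `generator(tf.random.uniform((1, noise_dim), -1.0, 1.0))` and reshaped to 28x28 for plotting with Matplotlib, just as in the TF 1.x version.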