acgan自动生成动漫头像代码和数据集
时间: 2023-10-11 08:07:12 浏览: 91
基于GAN的动漫头像生成系统
5星 · 资源好评率100%
ACGAN(Auxiliary Classifier GAN)是一种生成对抗网络,它在判别器上增加了一个辅助分类器,从而可以生成具有特定类别属性的图像。下面是一个基于ACGAN思想的动漫头像生成代码和数据集。
代码:
```python
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import cv2
# Hyperparameter settings
latent_dim = 100      # dimensionality of the random noise vector fed to the generator
num_classes = 10      # number of one-hot condition classes
width = 64            # target image width after resizing
height = 64           # target image height after resizing
channels = 3          # RGB
batch_size = 64
epochs = 50
img_dir = "anime_faces"   # directory containing the training images
# Load the dataset
def load_data():
    """Load every readable image in ``img_dir``, resized to (width, height).

    Returns:
        np.ndarray of shape (N, height, width, channels), dtype float32,
        scaled to [-1, 1] so real images match the generator's tanh range.
    """
    images = []
    for filename in os.listdir(img_dir):
        img = cv2.imread(os.path.join(img_dir, filename))
        # cv2.imread returns None for unreadable / non-image files;
        # skip them instead of crashing in cv2.resize.
        if img is None:
            continue
        # OpenCV loads BGR; convert to RGB so matplotlib displays correct colors.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (width, height))
        images.append(img)
    # Scale [0, 255] -> [-1, 1] to match the generator's tanh output range
    # (the original scaled to [0, 1], mismatching the tanh fakes).
    return np.array(images, dtype="float32") / 127.5 - 1.0
# Build the generator
def build_generator():
    """Build the conditional generator.

    Input:  (latent_dim + num_classes,) noise-plus-one-hot-label vector.
    Output: (height, width, channels) image with tanh activation in [-1, 1].

    Four stride-2 transposed convolutions upsample 4x4 -> 64x64. The
    original had only three (reaching 32x32) while its final assert
    demanded a 64x64 output, so building the model failed.
    """
    model = tf.keras.Sequential()
    model.add(layers.Dense(4 * 4 * 256, use_bias=False,
                           input_shape=(latent_dim + num_classes,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((4, 4, 256)))
    assert model.output_shape == (None, 4, 4, 256)  # note: batch size is unconstrained
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 8, 8, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 16, 16, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # Extra upsampling stage (missing in the original) so output reaches 64x64.
    model.add(layers.Conv2DTranspose(32, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 32, 32, 32)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(channels, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False, activation='tanh'))
    assert model.output_shape == (None, height, width, channels)
    return model
# Build the discriminator
def build_discriminator():
    """Build the conditional discriminator.

    Input:  (height, width, channels + num_classes) — the image with the
            one-hot label broadcast into extra spatial channels.
    Output: a single real/fake logit (no sigmoid; losses use from_logits).
    """
    return tf.keras.Sequential([
        layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                      input_shape=[height, width, channels + num_classes]),
        layers.LeakyReLU(),
        layers.Dropout(0.3),
        layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'),
        layers.LeakyReLU(),
        layers.Dropout(0.3),
        layers.Flatten(),
        layers.Dense(1),
    ])
# Instantiate the generator and the discriminator
generator = build_generator()
discriminator = build_discriminator()
# Separate Adam optimizers for the two networks
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
# Loss functions; from_logits=True because the discriminator's final
# Dense(1) layer has no activation
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
# NOTE(review): presumably intended for an ACGAN auxiliary class head,
# but the discriminator above outputs only a single real/fake logit —
# verify how this loss is actually applied downstream
categorical_crossentropy = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
# Define one training step
@tf.function
def train_step(images, labels):
    """Run one adversarial update on both networks.

    Args:
        images: (batch_size, height, width, channels) real images.
        labels: (batch_size, num_classes) one-hot condition vectors.

    Returns:
        (generator_loss, discriminator_loss) scalar tensors.
    """
    # Random noise, conditioned by appending the one-hot label.
    noise = tf.random.normal([batch_size, latent_dim])
    noise = tf.concat([noise, labels], axis=1)
    # The discriminator expects the label broadcast into per-pixel channel
    # maps of shape (batch, height, width, num_classes). Concatenating the
    # rank-2 `labels` directly on axis 3 (as the original did) is a shape
    # error, so tile the labels across the spatial dimensions first.
    label_maps = tf.reshape(labels, [batch_size, 1, 1, num_classes])
    label_maps = tf.tile(label_maps, [1, height, width, 1])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # Generate fake images from the conditioned noise
        generated_images = generator(noise, training=True)
        # Targets for real and fake batches
        real_targets = tf.ones((batch_size, 1))
        fake_targets = tf.zeros((batch_size, 1))
        real_and_labels = tf.concat([images, label_maps], axis=3)
        fake_and_labels = tf.concat([generated_images, label_maps], axis=3)
        # Discriminator scores for real and generated images
        real_discrimination = discriminator(real_and_labels, training=True)
        fake_discrimination = discriminator(fake_and_labels, training=True)
        # Discriminator loss: real -> 1, fake -> 0
        real_discriminator_loss = cross_entropy(real_targets, real_discrimination)
        fake_discriminator_loss = cross_entropy(fake_targets, fake_discrimination)
        discriminator_loss = real_discriminator_loss + fake_discriminator_loss
        # Non-saturating generator loss: the generator wants fakes scored real.
        # (The original applied categorical cross-entropy between the one-hot
        # class labels and the single real/fake logit, which is shape-invalid.)
        generator_loss = cross_entropy(real_targets, fake_discrimination)
    # Compute and apply gradients for both networks
    generator_gradients = gen_tape.gradient(generator_loss, generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(discriminator_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(generator_gradients, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_gradients, discriminator.trainable_variables))
    return generator_loss, discriminator_loss
# Train the model
def train(dataset, epochs):
    """Run `epochs` passes over `dataset` in mini-batches of `batch_size`."""
    num_batches = dataset.shape[0] // batch_size
    for epoch in range(epochs):
        for step in range(num_batches):
            # Slice out the next mini-batch of real images
            batch = dataset[step * batch_size:(step + 1) * batch_size]
            # Random one-hot condition labels (the dataset itself is unlabeled)
            batch_labels = tf.one_hot(np.random.randint(0, num_classes, batch_size),
                                      depth=num_classes)
            gen_loss, disc_loss = train_step(batch, batch_labels)
            # Report progress every 10 iterations
            if step % 10 == 0:
                print("Epoch: %d, Iteration: %d, Generator Loss: %f, Discriminator Loss: %f"
                      % (epoch, step, gen_loss, disc_loss))
# Load the dataset from img_dir
dataset = load_data()
# Train the model for `epochs` passes
train(dataset, epochs)
# Generate new images
def generate_images(num_images):
    """Sample `num_images` conditioned images and display them in a grid.

    The grid is sized from `num_images` (the original hard-coded a 4x4
    grid, which breaks for more than 16 images), and the generator's
    tanh output in [-1, 1] is rescaled to [0, 1] for display — imshow
    clips float values outside [0, 1], so negatives would render black.
    """
    # Random noise, conditioned with random one-hot class labels
    noise = tf.random.normal([num_images, latent_dim])
    labels = tf.one_hot(np.random.randint(0, num_classes, num_images), depth=num_classes)
    noise = tf.concat([noise, labels], axis=1)
    generated_images = generator(noise, training=False)
    # Map tanh output [-1, 1] back to displayable [0, 1]
    generated_images = (generated_images + 1.0) / 2.0
    cols = 4
    rows = (num_images + cols - 1) // cols  # ceil division for the grid height
    plt.figure(figsize=(cols, rows))
    for i in range(num_images):
        plt.subplot(rows, cols, i + 1)
        plt.imshow(generated_images[i, :, :, :])
        plt.axis('off')
    plt.show()
# Generate 10 new images
generate_images(10)
```
数据集:
你可以在以下链接中找到适合ACGAN生成动漫头像的数据集:
https://www.kaggle.com/splcher/animefacedataset
将数据集下载解压后,将其中的动漫头像图片放入一个文件夹中,例如 "anime_faces"。
注意:数据集中图片的尺寸可能不一致,需要将其缩放到统一的尺寸。在代码中,我们将图片缩放到了 64x64 的尺寸,你可以根据需要进行调整。
阅读全文