seq = tensorflow.keras.layers.LayerNormalization(tensorflow.keras.layers.Add([seq1,seq])) TypeError: __init__() takes 1 positional argument but 2 were given
This error means a layer constructor that accepts no positional arguments (beyond `self`) was given one. Keras layers take their configuration in the constructor and their input tensors in a separate call. Here the inner call `Add([seq1, seq])` already makes this mistake, handing the input list to `Add`'s constructor instead of calling an instantiated layer on it, and the result is then passed positionally to `LayerNormalization`'s constructor, which raises the TypeError. Instantiate each layer first, then call it on the tensors:
```python
add = tensorflow.keras.layers.Add()([seq1, seq])
seq = tensorflow.keras.layers.LayerNormalization()(add)
```
Here we first create a `tensorflow.keras.layers.Add()` layer and call it on the list `[seq1, seq]` to obtain their element-wise sum. That sum tensor is then passed to a `tensorflow.keras.layers.LayerNormalization()` layer, which applies the normalization.
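As a quick self-contained check of the instantiate-then-call pattern, here is a minimal sketch; the shapes and input names are illustrative only:
```python
import tensorflow as tf

seq1 = tf.keras.layers.Input(shape=(16, 64))
seq = tf.keras.layers.Input(shape=(16, 64))

# Configuration goes to the constructor; tensors go to the layer call.
add = tf.keras.layers.Add()([seq1, seq])
out = tf.keras.layers.LayerNormalization()(add)

model = tf.keras.Model(inputs=[seq1, seq], outputs=out)
model.summary()  # output shape: (None, 16, 64)
```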
Related questions
The following code raises the error `__init__() missing 1 required positional argument: 'cell'` in TensorFlow:
```python
class Model():
    def __init__(self):
        self.img_seq_shape=(10,128,128,3)
        self.img_shape=(128,128,3)
        self.train_img=dataset
        # self.test_img=dataset_T
        patch = int(128 / 2 ** 4)
        self.disc_patch = (patch, patch, 1)
        self.optimizer=tf.keras.optimizers.Adam(learning_rate=0.001)
        self.build_generator=self.build_generator()
        self.build_discriminator=self.build_discriminator()
        self.build_discriminator.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=['accuracy'])
        self.build_generator.compile(loss='binary_crossentropy', optimizer=self.optimizer)
        img_seq_A = Input(shape=(10,128,128,3))   # input image sequence
        img_B = Input(shape=self.img_shape)       # target image
        fake_B = self.build_generator(img_seq_A)  # generated fake target image
        self.build_discriminator.trainable = False
        valid = self.build_discriminator([img_seq_A, fake_B])
        self.combined = tf.keras.models.Model([img_seq_A, img_B], [valid, fake_B])
        self.combined.compile(loss=['binary_crossentropy', 'mse'], loss_weights=[1, 100], optimizer=self.optimizer, metrics=['accuracy'])

    def build_generator(self):
        def res_net(inputs, filters):
            x = inputs
            net = conv2d(x, filters // 2, (1, 1), 1)
            net = conv2d(net, filters, (3, 3), 1)
            net = net + x
            # net=tf.keras.layers.LeakyReLU(0.2)(net)
            return net

        def conv2d(inputs, filters, kernel_size, strides):
            x = tf.keras.layers.Conv2D(filters, kernel_size, strides, 'same')(inputs)
            x = tf.keras.layers.BatchNormalization()(x)
            x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)
            return x

        d0 = tf.keras.layers.Input(shape=(10, 128, 128, 3))
        out = ConvRNN2D(filters=32, kernel_size=3, padding='same')(d0)
        out = tf.keras.layers.Conv2D(3, 1, 1, 'same')(out)
        return keras.Model(inputs=d0, outputs=out)

    def build_discriminator(self):
        def d_layer(layer_input, filters, f_size=4, bn=True):
            d = tf.keras.layers.Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            if bn:
                d = tf.keras.layers.BatchNormalization(momentum=0.8)(d)
            d = tf.keras.layers.LeakyReLU(alpha=0.2)(d)
            return d

        img_A = tf.keras.layers.Input(shape=(10, 128, 128, 3))
        img_B = tf.keras.layers.Input(shape=(128, 128, 3))
        df = 32
        lstm_out = ConvRNN2D(filters=df, kernel_size=4, padding="same")(img_A)
        lstm_out = tf.keras.layers.LeakyReLU(alpha=0.2)(lstm_out)
        combined_imgs = tf.keras.layers.Concatenate(axis=-1)([lstm_out, img_B])
        d1 = d_layer(combined_imgs, df)      # 64
        d2 = d_layer(d1, df * 2)             # 32
        d3 = d_layer(d2, df * 4)             # 16
        d4 = d_layer(d3, df * 8)             # 8
        validity = tf.keras.layers.Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
        return tf.keras.Model([img_A, img_B], validity)
```
The `ConvRNN2D` instances are created without the required `cell` argument. `ConvRNN2D` is a generic wrapper whose first positional parameter is a recurrent convolutional cell (for example `tf.keras.layers.ConvLSTM2DCell`); `filters`, `kernel_size`, and `padding` are configured on the cell, not on the wrapper itself. One fix is to pass the cells in when the model is constructed, as shown below. Each sub-model receives its own cell instance so the generator and discriminator do not share weights:
```python
class Model():
    def __init__(self, gen_cell, disc_cell):
        self.img_seq_shape=(10,128,128,3)
        self.img_shape=(128,128,3)
        self.train_img=dataset  # `dataset` is assumed to be defined elsewhere
        patch = int(128 / 2 ** 4)
        self.disc_patch = (patch, patch, 1)
        self.optimizer=tf.keras.optimizers.Adam(learning_rate=0.001)
        self.build_generator=self.build_generator(gen_cell)
        self.build_discriminator=self.build_discriminator(disc_cell)
        self.build_discriminator.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=['accuracy'])
        self.build_generator.compile(loss='binary_crossentropy', optimizer=self.optimizer)
        img_seq_A = Input(shape=(10,128,128,3))
        img_B = Input(shape=self.img_shape)
        fake_B = self.build_generator(img_seq_A)
        self.build_discriminator.trainable = False
        valid = self.build_discriminator([img_seq_A, fake_B])
        self.combined = tf.keras.models.Model([img_seq_A, img_B], [valid, fake_B])
        self.combined.compile(loss=['binary_crossentropy', 'mse'], loss_weights=[1, 100], optimizer=self.optimizer, metrics=['accuracy'])

    def build_generator(self, cell):
        def res_net(inputs, filters):
            x = inputs
            net = conv2d(x, filters // 2, (1, 1), 1)
            net = conv2d(net, filters, (3, 3), 1)
            net = net + x
            return net

        def conv2d(inputs, filters, kernel_size, strides):
            x = tf.keras.layers.Conv2D(filters, kernel_size, strides, 'same')(inputs)
            x = tf.keras.layers.BatchNormalization()(x)
            x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)
            return x

        d0 = tf.keras.layers.Input(shape=(10, 128, 128, 3))
        # The cell carries filters/kernel_size/padding; ConvRNN2D itself
        # accepts none of those arguments.
        out = ConvRNN2D(cell)(d0)
        out = tf.keras.layers.Conv2D(3, 1, 1, 'same')(out)
        return tf.keras.Model(inputs=d0, outputs=out)

    def build_discriminator(self, cell):
        def d_layer(layer_input, filters, f_size=4, bn=True):
            d = tf.keras.layers.Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            if bn:
                d = tf.keras.layers.BatchNormalization(momentum=0.8)(d)
            d = tf.keras.layers.LeakyReLU(alpha=0.2)(d)
            return d

        img_A = tf.keras.layers.Input(shape=(10, 128, 128, 3))
        img_B = tf.keras.layers.Input(shape=(128, 128, 3))
        df = 32
        lstm_out = ConvRNN2D(cell)(img_A)  # cell should define filters=df, kernel_size=4
        lstm_out = tf.keras.layers.LeakyReLU(alpha=0.2)(lstm_out)
        combined_imgs = tf.keras.layers.Concatenate(axis=-1)([lstm_out, img_B])
        d1 = d_layer(combined_imgs, df)      # 64
        d2 = d_layer(d1, df * 2)             # 32
        d3 = d_layer(d2, df * 4)             # 16
        d4 = d_layer(d3, df * 8)             # 8
        validity = tf.keras.layers.Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
        return tf.keras.Model([img_A, img_B], validity)
```
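For reference, a minimal sketch of how the cells might be built and the wrapper exercised on its own, using `ConvLSTM2DCell` as the concrete cell type; the batch size and shapes are illustrative:
```python
import tensorflow as tf
from tensorflow.keras.layers import ConvRNN2D, ConvLSTM2DCell

# Separate cell instances so the two ConvRNN2D layers do not share weights.
gen_cell = ConvLSTM2DCell(filters=32, kernel_size=3, padding='same')
disc_cell = ConvLSTM2DCell(filters=32, kernel_size=4, padding='same')

x = tf.random.normal((2, 10, 128, 128, 3))  # (batch, time, height, width, channels)
y = ConvRNN2D(gen_cell)(x)                  # last state only: (2, 128, 128, 32)
print(y.shape)
```
With these two cells, the class above would be instantiated as `Model(gen_cell, disc_cell)`.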
Replace the ConvLSTM2D layer in the following code with a ConvRNN2D layer, and create a class `convrnn` in the module's `__init__.py`:
```python
class Model():
    def __init__(self):
        self.img_seq_shape=(10,128,128,3)
        self.img_shape=(128,128,3)
        self.train_img=dataset
        # self.test_img=dataset_T
        patch = int(128 / 2 ** 4)
        self.disc_patch = (patch, patch, 1)
        self.optimizer=tf.keras.optimizers.Adam(learning_rate=0.001)
        self.build_generator=self.build_generator()
        self.build_discriminator=self.build_discriminator()
        self.build_discriminator.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=['accuracy'])
        self.build_generator.compile(loss='binary_crossentropy', optimizer=self.optimizer)
        img_seq_A = Input(shape=(10,128,128,3))   # input image sequence
        img_B = Input(shape=self.img_shape)       # target image
        fake_B = self.build_generator(img_seq_A)  # generated fake target image
        self.build_discriminator.trainable = False
        valid = self.build_discriminator([img_seq_A, fake_B])
        self.combined = tf.keras.models.Model([img_seq_A, img_B], [valid, fake_B])
        self.combined.compile(loss=['binary_crossentropy', 'mse'], loss_weights=[1, 100], optimizer=self.optimizer, metrics=['accuracy'])

    def build_generator(self):
        def res_net(inputs, filters):
            x = inputs
            net = conv2d(x, filters // 2, (1, 1), 1)
            net = conv2d(net, filters, (3, 3), 1)
            net = net + x
            # net=tf.keras.layers.LeakyReLU(0.2)(net)
            return net

        def conv2d(inputs, filters, kernel_size, strides):
            x = tf.keras.layers.Conv2D(filters, kernel_size, strides, 'same')(inputs)
            x = tf.keras.layers.BatchNormalization()(x)
            x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)
            return x

        d0 = tf.keras.layers.Input(shape=(10, 128, 128, 3))
        out = tf.keras.layers.ConvRNN2D(filters=32, kernel_size=3, padding='same')(d0)
        out = tf.keras.layers.Conv2D(3, 1, 1, 'same')(out)
        return keras.Model(inputs=d0, outputs=out)
```
Here is the modified code using a ConvRNN2D layer in place of ConvLSTM2D. Note that ConvRNN2D takes a cell rather than `filters`/`kernel_size` arguments, so each ConvRNN2D call wraps a `ConvLSTM2DCell`:
```python
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, ConvRNN2D, ConvLSTM2DCell, Input, BatchNormalization, LeakyReLU, Flatten, Dense, Reshape
# Aliased import: the class defined below is also named Model and would
# otherwise shadow the Keras Model class.
from tensorflow.keras.models import Model as KerasModel
class Model():
    def __init__(self):
        self.img_seq_shape=(10,128,128,3)
        self.img_shape=(128,128,3)
        self.train_img=dataset  # `dataset` is assumed to be defined elsewhere
        patch = int(128 / 2 ** 4)
        self.disc_patch = (patch, patch, 1)
        self.optimizer=tf.keras.optimizers.Adam(learning_rate=0.001)
        self.build_generator=self.build_generator()
        self.build_discriminator=self.build_discriminator()
        self.build_discriminator.compile(loss='binary_crossentropy',
                                         optimizer=self.optimizer,
                                         metrics=['accuracy'])
        self.build_generator.compile(loss='binary_crossentropy',
                                     optimizer=self.optimizer)
        img_seq_A = Input(shape=(10,128,128,3))   # input image sequence
        img_B = Input(shape=self.img_shape)       # target image
        fake_B = self.build_generator(img_seq_A)  # generated fake target image
        self.build_discriminator.trainable = False
        valid = self.build_discriminator([img_seq_A, fake_B])
        self.combined = KerasModel([img_seq_A, img_B], [valid, fake_B])
        self.combined.compile(loss=['binary_crossentropy', 'mse'],
                              loss_weights=[1, 100],
                              optimizer=self.optimizer,
                              metrics=['accuracy'])

    def build_generator(self):
        def res_net(inputs, filters):
            x = inputs
            net = conv2d(x, filters // 2, (1, 1), 1)
            net = conv2d(net, filters, (3, 3), 1)
            net = net + x
            # net = LeakyReLU(0.2)(net)
            return net

        def conv2d(inputs, filters, kernel_size, strides):
            x = Conv2D(filters, kernel_size, strides, 'same')(inputs)
            x = BatchNormalization()(x)
            x = LeakyReLU(alpha=0.2)(x)
            return x

        d0 = Input(shape=(10, 128, 128, 3))
        # ConvRNN2D takes a cell; filters/kernel_size/padding are configured
        # on the ConvLSTM2DCell, not on the wrapper.
        out = ConvRNN2D(ConvLSTM2DCell(filters=32, kernel_size=3, padding='same'))(d0)
        out = conv2d(out, 3, 1, 1)
        return KerasModel(inputs=d0, outputs=out)

    def build_discriminator(self):
        def d_layer(layer_input, filters, f_size=4, normalization=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            if normalization:
                d = BatchNormalization()(d)
            d = LeakyReLU(alpha=0.2)(d)
            return d

        img_A = Input(shape=self.img_seq_shape)
        img_B = Input(shape=self.img_shape)
        # Collapse the 5-D image sequence to a 4-D feature map before
        # concatenating with the target image; concatenating tensors of
        # different ranks would fail.
        seq_feat = ConvRNN2D(ConvLSTM2DCell(filters=32, kernel_size=4, padding='same'))(img_A)
        seq_feat = LeakyReLU(alpha=0.2)(seq_feat)
        combined_imgs = tf.keras.layers.concatenate([seq_feat, img_B])
        d1 = d_layer(combined_imgs, 64, normalization=False)
        d2 = d_layer(d1, 128)
        d3 = d_layer(d2, 256)
        d4 = d_layer(d3, 512)
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
        return KerasModel([img_A, img_B], validity)
```
In this code, the ConvLSTM2D layer has been replaced by a ConvRNN2D layer wrapping a `ConvLSTM2DCell`. The `convrnn` class requested for the module's `__init__.py` still has to be defined separately; a sketch follows.
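One way to satisfy the `__init__.py` part of the request is sketched below, under the assumption that "create a class `convrnn`" means a thin `ConvRNN2D` subclass that builds its own `ConvLSTM2DCell` from Conv2D-style arguments. The class name and this interpretation come from the question, not from any standard Keras API:
```python
# __init__.py of the hypothetical `convrnn` package
import tensorflow as tf

class convrnn(tf.keras.layers.ConvRNN2D):
    """ConvRNN2D wrapper that accepts Conv2D-style arguments directly."""
    def __init__(self, filters, kernel_size, padding='valid', **kwargs):
        # Build the cell from the conv-style arguments, then hand it to
        # the generic ConvRNN2D wrapper.
        cell = tf.keras.layers.ConvLSTM2DCell(filters, kernel_size, padding=padding)
        super().__init__(cell, **kwargs)
```
With this class in place, a call such as `convrnn(filters=32, kernel_size=3, padding='same')(d0)` matches the layer interface the original code expected.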