```python
def MEAN_Spot(opt):
    # channel 1
    inputs1 = layers.Input(shape=(42,42,1))
    conv1 = layers.Conv2D(3, (5,5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs1)
    bn1 = layers.BatchNormalization()(conv1)
    pool1 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3,3))(bn1)
    do1 = layers.Dropout(0.3)(pool1)
    # channel 2
    inputs2 = layers.Input(shape=(42,42,1))
    conv2 = layers.Conv2D(3, (5,5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs2)
    bn2 = layers.BatchNormalization()(conv2)
    pool2 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3,3))(bn2)
    do2 = layers.Dropout(0.3)(pool2)
    # channel 3
    inputs3 = layers.Input(shape=(42,42,1))
    conv3 = layers.Conv2D(8, (5,5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs3)
    bn3 = layers.BatchNormalization()(conv3)
    pool3 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3,3))(bn3)
    do3 = layers.Dropout(0.3)(pool3)
    # merge 1
    merged = layers.Concatenate()([do1, do2, do3])
    # interpretation 1
    merged_conv = layers.Conv2D(8, (5,5), padding='same', activation='relu', kernel_regularizer=l2(0.1))(merged)
    merged_pool = layers.MaxPooling2D(pool_size=(2, 2), padding='same', strides=(2,2))(merged_conv)
    flat = layers.Flatten()(merged_pool)
    flat_do = layers.Dropout(0.2)(flat)
    # outputs
    outputs = layers.Dense(1, activation='linear', name='spot')(flat_do)
    # Takes input u, v, os
    model = keras.models.Model(inputs=[inputs1, inputs2, inputs3], outputs=[outputs])
    model.compile(
        loss={'spot':'mse'},
        optimizer=opt,
        metrics={'spot':tf.keras.metrics.MeanAbsoluteError()},
    )
    return model
```
如何先Concatenate输入图像,然后加入CBAM
时间: 2023-09-19 21:08:11 浏览: 41
您可以使用以下代码来加入CBAM:
```python
from tensorflow.keras import layers
def conv_block(inputs, filters):
    """Standard Conv2D -> BatchNorm -> ReLU block (3x3 kernel, 'same' padding).

    Args:
        inputs: 4-D feature map tensor (channels-last assumed).
        filters: number of output channels for the convolution.
    Returns:
        The activated feature map.
    """
    out = layers.Conv2D(filters, kernel_size=3, padding='same', use_bias=False)(inputs)
    out = layers.BatchNormalization()(out)
    return layers.ReLU()(out)
def channel_attention(inputs, ratio=8):
    """CBAM channel-attention module.

    Both a global-average and a global-max descriptor are passed through a
    shared two-layer MLP, summed, squashed with a sigmoid, and used to rescale
    the input channel-wise (Woo et al., "CBAM", ECCV 2018). The original code
    used only the average branch with the sigmoid inside the MLP (SE-style),
    and relied on `assert` for shape checks (stripped under `python -O`).

    Args:
        inputs: 4-D feature map (batch, H, W, C), channels-last assumed.
        ratio: bottleneck reduction ratio of the shared MLP.
    Returns:
        Tensor with the same shape as `inputs`, channel-wise rescaled.
    """
    channels = inputs.shape[-1]
    # Shared MLP: no activation on the second layer — the sigmoid is applied
    # only after the avg and max branches are summed.
    shared_layer_one = layers.Dense(channels // ratio, activation='relu',
                                    use_bias=True, kernel_initializer='he_normal')
    shared_layer_two = layers.Dense(channels, use_bias=True,
                                    kernel_initializer='he_normal')

    avg_pool = layers.GlobalAveragePooling2D()(inputs)
    avg_pool = layers.Reshape((1, 1, channels))(avg_pool)
    avg_pool = shared_layer_two(shared_layer_one(avg_pool))

    max_pool = layers.GlobalMaxPooling2D()(inputs)
    max_pool = layers.Reshape((1, 1, channels))(max_pool)
    max_pool = shared_layer_two(shared_layer_one(max_pool))

    scale = layers.Activation('sigmoid')(layers.Add()([avg_pool, max_pool]))
    return layers.multiply([inputs, scale])
def spatial_attention(inputs):
    """CBAM spatial-attention module.

    Builds a 1-channel spatial attention map from channel-wise average and max
    projections and multiplies it back onto `inputs`. The original version
    validated intermediate shapes with `assert`, which is stripped under
    `python -O`; the checks are removed — Keras raises on shape mismatch anyway.

    Args:
        inputs: 4-D feature map (batch, H, W, C), channels-last assumed.
    Returns:
        Tensor with the same shape as `inputs`, spatially rescaled.
    """
    kernel_size = 7
    channels = inputs.shape[-1]
    # NOTE(review): this preliminary Conv-BN-ReLU is not part of the original
    # CBAM spatial-attention module; kept for behavioral compatibility.
    x = layers.Conv2D(channels, kernel_size, padding='same', use_bias=False)(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    # Channel-wise mean and max maps -> concatenated (batch, H, W, 2).
    avg_pool = layers.Lambda(lambda t: tf.reduce_mean(t, axis=-1, keepdims=True))(x)
    max_pool = layers.Lambda(lambda t: tf.reduce_max(t, axis=-1, keepdims=True))(x)
    concat = layers.Concatenate(axis=-1)([avg_pool, max_pool])
    # 7x7 conv collapses the two maps into a sigmoid attention mask.
    attn = layers.Conv2D(1, kernel_size, padding='same', use_bias=False,
                         activation='sigmoid')(concat)
    return layers.multiply([inputs, attn])
def cbam_block(inputs, ratio=8):
    """Apply CBAM: channel attention first, then spatial attention.

    Args:
        inputs: 4-D feature map to refine.
        ratio: reduction ratio forwarded to the channel-attention MLP.
    Returns:
        The attention-refined feature map (same shape as `inputs`).
    """
    refined = spatial_attention(channel_attention(inputs, ratio))
    return refined
def _spot_branch(filters):
    """One 42x42x1 input branch: conv_block -> 3x3 max-pool -> dropout(0.3).

    Returns:
        (input_tensor, branch_output) so the caller can wire the Model inputs.
    """
    inp = layers.Input(shape=(42, 42, 1))
    x = conv_block(inp, filters)
    x = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3, 3))(x)
    x = layers.Dropout(0.3)(x)
    return inp, x


def MEAN_Spot(opt):
    """Build and compile the three-input 'spot' regression model with CBAM.

    The three branches were previously copy-pasted three times; they are now
    produced by `_spot_branch` (behavior unchanged).

    Args:
        opt: a Keras optimizer instance, passed straight to `model.compile`.
    Returns:
        A compiled `keras.Model` mapping three (42, 42, 1) inputs to a single
        linear output named 'spot' (MSE loss, MAE metric).
    """
    # Three parallel per-channel branches (inputs u, v, os per original note).
    inputs1, do1 = _spot_branch(3)
    inputs2, do2 = _spot_branch(3)
    inputs3, do3 = _spot_branch(8)  # third branch intentionally wider
    # Fuse the branches, then refine the merged features with CBAM attention.
    merged = layers.Concatenate()([do1, do2, do3])
    cbam = cbam_block(merged)
    # Interpretation head: conv block -> 2x2 pool -> flatten -> dropout.
    x = conv_block(cbam, 8)
    x = layers.MaxPooling2D(pool_size=(2, 2), padding='same', strides=(2, 2))(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(0.2)(x)
    outputs = layers.Dense(1, activation='linear', name='spot')(x)
    model = keras.models.Model(inputs=[inputs1, inputs2, inputs3], outputs=[outputs])
    model.compile(
        loss={'spot': 'mse'},
        optimizer=opt,
        metrics={'spot': tf.keras.metrics.MeanAbsoluteError()},
    )
    return model
```
这里定义了`conv_block`函数来构建卷积块,同时定义了`channel_attention`和`spatial_attention`函数来构建CBAM的通道注意力和空间注意力模块,最后用`cbam_block`函数将两个注意力模块组合起来,加入到模型中。在`MEAN_Spot`函数中,先将输入图像进行Concatenate,然后加入CBAM模块,最后继续进行后续的卷积、池化和全连接层操作。
相关推荐
![rar](https://img-home.csdnimg.cn/images/20210720083606.png)
![zip](https://img-home.csdnimg.cn/images/20210720083736.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![rar](https://img-home.csdnimg.cn/images/20210720083606.png)
![zip](https://img-home.csdnimg.cn/images/20210720083736.png)
![py](https://img-home.csdnimg.cn/images/20210720083646.png)
![pdf](https://img-home.csdnimg.cn/images/20210720083512.png)
![zip](https://img-home.csdnimg.cn/images/20210720083736.png)