```python
def MEAN_Spot(opt):
    # channel 1
    inputs1 = layers.Input(shape=(42, 42, 1))
    conv1 = layers.Conv2D(3, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs1)
    bn1 = layers.BatchNormalization()(conv1)
    pool1 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3, 3))(bn1)
    do1 = layers.Dropout(0.3)(pool1)
    # channel 2
    inputs2 = layers.Input(shape=(42, 42, 1))
    conv2 = layers.Conv2D(3, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs2)
    bn2 = layers.BatchNormalization()(conv2)
    pool2 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3, 3))(bn2)
    do2 = layers.Dropout(0.3)(pool2)
    # channel 3
    inputs3 = layers.Input(shape=(42, 42, 1))
    conv3 = layers.Conv2D(8, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs3)
    bn3 = layers.BatchNormalization()(conv3)
    pool3 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3, 3))(bn3)
    do3 = layers.Dropout(0.3)(pool3)
    # merge 1
    merged = layers.Concatenate()([do1, do2, do3])
    # interpretation 1
    merged_conv = layers.Conv2D(8, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.1))(merged)
    merged_pool = layers.MaxPooling2D(pool_size=(2, 2), padding='same', strides=(2, 2))(merged_conv)
    flat = layers.Flatten()(merged_pool)
    flat_do = layers.Dropout(0.2)(flat)
    # outputs
    outputs = layers.Dense(1, activation='linear', name='spot')(flat_do)
    # Takes inputs u, v, os
    model = keras.models.Model(inputs=[inputs1, inputs2, inputs3], outputs=[outputs])
    model.compile(
        loss={'spot': 'mse'},
        optimizer=opt,
        metrics={'spot': tf.keras.metrics.MeanAbsoluteError()},
    )
    return model
```

Modify this model to add a CBAM module.
You can try adding a CBAM (Convolutional Block Attention Module) block after each convolutional layer; CBAM strengthens the model's attention mechanism by reweighting features along the channel and spatial dimensions.
Here is the modified code:
```python
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, backend as K
from tensorflow.keras.layers import (GlobalAveragePooling2D, GlobalMaxPooling2D,
                                     Reshape, Dense, Add, Activation, Lambda,
                                     Concatenate, Conv2D, Permute, multiply)
from tensorflow.keras.regularizers import l2
def channel_attention(input_feature, ratio=8):
    # Channel attention: squeeze the spatial dimensions with global average- and
    # max-pooling, push both through a shared two-layer MLP, and use the summed,
    # sigmoid-activated result to reweight the input channels.
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = int(input_feature.shape[channel_axis])
    # Guard against channel < ratio (e.g. the 3-filter branches below), which
    # would otherwise give the bottleneck Dense layer zero units.
    hidden_units = max(channel // ratio, 1)

    shared_layer_one = Dense(hidden_units,
                             activation='relu',
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    shared_layer_two = Dense(channel,
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')

    avg_pool = GlobalAveragePooling2D()(input_feature)
    avg_pool = Reshape((1, 1, channel))(avg_pool)
    avg_pool = shared_layer_one(avg_pool)
    avg_pool = shared_layer_two(avg_pool)

    max_pool = GlobalMaxPooling2D()(input_feature)
    max_pool = Reshape((1, 1, channel))(max_pool)
    max_pool = shared_layer_one(max_pool)
    max_pool = shared_layer_two(max_pool)

    cbam_feature = Add()([avg_pool, max_pool])
    cbam_feature = Activation('sigmoid')(cbam_feature)

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)
    return multiply([input_feature, cbam_feature])
def spatial_attention(input_feature):
    # Spatial attention: collapse the channel dimension with mean and max,
    # concatenate the two maps, and let a 7x7 convolution + sigmoid produce
    # a per-location gate for the input feature map.
    kernel_size = 7

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((2, 3, 1))(input_feature)
    else:
        cbam_feature = input_feature

    avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
    max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)
    concat = Concatenate(axis=3)([avg_pool, max_pool])

    cbam_feature = Conv2D(filters=1,
                          kernel_size=kernel_size,
                          strides=1,
                          padding='same',
                          activation='sigmoid',
                          kernel_initializer='he_normal',
                          use_bias=False)(concat)

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)
    return multiply([input_feature, cbam_feature])
def CBAM(input_feature):
    # CBAM block: channel attention followed by spatial attention.
    cbam_feature = channel_attention(input_feature)
    cbam_feature = spatial_attention(cbam_feature)
    return cbam_feature
def MEAN_Spot_CBAM(opt):
    # channel 1
    inputs1 = layers.Input(shape=(42, 42, 1))
    conv1 = layers.Conv2D(3, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs1)
    bn1 = layers.BatchNormalization()(conv1)
    cbam1 = CBAM(bn1)
    pool1 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3, 3))(cbam1)
    do1 = layers.Dropout(0.3)(pool1)
    # channel 2
    inputs2 = layers.Input(shape=(42, 42, 1))
    conv2 = layers.Conv2D(3, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs2)
    bn2 = layers.BatchNormalization()(conv2)
    cbam2 = CBAM(bn2)
    pool2 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3, 3))(cbam2)
    do2 = layers.Dropout(0.3)(pool2)
    # channel 3
    inputs3 = layers.Input(shape=(42, 42, 1))
    conv3 = layers.Conv2D(8, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs3)
    bn3 = layers.BatchNormalization()(conv3)
    cbam3 = CBAM(bn3)
    pool3 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3, 3))(cbam3)
    do3 = layers.Dropout(0.3)(pool3)
    # merge 1
    merged = layers.Concatenate()([do1, do2, do3])
    # interpretation 1
    merged_conv = layers.Conv2D(8, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(0.1))(merged)
    merged_cbam = CBAM(merged_conv)
    merged_pool = layers.MaxPooling2D(pool_size=(2, 2), padding='same', strides=(2, 2))(merged_cbam)
    flat = layers.Flatten()(merged_pool)
    flat_do = layers.Dropout(0.2)(flat)
    # outputs
    outputs = layers.Dense(1, activation='linear', name='spot')(flat_do)
    # Takes inputs u, v, os
    model = keras.models.Model(inputs=[inputs1, inputs2, inputs3], outputs=[outputs])
    model.compile(
        loss={'spot': 'mse'},
        optimizer=opt,
        metrics={'spot': tf.keras.metrics.MeanAbsoluteError()},
    )
    return model
```
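To sanity-check the modified model, here is a minimal usage sketch, assuming TensorFlow 2.x; the Adam optimizer, learning rate, batch size, and random arrays below are placeholders rather than part of the original setup:

```python
import numpy as np
import tensorflow as tf

# Build the CBAM model with an assumed optimizer.
model = MEAN_Spot_CBAM(tf.keras.optimizers.Adam(learning_rate=1e-3))
model.summary()

# Dummy batch: three 42x42 single-channel inputs and one scalar target per sample.
n = 8
x1 = np.random.rand(n, 42, 42, 1).astype('float32')
x2 = np.random.rand(n, 42, 42, 1).astype('float32')
x3 = np.random.rand(n, 42, 42, 1).astype('float32')
y = np.random.rand(n, 1).astype('float32')

# One short fit call to confirm the graph builds and trains end to end.
model.fit([x1, x2, x3], {'spot': y}, epochs=1, batch_size=4)
```

If the summary shows the attention layers in each branch and the single epoch runs, the CBAM blocks are wired in correctly; you can then switch back to your real data and training settings.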