import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfAttention(nn.Module):
    def __init__(self, input_size=1, num_heads=1):
        super(SelfAttention, self).__init__()
        self.num_heads = 1
        self.head_size = 1
        self.query = nn.Linear(1, 1)
        self.key = nn.Linear(1, 1)
        self.value = nn.Linear(1, 1)
        self.out = nn.Linear(1, 1)

    def forward(self, inputs):
        batch_size, seq_len, input_size = inputs.size()  # e.g. 128, 706, 1
        # Split inputs into num_heads
        inputs = inputs.view(batch_size, seq_len, self.num_heads, self.head_size)
        inputs = inputs.permute(0, 2, 1, 3).contiguous()
        queries = self.query(inputs).view(batch_size, self.num_heads, seq_len, self.head_size)
        keys = self.key(inputs).view(batch_size, self.num_heads, seq_len, self.head_size)
        values = self.value(inputs).view(batch_size, self.num_heads, seq_len, self.head_size)
        # Compute scaled dot-product attention scores
        scores = torch.matmul(queries, keys.permute(0, 1, 3, 2))
        scores = scores / (self.head_size ** 0.5)
        attention = F.softmax(scores, dim=-1)
        # Apply attention weights to values
        attention_output = torch.matmul(attention, values)
        attention_output = attention_output.view(batch_size, seq_len, input_size)
        # Apply output linear layer
        output = self.out(attention_output)
        return output


class DenseAttentionLayer(nn.Module):
    def __init__(self, input_size, return_alphas=True, name=None, num_heads=1):
        super(DenseAttentionLayer, self).__init__()
        self.return_alphas = return_alphas
        self.name = name
        self.num_heads = num_heads
        # If the input comes with a hidden dimension (e.g. 5 features per gene),
        # collapse that dimension to 1 with a linear layer.
        # print("len(input_size): ", len(input_size))  # 2
        if len(input_size) == 3:
            self.feature_collapse = nn.Linear(input_size[-1], 1)
            input_size = (input_size[0], input_size[1])
        self.attention = SelfAttention(input_size=1, num_heads=1)

    def forward(self, inputs):
        print("inputs.shape: ", inputs.shape)  # torch.Size([128, 706])
        output = self.attention(inputs)
        if self.return_alphas:
            alphas = F.softmax(output, dim=1)
            return torch.mul(inputs, alphas), alphas
        else:
            return output

In the code above, num_heads=1 and head_size=1.
This code implements a self-attention layer (SelfAttention) and a dense attention layer (DenseAttentionLayer).
In the self-attention layer, the input is split into num_heads heads, each of size head_size. Three linear layers (query, key, value) project the input into query, key, and value spaces. The attention scores are the dot products of queries and keys scaled by the square root of head_size; a softmax turns the scores into attention weights, and the weights are multiplied with the values to produce attention_output, which is passed through the output linear layer.
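To make that computation concrete, here is a minimal, self-contained sketch of the scaled dot-product step on toy tensors (the batch size, sequence length, and hidden size d used here are illustrative assumptions, not values from the code above):

import torch
import torch.nn.functional as F

# Toy shapes (assumed): batch of 2 sequences, 4 positions, hidden size d = 8.
B, T, d = 2, 4, 8
queries = torch.randn(B, T, d)
keys = torch.randn(B, T, d)
values = torch.randn(B, T, d)

# Scaled dot-product attention: softmax(Q K^T / sqrt(d)) V
scores = torch.matmul(queries, keys.transpose(-2, -1)) / d ** 0.5  # (B, T, T)
attention = F.softmax(scores, dim=-1)                              # each row sums to 1
attention_output = torch.matmul(attention, values)                 # (B, T, d)
print(attention_output.shape)  # torch.Size([2, 4, 8])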
In the dense attention layer, the input is first feature-collapsed: if a third (feature) dimension is present, a linear layer collapses it to 1. The collapsed input is then passed through the self-attention layer to produce the output. If return_alphas is True, the attention weights (alphas) are returned as well, together with the element-wise product of the inputs and those weights.
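A minimal usage sketch of DenseAttentionLayer, under the assumption that it receives the 3D tensor of shape (128, 706, 1) that SelfAttention.forward expects (the 2D shape [128, 706] printed in the snippet would fail the three-way unpack of inputs.size()); the input_size tuple (706, 1) is likewise an assumption:

layer = DenseAttentionLayer(input_size=(706, 1), return_alphas=True)
x = torch.randn(128, 706, 1)          # (batch, seq_len, 1), assumed shape
weighted, alphas = layer(x)
print(weighted.shape, alphas.shape)   # torch.Size([128, 706, 1]) torch.Size([128, 706, 1])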
Note that num_heads and head_size are both set to 1 here, so multi-head self-attention is not actually being used: every position contributes a single scalar, and the query/key/value projections are 1-to-1 linear maps.
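For comparison, a genuinely multi-head configuration needs an embedding dimension larger than 1 that is divisible by the number of heads. The following is only a hedged sketch using PyTorch's built-in nn.MultiheadAttention; embed_dim=16, num_heads=4, and the batch size are illustrative assumptions, not part of the original model:

import torch
import torch.nn as nn

mha = nn.MultiheadAttention(embed_dim=16, num_heads=4, batch_first=True)
x = torch.randn(8, 706, 16)            # (batch, seq_len, embed_dim), assumed sizes
out, attn_weights = mha(x, x, x)       # self-attention: query = key = value = x
print(out.shape, attn_weights.shape)   # torch.Size([8, 706, 16]) torch.Size([8, 706, 706])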