Python code example for implementing multi-head self-attention:
### Answer 1:
Here is a Python code example that implements multi-head self-attention:
```python
import torch
import torch.nn as nn
class MultiHeadAttention(nn.Module):
    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
        self.num_heads = num_heads
        self.d_model = d_model
        self.depth = d_model // num_heads
        self.query_linear = nn.Linear(d_model, d_model)
        self.key_linear = nn.Linear(d_model, d_model)
        self.value_linear = nn.Linear(d_model, d_model)
        self.output_linear = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(p=0.1)

    def forward(self, query, key, value, mask=None):
        batch_size = query.size(0)

        # Linear transformations
        query = self.query_linear(query)
        key = self.key_linear(key)
        value = self.value_linear(value)

        # Split into heads: (batch_size, num_heads, seq_len, depth)
        query = query.view(batch_size, -1, self.num_heads, self.depth).transpose(1, 2)
        key = key.view(batch_size, -1, self.num_heads, self.depth).transpose(1, 2)
        value = value.view(batch_size, -1, self.num_heads, self.depth).transpose(1, 2)

        # Scaled dot-product scores: (batch_size, num_heads, seq_len, seq_len)
        scores = torch.matmul(query, key.transpose(-2, -1))
        scores = scores / torch.sqrt(torch.tensor(self.depth, dtype=torch.float32))

        # Apply mask (if provided): masked positions get a large negative score
        if mask is not None:
            mask = mask.unsqueeze(1)
            scores = scores.masked_fill(mask == 0, -1e9)

        # Softmax over the key positions, then dropout
        attention_weights = torch.softmax(scores, dim=-1)
        attention_weights = self.dropout(attention_weights)

        # Weighted sum of the values
        context = torch.matmul(attention_weights, value)

        # Reshape and concatenate the heads: (batch_size, seq_len, d_model)
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.num_heads * self.depth)

        # Final linear projection
        output = self.output_linear(context)
        return output
```
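A quick usage sketch (not part of the original answer): self-attention is obtained by passing the same tensor as query, key, and value. The tensor sizes and the padding mask below are illustrative assumptions.

```python
# Illustrative usage sketch; d_model, num_heads, and the padding mask are assumed values.
d_model, num_heads, batch_size, seq_len = 128, 8, 4, 10
x = torch.randn(batch_size, seq_len, d_model)

# Padding mask with 1 for real tokens and 0 for padded positions,
# shaped (batch_size, 1, seq_len) so it broadcasts over heads and query positions.
mask = torch.ones(batch_size, 1, seq_len)
mask[:, :, -2:] = 0  # pretend the last two positions are padding

mha = MultiHeadAttention(d_model, num_heads)
output = mha(x, x, x, mask=mask)  # self-attention: query = key = value = x
print(output.shape)  # torch.Size([4, 10, 128])
```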
Hope this helps!
### Answer 2:
Below is an example of implementing multi-head self-attention in Python:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadSelfAttention(nn.Module):
    def __init__(self, d_model, num_heads):
        super(MultiHeadSelfAttention, self).__init__()
        self.num_heads = num_heads
        self.d_head = d_model // num_heads
        self.fc_query = nn.Linear(d_model, d_model)
        self.fc_key = nn.Linear(d_model, d_model)
        self.fc_value = nn.Linear(d_model, d_model)
        self.fc_concat = nn.Linear(d_model, d_model)

    def forward(self, x):
        batch_size, seq_len, d_model = x.size()
        h = self.num_heads

        # Project the input and split it into multiple heads:
        # (batch_size, num_heads, seq_len, d_head)
        query = self.fc_query(x).view(batch_size, seq_len, h, self.d_head).transpose(1, 2)
        key = self.fc_key(x).view(batch_size, seq_len, h, self.d_head).transpose(1, 2)
        value = self.fc_value(x).view(batch_size, seq_len, h, self.d_head).transpose(1, 2)

        # Compute scaled dot-product attention scores
        scores = torch.matmul(query, key.transpose(-2, -1)) / (self.d_head ** 0.5)
        attn_weights = F.softmax(scores, dim=-1)

        # Apply attention weights to the value vectors
        attended_values = torch.matmul(attn_weights, value)
        attended_values = attended_values.transpose(1, 2).contiguous().view(batch_size, seq_len, -1)

        # Concatenate and linearly transform the attended values
        output = self.fc_concat(attended_values)
        return output

# Usage example
d_model = 128
num_heads = 8
seq_len = 10
batch_size = 4
input_tensor = torch.randn(batch_size, seq_len, d_model)
attention = MultiHeadSelfAttention(d_model, num_heads)
output = attention(input_tensor)
print("Input Shape: ", input_tensor.shape)
print("Output Shape: ", output.shape)
```
The code above defines a `MultiHeadSelfAttention` class whose `forward` method implements the multi-head self-attention computation. In the usage example, an input tensor of shape `(batch_size, seq_len, d_model)` is passed through multi-head self-attention and produces an output tensor of the same shape `(batch_size, seq_len, d_model)`, where `d_model` is the feature dimension of the input and `num_heads` is the number of attention heads.
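As a shape sanity check (not in the original answer), the custom module can be compared with PyTorch's built-in `nn.MultiheadAttention`. This is only a sketch: it assumes a PyTorch version that supports `batch_first=True`, and since the two modules have different weights, only the output shapes will agree.

```python
import torch
import torch.nn as nn

# Shape-only comparison; the two modules are initialized with different weights,
# so the outputs are not expected to be numerically equal.
d_model, num_heads, batch_size, seq_len = 128, 8, 4, 10
x = torch.randn(batch_size, seq_len, d_model)

custom = MultiHeadSelfAttention(d_model, num_heads)
builtin = nn.MultiheadAttention(d_model, num_heads, batch_first=True)

custom_out = custom(x)
builtin_out, _ = builtin(x, x, x)  # returns (output, attention weights)
print(custom_out.shape, builtin_out.shape)  # both torch.Size([4, 10, 128])
```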
### Answer 3:
Below is Python code for an example of multi-head self-attention:
```python
import torch
import torch.nn as nn
class MultiHeadSelfAttention(nn.Module):
    def __init__(self, embed_size, num_heads):
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_size = embed_size
        self.num_heads = num_heads
        self.head_size = embed_size // num_heads
        self.query = nn.Linear(embed_size, embed_size)
        self.key = nn.Linear(embed_size, embed_size)
        self.value = nn.Linear(embed_size, embed_size)
        self.out = nn.Linear(embed_size, embed_size)

    def forward(self, x):
        batch_size, seq_len, embed_size = x.size()

        # Apply linear transformations to obtain query, key, and value
        query = self.query(x)
        key = self.key(x)
        value = self.value(x)

        # Split the embeddings into num_heads: (batch_size, num_heads, seq_len, head_size)
        query = query.view(batch_size, seq_len, self.num_heads, self.head_size).permute(0, 2, 1, 3)
        key = key.view(batch_size, seq_len, self.num_heads, self.head_size).permute(0, 2, 1, 3)
        value = value.view(batch_size, seq_len, self.num_heads, self.head_size).permute(0, 2, 1, 3)

        # Compute scaled dot-product attention scores
        scores = torch.matmul(query, key.permute(0, 1, 3, 2))
        scores = scores / self.head_size ** 0.5

        # Apply softmax to obtain attention probabilities
        attn_probs = torch.softmax(scores, dim=-1)

        # Apply attention weights to the values and merge the heads back together
        attended = torch.matmul(attn_probs, value)
        attended = attended.permute(0, 2, 1, 3)
        attended = attended.contiguous().view(batch_size, seq_len, self.embed_size)

        # Apply the output linear transformation
        output = self.out(attended)
        return output
```
The code above defines a class named MultiHeadSelfAttention that inherits from nn.Module; a multi-head self-attention layer is created by specifying the embedding size (embed_size) and the number of heads (num_heads). In the forward method, the input is first projected by linear layers into query, key, and value tensors, which are then split into heads. The scaled dot-product attention scores are computed and normalized with a softmax to obtain attention probabilities. These probabilities are used to take a weighted sum of the values, the heads are merged back together, and a final linear transformation produces the output tensor, which is returned.
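A minimal usage sketch for this class (the sizes below are illustrative assumptions); the output has the same shape as the input:

```python
# Minimal usage sketch; embed_size, num_heads, and the tensor sizes are assumed values.
embed_size, num_heads, batch_size, seq_len = 64, 4, 2, 16
x = torch.randn(batch_size, seq_len, embed_size)

attention = MultiHeadSelfAttention(embed_size, num_heads)
output = attention(x)
print(output.shape)  # torch.Size([2, 16, 64]), same shape as the input
```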