Transformer Code
Below is a simple code example of a Transformer model, implemented in PyTorch:
```python
import math

import torch
import torch.nn as nn


class Transformer(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, num_heads):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, hidden_dim)
        self.pos_encoding = PositionalEncoding(hidden_dim)
        self.transformer_layers = nn.ModuleList([
            TransformerLayer(hidden_dim, num_heads)
            for _ in range(num_layers)
        ])
        self.fc = nn.Linear(hidden_dim, input_dim)

    def forward(self, x):
        # x: (batch_size, seq_len) token indices
        x = self.embedding(x)
        x = self.pos_encoding(x)
        for transformer_layer in self.transformer_layers:
            x = transformer_layer(x)
        x = self.fc(x)
        return x


class TransformerLayer(nn.Module):
    def __init__(self, hidden_dim, num_heads):
        super().__init__()
        self.multihead_attention = MultiheadAttention(hidden_dim, num_heads)
        self.norm1 = nn.LayerNorm(hidden_dim)
        self.feedforward = Feedforward(hidden_dim)
        self.norm2 = nn.LayerNorm(hidden_dim)

    def forward(self, x):
        # Post-LN residual blocks: self-attention, then feed-forward
        x = x + self.multihead_attention(x)
        x = self.norm1(x)
        x = x + self.feedforward(x)
        x = self.norm2(x)
        return x


class MultiheadAttention(nn.Module):
    def __init__(self, hidden_dim, num_heads):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.query_linear = nn.Linear(hidden_dim, hidden_dim)
        self.key_linear = nn.Linear(hidden_dim, hidden_dim)
        self.value_linear = nn.Linear(hidden_dim, hidden_dim)
        self.output_linear = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x):
        batch_size = x.shape[0]
        query = self.query_linear(x)
        key = self.key_linear(x)
        value = self.value_linear(x)
        # Split the hidden dimension into heads:
        # (batch, seq, hidden) -> (batch, num_heads, seq, head_dim)
        query = query.view(batch_size, -1, self.num_heads, self.hidden_dim // self.num_heads)
        key = key.view(batch_size, -1, self.num_heads, self.hidden_dim // self.num_heads)
        value = value.view(batch_size, -1, self.num_heads, self.hidden_dim // self.num_heads)
        query = query.permute(0, 2, 1, 3)
        key = key.permute(0, 2, 1, 3)
        value = value.permute(0, 2, 1, 3)
        # Scaled dot-product attention
        scores = torch.matmul(query, key.transpose(-2, -1)) / (self.hidden_dim // self.num_heads) ** 0.5
        attention_weights = torch.softmax(scores, dim=-1)
        weighted_values = torch.matmul(attention_weights, value)
        # Merge the heads back: (batch, num_heads, seq, head_dim) -> (batch, seq, hidden)
        weighted_values = weighted_values.permute(0, 2, 1, 3).contiguous()
        weighted_values = weighted_values.view(batch_size, -1, self.hidden_dim)
        output = self.output_linear(weighted_values)
        return output


class Feedforward(nn.Module):
    def __init__(self, hidden_dim):
        super().__init__()
        self.fc1 = nn.Linear(hidden_dim, hidden_dim * 4)
        self.fc2 = nn.Linear(hidden_dim * 4, hidden_dim)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


class PositionalEncoding(nn.Module):
    def __init__(self, hidden_dim, max_len=512):
        super().__init__()
        self.hidden_dim = hidden_dim
        # Precompute sinusoidal position encodings for up to max_len positions
        pe = torch.zeros(max_len, hidden_dim)
        position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, hidden_dim, 2).float() * (-math.log(10000.0) / hidden_dim))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # Scale embeddings and add position information
        x = x * math.sqrt(self.hidden_dim)
        seq_len = x.shape[1]
        x = x + self.pe[:, :seq_len, :]
        return x
```
The model consists of Transformer layers, a multi-head attention mechanism, a feed-forward network, and a positional encoder. In the forward method, the input tokens are first converted into embedding vectors and then combined with the positional encoding. Next, the input is processed by the stack of Transformer layers. Finally, the output is passed through a fully connected layer to obtain the final predictions.
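As a quick sanity check, the model can be instantiated and run on a batch of random token indices. This is only a minimal usage sketch; the vocabulary size, hidden size, layer count, head count, and batch shape below are arbitrary values chosen for illustration:

```python
# Illustrative hyperparameters (assumed, not from the original text)
vocab_size = 1000   # input_dim: size of the token vocabulary
model = Transformer(input_dim=vocab_size, hidden_dim=128, num_layers=2, num_heads=4)

tokens = torch.randint(0, vocab_size, (8, 32))   # (batch_size=8, seq_len=32)
logits = model(tokens)                           # (8, 32, vocab_size)
print(logits.shape)                              # torch.Size([8, 32, 1000])
```

Note that hidden_dim must be divisible by num_heads so the hidden dimension splits evenly across attention heads.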