Complete Transformer Example Code
Posted: 2023-07-30 10:13:18
Below is a complete example of using a Transformer model in PyTorch:
```python
import torch
import torch.nn as nn
import torch.optim as optim

# Define the Transformer model
class Transformer(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, num_heads):
        super(Transformer, self).__init__()
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.encoder = nn.TransformerEncoder(
            # batch_first=True keeps inputs as (batch, seq_len, hidden_size);
            # requires PyTorch >= 1.9
            nn.TransformerEncoderLayer(hidden_size, num_heads, batch_first=True),
            num_layers
        )
        self.decoder = nn.Linear(hidden_size, input_size)

    def forward(self, src):
        embedded = self.embedding(src)    # (batch, seq_len, hidden_size)
        encoded = self.encoder(embedded)  # (batch, seq_len, hidden_size)
        # Mean-pool over the sequence so the model emits one score vector per
        # sequence, matching the per-sequence labels used below
        output = self.decoder(encoded.mean(dim=1))  # (batch, input_size)
        return output

# Define the hyperparameters
input_size = 100   # size of the input vocabulary
hidden_size = 256  # hidden dimension
num_layers = 4     # number of Transformer encoder layers
num_heads = 8      # number of attention heads

# Create a model instance
model = Transformer(input_size, hidden_size, num_layers, num_heads)

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Define the training data and labels
train_data = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
train_labels = torch.tensor([2, 3])

# Train the model
num_epochs = 10
for epoch in range(num_epochs):
    optimizer.zero_grad()
    # Forward pass
    output = model(train_data)
    # Compute the loss
    loss = criterion(output, train_labels)
    # Backward pass and optimization step
    loss.backward()
    optimizer.step()
    print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

# Use the model for prediction
test_data = torch.tensor([[1, 2, 3, 4, 5]])
model.eval()
with torch.no_grad():
    predictions = model(test_data)
    predicted_labels = predictions.argmax(dim=1)
print('Predicted labels:', predicted_labels.tolist())
```
Note: this is only a simple Transformer example; you can modify and extend it to fit your own needs.
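One notable simplification in the code above is the lack of positional encoding, so the encoder effectively treats each input as an unordered set of tokens. A minimal sketch of one possible extension, using the standard sinusoidal encoding (the `PositionalEncoding` module and its integration points are illustrative choices, not part of the original example):
```python
import math
import torch
import torch.nn as nn

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding added to batch-first token embeddings."""
    def __init__(self, hidden_size, max_len=512):
        super().__init__()
        position = torch.arange(max_len).unsqueeze(1)  # (max_len, 1)
        # Frequencies for the even-indexed (sin) and odd-indexed (cos) channels;
        # assumes hidden_size is even (256 in the example above)
        div_term = torch.exp(torch.arange(0, hidden_size, 2) * (-math.log(10000.0) / hidden_size))
        pe = torch.zeros(max_len, hidden_size)
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)  # not trained, but moves with the model's device

    def forward(self, x):
        # x: (batch, seq_len, hidden_size); broadcast-add one encoding per position
        return x + self.pe[:x.size(1)]

# Wiring it into the Transformer class above:
#   in __init__: self.pos_encoding = PositionalEncoding(hidden_size)
#   in forward:  embedded = self.pos_encoding(self.embedding(src))
```
The buffer is sliced to the input's sequence length in `forward`, so it works with the batch-first convention used above for any input whose length stays within `max_len`.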