Transformer Application Code Example in Python
Below is a code example that uses PyTorch to implement a Transformer model for text classification (built on the legacy torchtext API):
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import IMDB
from torchtext.data import Field, LabelField, BucketIterator
# Note: Field, LabelField, and BucketIterator are the legacy torchtext API;
# on torchtext 0.9-0.11 import them from torchtext.legacy.data instead
# (the legacy API was removed entirely in torchtext 0.12+).

# Define the fields: spaCy tokenization, lowercased text, float labels for BCE loss
text_field = Field(tokenize='spacy', lower=True)
label_field = LabelField(dtype=torch.float)

# Load the dataset and build the vocabularies (top 10,000 tokens, GloVe vectors)
train_data, test_data = IMDB.splits(text_field, label_field)
text_field.build_vocab(train_data, max_size=10000, vectors='glove.6B.100d')
label_field.build_vocab(train_data)
# Define the model
class TransformerClassifier(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, num_layers, num_heads, dropout):
        super(TransformerClassifier, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(embedding_dim, num_heads, hidden_dim, dropout),
            num_layers)
        self.fc = nn.Linear(embedding_dim, 1)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        # text arrives as (seq_len, batch), the default torchtext layout, which is
        # exactly what nn.TransformerEncoder expects, so no permute is needed
        embedded = self.embedding(text)       # (seq_len, batch, embedding_dim)
        encoded = self.transformer(embedded)  # (seq_len, batch, embedding_dim)
        pooled = encoded.mean(dim=0)          # mean-pool over the sequence dimension
        return self.fc(self.dropout(pooled))  # dropout before, not after, the classifier head
# Hyperparameters
vocab_size = len(text_field.vocab)
embedding_dim = 100
hidden_dim = 256
num_layers = 6
num_heads = 4  # must divide embedding_dim evenly: 100 % 4 == 0, whereas 8 would raise an error
dropout = 0.2
batch_size = 32

# Initialize the model and optimizer
model = TransformerClassifier(vocab_size, embedding_dim, hidden_dim, num_layers, num_heads, dropout)
model.embedding.weight.data.copy_(text_field.vocab.vectors)  # initialize with the loaded GloVe vectors
optimizer = optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.BCEWithLogitsLoss()

# Define the iterators
train_iter, test_iter = BucketIterator.splits((train_data, test_data), batch_size=batch_size)
# Train the model
model.train()
for epoch in range(10):
    for batch in train_iter:
        text, label = batch.text, batch.label
        optimizer.zero_grad()
        output = model(text).squeeze(1)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
    print(f'Epoch: {epoch+1}, Loss: {loss.item():.4f}')
# Evaluate the model
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for batch in test_iter:
        text, label = batch.text, batch.label
        output = model(text).squeeze(1)
        predicted = torch.round(torch.sigmoid(output))
        total += label.size(0)
        correct += (predicted == label).sum().item()
print(f'Accuracy: {100*correct/total:.2f}%')
In this code, we use the (legacy) torchtext library to load the IMDB dataset, with Field and LabelField defining how the text and labels are processed. We then define a TransformerClassifier that runs an embedding layer through nn.TransformerEncoder, mean-pools the encoder output over the sequence dimension, and feeds it through an nn.Linear classification head, with dropout applied just before that head. Training uses the Adam optimizer and the BCEWithLogitsLoss criterion, with BucketIterator.splits providing the train and test iterators. Finally, we train for 10 epochs and evaluate the model on the test set, printing the accuracy.
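One detail the example glosses over is padding: BucketIterator pads each batch to a common length, and the encoder above attends over those <pad> positions as if they were real tokens. Below is a minimal sketch of how a padding mask could be passed to the encoder, assuming the TransformerClassifier and text_field defined above; the subclass and the pad_idx lookup are illustrative additions, not part of the original code:

# Illustrative variant (not in the original post): mask out <pad> positions
pad_idx = text_field.vocab.stoi[text_field.pad_token]

class MaskedTransformerClassifier(TransformerClassifier):
    def forward(self, text):
        embedded = self.embedding(text)                   # (seq_len, batch, embedding_dim)
        padding_mask = (text == pad_idx).transpose(0, 1)  # (batch, seq_len), True at <pad>
        encoded = self.transformer(embedded, src_key_padding_mask=padding_mask)
        pooled = encoded.mean(dim=0)
        return self.fc(self.dropout(pooled))

The src_key_padding_mask argument stops self-attention from attending to padded positions; the mean-pooling still averages over them, so for batches with very uneven lengths a length-aware pool would be more faithful.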
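Once trained, the model can also score raw strings directly. The helper below is a hypothetical usage sketch, assuming the text_field and model objects built above; note that whether the returned probability corresponds to 'pos' or 'neg' depends on which index label_field.build_vocab assigned each label:

def predict_sentiment(model, sentence):
    # Hypothetical helper: tokenize, numericalize, and score a single sentence
    model.eval()
    tokens = text_field.preprocess(sentence)  # spaCy tokenization + lowercasing
    tensor = text_field.process([tokens])     # numericalize and pad -> (seq_len, 1)
    with torch.no_grad():
        logit = model(tensor).squeeze(1)
    return torch.sigmoid(logit).item()        # probability of the class indexed 1

print(predict_sentiment(model, 'This film was a wonderful surprise.'))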