Can you give me example code for text sentiment analysis implemented with a Transformer?
Here is example code for text sentiment analysis using a Transformer. Note that it relies on the legacy torchtext `Field`/`BucketIterator` API (moved to `torchtext.legacy` in 0.9 and removed in 0.12):
```
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import IMDB
# Field/LabelField/BucketIterator come from the legacy torchtext API
from torchtext.data import Field, LabelField, BucketIterator
# Define the Fields for the text and the labels
TEXT = Field(tokenize='spacy', tokenizer_language='en_core_web_sm', include_lengths=True)
LABEL = LabelField(dtype=torch.float)
# Load the IMDB dataset
train_data, test_data = IMDB.splits(TEXT, LABEL)
# Build the vocabulary (top 25,000 words, initialized with GloVe vectors)
TEXT.build_vocab(train_data, max_size=25000, vectors='glove.6B.100d')
LABEL.build_vocab(train_data)
# Define the model
class TransformerModel(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, dropout, pad_idx):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        # nhead must divide embedding_dim evenly (100 % 4 == 0)
        encoder_layer = nn.TransformerEncoderLayer(embedding_dim, nhead=4,
                                                   dim_feedforward=hidden_dim, dropout=dropout)
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=n_layers)
        self.fc = nn.Linear(embedding_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text, text_lengths):
        # text: [seq_len, batch_size]
        embedded = self.dropout(self.embedding(text))   # [seq_len, batch_size, emb_dim]
        # nn.TransformerEncoder does not accept packed sequences, so mask the
        # padded positions instead and let attention ignore them
        max_len = text.shape[0]
        padding_mask = (torch.arange(max_len, device=text.device)[None, :]
                        >= text_lengths[:, None].to(text.device))  # [batch_size, seq_len]
        output = self.transformer(embedded, src_key_padding_mask=padding_mask)
        output = output.permute(1, 0, 2)                 # [batch_size, seq_len, emb_dim]
        # Average-pool over the sequence dimension
        pooled = output.mean(dim=1)                      # [batch_size, emb_dim]
        return self.fc(pooled)
# Hyperparameters
vocab_size = len(TEXT.vocab)
embedding_dim = 100   # must match the GloVe vectors loaded above
hidden_dim = 256
output_dim = 1
n_layers = 2
dropout = 0.5
pad_idx = TEXT.vocab.stoi[TEXT.pad_token]
# Initialize the model and optimizer
model = TransformerModel(vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, dropout, pad_idx)
# Copy the pretrained GloVe embeddings into the embedding layer
model.embedding.weight.data.copy_(TEXT.vocab.vectors)
optimizer = optim.Adam(model.parameters())
# Loss function (expects raw logits)
criterion = nn.BCEWithLogitsLoss()
# Split the data into batches
BATCH_SIZE = 64
train_iterator, test_iterator = BucketIterator.splits((train_data, test_data), batch_size=BATCH_SIZE)
# Train the model for one epoch
def train(model, iterator, optimizer, criterion):
    epoch_loss = 0
    epoch_acc = 0
    model.train()
    for batch in iterator:
        text, text_lengths = batch.text
        optimizer.zero_grad()
        predictions = model(text, text_lengths).squeeze(1)
        loss = criterion(predictions, batch.label)
        # predictions are logits, so threshold the sigmoid at 0.5
        rounded = torch.round(torch.sigmoid(predictions))
        acc = (rounded == batch.label).float().mean()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)
# Evaluate the model
def evaluate(model, iterator, criterion):
    epoch_loss = 0
    epoch_acc = 0
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            text, text_lengths = batch.text
            predictions = model(text, text_lengths).squeeze(1)
            loss = criterion(predictions, batch.label)
            rounded = torch.round(torch.sigmoid(predictions))
            acc = (rounded == batch.label).float().mean()
            epoch_loss += loss.item()
            epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)
N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, test_iterator, criterion)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut6-model.pt')
    print(f'Epoch: {epoch+1:02}')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
```
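If you want to sanity-check the trained model on a single sentence, a minimal inference sketch could look like the code below. It assumes spaCy and the `en_core_web_sm` model are installed and that the `tut6-model.pt` checkpoint saved above exists; `predict_sentiment` is just an illustrative helper name, not part of any library.
```
import spacy

nlp = spacy.load('en_core_web_sm')

def predict_sentiment(model, sentence):
    model.eval()
    # Tokenize the sentence and map tokens to vocabulary indices
    tokens = [tok.text for tok in nlp.tokenizer(sentence)]
    indexed = [TEXT.vocab.stoi[t] for t in tokens]
    length = torch.LongTensor([len(indexed)])
    tensor = torch.LongTensor(indexed).unsqueeze(1)  # [seq_len, 1]
    with torch.no_grad():
        prediction = torch.sigmoid(model(tensor, length))
    return prediction.item()

model.load_state_dict(torch.load('tut6-model.pt'))
print(predict_sentiment(model, "This film is fantastic!"))
```
Depending on how `LABEL.vocab` maps the classes (typically 'neg' → 0 and 'pos' → 1), outputs close to 1 indicate positive sentiment and outputs close to 0 indicate negative sentiment.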
Hope this code helps!