IMDB Sentiment Analysis with a Transformer
Below is a code example of IMDB sentiment analysis with a Transformer model in PyTorch. Note that it uses torchtext's legacy `Field`/`BucketIterator` API (moved to `torchtext.legacy.data` in torchtext 0.9 and removed in 0.12), so it assumes an older torchtext release:
```python
import torch
import torch.nn as nn
import torch.optim as optim
import spacy
from torchtext.datasets import IMDB
# Legacy torchtext API: import from torchtext.legacy.data on torchtext 0.9-0.11
from torchtext.data import Field, LabelField, BucketIterator

# spaCy pipeline, used later by predict_sentiment
nlp = spacy.load('en_core_web_sm')
# Define the fields
TEXT = Field(tokenize='spacy', tokenizer_language='en_core_web_sm', lower=True)
LABEL = LabelField(dtype=torch.float)
# Load the dataset
train_data, test_data = IMDB.splits(TEXT, LABEL)

# Build the vocabularies (TEXT also downloads pretrained GloVe vectors)
TEXT.build_vocab(train_data, max_size=25000, vectors="glove.6B.100d")
LABEL.build_vocab(train_data)
# Create the iterators
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, test_iterator = BucketIterator.splits(
    (train_data, test_data),
    batch_size=64,
    device=device
)
# Define the Transformer model. nn.Transformer is a full encoder-decoder whose
# forward() requires both a source and a target, so for classification we use
# an encoder-only stack (nn.TransformerEncoder) instead.
class TransformerModel(nn.Module):
    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, n_layers, n_heads, dropout):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, embedding_dim)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=embedding_dim, nhead=n_heads,
            dim_feedforward=hidden_dim, dropout=dropout
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=n_layers)
        self.fc = nn.Linear(embedding_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        # text: [seq_len, batch]
        embedded = self.dropout(self.embedding(text))  # [seq_len, batch, emb]
        output = self.transformer(embedded)            # [seq_len, batch, emb]
        pooled = output.mean(dim=0)                    # average over the sequence
        return self.fc(pooled)                         # [batch, output_dim]
# Initialize the model and optimizer
input_dim = len(TEXT.vocab)
embedding_dim = 100
hidden_dim = 256
output_dim = 1
n_layers = 2
n_heads = 4
dropout = 0.5
model = TransformerModel(input_dim, embedding_dim, hidden_dim, output_dim, n_layers, n_heads, dropout).to(device)

# Copy the pretrained GloVe vectors into the embedding layer
model.embedding.weight.data.copy_(TEXT.vocab.vectors)

optimizer = optim.Adam(model.parameters())

# Define the loss function (takes raw logits and applies the sigmoid internally)
criterion = nn.BCEWithLogitsLoss()
# Training loop for one epoch
def train(model, iterator, optimizer, criterion):
    model.train()
    epoch_loss = 0
    for batch in iterator:
        optimizer.zero_grad()
        predictions = model(batch.text).squeeze(1)  # [batch]
        loss = criterion(predictions, batch.label)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# Evaluation loop
def evaluate(model, iterator, criterion):
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for batch in iterator:
            predictions = model(batch.text).squeeze(1)
            loss = criterion(predictions, batch.label)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
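
# Optional addition (not in the original code): a binary-accuracy helper, often
# tracked alongside the loss for sentiment classification. It rounds the sigmoid
# of the logits to {0, 1} and compares against the labels.
def binary_accuracy(predictions, labels):
    rounded = torch.round(torch.sigmoid(predictions))
    return (rounded == labels).float().mean().item()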
# Train and evaluate (note: the test set doubles as a validation set here;
# a held-out split from train_data would be cleaner)
N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    train_loss = train(model, train_iterator, optimizer, criterion)
    valid_loss = evaluate(model, test_iterator, criterion)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'transformer_model.pt')
# Load the best checkpoint and run predictions
model.load_state_dict(torch.load('transformer_model.pt'))

def predict_sentiment(model, sentence):
    model.eval()
    # Lowercase to match the TEXT field (which was built with lower=True)
    tokenized = [tok.text for tok in nlp.tokenizer(sentence.lower())]
    indexed = [TEXT.vocab.stoi[t] for t in tokenized]
    tensor = torch.LongTensor(indexed).to(device)
    tensor = tensor.unsqueeze(1)  # [seq_len, 1]: a batch containing one sentence
    prediction = torch.sigmoid(model(tensor))
    return prediction.item()
# Example predictions
positive_review = "This movie is great!"
negative_review = "This movie is terrible!"
print(predict_sentiment(model, positive_review))  # expected output: > 0.5 (positive)
print(predict_sentiment(model, negative_review))  # expected output: < 0.5 (negative)
```
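One caveat about the model above: the embeddings go straight into the encoder, and self-attention by itself is order-invariant, so the model effectively sees a bag of words. The usual fix is to add positional encodings before the encoder. Below is a minimal sketch of the standard sinusoidal formulation; the `PositionalEncoding` module and its wiring into `TransformerModel` are additions of this answer, not part of the original code:
```python
import math

class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal position information to [seq_len, batch, d_model] input."""
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float()
                             * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions
        self.register_buffer('pe', pe.unsqueeze(1))   # [max_len, 1, d_model]

    def forward(self, x):
        # x: [seq_len, batch, d_model]
        x = x + self.pe[:x.size(0)]
        return self.dropout(x)
```
To use it, add `self.pos_encoder = PositionalEncoding(embedding_dim, dropout)` in `TransformerModel.__init__` and apply it to `embedded` before calling `self.transformer`.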
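A second optional refinement: `BucketIterator` pads every batch to its longest sentence, and the mean pooling above averages the `<pad>` positions in as well. `nn.TransformerEncoder` accepts a `src_key_padding_mask` argument that hides padded positions from the attention. Here is a sketch of an adjusted `forward`, assuming the model is also given the pad index (e.g. `pad_idx = TEXT.vocab.stoi[TEXT.pad_token]` passed into `__init__` and stored as `self.pad_idx`, an assumption of this sketch):
```python
def forward(self, text):
    # text: [seq_len, batch]
    pad_mask = (text == self.pad_idx).transpose(0, 1)  # [batch, seq_len], True at <pad>
    embedded = self.dropout(self.embedding(text))
    output = self.transformer(embedded, src_key_padding_mask=pad_mask)
    # Mean-pool over the real (non-pad) tokens only
    keep = (~pad_mask).transpose(0, 1).unsqueeze(-1).float()  # [seq_len, batch, 1]
    pooled = (output * keep).sum(dim=0) / keep.sum(dim=0)
    return self.fc(pooled)
```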