将本地词向量应用到简单的情感分类任务中,并且计算其准确率的pytorch代码
时间: 2024-02-13 14:05:28 浏览: 125
好的,以下是一个简单的示例代码:
```python
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
# 定义情感分类模型
class SentimentClassifier(nn.Module):
    """Bidirectional-LSTM sentiment classifier over token-index sequences.

    Args:
        embedding_dim: size of each word-embedding vector.
        hidden_dim: LSTM hidden size (per direction).
        output_dim: number of output logits (1 for binary sentiment).
        vocab_size: size of the embedding table. Defaults to the module-level
            ``VOCAB_SIZE`` global, preserving the original behaviour; passing
            it explicitly removes the hidden-global dependency.
    """

    def __init__(self, embedding_dim, hidden_dim, output_dim, vocab_size=None):
        super().__init__()
        if vocab_size is None:
            # Backward-compatible fallback to the original hard-coded global.
            vocab_size = VOCAB_SIZE
        self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                      embedding_dim=embedding_dim)
        self.rnn = nn.LSTM(embedding_dim, hidden_dim, bidirectional=True)
        # Bidirectional LSTM -> forward and backward states are concatenated,
        # so the classifier head sees 2 * hidden_dim features.
        self.fc = nn.Linear(hidden_dim * 2, output_dim)

    def forward(self, text):
        # text: [seq_len, batch_size] of token indices.
        embedded = self.embedding(text)  # [seq_len, batch_size, embedding_dim]
        output, (hidden, cell) = self.rnn(embedded)
        # hidden[-2] is the last forward-direction state, hidden[-1] the last
        # backward-direction state; concatenate them per example.
        hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        return self.fc(hidden)  # [batch_size, output_dim] raw logits
# 定义训练函数
def train(model, iterator, optimizer, criterion):
    """Run one optimization epoch over *iterator*.

    Returns:
        (mean_loss, mean_accuracy) averaged over the number of batches.
    """
    model.train()
    total_loss = 0.0
    total_acc = 0.0
    for text, labels in iterator:
        optimizer.zero_grad()
        logits = model(text).squeeze(1)
        batch_loss = criterion(logits, labels)
        batch_acc = binary_accuracy(logits, labels)
        batch_loss.backward()
        optimizer.step()
        total_loss += batch_loss.item()
        total_acc += batch_acc.item()
    n_batches = len(iterator)
    return total_loss / n_batches, total_acc / n_batches
# 定义评估函数
def evaluate(model, iterator, criterion):
    """Score *model* on *iterator* with gradients disabled.

    Returns:
        (mean_loss, mean_accuracy) averaged over the number of batches.
    """
    model.eval()
    running_loss = 0.0
    running_acc = 0.0
    with torch.no_grad():
        for text, labels in iterator:
            logits = model(text).squeeze(1)
            running_loss += criterion(logits, labels).item()
            running_acc += binary_accuracy(logits, labels).item()
    return running_loss / len(iterator), running_acc / len(iterator)
# 定义计算准确率函数
def binary_accuracy(preds, y):
    """Fraction of predictions matching *y* after sigmoid + 0.5 threshold.

    *preds* are raw logits; *y* are 0/1 float labels. Returns a scalar tensor.
    """
    predicted_labels = torch.sigmoid(preds).round()
    matches = (predicted_labels == y).float()
    return matches.sum() / len(matches)
# --- Script: load local word vectors and train/evaluate on IMDB ---
# torchtext is used throughout this script but was never imported; import it
# locally so the script actually runs.
import torchtext

# Load the local word-vector file: one "<word> <v1> ... <vd>" entry per line.
with open('vectors.txt', 'r') as f:
    words = []
    vectors = []
    for line in f:
        word, *vector = line.split()
        words.append(word)
        vectors.append(vector)
vectors = np.asarray(vectors, dtype=np.float32)
# Convert to a PyTorch tensor (used below to derive EMBEDDING_DIM).
vectors = torch.from_numpy(vectors)

# build_vocab expects a torchtext Vectors object (word -> vector mapping),
# not a bare tensor, so load the same file through torchtext's loader.
pretrained = torchtext.vocab.Vectors(name='vectors.txt')

# Dataset and fields (legacy torchtext API, matching the rest of the file).
TEXT = torchtext.data.Field(tokenize='spacy')
LABEL = torchtext.data.LabelField(dtype=torch.float)
train_data, test_data = torchtext.datasets.IMDB.splits(TEXT, LABEL)

# Build the vocabulary, attaching the pretrained vectors to TEXT.vocab.
TEXT.build_vocab(train_data, vectors=pretrained)
LABEL.build_vocab(train_data)

# Hyperparameters.
VOCAB_SIZE = len(TEXT.vocab)  # required by SentimentClassifier's embedding
EMBEDDING_DIM = vectors.shape[1]
HIDDEN_DIM = 256
OUTPUT_DIM = 1
BATCH_SIZE = 64

# `device` was referenced below but never defined; use the GPU if available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Batch iterators (BucketIterator groups examples of similar length).
# NOTE(review): train()/evaluate() unpack each batch as (text, labels);
# legacy torchtext Batch objects expose .text/.label attributes instead —
# confirm the iterator yields tuples in this environment.
train_iterator, test_iterator = torchtext.data.BucketIterator.splits(
    (train_data, test_data),
    batch_size=BATCH_SIZE,
    device=device)

# Model, optimizer, loss. Move the model to the same device as the batches.
model = SentimentClassifier(EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM).to(device)
# Copy the pretrained vectors into the embedding layer — without this step
# the loaded word vectors were never actually used by the model.
model.embedding.weight.data.copy_(TEXT.vocab.vectors)

optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()  # expects raw logits + float 0/1 labels

# Train, reporting per-epoch train/validation loss and accuracy.
N_EPOCHS = 5
for epoch in range(N_EPOCHS):
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, test_iterator, criterion)
    print(f'Epoch: {epoch+1:02}')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
```
在这个代码中,我们加载了一个本地词向量文件 "vectors.txt",并将其应用到一个简单的情感分类任务中。我们使用 PyTorch 的 TorchText 库来构建数据集和迭代器,并且使用 PyTorch 自带的 LSTM 模型来进行情感分类。我们使用 Adam 优化器和二元交叉熵损失函数来训练模型,并且计算每个 epoch 的训练损失、训练准确率、验证损失和验证准确率。注意:运行此代码前需要先安装并导入 torchtext,以及 spaCy 分词器所需的英文语言模型。
阅读全文