Text classification on the IMDB dataset with a PyTorch neural network in Python
First, install the required libraries. Note that this code uses the legacy torchtext API (`Field`, `LabelField`, `BucketIterator`), which was moved to `torchtext.legacy` in torchtext 0.9 and removed in later releases, so pin an older, compatible version pair (for example torch 1.7.1 with torchtext 0.8.1), and download the spaCy English model used for tokenization:
```bash
pip install torch==1.7.1 torchtext==0.8.1 spacy
python -m spacy download en_core_web_sm
```
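Because the `Field`-based API only exists in older torchtext releases, a quick import check like the one below (a minimal sketch, assuming the pinned versions above) confirms the environment is usable before running the full script:
```python
# Verify that the legacy torchtext API is importable; these imports fail on
# torchtext >= 0.9, where Field moved to torchtext.legacy and was later removed.
import torch
import torchtext
from torchtext.data import Field, LabelField, BucketIterator

print(torch.__version__, torchtext.__version__)
```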
The following code then implements text classification on the IMDB dataset:
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import IMDB
from torchtext.data import Field, LabelField, BucketIterator
from torchtext.vocab import GloVe
# Set random seeds for reproducibility
seed = 1234
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# Define the Fields: spaCy tokenization for the text, float labels for the BCE loss
TEXT = Field(tokenize='spacy', tokenizer_language='en_core_web_sm')
LABEL = LabelField(dtype=torch.float)
# Load the IMDB dataset
train_data, test_data = IMDB.splits(TEXT, LABEL)
# Build the vocabularies; attach pretrained GloVe vectors to the text vocab
TEXT.build_vocab(train_data, vectors=GloVe(name='6B', dim=300))
LABEL.build_vocab(train_data)
# Define the model: average the word embeddings, then a two-layer MLP
class Net(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, dropout):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.fc1 = nn.Linear(embedding_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        # text: [seq_len, batch_size] (the legacy Field default is sequence-first)
        embedded = self.embedding(text)   # [seq_len, batch_size, embedding_dim]
        embedded = embedded.mean(dim=0)   # pool over the sequence -> [batch_size, embedding_dim]
        hidden = self.dropout(torch.relu(self.fc1(embedded)))
        output = self.fc2(hidden)
        return output                     # raw logits, [batch_size, output_dim]
# Hyperparameters
BATCH_SIZE = 64
EMBEDDING_DIM = 300
HIDDEN_DIM = 256
OUTPUT_DIM = 1
DROPOUT = 0.5
N_EPOCHS = 10
# Model, optimizer, and loss function (BCEWithLogitsLoss applies the sigmoid itself)
model = Net(len(TEXT.vocab), EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, DROPOUT)
# Initialize the embedding layer with the pretrained GloVe vectors
model.embedding.weight.data.copy_(TEXT.vocab.vectors)
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()
# Move the model to the GPU if one is available; the BucketIterator's
# `device` argument (below) moves each batch to the same device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
# Training and evaluation loops
def train(model, iterator, optimizer, criterion):
    epoch_loss = 0
    epoch_acc = 0
    model.train()
    for batch in iterator:
        optimizer.zero_grad()
        text, label = batch.text, batch.label
        predictions = model(text).squeeze(1)
        loss = criterion(predictions, label)
        acc = binary_accuracy(predictions, label)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)

def evaluate(model, iterator, criterion):
    epoch_loss = 0
    epoch_acc = 0
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            text, label = batch.text, batch.label
            predictions = model(text).squeeze(1)
            loss = criterion(predictions, label)
            acc = binary_accuracy(predictions, label)
            epoch_loss += loss.item()
            epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)

def binary_accuracy(predictions, label):
    # Threshold the sigmoid output at 0.5 and compare with the labels
    rounded_preds = torch.round(torch.sigmoid(predictions))
    correct = (rounded_preds == label).float()
    acc = correct.sum() / len(correct)
    return acc
# Main entry point
def main():
    train_iterator, test_iterator = BucketIterator.splits(
        (train_data, test_data), batch_size=BATCH_SIZE, device=device)
    best_valid_loss = float('inf')
    for epoch in range(N_EPOCHS):
        train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
        valid_loss, valid_acc = evaluate(model, test_iterator, criterion)
        # Keep the checkpoint with the lowest validation loss
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), 'imdb-model.pt')
        print(f'Epoch: {epoch+1:02}')
        print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
        print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
    # Load the best checkpoint and report the final test metrics
    model.load_state_dict(torch.load('imdb-model.pt', map_location=device))
    test_loss, test_acc = evaluate(model, test_iterator, criterion)
    print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')

if __name__ == '__main__':
    main()
```
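One detail worth calling out is the `embedded.mean(dim=0)` in `forward`: with the legacy `Field` defaults, batches are sequence-first, so averaging over dimension 0 pools each review into a single vector. A standalone shape check (toy numbers, independent of the script above) makes this concrete:
```python
import torch
import torch.nn as nn

emb = nn.Embedding(100, 300)           # toy vocab of 100 words, 300-dim vectors
text = torch.randint(0, 100, (42, 8))  # [seq_len=42, batch_size=8], sequence-first
pooled = emb(text).mean(dim=0)         # average over the sequence dimension
print(pooled.shape)                    # torch.Size([8, 300])
```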
This code loads the IMDB dataset from torchtext and initializes the embedding layer with pretrained GloVe word vectors. The model itself is a simple bag-of-embeddings classifier: it averages the word embeddings of each review and passes the result through a small fully connected network, trained with the Adam optimizer and binary cross-entropy loss (`BCEWithLogitsLoss`, which applies the sigmoid internally). Note that the script reuses the test set as its validation set for checkpoint selection; for a cleaner evaluation, carve a validation set out of the training data with `train_data.split()`.
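Once training has produced `imdb-model.pt`, the model can score new reviews. The helper below is a minimal sketch (the name `predict_sentiment` is my own, and it assumes the script above has already run so that `TEXT`, `model`, and `device` are in scope); note that whether a score near 1 means "pos" depends on the label order in `LABEL.vocab.stoi`, which is worth checking:
```python
import spacy

nlp = spacy.load('en_core_web_sm')  # same tokenizer the TEXT Field uses

def predict_sentiment(model, sentence):
    """Return the sigmoid score for one raw review string (hypothetical helper)."""
    model.eval()
    tokens = [tok.text for tok in nlp.tokenizer(sentence)]
    indices = [TEXT.vocab.stoi[t] for t in tokens]               # OOV tokens map to <unk>
    tensor = torch.LongTensor(indices).unsqueeze(1).to(device)   # [seq_len, 1]
    with torch.no_grad():
        return torch.sigmoid(model(tensor).squeeze(1)).item()

print(predict_sentiment(model, "This film was absolutely wonderful!"))
```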