Code for sentiment analysis with a Transformer
Below is example code for sentiment analysis using a Transformer:
```python
import torch
import torch.nn as nn
import torch.optim as optim
# Note: Field, TabularDataset and BucketIterator belong to the legacy torchtext
# API (torchtext <= 0.8; in 0.9-0.11 import them from torchtext.legacy.data)
from torchtext.data import Field, TabularDataset, BucketIterator
import spacy
# Set the random seed for reproducibility
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# Define the fields
# tokenize='spacy' requires the spaCy English model:
#   python -m spacy download en_core_web_sm
TEXT = Field(tokenize='spacy', tokenizer_language='en_core_web_sm', lower=True)
LABEL = Field(sequential=False, dtype=torch.float)
# Load the dataset (CSV files with a text column and a label column)
train_data, test_data = TabularDataset.splits(
    path='path/to/data',
    train='train.csv',
    test='test.csv',
    format='csv',
    fields=[('text', TEXT), ('label', LABEL)],
    skip_header=True  # remove this if your CSV files have no header row
)
# Build the vocabularies (this loads pretrained GloVe vectors into TEXT.vocab.vectors)
TEXT.build_vocab(train_data, max_size=10000, vectors='glove.6B.100d')
LABEL.build_vocab(train_data)
# Create iterators that batch together examples of similar length
BATCH_SIZE = 64
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, test_iterator = BucketIterator.splits(
    (train_data, test_data),
    batch_size=BATCH_SIZE,
    sort_key=lambda x: len(x.text),  # TabularDataset defines no default sort key
    device=device
)
# Define the Transformer classifier
class Transformer(nn.Module):
    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim,
                 n_layers, n_heads, pf_dim, dropout):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, embedding_dim)
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(embedding_dim, n_heads, pf_dim, dropout),
            num_layers=n_layers
        )
        self.fc = nn.Linear(embedding_dim, hidden_dim)
        self.dropout = nn.Dropout(dropout)
        self.out = nn.Linear(hidden_dim, output_dim)

    def forward(self, src):
        # src: [seq_len, batch_size] (the default, non-batch-first layout)
        embedded = self.dropout(self.embedding(src))        # [seq_len, batch_size, embedding_dim]
        encoded = self.encoder(embedded)                    # [seq_len, batch_size, embedding_dim]
        pooled = encoded.mean(dim=0)                        # mean-pool over the sequence dimension
        hidden = self.dropout(torch.relu(self.fc(pooled)))
        output = self.out(hidden)
        return output
# Initialize the model
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1
N_LAYERS = 2
N_HEADS = 8
PF_DIM = 512
DROPOUT = 0.5
model = Transformer(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS, N_HEADS, PF_DIM, DROPOUT).to(device)
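# The GloVe vectors loaded into TEXT.vocab above are not applied automatically;
# a common optional step is to copy them into the embedding layer's weights:
model.embedding.weight.data.copy_(TEXT.vocab.vectors)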
# Define the loss function and optimizer
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters())
# Define the training and evaluation functions
def train(model, iterator, optimizer, criterion):
    model.train()
    epoch_loss = 0
    for batch in iterator:
        optimizer.zero_grad()
        predictions = model(batch.text).squeeze(1)
        loss = criterion(predictions, batch.label)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
def evaluate(model, iterator, criterion):
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for batch in iterator:
            predictions = model(batch.text).squeeze(1)
            loss = criterion(predictions, batch.label)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# Train the model (here the test set doubles as the validation set)
N_EPOCHS = 10
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    train_loss = train(model, train_iterator, optimizer, criterion)
    valid_loss = evaluate(model, test_iterator, criterion)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'sentiment_model.pt')
# Load the best checkpoint and run inference
model.load_state_dict(torch.load('sentiment_model.pt'))
model.eval()

# Use the same tokenizer at inference time as during training
nlp = spacy.load('en_core_web_sm')

def predict_sentiment(sentence):
    model.eval()
    # lower() matches the lower=True setting of the TEXT field
    tokenized = [tok.text.lower() for tok in nlp.tokenizer(sentence)]
    indexed = [TEXT.vocab.stoi[t] for t in tokenized]
    tensor = torch.LongTensor(indexed).unsqueeze(1).to(device)  # [seq_len, 1]
    prediction = torch.sigmoid(model(tensor))
    return prediction.item()
```
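A minimal usage sketch (the input sentence is hypothetical; whether a score near 1.0 means positive or negative depends on the indices `LABEL.build_vocab` assigned to your labels):
```python
score = predict_sentiment("This movie was absolutely wonderful!")
print(f"sentiment score: {score:.3f}")  # a probability in [0, 1]
```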
Make sure to replace `path/to/data` in the code with the path to your dataset. You will also need the pretrained GloVe vectors: torchtext downloads `glove.6B` automatically on first use and caches it in a local `.vector_cache` directory, or you can place a manually downloaded copy there.
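For reference, a hypothetical `train.csv` matching the field declaration above (the column order must match the `fields` list, and the header row is skipped by `skip_header=True`):
```csv
text,label
"I loved this film, the acting was superb",pos
"A dull, predictable plot with wooden dialogue",neg
```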