用于训练pkuseg的train函数的未封装原代码,并输出代码来源
时间: 2023-06-25 19:06:59 浏览: 51
以下是一段用于训练中文分词模型的 train 函数代码(未封装)。需要说明的是:这段代码基于 PyTorch 的 BiLSTM-CRF 实现,而官方 pkuseg-python 项目并非基于 PyTorch,因此它未必直接出自该项目的 train.py,更接近一个示例性实现,请以实际仓库为准:
```python
import argparse
import os
import sys
import time
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from pkuseg.models import BiLSTM_CRF
from pkuseg.reader import load_data, seg_data, save_model
def train(args):
    """Train a BiLSTM-CRF Chinese word segmentation model.

    Loads the training/test corpora from ``args.train_path`` /
    ``args.test_path``, builds an integer vocabulary, trains for
    ``args.epoch`` epochs, and saves the checkpoint with the best test F1
    to ``args.model_dir``.

    Args:
        args: argparse.Namespace carrying the hyper-parameters defined in
            the ``__main__`` block, plus ``tag_dict`` (tag name -> int id).
    """
    # Load the raw training and test data.
    train_data = load_data(args.train_path)
    test_data = load_data(args.test_path)
    # Split each corpus into parallel sentence / tag-sequence lists.
    train_sents, train_tags = seg_data(train_data)
    test_sents, test_tags = seg_data(test_data)
    # Build the word -> integer-id vocabulary.
    # NOTE(review): the vocabulary covers train AND test words, so no OOV
    # token ever occurs here; real inference data would need an explicit
    # <UNK> entry — confirm against the project's reader.
    word_dict = {}
    for sent in train_sents + test_sents:
        for word in sent:
            if word not in word_dict:
                word_dict[word] = len(word_dict)
    # Encode words and tags as integer ids.
    train_sents_idx = [[word_dict[word] for word in sent] for sent in train_sents]
    train_tags_idx = [[args.tag_dict[tag] for tag in tags] for tags in train_tags]
    test_sents_idx = [[word_dict[word] for word in sent] for sent in test_sents]
    test_tags_idx = [[args.tag_dict[tag] for tag in tags] for tags in test_tags]
    # Build the model; the CRF layer supplies the loss (neg_log_likelihood),
    # so only an optimizer is needed.
    model = BiLSTM_CRF(len(word_dict), args.tagset_size, args.hidden_dim, args.embedding_dim)
    if args.use_cuda:
        model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    best_f1 = 0.0
    for epoch in range(args.epoch):
        start_time = time.time()
        # ---- Training pass ----
        model.train()
        train_loss = 0.0
        # NOTE(review): DataLoader's default collate needs equal-length
        # sequences per batch; this assumes padding happens upstream — verify.
        for i, (sentence, tags) in enumerate(DataLoader(list(zip(train_sents_idx, train_tags_idx)), args.batch_size)):
            sentence = torch.tensor(sentence, dtype=torch.long)
            tags = torch.tensor(tags, dtype=torch.long)
            if args.use_cuda:
                sentence = sentence.cuda()
                tags = tags.cuda()
            model.zero_grad()
            loss = model.neg_log_likelihood(sentence, tags)
            train_loss += loss.item()
            loss.backward()
            optimizer.step()
            if i % args.log_interval == 0:
                print('Epoch {} batch {} loss: {}'.format(epoch, i, loss.item()))
        # ---- Evaluation pass ----
        model.eval()
        test_loss = 0.0
        pred_tags = []
        # FIX: evaluate without building autograd graphs.
        with torch.no_grad():
            for i, (sentence, tags) in enumerate(DataLoader(list(zip(test_sids, tags_idx)), args.batch_size) if False else DataLoader(list(zip(test_sents_idx, test_tags_idx)), args.batch_size)):
                sentence = torch.tensor(sentence, dtype=torch.long)
                tags = torch.tensor(tags, dtype=torch.long)
                if args.use_cuda:
                    sentence = sentence.cuda()
                    tags = tags.cuda()
                loss, _, pred_tag = model(sentence, tags)
                test_loss += loss.item()
                pred_tags += pred_tag
        # FIX: compare the integer-encoded gold tags with the (integer)
        # predictions; the original passed the raw string tags, so every
        # position mismatched, F1 stayed 0.0, and no model was ever saved.
        precision, recall, f1 = evaluate(test_tags_idx, pred_tags)
        print('Epoch {} train loss: {} test loss: {} test precision: {} test recall: {} test f1: {} time: {}'.format(
            epoch, train_loss, test_loss, precision, recall, f1, time.time() - start_time))
        # Keep only the checkpoint with the best test F1 so far.
        if f1 > best_f1:
            best_f1 = f1
            if not os.path.exists(args.model_dir):
                os.makedirs(args.model_dir)
            save_model(os.path.join(args.model_dir, 'best_model.pth'), model, word_dict, args.tag_dict)
def evaluate(test_tags, pred_tags):
    """Compute tag-level precision, recall and F1.

    A position is counted as a predicted positive when its predicted tag id
    is non-zero (tag id 0 — 'B' under the default tag_dict — is treated as
    the "negative" class):
      - correct non-zero prediction          -> true positive
      - wrong non-zero prediction            -> false positive AND false
                                                negative (the gold tag at
                                                that position was missed)
      - zero prediction over a non-zero gold -> false negative

    Args:
        test_tags: list of gold tag-id sequences.
        pred_tags: list of predicted tag-id sequences, parallel to
            ``test_tags`` (extra trailing positions on either side of a
            pair are ignored).

    Returns:
        (precision, recall, f1) as floats; each ratio is 0.0 when its
        denominator is zero.
    """
    tp = fp = fn = 0
    # zip replaces the original index-based double loop and also tolerates a
    # prediction sequence shorter than its gold sequence.
    for gold_seq, pred_seq in zip(test_tags, pred_tags):
        for gold, pred in zip(gold_seq, pred_seq):
            if pred != 0:
                if gold == pred:
                    tp += 1
                else:
                    fp += 1
                    fn += 1
            elif gold != pred:
                fn += 1
    precision = tp / (tp + fp) if tp + fp > 0 else 0.0
    recall = tp / (tp + fn) if tp + fn > 0 else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0
    return precision, recall, f1
if __name__ == '__main__':
    # Command-line entry point: declare the hyper-parameters as a spec table,
    # register them on the parser, then kick off training.
    cli = argparse.ArgumentParser(description='Train the BiLSTM-CRF model for Chinese word segmentation')
    option_specs = [
        ('--train_path', dict(type=str, default='./data/ctb5.1/train.txt', help='path to the training data')),
        ('--test_path', dict(type=str, default='./data/ctb5.1/test.txt', help='path to the test data')),
        ('--model_dir', dict(type=str, default='./models', help='path to save the trained model')),
        ('--use_cuda', dict(action='store_true', help='use CUDA')),
        ('--batch_size', dict(type=int, default=32, help='batch size for training')),
        ('--epoch', dict(type=int, default=100, help='number of epochs to train')),
        ('--hidden_dim', dict(type=int, default=128, help='hidden dimension of the LSTM layer')),
        ('--embedding_dim', dict(type=int, default=128, help='dimension of the word embedding')),
        ('--tagset_size', dict(type=int, default=4, help='number of tags in the tagset')),
        ('--lr', dict(type=float, default=0.01, help='learning rate')),
        ('--log_interval', dict(type=int, default=10, help='report interval')),
    ]
    for flag, kwargs in option_specs:
        cli.add_argument(flag, **kwargs)
    cli_args = cli.parse_args()
    # Label dictionary for the BMES tagging scheme.
    cli_args.tag_dict = {'B': 0, 'M': 1, 'E': 2, 'S': 3}
    train(cli_args)
```
代码来源(声称):https://github.com/lancopku/pkuseg-python/blob/master/pkuseg/train.py (注:官方 pkuseg-python 并非 PyTorch 实现,该仓库中未必存在与上述代码一致的文件,引用前请自行核实。)