What does `cuda = True if torch.cuda.is_available() else False`, `dev = torch.device("cpu") if not cuda else torch.device("cuda")`, `print(f'Training on {dev} device')`, `model = model.to(dev)` mean?
This code selects the training device (CPU or GPU) and moves the model onto that device for training.
First, `torch.cuda.is_available()` checks whether a usable GPU is present. If one is, `cuda` is set to `True`; otherwise it is set to `False`.
Next, the conditional expression assigns the device: if `cuda` is `False` (no GPU available), `dev` is set to the CPU device; if `cuda` is `True`, `dev` is set to the CUDA device.
The f-string then prints the selected training device to the console.
Finally, `model.to(dev)` moves the model's parameters to the chosen device, so that all training computation runs there; using the GPU when one is available generally improves training speed.
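A minimal, equivalent sketch of the same logic in the more common one-line form (`model` here stands for any existing `nn.Module`):
```python
import torch

# Prefer CUDA when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Training on {device} device")
model = model.to(device)  # `model` is assumed to be an existing nn.Module
```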
Related questions
An explanation of run.py in https://github.com/weizhepei/CasRel
`run.py` is the entry point of the `CasRel` project, used to train and test the model. The main parts of `run.py` are explained below:
### Importing dependencies and modules
First, `run.py` imports the required packages and modules, including `torch`, `numpy`, `argparse`, and `logging`.
```python
import argparse
import logging
import os
import random
import time
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from casrel import CasRel
from dataset import RE_Dataset
from utils import init_logger, load_tokenizer, set_seed, collate_fn
```
### Parsing command-line arguments
Next, `run.py` parses the command-line arguments, including the training data path, the model save path, the pre-trained model path, the learning rate, and so on.
```python
def set_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_data", default=None, type=str, required=True,
                        help="The input training data file (a text file).")
    parser.add_argument("--dev_data", default=None, type=str, required=True,
                        help="The input development data file (a text file).")
    parser.add_argument("--test_data", default=None, type=str, required=True,
                        help="The input testing data file (a text file).")
    parser.add_argument("--model_path", default=None, type=str, required=True,
                        help="Path to save/load the model")
    parser.add_argument("--pretrain_path", default=None, type=str,
                        help="Path to pre-trained model")
    parser.add_argument("--vocab_path", default=None, type=str, required=True,
                        help="Path to vocabulary")
    parser.add_argument("--batch_size", default=32, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--gradient_accumulation_steps", default=1, type=int,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3, type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_seq_length", default=256, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training steps used for linear warmup.")
    parser.add_argument("--weight_decay", default=0.01, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for the Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--logging_steps", type=int, default=500,
                        help="Log every X update steps.")
    parser.add_argument("--save_steps", type=int, default=500,
                        help="Save a checkpoint every X update steps.")
    parser.add_argument("--seed", type=int, default=42,
                        help="Random seed for initialization")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
                        help="Selected device (default: cuda if available)")
    args = parser.parse_args()
    return args
```
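For illustration, a hypothetical invocation can be simulated by overriding `sys.argv` before calling `set_args()`; the file paths below are placeholders, not paths from the repository:
```python
import sys

# Placeholder paths for illustration only.
sys.argv = ["run.py",
            "--train_data", "data/train.json",
            "--dev_data", "data/dev.json",
            "--test_data", "data/test.json",
            "--model_path", "checkpoints",
            "--vocab_path", "vocab.txt"]
args = set_args()
print(args.batch_size, args.learning_rate)  # 32 5e-05
```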
### Loading the data and the model
Next, `run.py` loads the training, development, and test data, as well as the `CasRel` model.
```python
def main():
    args = set_args()
    init_logger()
    set_seed(args)
    tokenizer = load_tokenizer(args.vocab_path)
    train_dataset = RE_Dataset(args.train_data, tokenizer, args.max_seq_length)
    dev_dataset = RE_Dataset(args.dev_data, tokenizer, args.max_seq_length)
    test_dataset = RE_Dataset(args.test_data, tokenizer, args.max_seq_length)
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size,
                                  collate_fn=collate_fn)
    dev_sampler = SequentialSampler(dev_dataset)
    dev_dataloader = DataLoader(dev_dataset, sampler=dev_sampler, batch_size=args.batch_size,
                                collate_fn=collate_fn)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.batch_size,
                                 collate_fn=collate_fn)
    model = CasRel(args)
    if args.pretrain_path:
        model.load_state_dict(torch.load(args.pretrain_path, map_location="cpu"))
        logging.info(f"load pre-trained model from {args.pretrain_path}")
    model.to(args.device)
```
### Training the model
Next, `run.py` trains the model: the forward pass, the backward pass, and the gradient updates.
```python
# A single learning rate across all parameter groups; the sub-module names
# follow this excerpt's assumed CasRel implementation.
optimizer = torch.optim.Adam([{'params': model.bert.parameters(), 'lr': args.learning_rate},
                              {'params': model.subject_fc.parameters(), 'lr': args.learning_rate},
                              {'params': model.object_fc.parameters(), 'lr': args.learning_rate},
                              {'params': model.predicate_fc.parameters(), 'lr': args.learning_rate},
                              {'params': model.linear.parameters(), 'lr': args.learning_rate}],
                             lr=args.learning_rate, eps=args.adam_epsilon, weight_decay=args.weight_decay)
total_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
warmup_steps = int(total_steps * args.warmup_proportion)
# Note: this decay schedule does not actually use warmup_steps; see the warmup sketch below.
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer,
    lr_lambda=lambda epoch: 1 / (1 + 0.05 * (epoch - 1))
)
global_step = 0
best_f1 = 0
for epoch in range(args.num_train_epochs):
    for step, batch in enumerate(train_dataloader):
        model.train()
        batch = tuple(t.to(args.device) for t in batch)
        inputs = {
            "input_ids": batch[0],
            "attention_mask": batch[1],
            "token_type_ids": batch[2],
            "subj_pos": batch[3],
            "obj_pos": batch[4],
            "subj_type": batch[5],
            "obj_type": batch[6],
            "subj_label": batch[7],
            "obj_label": batch[8],
            "predicate_label": batch[9],
        }
        outputs = model(**inputs)
        loss = outputs[0]
        loss.backward()
        if (step + 1) % args.gradient_accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            global_step += 1
            if global_step % args.logging_steps == 0:
                logging.info(f"Epoch:[{epoch + 1}]/[{args.num_train_epochs}] Step:[{global_step}] "
                             f"Train loss:{loss.item():.6f}")
            if global_step % args.save_steps == 0:
                f1, _, _ = evaluate(model, dev_dataloader, args)  # evaluate returns (f1, precision, recall)
                if f1 > best_f1:
                    best_f1 = f1
                    torch.save(model.state_dict(), os.path.join(args.model_path, "best_model.bin"))
                    logging.info(f"Save model at step [{global_step}] with best f1 {best_f1:.4f}")
```
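One thing to note: `warmup_steps` is computed above, but the `LambdaLR` shown never consumes it, and its lambda is invoked per optimizer update rather than per epoch. A minimal sketch of a linear warmup-then-decay schedule that actually uses `warmup_steps` (an illustrative alternative, not the repository's exact code):
```python
def lr_lambda(current_step: int) -> float:
    # Ramp up linearly for warmup_steps, then decay linearly to zero.
    if current_step < warmup_steps:
        return current_step / max(1, warmup_steps)
    return max(0.0, (total_steps - current_step) / max(1, total_steps - warmup_steps))

scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
```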
### Testing the model
Finally, `run.py` evaluates the model on the test set and logs its results.
```python
model.load_state_dict(torch.load(os.path.join(args.model_path, "best_model.bin"), map_location="cpu"))
logging.info(f"load best model from {os.path.join(args.model_path, 'best_model.bin')}")
f1, precision, recall = evaluate(model, test_dataloader, args)
logging.info(f"Test f1:{f1:.4f} precision:{precision:.4f} recall:{recall:.4f}")
```
That covers the main code and functionality of `run.py`.
Can seg.train be converted into plain, un-encapsulated code?
Certainly. The steps below unpack `seg.train` into plain code:
1. Import the required libraries
```python
from collections import Counter
from typing import Dict, List, Tuple

import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
```
2. Prepare the data
```python
def load_data(file_path: str) -> List[Tuple[List[str], List[str]]]:
    """Load the data: one tab-separated (text, labels) pair per line."""
    with open(file_path, "r", encoding="utf-8") as f:
        data = f.read().splitlines()
    data = [line.split("\t") for line in data]
    return [(text.split(), label.split()) for text, label in data]

train_data = load_data("train.txt")
valid_data = load_data("dev.txt")
test_data = load_data("test.txt")
```
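`load_data` expects each line of the file to be a tab-separated pair of space-separated characters and labels. A hypothetical line (the sentence and labels here are made up for illustration):
```python
# Each line of train.txt is "<chars>\t<labels>", both fields space-separated.
line = "我 爱 北 京\t1 1 1 1"  # hypothetical example line
text, label = line.split("\t")
print(text.split(), label.split())  # ['我', '爱', '北', '京'] ['1', '1', '1', '1']
```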
3. Build the vocabulary
```python
def build_vocab(data: List[Tuple[List[str], List[str]]], min_freq: int = 2) -> Tuple[Dict[str, int], Dict[int, str]]:
    """Build the vocabulary, keeping words with frequency >= min_freq."""
    counter = Counter([word for text, _ in data for word in text])
    counter = {word: freq for word, freq in counter.items() if freq >= min_freq}
    word_list = sorted(list(counter.keys()))
    word2id = {"[PAD]": 0, "[UNK]": 1, "[CLS]": 2, "[SEP]": 3}
    for word in word_list:
        word2id[word] = len(word2id)
    id2word = {id: word for word, id in word2id.items()}
    return word2id, id2word

word2id, id2word = build_vocab(train_data)
```
4. Define the data-processing function
```python
def process_data(data: List[Tuple[List[str], List[str]]], word2id: Dict[str, int]) -> List[Tuple[torch.Tensor, torch.Tensor]]:
    """Convert words to id tensors and labels to int tensors."""
    return [(torch.tensor([word2id.get(word, word2id["[UNK]"]) for word in text]),
             torch.tensor([int(label) for label in labels]))
            for text, labels in data]

train_data = process_data(train_data, word2id)
valid_data = process_data(valid_data, word2id)
test_data = process_data(test_data, word2id)
```
5. Define the dataset and data loaders
```python
class SegDataset(Dataset):
    """Word-segmentation dataset."""
    def __init__(self, data: List[Tuple[torch.Tensor, torch.Tensor]]):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        return self.data[index]

def collate_fn(batch: List[Tuple[torch.Tensor, torch.Tensor]]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Pad a batch to its longest sequence and build the corresponding mask."""
    texts = [item[0] for item in batch]
    labels = [item[1] for item in batch]
    lengths = [len(text) for text in texts]
    max_len = max(lengths)
    # Pad both inputs and labels with 0; padded positions are excluded from
    # the loss via the mask, so the padding value of the labels is irrelevant
    # (padding labels with -1 would make BCELoss fail, since it requires
    # targets in [0, 1]).
    texts = torch.stack([torch.cat([t, torch.zeros(max_len - len(t), dtype=torch.long)]) for t in texts])
    labels = torch.stack([torch.cat([l, torch.zeros(max_len - len(l), dtype=torch.long)]) for l in labels])
    # Build the mask from the original lengths, before padding.
    mask = torch.tensor([[1] * n + [0] * (max_len - n) for n in lengths])
    return texts, labels, mask

train_dataset = SegDataset(train_data)
valid_dataset = SegDataset(valid_data)
test_dataset = SegDataset(test_data)
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True, collate_fn=collate_fn)
valid_loader = DataLoader(valid_dataset, batch_size=16, shuffle=False, collate_fn=collate_fn)
test_loader = DataLoader(test_dataset, batch_size=16, shuffle=False, collate_fn=collate_fn)
```
6. Define the model
```python
class SegModel(torch.nn.Module):
    """BiLSTM segmentation model producing one probability per character."""
    def __init__(self, vocab_size: int, embedding_size: int, hidden_size: int):
        super().__init__()
        self.embedding = torch.nn.Embedding(vocab_size, embedding_size, padding_idx=0)
        self.lstm = torch.nn.LSTM(embedding_size, hidden_size, batch_first=True, bidirectional=True)
        self.linear = torch.nn.Linear(2 * hidden_size, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, inputs: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        embeddings = self.embedding(inputs)
        outputs, _ = self.lstm(embeddings)
        logits = self.linear(outputs)
        logits = logits.squeeze(-1)
        logits = self.sigmoid(logits)
        # Zero out probabilities at padded positions.
        logits = logits * mask
        return logits

model = SegModel(len(word2id), 128, 256)
```
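As a quick sanity check (an illustrative snippet, not part of the original steps), you can run one padded batch through the model and confirm that the output probabilities have the same shape as the inputs:
```python
texts, labels, mask = next(iter(train_loader))
with torch.no_grad():
    probs = model(texts, mask.float())
print(texts.shape, probs.shape)  # both are (batch_size, max_len)
```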
7. Define the training and evaluation functions
```python
def train(model: torch.nn.Module, optimizer: torch.optim.Optimizer, data_loader: DataLoader, device: str):
    """Run one training epoch and return the mask-weighted average loss."""
    model.train()
    loss_func = torch.nn.BCELoss(reduction="none")
    total_loss = 0
    total_num = 0
    for texts, labels, mask in tqdm(data_loader, desc="Train"):
        texts = texts.to(device)
        labels = labels.float().to(device)
        mask = mask.float().to(device)
        logits = model(texts, mask)
        loss = loss_func(logits, labels)
        # Average the per-token loss over the unpadded positions only.
        loss = (loss * mask).sum() / mask.sum()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * mask.sum().item()
        total_num += mask.sum().item()
    return total_loss / total_num

def evaluate(model: torch.nn.Module, data_loader: DataLoader, device: str):
    """Compute the mask-weighted average loss without gradient updates."""
    model.eval()
    loss_func = torch.nn.BCELoss(reduction="none")
    total_loss = 0
    total_num = 0
    with torch.no_grad():
        for texts, labels, mask in tqdm(data_loader, desc="Evaluate"):
            texts = texts.to(device)
            labels = labels.float().to(device)
            mask = mask.float().to(device)
            logits = model(texts, mask)
            loss = loss_func(logits, labels)
            loss = (loss * mask).sum() / mask.sum()
            total_loss += loss.item() * mask.sum().item()
            total_num += mask.sum().item()
    return total_loss / total_num

def train_and_evaluate(model: torch.nn.Module, optimizer: torch.optim.Optimizer, train_loader: DataLoader,
                       valid_loader: DataLoader, num_epochs: int, device: str):
    """Train for num_epochs, keep the best checkpoint, then report the test loss."""
    best_valid_loss = float("inf")
    for epoch in range(num_epochs):
        train_loss = train(model, optimizer, train_loader, device)
        valid_loss = evaluate(model, valid_loader, device)
        print(f"Epoch [{epoch + 1}/{num_epochs}], Train Loss: {train_loss:.4f}, Valid Loss: {valid_loss:.4f}")
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), "best_model.pt")
    model.load_state_dict(torch.load("best_model.pt"))
    # test_loader comes from the enclosing script scope defined in step 5.
    test_loss = evaluate(model, test_loader, device)
    print(f"Test Loss: {test_loss:.4f}")
```
8. Train the model
```python
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters())
train_and_evaluate(model, optimizer, train_loader, valid_loader, 10, device)
```
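After training, the model can be applied to new sentences. A minimal prediction sketch, assuming probabilities above 0.5 are treated as positive labels (the thresholding convention here is an assumption, not something fixed by `seg.train`):
```python
def predict(sentence: str) -> List[int]:
    # Map characters to ids, falling back to [UNK] for unseen characters.
    ids = torch.tensor([[word2id.get(ch, word2id["[UNK]"]) for ch in sentence]], device=device)
    mask = torch.ones_like(ids, dtype=torch.float)
    model.eval()
    with torch.no_grad():
        probs = model(ids, mask)[0]
    return [int(p > 0.5) for p in probs]  # 0.5 threshold is an assumed convention

print(predict("今天天气很好"))
```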
These steps convert `seg.train` into plain, un-encapsulated code; adjust them as needed.