```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

# CNN, MyDataset1 and MyDataset2 are defined elsewhere in the asker's code.

def train(model, train_loader, criterion, optimizer):
    model.train()
    train_loss = 0.0
    train_acc = 0.0
    for i, (inputs, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = model(inputs.unsqueeze(1).float())
        loss = criterion(outputs, labels.long())
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * inputs.size(0)
        _, preds = torch.max(outputs, 1)
        train_acc += torch.sum(preds == labels.data)
    train_loss = train_loss / len(train_loader.dataset)
    train_acc = train_acc.double() / len(train_loader.dataset)
    return train_loss, train_acc

def test(model, test_loader, criterion):
    model.eval()
    test_loss = 0.0
    test_acc = 0.0
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(test_loader):
            outputs = model(inputs.unsqueeze(1).float())
            loss = criterion(outputs, labels.long())
            test_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            test_acc += torch.sum(preds == labels.data)
    test_loss = test_loss / len(test_loader.dataset)
    test_acc = test_acc.double() / len(test_loader.dataset)
    return test_loss, test_acc

# Instantiate the model
model = CNN()

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Instantiate the data loaders
train_dataset = MyDataset1('1MATRICE')
train_loader = DataLoader(train_dataset, batch_size=5, shuffle=True)
test_dataset = MyDataset2('2MATRICE')
test_loader = DataLoader(test_dataset, batch_size=5, shuffle=False)

train_losses, train_accs, test_losses, test_accs = [], [], [], []
for epoch in range(500):
    train_loss, train_acc = train(model, train_loader, criterion, optimizer)
    test_loss, test_acc = test(model, test_loader, criterion)
    train_losses.append(train_loss)
    train_accs.append(train_acc)
    test_losses.append(test_loss)
    test_accs.append(test_acc)
    print('Epoch: {} Train Loss: {:.4f} Train Acc: {:.4f} Test Loss: {:.4f} Test Acc: {:.4f}'.format(
        epoch, train_loss, train_acc, test_loss, test_acc))
```
This is the training procedure for a CNN model built with PyTorch. The code defines two functions, train and test, used to train and evaluate the model respectively.
During training, the model is first put into training mode, then the code iterates over the training set, running a forward pass, a backward pass, and an optimizer step for each batch. After each batch, that batch's loss and number of correct predictions are accumulated into the running training loss and training accuracy.
During testing, the model is first put into evaluation mode, then the code iterates over the test set, running only a forward pass and computing the loss for each batch. After each batch, that batch's loss and number of correct predictions are accumulated into the running test loss and test accuracy.
Finally, the training and test losses and accuracies of each epoch are appended to the corresponding lists, and the current epoch's training loss, training accuracy, test loss, and test accuracy are printed.
The whole procedure runs for 500 epochs, each consisting of one full training pass followed by one full test pass.
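Since the per-epoch losses and accuracies are collected in lists, they can be plotted once the loop finishes. This is only a minimal sketch, assuming matplotlib is installed; note that the accuracy values are tensors, hence the `.item()` calls:
```python
import matplotlib.pyplot as plt

# Loss and accuracy curves recorded during the 500-epoch loop above.
epochs = range(len(train_losses))
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs, train_losses, label='train loss')
plt.plot(epochs, test_losses, label='test loss')
plt.xlabel('epoch')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, [a.item() for a in train_accs], label='train acc')
plt.plot(epochs, [a.item() for a in test_accs], label='test acc')
plt.xlabel('epoch')
plt.legend()
plt.show()
```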
Related questions
```python
def train(model, loader, criterion, optimizer, device):
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0
    for batch_idx, (inputs, labels) in enumerate(loader):
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        _, predicted = torch.max(outputs, 1)
        _, true_labels = torch.max(labels, 1)
        total += true_labels.size(0)
        correct += (predicted == true_labels).sum().item()
        print("Train Loss: {:.4f}, Acc: {:.2f}%".format(running_loss / (batch_idx + 1), 100 * correct / total))
```
This is a function for training a model. A detailed walk-through:
- `train` takes five arguments: `model`, `loader` (data loader), `criterion` (loss function), `optimizer`, and `device`.
- At the start, the model is put into training mode (`model.train()`).
- `running_loss` (accumulated loss), `correct` (number of correct predictions), and `total` (total sample count) are initialized to zero.
- `enumerate(loader)` iterates over the batches produced by the data loader.
- The inputs and labels are moved to the specified device (`inputs, labels = inputs.to(device), labels.to(device)`).
- The optimizer's gradients are cleared (`optimizer.zero_grad()`).
- A forward pass through the model computes the outputs (`outputs = model(inputs)`).
- The loss is computed (`loss = criterion(outputs, labels)`).
- Gradients are computed by backpropagating the loss (`loss.backward()`).
- The optimizer updates the model parameters (`optimizer.step()`).
- The loss value is accumulated (`running_loss += loss.item()`).
- The predicted classes and the true classes are extracted (`_, predicted = torch.max(outputs, 1)` and `_, true_labels = torch.max(labels, 1)`); applying `torch.max` to the labels implies they are one-hot encoded.
- The total sample count (`total += true_labels.size(0)`) and the number of correct predictions (`correct += (predicted == true_labels).sum().item()`) are updated.
- After each batch, the running training loss and accuracy are printed (`print("Train Loss: {:.4f}, Acc: {:.2f}%".format(running_loss / (batch_idx + 1), 100 * correct / total))`).
This function performs one training pass over the given data loader: it iterates over the batches, computes the loss, backpropagates to update the model parameters, and tracks the running training loss and accuracy.
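A minimal sketch of how this `train` function could be called; the toy model, random data, and one-hot labels below are assumptions for illustration (one-hot labels because the function applies `torch.max(labels, 1)`):
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hypothetical data: 100 samples, 10 features, 3 classes, one-hot encoded labels.
X = torch.randn(100, 10)
y = F.one_hot(torch.randint(0, 3, (100,)), num_classes=3).float()
loader = DataLoader(TensorDataset(X, y), batch_size=16, shuffle=True)

# Hypothetical model; CrossEntropyLoss accepts class-probability targets in PyTorch >= 1.10.
model = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 3)).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(5):
    train(model, loader, criterion, optimizer, device)
```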
The PyTorch code for my LDAM loss function is as follows:
```python
class LDAMLoss(nn.Module):
    def __init__(self, cls_num_list, max_m=0.5, weight=None, s=30):
        super(LDAMLoss, self).__init__()
        m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
        m_list = m_list * (max_m / np.max(m_list))
        m_list = torch.cuda.FloatTensor(m_list)
        self.m_list = m_list
        assert s > 0
        self.s = s
        if weight is not None:
            weight = torch.FloatTensor(weight).cuda()
        self.weight = weight
        self.cls_num_list = cls_num_list

    def forward(self, x, target):
        index = torch.zeros_like(x, dtype=torch.uint8)
        index_float = index.type(torch.cuda.FloatTensor)
        batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(1, 0))  # 0,1
        batch_m = batch_m.view((16, 1))  # size=(batch_size, 1) (-1,1)
        x_m = x - batch_m
        output = torch.where(index, x_m, x)
        if self.weight is not None:
            output = output * self.weight[None, :]
        target = torch.flatten(target)  # flatten target into a 1-D tensor
        logit = output * self.s
        return F.cross_entropy(logit, target, weight=self.weight)
```
Some of the model's settings are as follows:
```python
# Global settings
model_lr = 1e-5
BATCH_SIZE = 16
EPOCHS = 50
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
use_amp = True
use_dp = True
classes = 7
resume = None
CLIP_GRAD = 5.0
Best_ACC = 0  # best score so far
use_ema = True
model_ema_decay = 0.9998
start_epoch = 1
seed = 1
seed_everything(seed)

# Data augmentation: mixup
mixup_fn = Mixup(
    mixup_alpha=0.8, cutmix_alpha=1.0, cutmix_minmax=None,
    prob=0.1, switch_prob=0.5, mode='batch',
    label_smoothing=0.1, num_classes=classes)

# Load the datasets
dataset_train = datasets.ImageFolder('/home/adminis/hpy/ConvNextV2_Demo/RAF-DB/RAF/train', transform=transform)
dataset_test = datasets.ImageFolder("/home/adminis/hpy/ConvNextV2_Demo/RAF-DB/RAF/valid", transform=transform_test)
```
Please help me implement, in PyTorch, model training that uses this LDAM loss function.
Sure, here is code for training the model with the LDAM loss function:
```python
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.cuda import amp
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader
from torchvision import models
# Define the LDAM loss function
class LDAMLoss(nn.Module):
    def __init__(self, cls_num_list, max_m=0.5, weight=None, s=30):
        super(LDAMLoss, self).__init__()
        # Per-class margins proportional to n_j^(-1/4), rescaled so the largest margin equals max_m
        m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
        m_list = m_list * (max_m / np.max(m_list))
        self.m_list = torch.cuda.FloatTensor(m_list)
        assert s > 0
        self.s = s
        if weight is not None:
            weight = torch.FloatTensor(weight).cuda()
        self.weight = weight
        self.cls_num_list = cls_num_list

    def forward(self, x, target):
        target = torch.flatten(target).long()  # make sure target is a 1-D LongTensor
        # Boolean mask marking each sample's target class
        index = torch.zeros_like(x, dtype=torch.bool)
        index.scatter_(1, target.view(-1, 1), True)
        index_float = index.type(torch.cuda.FloatTensor)
        # Margin of each sample's target class, shape (batch_size, 1)
        batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(0, 1))
        batch_m = batch_m.view((-1, 1))
        x_m = x - batch_m
        # Subtract the margin only from the target-class logit, then rescale
        output = torch.where(index, x_m, x)
        logit = output * self.s
        return F.cross_entropy(logit, target, weight=self.weight)
# Define the model (ResNet-18 as an example backbone)
model = models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, classes)
model.to(DEVICE)
# Define the optimizer and learning-rate scheduler
optimizer = optim.Adam(model.parameters(), lr=model_lr)
scheduler = CosineAnnealingLR(optimizer, T_max=EPOCHS, eta_min=1e-6)
# Instantiate the LDAM loss with the per-class sample counts
cls_num_list = [dataset_train.targets.count(t) for t in range(classes)]  # ImageFolder stores targets as a list of ints
criterion = LDAMLoss(cls_num_list)
# Define the data loaders
train_loader = DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, pin_memory=True)
test_loader = DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False, num_workers=4, pin_memory=True)
# Train the model
scaler = amp.GradScaler(enabled=use_amp)  # gradient scaler for mixed-precision training
if use_dp and torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)  # simple multi-GPU data parallelism
if use_ema:
    ema_model = copy.deepcopy(model)  # used below to keep a copy of the best-performing weights
best_acc = 0.0
for epoch in range(start_epoch, EPOCHS + 1):
    model.train()
    train_loss = 0.0
    train_corrects = 0
    for inputs, labels in train_loader:
        inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
        # NOTE: timm's Mixup returns soft (mixed) targets, which this LDAMLoss
        # cannot use for its per-class margin indexing, so mixup is skipped here.
        optimizer.zero_grad()
        if use_amp:
            with amp.autocast():
                outputs = model(inputs)
                loss = criterion(outputs, labels)
            scaler.scale(loss).backward()
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_GRAD)
            scaler.step(optimizer)
            scaler.update()
        else:
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_GRAD)
            optimizer.step()
        train_loss += loss.item() * inputs.size(0)
        _, preds = torch.max(outputs, 1)
        train_corrects += torch.sum(preds == labels.data)
    train_loss /= len(dataset_train)
    train_acc = train_corrects.double() / len(dataset_train)
    model.eval()
    test_loss = 0.0
    test_corrects = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            test_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            test_corrects += torch.sum(preds == labels.data)
    test_loss /= len(dataset_test)
    test_acc = test_corrects.double() / len(dataset_test)
    # Keep the best model so far
    if test_acc > best_acc:
        if use_ema:
            ema_model.load_state_dict(model.state_dict())
        best_acc = test_acc
    # Step the learning-rate scheduler
    scheduler.step()
    # Print this epoch's results
    print('Epoch [{}/{}], Train Loss: {:.4f}, Train Acc: {:.4f}, Test Loss: {:.4f}, Test Acc: {:.4f}'.format(
        epoch, EPOCHS, train_loss, train_acc, test_loss, test_acc))
```
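As a quick sanity check on the margins computed in `LDAMLoss.__init__`: each class margin is proportional to n_j^(-1/4) and rescaled so the largest margin equals `max_m`. A small sketch with made-up class counts (not the actual RAF-DB counts):
```python
import numpy as np

# Illustrative (assumed) per-class sample counts for 7 classes.
cls_num_list = [5000, 2000, 800, 300, 150, 80, 40]
max_m = 0.5

m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))  # margin proportional to n_j^(-1/4)
m_list = m_list * (max_m / np.max(m_list))     # rescale so the rarest class gets max_m
print(np.round(m_list, 3))                     # rarer classes receive larger margins
```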