def train_one_epoch(epoch):
    logger.info('Start training process in epoch {}.'.format(epoch + 1))
    if lr_scheduler is not None:
        logger.info('Learning rate: {}.'.format(lr_scheduler.get_last_lr()))
    model.train()
    losses = []
    with tqdm(train_dataloader) as pbar:
        for data_dict in pbar:
            optimizer.zero_grad()
            data_dict = to_device(data_dict, device)
            res = model(data_dict['rgb'], data_dict['depth'])
            depth_scale = data_dict['depth_max'] - data_dict['depth_min']
            res = res * depth_scale.reshape(-1, 1, 1) + data_dict['depth_min'].reshape(-1, 1, 1)
            data_dict['pred'] = res
            loss_dict = criterion(data_dict)
            loss = loss_dict['loss']
            loss.backward()
            optimizer.step()
            if 'smooth' in loss_dict.keys():
                pbar.set_description('Epoch {}, loss: {:.8f}, smooth loss: {:.8f}'.format(epoch + 1, loss.item(), loss_dict['smooth'].item()))
            else:
                pbar.set_description('Epoch {}, loss: {:.8f}'.format(epoch + 1, loss.item()))
            losses.append(loss.mean().item())
    mean_loss = np.stack(losses).mean()
    logger.info('Finish training process in epoch {}, mean training loss: {:.8f}'.format(epoch + 1, mean_loss))

改进这段代码
时间: 2024-02-12 10:08:09 浏览: 139
以下是我认为可以改进的几点:
1. 在训练开始时,打印出当前 epoch 的时间,方便后续对训练过程的分析。
2. 在训练过程中,计算每个 batch 的 loss 并输出,方便实时跟进模型的训练情况。
3. 在训练过程中,可以加入验证集的评估过程,以便更好地监测模型的泛化能力。
下面是改进后的代码:
```
def train_one_epoch(epoch, model, optimizer, criterion, device, train_dataloader, val_dataloader=None):
    """Run one training epoch and, when a validation loader is given, one validation pass.

    Args:
        epoch: Zero-based epoch index (logged as ``epoch + 1``).
        model: Network taking ``(rgb, depth)`` tensors and producing a normalized depth map.
        optimizer: Optimizer stepped once per training batch.
        criterion: Callable that consumes the batch dict (with ``'pred'`` set) and
            returns a dict containing at least the key ``'loss'``.
        device: Target device every batch is moved to via ``to_device``.
        train_dataloader: Iterable of training batch dicts.
        val_dataloader: Optional iterable of validation batch dicts; skipped when falsy.

    Side effects: logs progress through the module-level ``logger``, updates model
    parameters, and leaves the model in eval mode if validation ran.
    """
    t0 = time.time()

    def _timestamp():
        # Wall-clock stamp used in every log line of this epoch.
        return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def _predict(batch):
        # Forward pass, then rescale the normalized output back to the
        # metric depth range carried in the batch (per-sample min/max).
        out = model(batch['rgb'], batch['depth'])
        span = batch['depth_max'] - batch['depth_min']
        return out * span.reshape(-1, 1, 1) + batch['depth_min'].reshape(-1, 1, 1)

    logger.info('Start training process in epoch {} at {}.'.format(epoch + 1, _timestamp()))
    model.train()
    train_losses = []
    val_losses = []

    with tqdm(train_dataloader) as pbar:
        batch_idx = 0
        for batch in pbar:
            optimizer.zero_grad()
            batch = to_device(batch, device)
            batch['pred'] = _predict(batch)
            loss = criterion(batch)['loss']
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
            pbar.set_description('Epoch {}, Batch {}, Loss {:.8f}'.format(epoch + 1, batch_idx + 1, loss.item()))
            batch_idx += 1

    mean_train_loss = np.mean(train_losses)
    logger.info('Finish training process in epoch {} at {}, mean training loss: {:.8f}'.format(epoch + 1, _timestamp(), mean_train_loss))

    if val_dataloader:
        # Gradient-free evaluation pass; losses are collected but no step is taken.
        model.eval()
        with torch.no_grad():
            for batch in val_dataloader:
                batch = to_device(batch, device)
                batch['pred'] = _predict(batch)
                val_losses.append(criterion(batch)['loss'].item())
        mean_val_loss = np.mean(val_losses)
        logger.info('Finish validation process in epoch {} at {}, mean validation loss: {:.8f}'.format(epoch + 1, _timestamp(), mean_val_loss))

    end_time = time.time()
    logger.info('Epoch {} finished in {:.2f} seconds.'.format(epoch + 1, end_time - t0))
```
请注意,这里假设您已经定义了 logger、to_device()、datetime、torch、numpy、time 等必要的库和函数。此外,由于您没有提供完整的代码,因此我可能需要进行一些假设和推测。
阅读全文