Pose Estimation 4

The link below collects all of my personal notes on VoxelPose (multi-view 3D human pose estimation). If you spot any mistakes, please point them out and I will correct them right away. Anyone interested is welcome to add me on WeChat (17575010159) to discuss the techniques. If this post helps you in any way, please remember to give it a like, because that is the biggest encouragement for me. My public account, linked at the end of the post, offers plenty of extra resources. Series index: Pose Estimation 4-00: VoxelPose (multi-view 3D human pose estimation) - table of contents.

Preface

All of my subsequent posts in this series are based on the following command:

python run/train_3d.py --cfg configs/campus/prn64_cpn80x80x20.yaml

So the file annotated here is run/train_3d.py.

Annotated train_3d.py:

# ------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------------

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import argparse
import os
import pprint
import logging
import json

import _init_paths
from core.config import config
from core.config import update_config
from core.function import train_3d, validate_3d
from utils.utils import create_logger
from utils.utils import save_checkpoint, load_checkpoint, load_model_state
from utils.utils import load_backbone_panoptic
import dataset
import models


def parse_args():
    parser = argparse.ArgumentParser(description='Train keypoints network')
    parser.add_argument(
        '--cfg', help='experiment configure file name', required=True, type=str)
    args, rest = parser.parse_known_args()
    update_config(args.cfg)
    return args


def get_optimizer(model):
    lr = config.TRAIN.LR
    if model.module.backbone is not None:
        for params in model.module.backbone.parameters():
            params.requires_grad = False   # If you want to train the whole model jointly, set it to be True.
    for params in model.module.root_net.parameters():
        params.requires_grad = True
    for params in model.module.pose_net.parameters():
        params.requires_grad = True
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.module.parameters()), lr=lr)
    # optimizer = optim.Adam(model.module.parameters(), lr=lr)
    return model, optimizer


def main():
    # Parse the command-line arguments and the config file
    args = parse_args()
    # Create the logger used for printing information
    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'train')

    # Log the parsed arguments and the config
    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # Get the ids of the GPUs to run on
    gpus = [int(i) for i in config.GPUS.split(',')]
    print('=> Loading data ..')

    # Input normalization
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    # Build the training data iterator from the config (data paths, augmentation, ...)
    train_dataset = eval('dataset.' + config.DATASET.TRAIN_DATASET)(
        config, config.DATASET.TRAIN_SUBSET, True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE * len(gpus),  # total batch size
        shuffle=config.TRAIN.SHUFFLE,                    # shuffle the training data
        num_workers=config.WORKERS,                      # number of data-loading workers
        pin_memory=True)

    # Build the test data iterator from the config (data paths, augmentation, ...)
    test_dataset = eval('dataset.' + config.DATASET.TEST_DATASET)(
        config, config.DATASET.TEST_SUBSET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=config.TEST.BATCH_SIZE * len(gpus),   # total batch size
        shuffle=False,                                   # no shuffling at test time
        num_workers=config.WORKERS,                      # number of data-loading workers
        pin_memory=True)

    # cuDNN settings, mainly for training speed; they can be left as they are
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    print('=> Constructing models ..')
    # Build the model described by the config
    model = eval('models.' + config.MODEL + '.get_multi_person_pose_net')(
        config, is_train=True)
    # Train with multiple GPUs
    with torch.no_grad():
        model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # Get the optimizer for the model
    model, optimizer = get_optimizer(model)

    # Start and end epochs from the config
    start_epoch = config.TRAIN.BEGIN_EPOCH
    end_epoch = config.TRAIN.END_EPOCH

    # Keep track of the best precision so far
    best_precision = 0
    # If a pretrained backbone is configured, load it
    if config.NETWORK.PRETRAINED_BACKBONE:
        model = load_backbone_panoptic(model, config.NETWORK.PRETRAINED_BACKBONE)
    # Decide whether to start from scratch or resume from a checkpoint
    if config.TRAIN.RESUME:
        start_epoch, model, optimizer, best_precision = load_checkpoint(model, optimizer, final_output_dir)

    # TensorBoard log bookkeeping
    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    # Start the training loop
    print('=> Training...')
    for epoch in range(start_epoch, end_epoch):
        print('Epoch: {}'.format(epoch))

        # lr_scheduler.step(); run one epoch of training
        train_3d(config, model, optimizer, train_loader, epoch, final_output_dir, writer_dict)
        # After each epoch, run an evaluation
        precision = validate_3d(config, model, test_loader, final_output_dir)

        # Remember the most precise model
        if precision > best_precision:
            best_precision = precision
            best_model = True
        else:
            best_model = False

        # Save a checkpoint after every epoch
        logger.info('=> saving checkpoint to {} (Best: {})'.format(final_output_dir, best_model))
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.module.state_dict(),
            'precision': best_precision,
            'optimizer': optimizer.state_dict(),
        }, best_model, final_output_dir)

    # Log and save the final model state
    final_model_state_file = os.path.join(final_output_dir, 'final_state.pth.tar')
    logger.info('saving final model state to {}'.format(final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)

    # Close the log writer
    writer_dict['writer'].close()


if __name__ == '__main__':
    main()
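One pattern worth a closer look in the script above is how the dataset and model classes are constructed dynamically: eval('dataset.' + config.DATASET.TRAIN_DATASET) turns a string from the config file into the class that gets instantiated. The toy snippet below is only an illustration of that pattern (the Panoptic class and every name in it are made up, not the real VoxelPose datasets); it also shows the getattr equivalent that achieves the same thing without eval.

# Toy illustration of config-driven class selection; nothing here is VoxelPose code.
import types

dataset = types.SimpleNamespace()      # stands in for the project's `dataset` package

class Panoptic:                        # toy dataset class
    def __init__(self, subset):
        self.subset = subset

dataset.panoptic = Panoptic

name = 'panoptic'                      # plays the role of config.DATASET.TRAIN_DATASET
ds_eval = eval('dataset.' + name)('train')   # the style used in train_3d.py
ds_attr = getattr(dataset, name)('train')    # equivalent, without eval
print(type(ds_eval).__name__, ds_attr.subset)   # Panoptic train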

 

Workflow summary

Training code follows pretty much the same pattern everywhere: 1. parse the arguments, 2. build the network model, 3. build the training and test data iterators, 4. iterate over the training data, 5. evaluate and save the model. A tiny runnable mirror of this skeleton is sketched right below.
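To make those five steps concrete, here is a small, self-contained sketch that uses a toy linear model and random tensors rather than anything from VoxelPose; it only illustrates the shape of the loop, not the actual project code.

# Toy mirror of the five-step training pattern (random data, tiny model).
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

# 1. "parse" a config
config = {'lr': 1e-3, 'batch_size': 8, 'epochs': 2}

# 2. build the model and optimizer
model = nn.Linear(10, 1)
optimizer = optim.Adam(model.parameters(), lr=config['lr'])

# 3. build the training and test data iterators
train_set = TensorDataset(torch.randn(64, 10), torch.randn(64, 1))
test_set = TensorDataset(torch.randn(16, 10), torch.randn(16, 1))
train_loader = DataLoader(train_set, batch_size=config['batch_size'], shuffle=True)
test_loader = DataLoader(test_set, batch_size=config['batch_size'])

best_metric = float('inf')
for epoch in range(config['epochs']):
    # 4. one epoch of training
    model.train()
    for x, y in train_loader:
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(model(x), y)
        loss.backward()
        optimizer.step()

    # 5. evaluate (summed test loss) and save the best model
    model.eval()
    with torch.no_grad():
        metric = sum(nn.functional.mse_loss(model(x), y).item() for x, y in test_loader)
    if metric < best_metric:
        best_metric = metric
        torch.save(model.state_dict(), 'best_state.pth')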

Training details

As you have probably noticed, the two most important calls in the code above are the following:

# lr_scheduler.step(); run one epoch of training
train_3d(config, model, optimizer, train_loader, epoch, final_output_dir, writer_dict)
# After each epoch, run an evaluation
precision = validate_3d(config, model, test_loader, final_output_dir)

The two functions are implemented in much the same way, so I will take train_3d as the representative. My annotations are as follows:

# Excerpt from core/function.py
def train_3d(config, model, optimizer, loader, epoch, output_dir, writer_dict, device=torch.device('cuda'), dtype=torch.float):
    """
    :param config: configuration parameters for training the network
    :param model: the model being trained
    :param optimizer: the optimizer
    :param loader: the data iterator
    :param epoch: current epoch number
    :param output_dir: output directory
    :param writer_dict: writer for the training log
    :param device: GPU / CPU device setting
    :param dtype: data type
    :return:
    """
    # AverageMeter keeps the latest value and computes a running average
    batch_time = AverageMeter()   # time spent on one batch of data
    data_time = AverageMeter()    # time spent loading the data
    losses = AverageMeter()       # total loss
    losses_2d = AverageMeter()
    losses_3d = AverageMeter()
    losses_cord = AverageMeter()

    model.train()

    # If model.module.backbone exists, the 2D keypoint backbone is frozen by default
    if model.module.backbone is not None:
        model.module.backbone.eval()  # Comment out this line if you want to train 2D backbone jointly

    accumulation_steps = 4
    accu_loss_3d = 0

    # Current time
    end = time.time()
    for i, (inputs, targets_2d, weights_2d, targets_3d, meta, input_heatmap) in enumerate(loader):
        data_time.update(time.time() - end)  # time spent reading the data

        # If the test dataset is panoptic
        if 'panoptic' in config.DATASET.TEST_DATASET:
            pred, heatmaps, grid_centers, loss_2d, loss_3d, loss_cord = model(views=inputs, meta=meta,
                                                                              targets_2d=targets_2d,
                                                                              weights_2d=weights_2d,
                                                                              targets_3d=targets_3d[0])
        # If the test dataset is campus or shelf
        elif 'campus' in config.DATASET.TEST_DATASET or 'shelf' in config.DATASET.TEST_DATASET:
            pred, heatmaps, grid_centers, loss_2d, loss_3d, loss_cord = model(meta=meta, targets_3d=targets_3d[0],
                                                                              input_heatmaps=input_heatmap)

        # Average each loss over the batch
        loss_2d = loss_2d.mean()
        loss_3d = loss_3d.mean()
        loss_cord = loss_cord.mean()

        # Record the individual losses and compute the total loss
        losses_2d.update(loss_2d.item())
        losses_3d.update(loss_3d.item())
        losses_cord.update(loss_cord.item())
        loss = loss_2d + loss_3d + loss_cord
        losses.update(loss.item())

        # If the coordinate loss is greater than 0, back-propagate it together with the 2D loss
        if loss_cord > 0:
            optimizer.zero_grad()
            (loss_2d + loss_cord).backward()
            optimizer.step()

        # Every accumulation_steps iterations, back-propagate the accumulated 3D loss once more,
        # i.e. the 3D part of the model is given extra training emphasis
        if accu_loss_3d > 0 and (i + 1) % accumulation_steps == 0:
            optimizer.zero_grad()
            accu_loss_3d.backward()
            optimizer.step()
            accu_loss_3d = 0.0
        else:
            accu_loss_3d += loss_3d / accumulation_steps

        # Time spent on one batch of data
        batch_time.update(time.time() - end)
        end = time.time()

        # Print the progress (and save some debug images) every config.PRINT_FREQ iterations
        if i % config.PRINT_FREQ == 0:
            gpu_memory_usage = torch.cuda.memory_allocated(0)
            msg = 'Epoch: [{0}][{1}/{2}]\t' \
                  'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
                  'Speed: {speed:.1f} samples/s\t' \
                  'Data: {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
                  'Loss: {loss.val:.6f} ({loss.avg:.6f})\t' \
                  'Loss_2d: {loss_2d.val:.7f} ({loss_2d.avg:.7f})\t' \
                  'Loss_3d: {loss_3d.val:.7f} ({loss_3d.avg:.7f})\t' \
                  'Loss_cord: {loss_cord.val:.6f} ({loss_cord.avg:.6f})\t' \
                  'Memory {memory:.1f}'.format(
                    epoch, i, len(loader), batch_time=batch_time,
                    speed=len(inputs) * inputs[0].size(0) / batch_time.val,
                    data_time=data_time, loss=losses, loss_2d=losses_2d, loss_3d=losses_3d,
                    loss_cord=losses_cord, memory=gpu_memory_usage)
            logger.info(msg)

            # Write the training information into the log file
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss_3d', losses_3d.val, global_steps)
            writer.add_scalar('train_loss_cord', losses_cord.val, global_steps)
            writer.add_scalar('train_loss', losses.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1

            # Save the model's input images (one per view); they can be found in the generated output folder
            for k in range(len(inputs)):
                view_name = 'view_{}'.format(k + 1)
                prefix = '{}_{:08}_{}'.format(
                    os.path.join(output_dir, 'train'), i, view_name)
                save_debug_images_multi(config, inputs[k], meta[k], targets_2d[k], heatmaps[k], prefix)
            prefix2 = '{}_{:08}'.format(os.path.join(output_dir, 'train'), i)

            # Save the feature volume from Fig. 1 of the paper; it can also be found in the output folder
            save_debug_3d_cubes(config, meta[0], grid_centers, prefix2)
            # Visualization of the detection space, including the predictions and the corresponding ground truth
            save_debug_3d_images(config, meta[0], pred, prefix2)
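The function above relies on AverageMeter, which is not shown in the excerpt; the repository ships its own definition. The sketch below is my minimal reconstruction of the standard pattern, assuming it tracks the latest value, a running sum, a count and the running average, which matches how train_3d reads .val and .avg.

# Minimal AverageMeter sketch (standard pattern, not copied from the repo).
class AverageMeter:
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val            # latest value
        self.sum += val * n       # weighted running sum
        self.count += n
        self.avg = self.sum / self.count

# Usage, matching how train_3d uses it:
batch_time = AverageMeter()
batch_time.update(0.35)
batch_time.update(0.25)
print(batch_time.val, batch_time.avg)   # 0.25 0.3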

Conclusion

At this point we know how to launch training and what the overall training flow looks like. Next we need to walk through how the full model is built, but before that we first have to understand how the data is read and preprocessed. See the next post in this series!
