Unverified commit 886b9841, authored by NanoCode012, committed by GitHub

Add Multi-Node support for DDP Training (#504)

* Add support for multi-node DDP
* Remove local_rank confusion
* Fix spacing
Parent 7eaf225d
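For orientation, a minimal sketch of where the two ranks come from (the node/GPU counts, address, and port below are illustrative, not from this commit). torch.distributed.launch exports RANK, WORLD_SIZE, MASTER_ADDR, and MASTER_PORT to every process and passes --local_rank on the command line, so the local rank indexes a GPU within one node while the global rank is unique across the whole job:

# A minimal sketch, assuming a 2-node x 2-GPU launch such as:
#   node 0: python -m torch.distributed.launch --nnodes 2 --node_rank 0 \
#             --nproc_per_node 2 --master_addr 192.168.1.1 --master_port 29500 train.py
#   node 1: python -m torch.distributed.launch --nnodes 2 --node_rank 1 \
#             --nproc_per_node 2 --master_addr 192.168.1.1 --master_port 29500 train.py
import argparse

import torch
import torch.distributed as dist

parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=-1)  # filled in by torch.distributed.launch
opt = parser.parse_args()

if opt.local_rank != -1:
    torch.cuda.set_device(opt.local_rank)  # local rank: GPU index on *this* node (0..1 here)
    dist.init_process_group(backend='nccl', init_method='env://')
    opt.global_rank = dist.get_rank()      # global rank: unique job-wide (0..3 here)
    # On node 1, local_rank is 0 or 1 while global_rank is 2 or 3.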
@@ -62,9 +62,9 @@ def train(hyp, opt, device, tb_writer=None):
     best = wdir + 'best.pt'
     results_file = log_dir + os.sep + 'results.txt'
     epochs, batch_size, total_batch_size, weights, rank = \
-        opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.local_rank
+        opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
     # TODO: Use DDP logging. Only the first process is allowed to log.

     # Save run settings
     with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
         yaml.dump(hyp, f, sort_keys=False)
@@ -184,7 +184,7 @@ def train(hyp, opt, device, tb_writer=None):
     # DDP mode
     if cuda and rank != -1:
-        model = DDP(model, device_ids=[rank], output_device=rank)
+        model = DDP(model, device_ids=[opt.local_rank], output_device=(opt.local_rank))

     # Trainloader
     dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True,
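The hunk above is why the DDP wrapper now receives opt.local_rank explicitly: after the first hunk, rank inside train() holds the global rank, but device_ids/output_device must name a CUDA device that exists on the local node. A minimal sketch with illustrative rank values:

import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP

# Assumes init_process_group() has already run (see the hunk at line 462).
local_rank = 1   # second GPU of *this* node
global_rank = 3  # e.g. second GPU of the second node in a 2x2-GPU job
model = nn.Linear(10, 2).to(torch.device('cuda', local_rank))
# Passing global_rank here would address GPU 3, which does not exist on a
# 2-GPU node; device_ids and output_device must always be local indices.
model = DDP(model, device_ids=[local_rank], output_device=local_rank)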
@@ -441,8 +441,7 @@ if __name__ == '__main__':
         if last and not opt.weights:
             print(f'Resuming training from {last}')
         opt.weights = last if opt.resume and not opt.weights else opt.weights
-    if opt.local_rank == -1 or ("RANK" in os.environ and os.environ["RANK"] == "0"):
+    if opt.local_rank in [-1, 0]:
         check_git_status()
-
     opt.cfg = check_file(opt.cfg)  # check file
     opt.data = check_file(opt.data)  # check file
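The removed condition shows the constraint it was working around: at this point in __main__ the process group is not yet initialised, so the global rank is only visible through the RANK variable that the launcher exports. A hedged sketch of that pre-init check (not code from this commit):

import os

# Before dist.init_process_group() runs, dist.get_rank() cannot be called;
# the launcher-exported RANK variable is the only hint of the global rank.
global_rank = int(os.environ.get('RANK', -1))  # -1 when not launched for DDP
if global_rank in [-1, 0]:
    pass  # e.g. run a once-per-job action such as check_git_status()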
@@ -454,7 +453,8 @@ if __name__ == '__main__':
     device = select_device(opt.device, batch_size=opt.batch_size)
     opt.total_batch_size = opt.batch_size
     opt.world_size = 1
+    opt.global_rank = -1

     # DDP mode
     if opt.local_rank != -1:
         assert torch.cuda.device_count() > opt.local_rank
@@ -462,6 +462,7 @@ if __name__ == '__main__':
         device = torch.device('cuda', opt.local_rank)
         dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
         opt.world_size = dist.get_world_size()
+        opt.global_rank = dist.get_rank()
         assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
         opt.batch_size = opt.total_batch_size // opt.world_size

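Once the process group is initialised via env://, the world size counts every process on every node, and the per-process batch size falls out of the total. A worked example with illustrative numbers:

# Assuming 2 nodes x 4 GPUs and --batch-size 64:
world_size = 2 * 4      # what dist.get_world_size() would return -> 8
total_batch_size = 64
assert total_batch_size % world_size == 0, '--batch-size must be multiple of CUDA device count'
batch_size = total_batch_size // world_size  # 8 images per process/GPU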
@@ -470,7 +471,7 @@ if __name__ == '__main__':

     # Train
     if not opt.evolve:
         tb_writer = None
-        if opt.local_rank in [-1, 0]:
+        if opt.global_rank in [-1, 0]:
             print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
             tb_writer = SummaryWriter(log_dir=increment_dir('runs/exp', opt.name))
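Gating on the global rank keeps once-per-run side effects such as the TensorBoard writer to a single process; gating on the local rank would repeat them once per node. A minimal sketch of the same idea (is_main_process is a hypothetical helper name, not from this commit):

import torch.distributed as dist

def is_main_process():
    # True outside DDP, and for global rank 0 of a (possibly multi-node) job.
    return not dist.is_initialized() or dist.get_rank() == 0

if is_main_process():
    print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')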