Unverified commit 4447f4b9, authored by Glenn Jocher, committed by GitHub

--resume to same runs/exp directory (#765)

* initial commit
* add weight backup dir on resume
Parent 5e0b90de
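The change in a nutshell: `--resume` now derives the original experiment directory from the checkpoint path instead of incrementing a fresh `runs/expN`. A minimal sketch of that path arithmetic, using a hypothetical checkpoint location:

    from pathlib import Path

    ckpt = Path('runs/exp0/weights/last.pt')  # hypothetical checkpoint from an interrupted run
    log_dir = ckpt.parent.parent              # runs/exp0: two levels up is the run directory
    print(log_dir)                            # runs/exp0, reused instead of creating runs/exp1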
 import argparse
+import logging
 import math
 import os
 import random
+import shutil
 import time
-import logging
 from pathlib import Path
 import numpy as np
@@ -34,10 +35,10 @@ logger = logging.getLogger(__name__)

 def train(hyp, opt, device, tb_writer=None):
     logger.info(f'Hyperparameters {hyp}')
     log_dir = Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / 'evolve'  # logging directory
-    wdir = str(log_dir / 'weights') + os.sep  # weights directory
+    wdir = log_dir / 'weights'  # weights directory
     os.makedirs(wdir, exist_ok=True)
-    last = wdir + 'last.pt'
-    best = wdir + 'best.pt'
+    last = wdir / 'last.pt'
+    best = wdir / 'best.pt'
     results_file = str(log_dir / 'results.txt')
     epochs, batch_size, total_batch_size, weights, rank = \
         opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
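This hunk swaps manual string concatenation (a `wdir` string with a trailing `os.sep`) for `pathlib` joins. Worth noting why this is safe: `os.makedirs`, `open`, and similar calls accept `os.PathLike` objects since Python 3.6. A small sketch with a hypothetical run directory:

    import os
    from pathlib import Path

    log_dir = Path('runs/exp0')       # hypothetical run directory
    wdir = log_dir / 'weights'        # '/' joins segments, no os.sep bookkeeping
    os.makedirs(wdir, exist_ok=True)  # os.makedirs accepts os.PathLike (Python 3.6+)
    last, best = wdir / 'last.pt', wdir / 'best.pt'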
@@ -131,6 +132,7 @@ def train(hyp, opt, device, tb_writer=None):
         start_epoch = ckpt['epoch'] + 1
         if opt.resume:
             assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
+            shutil.copytree(wdir, wdir.parent / f'weights_backup_epoch{start_epoch - 1}')  # save previous weights
         if epochs < start_epoch:
             logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                         (weights, ckpt['epoch'], epochs))
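The new `shutil.copytree` call snapshots the whole weights directory before the resumed run starts overwriting `last.pt` and `best.pt`. One caveat, stated as an assumption about the Python version in use: before 3.8 (which added `dirs_exist_ok`), `copytree` raises `FileExistsError` if the destination already exists, so resuming twice from the same epoch would need the old backup removed first. A guarded sketch with hypothetical paths:

    import shutil
    from pathlib import Path

    wdir = Path('runs/exp0/weights')   # hypothetical weights directory
    start_epoch = 100                  # hypothetical resume epoch
    backup = wdir.parent / f'weights_backup_epoch{start_epoch - 1}'
    if not backup.exists():            # copytree fails on an existing target (pre-3.8)
        shutil.copytree(wdir, backup)  # preserve last.pt/best.pt before they are overwritten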
@@ -365,13 +367,13 @@ def train(hyp, opt, device, tb_writer=None):
     if rank in [-1, 0]:
         # Strip optimizers
         n = ('_' if len(opt.name) and not opt.name.isnumeric() else '') + opt.name
-        fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
-        for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
+        fresults, flast, fbest = 'results%s.txt' % n, wdir / f'last{n}.pt', wdir / f'best{n}.pt'
+        for f1, f2 in zip([wdir / 'last.pt', wdir / 'best.pt', 'results.txt'], [flast, fbest, fresults]):
             if os.path.exists(f1):
                 os.rename(f1, f2)  # rename
-                ispt = f2.endswith('.pt')  # is *.pt
-                strip_optimizer(f2) if ispt else None  # strip optimizer
-                os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None  # upload
+                if str(f2).endswith('.pt'):  # is *.pt
+                    strip_optimizer(f2)  # strip optimizer
+                    os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket else None  # upload

     # Finish
     if not opt.evolve:
         plot_results(save_dir=log_dir)  # save as results.png
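This hunk also heads off a type bug that the `pathlib` switch would otherwise introduce: `Path` objects have no `endswith` method, so the old `f2.endswith('.pt')` would raise `AttributeError` once `wdir` became a `Path`. Hence the `str(f2)` conversion; `Path.suffix` is the idiomatic alternative. A quick demonstration with a hypothetical file name:

    from pathlib import Path

    f2 = Path('runs/exp0/weights/last.pt')  # hypothetical renamed checkpoint
    # f2.endswith('.pt')                    # AttributeError: no 'endswith' on Path objects
    assert str(f2).endswith('.pt')          # works on the string form, as in the diff
    assert f2.suffix == '.pt'               # equivalent pathlib idiom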
@@ -421,8 +423,9 @@ if __name__ == '__main__':
     # Resume
     if opt.resume:  # resume an interrupted run
         ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
+        log_dir = Path(ckpt).parent.parent  # runs/exp0
         assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
-        with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
+        with open(log_dir / 'opt.yaml') as f:
             opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace
         opt.cfg, opt.weights, opt.resume = '', ckpt, True
         logger.info('Resuming training from %s' % ckpt)
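Resuming replays the run's saved arguments: `opt.yaml` is loaded and unpacked into an `argparse.Namespace`, reconstructing the same attribute-style object `train()` expects. A self-contained sketch, with stand-in YAML content in place of a real `runs/exp0/opt.yaml`:

    import argparse
    import yaml

    saved = "epochs: 300\nbatch_size: 16\nweights: ''\n"  # stand-in for runs/exp0/opt.yaml
    opt = argparse.Namespace(**yaml.load(saved, Loader=yaml.FullLoader))
    print(opt.epochs, opt.batch_size)                     # 300 16, as if parsed from argv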
@@ -432,6 +435,7 @@ if __name__ == '__main__':
         opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
         assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
         opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
+        log_dir = increment_dir(Path(opt.logdir) / 'exp', opt.name)  # runs/exp1

     device = select_device(opt.device, batch_size=opt.batch_size)
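For fresh (non-resume) runs, `increment_dir` still picks the next free `runs/expN` name; the result is now computed once here and shared with both `train()` and the TensorBoard writer below. The helper itself is not shown in this diff; a hypothetical stand-in that honors the same contract:

    from pathlib import Path

    def next_run_dir(base='runs/exp', name=''):  # stand-in for increment_dir, not the actual implementation
        n = 0
        while Path(f'{base}{n}').exists():       # first unused index
            n += 1
        return f'{base}{n}' + (f'_{name}' if name else '')  # e.g. runs/exp1 or runs/exp1_tag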
@@ -453,7 +457,7 @@ if __name__ == '__main__':
         tb_writer = None
         if opt.global_rank in [-1, 0]:
             logger.info('Start Tensorboard with "tensorboard --logdir %s", view at http://localhost:6006/' % opt.logdir)
-            tb_writer = SummaryWriter(log_dir=increment_dir(Path(opt.logdir) / 'exp', opt.name))  # runs/exp
+            tb_writer = SummaryWriter(log_dir=log_dir)  # runs/exp0

         train(hyp, opt, device, tb_writer)