Unverified commit acff977a, authored by Glenn Jocher, committed by GitHub

Logger Cleanup (#9828)
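This commit removes leftover Weights & Biases (W&B)-specific code from the training scripts: a commented-out Loggers setup block next to the GenericLogger instantiation, the 'wandb_id' entries in the checkpoint dicts, the check_wandb_resume import and its call in the resume gate, and the 'wandb_id' key in strip_optimizer. Comet resume handling is left in place.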

Parent e3ff7806
@@ -91,17 +91,6 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
     data_dict = None
     if RANK in {-1, 0}:
         logger = GenericLogger(opt=opt, console_logger=LOGGER)
-        # loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
-        # if loggers.clearml:
-        #     data_dict = loggers.clearml.data_dict  # None if no ClearML dataset or filled in by ClearML
-        # if loggers.wandb:
-        #     data_dict = loggers.wandb.data_dict
-        #     if resume:
-        #         weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
-        #
-        # # Register actions
-        # for k in methods(loggers):
-        #     callbacks.register_action(k, callback=getattr(loggers, k))

     # Config
     plots = not evolve and not opt.noplots  # create plots
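This hunk keeps only the GenericLogger path. A minimal usage sketch, assuming GenericLogger reads `opt.save_dir` and `opt.project` and exposes a `log_metrics(metrics, epoch)` method (none of which is confirmed by the diff itself):

```python
# Hedged sketch of the GenericLogger path this hunk keeps. The opt
# namespace is a placeholder, not the real trainer's argparse options.
from types import SimpleNamespace

from utils.general import LOGGER
from utils.loggers import GenericLogger

opt = SimpleNamespace(save_dir='runs/train-cls/exp', project='YOLOv5-cls')  # placeholders
logger = GenericLogger(opt=opt, console_logger=LOGGER)
logger.log_metrics({'train/loss': 0.42, 'lr/0': 0.01}, epoch=0)  # assumed scalar-logging API
```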
@@ -400,7 +389,6 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
                 'ema': deepcopy(ema.ema).half(),
                 'updates': ema.updates,
                 'optimizer': optimizer.state_dict(),
-                # 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None,
                 'opt': vars(opt),
                 'date': datetime.now().isoformat()}
...
@@ -53,7 +53,6 @@ from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_g
                            one_cycle, print_args, print_mutation, strip_optimizer, yaml_save)
 from utils.loggers import Loggers
 from utils.loggers.comet.comet_utils import check_comet_resume
-from utils.loggers.wandb.wandb_utils import check_wandb_resume
 from utils.loss import ComputeLoss
 from utils.metrics import fitness
 from utils.plots import plot_evolve
@@ -375,7 +374,6 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
                 'ema': deepcopy(ema.ema).half(),
                 'updates': ema.updates,
                 'optimizer': optimizer.state_dict(),
-                'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None,
                 'opt': vars(opt),
                 'date': datetime.now().isoformat()}
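Both checkpoint hunks drop the 'wandb_id' entry, so checkpoints written after this commit no longer carry it. A quick way to verify on a saved run (the path below is a placeholder):

```python
# Inspect a checkpoint produced by the updated trainer; 'wandb_id'
# should be absent from the key list.
import torch

ckpt = torch.load('runs/train/exp/weights/last.pt', map_location='cpu')
print(sorted(ckpt.keys()))  # expected: no 'wandb_id' key
```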
@@ -483,7 +481,7 @@ def main(opt, callbacks=Callbacks()):
     check_requirements()

     # Resume (from specified or most recent last.pt)
-    if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) and not opt.evolve:
+    if opt.resume and not check_comet_resume(opt) and not opt.evolve:
         last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
         opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml
         opt_data = opt.data  # original dataset
...
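With check_wandb_resume gone, the resume gate consults only Comet. A standalone sketch of the simplified control flow, with a hypothetical stub standing in for the real check_comet_resume and get_latest_run mirroring the utils.general helper:

```python
# Hedged sketch of the simplified resume gate. check_comet_resume is a
# hypothetical stub; the real helper lives in utils.loggers.comet.comet_utils.
import glob
import os
from pathlib import Path
from types import SimpleNamespace

def check_comet_resume(opt):
    return str(opt.resume).startswith('comet://')  # assumed stub behaviour

def get_latest_run(search_dir='.'):
    # Most recent last*.pt under search_dir, as in utils.general
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''

opt = SimpleNamespace(resume=True, evolve=False)  # placeholder options
if opt.resume and not check_comet_resume(opt) and not opt.evolve:
    last = Path(opt.resume if isinstance(opt.resume, str) else get_latest_run())
    print(f'would resume from {last}')
```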
@@ -956,7 +956,7 @@ def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_op
     x = torch.load(f, map_location=torch.device('cpu'))
     if x.get('ema'):
         x['model'] = x['ema']  # replace model with ema
-    for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates':  # keys
+    for k in 'optimizer', 'best_fitness', 'ema', 'updates':  # keys
         x[k] = None
     x['epoch'] = -1
     x['model'].half()  # to FP16
...
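After this change strip_optimizer no longer nulls a 'wandb_id' key, since new checkpoints do not contain one. Typical usage on a finished run is unchanged; a short sketch (the path is a placeholder):

```python
# Strip training state (optimizer, EMA, etc.) from a checkpoint; with the
# default s='' the file is overwritten in place.
from utils.general import strip_optimizer

strip_optimizer('runs/train/exp/weights/best.pt')
```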