Unverified 提交 e5b0200c 作者: Glenn Jocher 提交者: GitHub

Update tensorboard>=2.4.1 (#2576)

* Update tensorboard>=2.4.1 — update the tensorboard version to attempt to address https://github.com/ultralytics/yolov5/issues/2573 (tensorboard logging fails in the Docker image).
* cleanup
上级 2b329b09
...@@ -8,12 +8,12 @@ opencv-python>=4.1.2 ...@@ -8,12 +8,12 @@ opencv-python>=4.1.2
Pillow Pillow
PyYAML>=5.3.1 PyYAML>=5.3.1
scipy>=1.4.1 scipy>=1.4.1
tensorboard>=2.2
torch>=1.7.0 torch>=1.7.0
torchvision>=0.8.1 torchvision>=0.8.1
tqdm>=4.41.0 tqdm>=4.41.0
# logging ------------------------------------- # logging -------------------------------------
tensorboard>=2.4.1
# wandb # wandb
# plotting ------------------------------------ # plotting ------------------------------------
......
import argparse import argparse
import logging import logging
import math import math
...@@ -34,7 +33,7 @@ from utils.google_utils import attempt_download ...@@ -34,7 +33,7 @@ from utils.google_utils import attempt_download
from utils.loss import ComputeLoss from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id, check_wandb_config_file from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -75,7 +74,7 @@ def train(hyp, opt, device, tb_writer=None): ...@@ -75,7 +74,7 @@ def train(hyp, opt, device, tb_writer=None):
data_dict = wandb_logger.data_dict data_dict = wandb_logger.data_dict
if wandb_logger.wandb: if wandb_logger.wandb:
weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming
nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
...@@ -405,7 +404,7 @@ def train(hyp, opt, device, tb_writer=None): ...@@ -405,7 +404,7 @@ def train(hyp, opt, device, tb_writer=None):
wandb_logger.log_model( wandb_logger.log_model(
last.parent, opt, epoch, fi, best_model=best_fitness == fi) last.parent, opt, epoch, fi, best_model=best_fitness == fi)
del ckpt del ckpt
# end epoch ---------------------------------------------------------------------------------------------------- # end epoch ----------------------------------------------------------------------------------------------------
# end training # end training
if rank in [-1, 0]: if rank in [-1, 0]:
...@@ -534,7 +533,8 @@ if __name__ == '__main__': ...@@ -534,7 +533,8 @@ if __name__ == '__main__':
if not opt.evolve: if not opt.evolve:
tb_writer = None # init loggers tb_writer = None # init loggers
if opt.global_rank in [-1, 0]: if opt.global_rank in [-1, 0]:
logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/') prefix = colorstr('tensorboard: ')
logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
train(hyp, opt, device, tb_writer) train(hyp, opt, device, tb_writer)
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论