Unverified commit e8fc97aa, authored by Ayush Chaurasia, committed by GitHub

Improved W&B integration (#2125)

* Init Commit
* New wandb integration
* Update
* Use data_dict in test
* Updates
* Update: scope of log_img
* Update: scope of log_img
* Update
* Update: fix logging conditions
* Add tqdm bar, support for .txt dataset format
* Improve Result table Logger
* Add dataset creation in training script
* Change scope: self.wandb_run
* Add wandb-artifact:// natively; --resume now works with wandb run links (sketched below)
* Add support for logging dataset while training
* Cleanup
* Fix: merge conflict
* Fix: CI tests
* Automatically use wandb config
* Fix: resume
* Fix: CI
* Enhance: use val_table
* More resume enhancement
* Fix: CI
* Add alias
* Get useful opt config data
* train.py cleanup
* Cleanup train.py
* More cleanup
* Cleanup | CI fix
* Reformat using PEP8
* Fix: CI
* Rebase
* Remove unnecessary changes
* Remove unnecessary changes
* Remove unnecessary changes
* Remove unnecessary change from test.py
* Fix: resume from local checkpoint
* Fix: resume
* Fix: resume
* Reformat
* Performance improvement
* Fix local resume
* Fix local resume
* Fix: CI
* Fix: CI
* Improve image logging
* Redo CI tests
* Remember epochs when resuming
* Remember epochs when resuming
* Update DDP location (potential fix for #2405)
* PEP8 reformat
* 0.25 confidence threshold
* Reset train.py plots syntax to previous
* Reset epochs completed syntax to previous
* Reset space to previous
* Remove brackets
* Reset comment to previous
* Update: is_coco check, remove unused code
* Remove redundant print statement
* Remove wandb imports
* Remove dsviz logger from test.py
* Remove redundant change from test.py
* Remove redundant changes from train.py
* Reformat and improvements
* Fix typo
* Add tqdm progress when scanning files; naming improvements

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Parent ed2c7421
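Among the changes, --resume now understands W&B run links via the wandb-artifact:// scheme. A minimal sketch of that prefix check; only the WANDB_ARTIFACT_PREFIX constant comes from this commit, and both helper functions are illustrative rather than the repo's actual API:

    # Minimal sketch of wandb-artifact:// resume detection.
    # is_wandb_artifact and strip_wandb_prefix are hypothetical helpers.
    WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'

    def is_wandb_artifact(resume: str) -> bool:
        # True when --resume points at a W&B run/artifact instead of a local .pt file
        return isinstance(resume, str) and resume.startswith(WANDB_ARTIFACT_PREFIX)

    def strip_wandb_prefix(resume: str) -> str:
        # 'wandb-artifact://entity/project/run_id' -> 'entity/project/run_id'
        return resume[len(WANDB_ARTIFACT_PREFIX):]

    print(is_wandb_artifact('wandb-artifact://me/YOLOv5/run_1'))  # True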
@@ -35,8 +35,9 @@ def test(data,
          save_hybrid=False,  # for hybrid auto-labelling
          save_conf=False,  # save auto-label confidences
          plots=True,
-         log_imgs=0,  # number of logged images
-         compute_loss=None):
+         wandb_logger=None,
+         compute_loss=None,
+         is_coco=False):
     # Initialize/load model and set device
     training = model is not None
     if training:  # called by train.py
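The old log_imgs count is gone from the signature; callers now hand over their logger object. A hedged sketch of a call site for the new signature — every name apart from the new keyword arguments wandb_logger and is_coco is illustrative, not taken from this diff:

    import test  # this repo's test.py

    def run_validation(data_dict, model, wandb_logger, is_coco=False):
        # wandb_logger replaces the old log_imgs=N argument; passing None keeps
        # standalone evaluation working, since W&B logging is skipped entirely
        # when no logger (or no wandb install) is present.
        return test.test(data_dict,
                         model=model,
                         plots=True,
                         wandb_logger=wandb_logger,
                         compute_loss=None,
                         is_coco=is_coco)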
@@ -66,21 +67,19 @@ def test(data,
     # Configure
     model.eval()
-    is_coco = data.endswith('coco.yaml')  # is COCO dataset
-    with open(data) as f:
-        data = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
+    if isinstance(data, str):
+        is_coco = data.endswith('coco.yaml')
+        with open(data) as f:
+            data = yaml.load(f, Loader=yaml.SafeLoader)
     check_dataset(data)  # check
     nc = 1 if single_cls else int(data['nc'])  # number of classes
     iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
     niou = iouv.numel()

     # Logging
-    log_imgs, wandb = min(log_imgs, 100), None  # ceil
-    try:
-        import wandb  # Weights & Biases
-    except ImportError:
-        log_imgs = 0
+    log_imgs = 0
+    if wandb_logger and wandb_logger.wandb:
+        log_imgs = min(wandb_logger.log_imgs, 100)

     # Dataloader
     if not training:
         if device.type != 'cpu':
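The new isinstance guard means test() accepts either a path to a data.yaml or a dict the caller already loaded (train.py can now pass a data_dict reconstructed from a W&B artifact). The same pattern in isolation, as a runnable sketch:

    import yaml

    def load_data_config(data):
        # Accept either a YAML path or an already-parsed dict. is_coco can only
        # be inferred from a filename, which is why test() also grew an explicit
        # is_coco parameter for dict inputs.
        is_coco = False
        if isinstance(data, str):
            is_coco = data.endswith('coco.yaml')
            with open(data) as f:
                data = yaml.load(f, Loader=yaml.SafeLoader)
        return data, is_coco

    print(load_data_config({'nc': 80, 'names': []}))  # dict passes through unchanged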
@@ -147,15 +146,17 @@ def test(data,
                     with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
                         f.write(('%g ' * len(line)).rstrip() % line + '\n')

-            # W&B logging
-            if plots and len(wandb_images) < log_imgs:
-                box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
-                             "class_id": int(cls),
-                             "box_caption": "%s %.3f" % (names[cls], conf),
-                             "scores": {"class_score": conf},
-                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
-                boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
-                wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
+            # W&B logging - Media Panel Plots
+            if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0:  # Check for test operation
+                if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0:
+                    box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+                                 "class_id": int(cls),
+                                 "box_caption": "%s %.3f" % (names[cls], conf),
+                                 "scores": {"class_score": conf},
+                                 "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
+                    boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
+                    wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name))
+            wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None  # logs dsviz tables

             # Append to pycocotools JSON dictionary
             if save_json:
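The box_data payload above follows W&B's bounding-box overlay schema ("position", "class_id", "box_caption", "scores", "domain"). A self-contained example that logs one synthetic image with the same structure; the project name, image, and labels are placeholders:

    import numpy as np
    import wandb

    run = wandb.init(project='bbox-demo')  # project name is a placeholder
    img = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for img[si]
    box_data = [{"position": {"minX": 100, "minY": 120, "maxX": 300, "maxY": 360},
                 "class_id": 0,
                 "box_caption": "person 0.873",
                 "scores": {"class_score": 0.873},
                 "domain": "pixel"}]  # pixel-space coordinates, as in the hunk above
    boxes = {"predictions": {"box_data": box_data, "class_labels": {0: "person"}}}
    run.log({"Bounding Box Debugger/Images": [wandb.Image(img, boxes=boxes, caption="demo.jpg")]})
    run.finish()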
@@ -239,9 +240,11 @@ def test(data,
     # Plots
     if plots:
         confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
-        if wandb and wandb.run:
-            val_batches = [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
-            wandb.log({"Images": wandb_images, "Validation": val_batches}, commit=False)
+        if wandb_logger and wandb_logger.wandb:
+            val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
+            wandb_logger.log({"Validation": val_batches})
+            if wandb_images:
+                wandb_logger.log({"Bounding Box Debugger/Images": wandb_images})

     # Save JSON
     if save_json and len(jdict):
(Diff collapsed.)
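The collapsed diff is presumably where WandbLogger itself changes. A stub of just the surface that the test.py hunks above rely on; the attribute names come from those calls, while the parameter names, defaults, and bodies are assumptions, not the real implementation:

    class WandbLogger:
        # Stub of the interface used by test.py in this commit (placeholder bodies).
        def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
            self.wandb = None          # the wandb module if importable, else None
            self.wandb_run = None      # the active wandb run, if any
            self.log_imgs = 16         # images to log per epoch; test.py caps this at 100
            self.current_epoch = 0     # test.py logs media only when this is > 0
            self.bbox_interval = 1     # epochs between bounding-box panel updates
            self.log_dict = {}         # accumulated metrics for the current step

        def log(self, d):
            self.log_dict.update(d)    # presumably flushed to wandb once per epoch

        def log_training_progress(self, predn, path, names):
            pass                       # builds the dataset-visualization (dsviz) tables

        def log_dataset_artifact(self, dataset, names, name='dataset'):
            pass                       # uploads a dataset split as a W&B artifact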
@@ -12,20 +12,7 @@ WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'

 def create_dataset_artifact(opt):
     with open(opt.data) as f:
         data = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
-    logger = WandbLogger(opt, '', None, data, job_type='create_dataset')
-    nc, names = (1, ['item']) if opt.single_cls else (int(data['nc']), data['names'])
-    names = {k: v for k, v in enumerate(names)}  # to index dictionary
-    logger.log_dataset_artifact(LoadImagesAndLabels(data['train']), names, name='train')  # trainset
-    logger.log_dataset_artifact(LoadImagesAndLabels(data['val']), names, name='val')  # valset
-
-    # Update data.yaml with artifact links
-    data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'train')
-    data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'val')
-    path = opt.data if opt.overwrite_config else opt.data.replace('.', '_wandb.')  # updated data.yaml path
-    data.pop('download', None)  # download via artifact instead of predefined field 'download:'
-    with open(path, 'w') as f:
-        yaml.dump(data, f)
-    print("New Config file => ", path)
+    logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')


 if __name__ == '__main__':
@@ -33,7 +20,6 @@ if __name__ == '__main__':
     parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
     parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
     parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
-    parser.add_argument('--overwrite_config', action='store_true', help='overwrite data.yaml')
     opt = parser.parse_args()
     create_dataset_artifact(opt)
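After this trim, the dataset-logging script only builds the data dict and hands it to WandbLogger with a 'Dataset Creation' job type; the artifact upload and data.yaml rewriting that used to live here have evidently moved into the logger. A hedged sketch of driving it programmatically — the import path is an assumption about the repo layout, and the Namespace fields mirror the argparse flags above:

    from argparse import Namespace

    from utils.wandb_logging.log_dataset import create_dataset_artifact  # path assumed

    opt = Namespace(data='data/coco128.yaml',  # same defaults as the flags above
                    single_cls=False,
                    project='YOLOv5')
    create_dataset_artifact(opt)  # parses data.yaml and opens a 'Dataset Creation' W&B job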