optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # beta2 left at its default; beta1 is used as the momentum term, matching the momentum scheduling in https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
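# Illustrative sketch (not part of the training script): why beta1 acts as the momentum slot.
# When OneCycleLR is created with cycle_momentum=True and the optimizer exposes 'betas'
# (as Adam does), the scheduler cycles betas[0] between base_momentum and max_momentum on
# every step. All values below are made up for the example.
# import torch
# from torch import optim
# _p = [torch.nn.Parameter(torch.zeros(1))]
# _opt = optim.Adam(_p, lr=0.01, betas=(0.937, 0.999))
# _sched = optim.lr_scheduler.OneCycleLR(_opt, max_lr=0.01, total_steps=100,
#                                        cycle_momentum=True,
#                                        base_momentum=0.85, max_momentum=0.937)
# for _ in range(100):
#     _opt.step()
#     _sched.step()
#     # after each step, _opt.param_groups[0]['betas'][0] has moved along the one-cycle schedule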
parser.add_argument('--resume', nargs='?', const='get_last', default=False, help='resume training from given path/to/last.pt, or from the most recent run if no path is given')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
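# Illustrative note on how the --resume flag resolves with nargs='?' and const='get_last'
# (the path below is a hypothetical example, not a real file in the repo):
#   (flag omitted)                      -> opt.resume is False        (start a fresh run)
#   --resume                            -> opt.resume == 'get_last'   (resume the most recent run)
#   --resume runs/exp0/weights/last.pt  -> opt.resume == that path    (resume that checkpoint)
_example_opt = parser.parse_args(['--resume'])  # example invocation only: _example_opt.resume == 'get_last'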