Unverified commit 177da7f3, authored by Duncan Moss, committed by GitHub

Add `--half` support for OpenVINO exports (#7615)

* feature(export): add half support for openvino models
* Update export.py
* Update export.py

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Parent b53917de
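What the change enables, in short: an FP16 OpenVINO IR can now be produced without a GPU, because the PyTorch weights stay FP32 and the precision conversion is delegated to OpenVINO's Model Optimizer (mo). Below is a minimal usage sketch, not part of this commit; the weights path is illustrative and the keyword names assume the run() signature in upstream export.py.

# Assumed programmatic usage of yolov5/export.py after this change
import export  # yolov5/export.py, run from the repository root

export.run(
    weights='yolov5s.pt',   # illustrative FP32 checkpoint
    include=('openvino',),  # ONNX is exported first, then converted by mo
    half=True,              # now maps to --data_type FP16 in the mo call
    device='cpu',           # permitted: the assert accepts CPU when xml (OpenVINO) is selected
)
# Assumed CLI equivalent: python export.py --weights yolov5s.pt --include openvino --half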
@@ -168,7 +168,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst
         LOGGER.info(f'{prefix} export failure: {e}')

-def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
+def export_openvino(model, im, file, half, prefix=colorstr('OpenVINO:')):
     # YOLOv5 OpenVINO export
     try:
         check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
@@ -177,7 +177,7 @@ def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
         LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
         f = str(file).replace('.pt', '_openvino_model' + os.sep)
-        cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}"
+        cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
         subprocess.check_output(cmd, shell=True)
         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
@@ -477,7 +477,7 @@ def run(
     # Load PyTorch model
     device = select_device(device)
     if half:
-        assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0'
+        assert device.type != 'cpu' or coreml or xml, '--half only compatible with GPU export, i.e. use --device 0'
     model = attempt_load(weights, map_location=device, inplace=True, fuse=True)  # load FP32 model
     nc, names = model.nc, model.names  # number of classes, class names
@@ -491,7 +491,7 @@ def run(
     im = torch.zeros(batch_size, 3, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection
     # Update model
-    if half and not coreml:
+    if half and not (coreml or xml):
         im, model = im.half(), model.half()  # to FP16
     model.train() if train else model.eval()  # training mode = no Detect() layer grid construction
     for k, m in model.named_modules():
@@ -515,7 +515,7 @@ def run(
     if onnx or xml:  # OpenVINO requires ONNX
         f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify)
     if xml:  # OpenVINO
-        f[3] = export_openvino(model, im, file)
+        f[3] = export_openvino(model, im, file, half)
     if coreml:
         _, f[4] = export_coreml(model, im, file, int8, half)
...
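For reference, a self-contained sketch of the command string the patched export_openvino builds; only the cmd construction is reproduced here, and yolov5s.pt is an illustrative filename.

import os
from pathlib import Path

def openvino_mo_cmd(file: Path, half: bool) -> str:
    # Same pattern as the patched line: mo writes FP16 IR when half is requested, FP32 otherwise
    f = str(file).replace('.pt', '_openvino_model' + os.sep)
    return f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"

print(openvino_mo_cmd(Path('yolov5s.pt'), half=True))
# On POSIX prints: mo --input_model yolov5s.onnx --output_dir yolov5s_openvino_model/ --data_type FP16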