Unverified commit 0c132406, authored by Glenn Jocher, committed by GitHub

Fix ONNX `--dynamic` export on GPU (#8378)

* Fix ONNX `--dynamic` export on GPU

  The patch forces the model and image to CPU when `--dynamic` export is requested. Resolves the bug raised in https://github.com/ultralytics/yolov5/issues/8377

* Update export.py
parent 50ff6eee
@@ -119,8 +119,8 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst
     f = file.with_suffix('.onnx')

     torch.onnx.export(
-        model,
-        im,
+        model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
+        im.cpu() if dynamic else im,
         f,
         verbose=False,
         opset_version=opset,
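To see the change in isolation, the sketch below mimics the patched call with a stand-in torchvision model instead of YOLOv5. Only the `dynamic` flag and the `.cpu()` fallback mirror the diff; the model, image size, opset, and tensor names are illustrative assumptions, not taken from export.py.

import torch
import torchvision

# Stand-in for the YOLOv5 model and dummy image used by export.py (illustrative only)
model = torchvision.models.resnet18().eval()
im = torch.zeros(1, 3, 224, 224)
if torch.cuda.is_available():
    model, im = model.cuda(), im.cuda()  # reproduce the GPU case from the linked issue

dynamic = True  # stands in for the --dynamic CLI flag
f = 'model.onnx'

torch.onnx.export(
    model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
    im.cpu() if dynamic else im,        # keep input on the same device as the model
    f,
    verbose=False,
    opset_version=12,
    input_names=['images'],             # assumed names, not part of the diff
    output_names=['output'],
    dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'},
                  'output': {0: 'batch'}} if dynamic else None)

Before this patch the model and image were passed through unchanged, so a CUDA-resident model combined with `dynamic_axes` hit the failure reported in the linked issue; with the fallback, the `--dynamic` path always exports from CPU.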
@@ -499,8 +499,6 @@ def run(
     im = torch.zeros(batch_size, 3, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection

     # Update model
-    if half and not coreml and not xml:
-        im, model = im.half(), model.half()  # to FP16
     model.train() if train else model.eval()  # training mode = no Detect() layer grid construction
     for k, m in model.named_modules():
         if isinstance(m, Detect):
@@ -510,6 +508,8 @@ def run(

     for _ in range(2):
         y = model(im)  # dry runs
+    if half and not coreml:
+        im, model = im.half(), model.half()  # to FP16
     shape = tuple(y[0].shape)  # model output shape
     LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
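The second hunk relocates the FP16 cast so it now runs after the dry runs (and drops the `not xml` guard from the condition). A minimal sketch of the resulting ordering, again with a hypothetical stand-in model and hard-coded flags rather than the real CLI arguments:

import torch
import torchvision

half, coreml = True, False  # stand-ins for the --half flag and CoreML export selection
model = torchvision.models.resnet18().eval()
im = torch.zeros(1, 3, 224, 224)
if torch.cuda.is_available():
    model, im = model.cuda(), im.cuda()

# Dry runs come first and execute in the model's original precision
for _ in range(2):
    y = model(im)  # dry runs

# The FP16 cast now follows the dry runs, matching the reordered code in run()
if half and not coreml:
    im, model = im.half(), model.half()  # to FP16

shape = tuple(y[0].shape)  # model output shape, as logged by export.py
print(shape)

With this ordering the dry runs execute in the model's original precision, and only the export steps that follow receive FP16 tensors when `--half` is set.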