yolov5 / Commit 00e308f7 (Unverified)
Authored Dec 02, 2021 by Glenn Jocher; committed by GitHub on Dec 02, 2021
Update TorchScript suffix to `*.torchscript` (#5856)
Parent: 30db14fe
Showing 5 changed files with 20 additions and 20 deletions.
detect.py              +4  -4
export.py              +3  -3
models/common.py       +6  -6
utils/activations.py   +2  -2
val.py                 +5  -5
detect.py
@@ -81,18 +81,18 @@ def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
     imgsz = check_img_size(imgsz, s=stride)  # check image size
 
     # Half
-    half &= (pt or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
-    if pt:
+    half &= (pt or jit or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
+    if pt or jit:
         model.model.half() if half else model.model.float()
 
     # Dataloader
     if webcam:
         view_img = check_imshow()
         cudnn.benchmark = True  # set True to speed up constant image size inference
-        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
+        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
         bs = len(dataset)  # batch_size
     else:
-        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
+        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
         bs = 1  # batch_size
     vid_path, vid_writer = [None] * bs, [None] * bs
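Because `jit` is no longer carved out of `pt`, the dataloader's `auto` argument can drop the explicit `and not jit`. A small sketch of the boolean logic, using illustrative weight names rather than the real `DetectMultiBackend` object:

old_w = 'yolov5s.torchscript.pt'   # old TorchScript artifact name (illustrative)
new_w = 'yolov5s.torchscript'      # new TorchScript artifact name (illustrative)

# Before: TorchScript files ended in .pt, so pt was True and jit was derived from it.
pt_old = old_w.endswith('.pt')                        # True
jit_old = pt_old and 'torchscript' in old_w.lower()   # True
auto_old = pt_old and not jit_old                     # False -> no rectangular auto-letterboxing

# After: .torchscript is its own suffix, so pt and jit are mutually exclusive.
pt_new = new_w.endswith('.pt')                        # False
jit_new = new_w.endswith('.torchscript')              # True
auto_new = pt_new                                     # still False, with simpler code
assert auto_old == auto_new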
export.py
@@ -5,7 +5,7 @@ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by h
 Format | Example | Export `include=(...)` argument
 --- | --- | ---
 PyTorch | yolov5s.pt | -
-TorchScript | yolov5s.torchscript.pt | 'torchscript'
+TorchScript | yolov5s.torchscript | 'torchscript'
 ONNX | yolov5s.onnx | 'onnx'
 CoreML | yolov5s.mlmodel | 'coreml'
 TensorFlow SavedModel | yolov5s_saved_model/ | 'saved_model'

@@ -19,7 +19,7 @@ Usage:
 Inference:
     $ python path/to/detect.py --weights yolov5s.pt
-                                         yolov5s.torchscript.pt
+                                         yolov5s.torchscript
                                          yolov5s.onnx
                                          yolov5s.mlmodel (under development)
                                          yolov5s_saved_model

@@ -66,7 +66,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:'
     # YOLOv5 TorchScript model export
     try:
         LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
-        f = file.with_suffix('.torchscript.pt')
+        f = file.with_suffix('.torchscript')
 
         ts = torch.jit.trace(model, im, strict=False)
         d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
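A condensed sketch of the TorchScript export step with the new suffix, following the variables shown in the hunk above (`model`, `im`, `file`); the final save call with `_extra_files` is assumed from the surrounding function and is not part of this diff:

import json
import torch

# Assumptions: model is a loaded YOLOv5 nn.Module, im is a dummy input tensor,
# file is a pathlib.Path such as Path('yolov5s.pt').
f = file.with_suffix('.torchscript')             # e.g. yolov5s.torchscript
ts = torch.jit.trace(model, im, strict=False)    # trace the forward pass
d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
ts.save(str(f), _extra_files={'config.txt': json.dumps(d)})  # embed metadata alongside the graph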
models/common.py
@@ -279,7 +279,7 @@ class DetectMultiBackend(nn.Module):
     def __init__(self, weights='yolov5s.pt', device=None, dnn=True):
         # Usage:
         #   PyTorch:      weights = *.pt
-        #   TorchScript:            *.torchscript.pt
+        #   TorchScript:            *.torchscript
         #   CoreML:                 *.mlmodel
         #   TensorFlow:             *_saved_model
         #   TensorFlow:             *.pb

@@ -289,10 +289,10 @@ class DetectMultiBackend(nn.Module):
         #   TensorRT:               *.engine
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
-        suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
+        suffix = Path(w).suffix.lower()
+        suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
         check_suffix(w, suffixes)  # check weights have acceptable suffix
-        pt, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes)  # backend booleans
-        jit = pt and 'torchscript' in w.lower()
+        pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes)  # backend booleans
         stride, names = 64, [f'class{i}' for i in range(1000)]  # assign defaults
         if jit:  # TorchScript

@@ -304,10 +304,10 @@ class DetectMultiBackend(nn.Module):
             stride, names = int(d['stride']), d['names']
         elif pt:  # PyTorch
             from models.experimental import attempt_load  # scoped to avoid circular import
-            model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device)
+            model = attempt_load(weights, map_location=device)
             stride = int(model.stride.max())  # model stride
             names = model.module.names if hasattr(model, 'module') else model.names  # get class names
-        elif coreml:  # CoreML *.mlmodel
+        elif coreml:  # CoreML
             import coremltools as ct
             model = ct.models.MLModel(w)
         elif dnn:  # ONNX OpenCV DNN
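The new backend selection can be exercised on its own; a standalone sketch of the suffix matching introduced here (the `backend_flags` helper is illustrative, not part of the repository):

from pathlib import Path

def backend_flags(w):
    # Mirrors DetectMultiBackend.__init__: one boolean per supported suffix.
    suffix = Path(w).suffix.lower()
    suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
    pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes)
    return dict(pt=pt, jit=jit, onnx=onnx, engine=engine,
                tflite=tflite, pb=pb, saved_model=saved_model, coreml=coreml)

print(backend_flags('yolov5s.torchscript'))  # jit=True, everything else False
print(backend_flags('yolov5s.pt'))           # pt=True; TorchScript weights no longer match here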
utils/activations.py
@@ -18,8 +18,8 @@ class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
 class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
     @staticmethod
     def forward(x):
-        # return x * F.hardsigmoid(x)  # for torchscript and CoreML
-        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for torchscript, CoreML and ONNX
+        # return x * F.hardsigmoid(x)  # for TorchScript and CoreML
+        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX
 
 
 # Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
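Only the comment capitalization changes here; the export-friendly formulation itself is untouched. A quick numerical check (not part of the repo) of why `x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0` can stand in for `nn.Hardswish` in exported graphs:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.linspace(-6, 6, steps=101)
export_friendly = x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0   # clamp(x + 3, 0, 6) == ReLU6(x + 3)
assert torch.allclose(export_friendly, nn.Hardswish()(x), atol=1e-6)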
val.py
@@ -111,7 +111,7 @@ def run(data,
     # Initialize/load model and set device
     training = model is not None
     if training:  # called by train.py
-        device, pt, engine = next(model.parameters()).device, True, False  # get model device, PyTorch model
+        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
 
         half &= device.type != 'cpu'  # half precision only supported on CUDA
         model.half() if half else model.float()

@@ -124,10 +124,10 @@ def run(data,
         # Load model
         model = DetectMultiBackend(weights, device=device, dnn=dnn)
-        stride, pt, engine = model.stride, model.pt, model.engine
+        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
         imgsz = check_img_size(imgsz, s=stride)  # check image size
-        half &= (pt or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
-        if pt:
+        half &= (pt or jit or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
+        if pt or jit:
             model.model.half() if half else model.model.float()
         elif engine:
             batch_size = model.batch_size

@@ -166,7 +166,7 @@ def run(data,
     pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
-        if pt or engine:
+        if pt or jit or engine:
             im = im.to(device, non_blocking=True)
             targets = targets.to(device)
         im = im.half() if half else im.float()  # uint8 to fp16/32
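The same gate widening as in detect.py: TorchScript models now opt in to FP16 validation on CUDA. A sketch with illustrative flag values rather than a real `DetectMultiBackend` instance:

def allows_half(pt, jit, engine, device_type='cuda'):
    # Mirrors: half &= (pt or jit or engine) and device.type != 'cpu'
    return (pt or jit or engine) and device_type != 'cpu'

print(allows_half(pt=True,  jit=False, engine=False))  # PyTorch:     True (unchanged)
print(allows_half(pt=False, jit=True,  engine=False))  # TorchScript: True (new in this commit)
print(allows_half(pt=False, jit=False, engine=False))  # e.g. ONNX:   False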