Administrator / yolov5 · Commits · 0ad6301c

Unverified commit 0ad6301c, authored Jul 26, 2021 by Glenn Jocher; committed by GitHub on Jul 26, 2021.
Update script headers (#4163)
* Update download script headers
* cleanup
* bug fix attempt
* bug fix attempt2
* bug fix attempt3
* cleanup
Parent: f8e11483
Showing 9 changed files with 36 additions and 32 deletions.
data/scripts/download_weights.sh     +7 −2
data/scripts/get_coco.sh             +7 −7
data/scripts/get_coco128.sh          +8 −8
train.py                             +3 −2
utils/autoanchor.py                  +4 −6
utils/datasets.py                    +1 −1
utils/loggers/wandb/log_dataset.py   +2 −2
utils/loggers/wandb/wandb_utils.py   +3 −3
val.py                               +1 −1
data/scripts/download_weights.sh

 #!/bin/bash
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
 # Download latest models from https://github.com/ultralytics/yolov5/releases
-# Usage:
-#     $ bash path/to/download_weights.sh
+# YOLOv5 🚀 example usage: bash path/to/download_weights.sh
+# parent
+# └── yolov5
+#     ├── yolov5s.pt  ← downloads here
+#     ├── yolov5m.pt
+#     └── ...
 python - <<EOF
 from utils.google_utils import attempt_download
 ...
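The `python - <<EOF` heredoc above pipes an inline program to the Python interpreter, so the shell script can reuse the repo's download helper directly. The loop body is elided ('...') in the diff; the sketch below is a hedged illustration of how `attempt_download` might be driven from that heredoc, run from the yolov5 repo root. The model-size loop is an assumption, not copied from the elided body.

from utils.google_utils import attempt_download  # fetches a release asset if not present locally

for size in ['s', 'm', 'l', 'x']:  # illustrative suffixes; the real list is elided above
    attempt_download(f'yolov5{size}.pt')  # e.g. yolov5s.pt from the GitHub releases page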
data/scripts/get_coco.sh

 #!/bin/bash
-# COCO 2017 dataset http://cocodataset.org
-# Download command: bash data/scripts/get_coco.sh
-# Train command: python train.py --data coco.yaml
-# Default dataset location is next to YOLOv5:
-#   /parent_folder
-#     /coco
-#     /yolov5
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
+# Download COCO 2017 dataset http://cocodataset.org
+# YOLOv5 🚀 example usage: bash data/scripts/get_coco.sh
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco  ← downloads here
 # Download/unzip labels
 d='../datasets'  # unzip directory
 ...
data/scripts/get_coco128.sh

 #!/bin/bash
-# COCO128 dataset https://www.kaggle.com/ultralytics/coco128
-# Download command: bash data/scripts/get_coco128.sh
-# Train command: python train.py --data coco128.yaml
-# Default dataset location is next to /yolov5:
-#   /parent_folder
-#     /coco128
-#     /yolov5
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
+# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
+# YOLOv5 🚀 example usage: bash data/scripts/get_coco128.sh
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco128  ← downloads here
 # Download/unzip images and labels
-d='../'  # unzip directory
+d='../datasets'  # unzip directory
 url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
 f='coco128.zip'  # or 'coco2017labels-segments.zip', 68 MB
 echo 'Downloading' $url$f ' ...'
 ...
train.py

@@ -78,8 +78,9 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     plots = not evolve  # create plots
     cuda = device.type != 'cpu'
     init_seeds(1 + RANK)
-    with open(data) as f:
+    with open(data, encoding='ascii', errors='ignore') as f:
         data_dict = yaml.safe_load(f)  # data dict
     nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
     names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
     assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}'  # check
 ...
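The recurring change in this commit is the `encoding='ascii', errors='ignore'` pair on every `open()` that feeds `yaml.safe_load`: non-ASCII bytes (such as the 🚀 emoji the new script headers introduce) are silently dropped instead of raising a `UnicodeDecodeError` on systems whose default locale encoding cannot decode them. A minimal self-contained sketch of the effect, using a hypothetical file name:

import yaml

# Write a dataset yaml whose comment contains a non-ASCII character.
with open('demo_data.yaml', 'w', encoding='utf-8') as f:
    f.write('# YOLOv5 🚀 demo\nnc: 1\nnames: [item]\n')

# Reading it back as ASCII with errors='ignore' drops the emoji bytes
# instead of raising, and the YAML still parses.
with open('demo_data.yaml', encoding='ascii', errors='ignore') as f:
    data_dict = yaml.safe_load(f)

print(data_dict)  # {'nc': 1, 'names': ['item']}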
utils/autoanchor.py

@@ -60,11 +60,11 @@ def check_anchors(dataset, model, thr=4.0, imgsz=640):
         print('')  # newline

-def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
+def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
     """ Creates kmeans-evolved anchors from training dataset

         Arguments:
-            path: path to dataset *.yaml, or a loaded dataset
+            dataset: path to data.yaml, or a loaded dataset
             n: number of anchors
             img_size: image size used for training
             thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
 ...

@@ -103,13 +103,11 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10
             print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
         return k

-    if isinstance(path, str):  # *.yaml file
-        with open(path) as f:
+    if isinstance(dataset, str):  # *.yaml file
+        with open(dataset, encoding='ascii', errors='ignore') as f:
             data_dict = yaml.safe_load(f)  # model dict
         from utils.datasets import LoadImagesAndLabels
         dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
-    else:
-        dataset = path  # dataset

     # Get label wh
     shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
 ...
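The rename from `path` to `dataset` matches what the argument actually accepts: either a `*.yaml` path or an already-loaded dataset object. With the `else: dataset = path` branch deleted, a non-string argument is now used as the dataset directly. A hedged usage sketch of both call forms, matching the signature in the hunk above:

from utils.autoanchor import kmean_anchors

# Form 1: pass a dataset yaml path; the function loads it via LoadImagesAndLabels.
anchors = kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000)

# Form 2: pass an already-constructed dataset object (train_dataset is assumed
# here to be a LoadImagesAndLabels instance); the isinstance(dataset, str) branch is skipped.
# anchors = kmean_anchors(dataset=train_dataset, n=9, img_size=640)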
utils/datasets.py

@@ -909,7 +909,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False):
             return False, None, path

     zipped, data_dir, yaml_path = unzip(Path(path))
-    with open(check_file(yaml_path)) as f:
+    with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f:
         data = yaml.safe_load(f)  # data dict
     if zipped:
         data['path'] = data_dir  # TODO: should this be dir.resolve()?
 ...
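For context, `dataset_stats()` takes the dataset yaml (or, per the `unzip(Path(path))` call above, a zipped dataset) and parses it with the same hardened `open()`. A call sketch grounded only in the signature shown in the hunk header; the return value's shape is not visible in this hunk, so it is left unannotated:

from utils.datasets import dataset_stats

# Signature from the hunk header: dataset_stats(path='coco128.yaml', autodownload=False, verbose=False)
stats = dataset_stats('coco128.yaml', autodownload=True, verbose=True)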
utils/loggers/wandb/log_dataset.py

@@ -8,9 +8,9 @@ WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
 def create_dataset_artifact(opt):
-    with open(opt.data) as f:
+    with open(opt.data, encoding='ascii', errors='ignore') as f:
         data = yaml.safe_load(f)  # data dict
-    logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
+    logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')  # TODO: return value unused

 if __name__ == '__main__':
 ...
utils/loggers/wandb/wandb_utils.py

@@ -62,7 +62,7 @@ def check_wandb_resume(opt):
 def process_wandb_config_ddp_mode(opt):
-    with open(check_file(opt.data)) as f:
+    with open(check_file(opt.data), encoding='ascii', errors='ignore') as f:
         data_dict = yaml.safe_load(f)  # data dict
     train_dir, val_dir = None, None
     if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
 ...

@@ -150,7 +150,7 @@ class WandbLogger():
                                                 opt.single_cls,
                                                 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
         print("Created dataset config file ", config_path)
-        with open(config_path) as f:
+        with open(config_path, encoding='ascii', errors='ignore') as f:
             wandb_data_dict = yaml.safe_load(f)
         return wandb_data_dict
 ...

@@ -226,7 +226,7 @@ class WandbLogger():
             print("Saving model artifact on epoch ", epoch + 1)

     def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
-        with open(data_file) as f:
+        with open(data_file, encoding='ascii', errors='ignore') as f:
             data = yaml.safe_load(f)  # data dict
         check_dataset(data)
         nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
 ...
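The identical one-line `open()` change recurs in train.py, val.py, utils/autoanchor.py, utils/datasets.py and both wandb logger modules. A hypothetical helper, not part of this commit, shows how the repeated pattern could be centralized if desired:

import yaml

def yaml_load(path):
    # Hypothetical helper (not in this PR): read a YAML file as ASCII,
    # dropping any non-ASCII bytes, then parse it safely.
    with open(path, encoding='ascii', errors='ignore') as f:
        return yaml.safe_load(f)

data_dict = yaml_load('data/coco128.yaml')  # equivalent to the inlined pattern above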
val.py

@@ -123,7 +123,7 @@ def run(data,
     #     model = nn.DataParallel(model)

     # Data
-    with open(data) as f:
+    with open(data, encoding='ascii', errors='ignore') as f:
         data = yaml.safe_load(f)
     check_dataset(data)  # check
 ...