Administrator / yolov5 / Commits

Commit 5373a28c authored August 21, 2022 by Glenn Jocher

Created using Colaboratory

Parent: 8665d557

Showing 1 changed file with 243 additions and 208 deletions

tutorial.ipynb (+243, -208)
@@ -17,110 +17,121 @@
  "accelerator": "GPU",
  "widgets": {
  "application/vnd.jupyter.widget-state+json": {
- "6d6b90ead2db49b3bdf624b6ba9b44e9": {
+ "da0946bcefd9414fa282977f7f609e36": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "HBoxModel",
- "model_module_version": "1.5.0",
+ "model_module_version": "2.0.0",
  "state": {
  "_dom_classes": [],
  "_model_module": "@jupyter-widgets/controls",
- "_model_module_version": "1.5.0",
+ "_model_module_version": "2.0.0",
  "_model_name": "HBoxModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/controls",
- "_view_module_version": "1.5.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "HBoxView",
  "box_style": "",
  "children": [
- "IPY_MODEL_cb77443edb9e43328a56aaa4413a0df3",
- "IPY_MODEL_954c8b8699e143bf92be6bfc02fc52f6",
- "IPY_MODEL_a64775946e13477f83d8bba6086385b9"
+ "IPY_MODEL_7838c0af44244ccc906c413cea0989d7",
+ "IPY_MODEL_309ea78b3e814198b4080beb878d5329",
+ "IPY_MODEL_b2d1d998e5db4ca1a36280902e1647c7"
  ],
- "layout": "IPY_MODEL_1413611b7f4f4ef99e4f541f5ca35ed6"
+ "layout": "IPY_MODEL_e7d7f56c77884717ba122f1d603c0852",
+ "tabbable": null,
+ "tooltip": null
  }
  },
- "cb77443edb9e43328a56aaa4413a0df3": {
+ "7838c0af44244ccc906c413cea0989d7": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "HTMLModel",
- "model_module_version": "1.5.0",
+ "model_module_version": "2.0.0",
  "state": {
  "_dom_classes": [],
  "_model_module": "@jupyter-widgets/controls",
- "_model_module_version": "1.5.0",
+ "_model_module_version": "2.0.0",
  "_model_name": "HTMLModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/controls",
- "_view_module_version": "1.5.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "HTMLView",
  "description": "",
- "description_tooltip": null,
- "layout": "IPY_MODEL_00737f5558eb4fbd968172acb978e54a",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_abf60d6b8ea847f9bb358ae2b045458b",
  "placeholder": "",
- "style": "IPY_MODEL_f03e5ddfd1c04bedaf68ab02c3f6f0ea",
+ "style": "IPY_MODEL_379196a2761b4a29aca8ef088dc60c10",
+ "tabbable": null,
+ "tooltip": null,
  "value": "100%"
  }
  },
- "954c8b8699e143bf92be6bfc02fc52f6": {
+ "309ea78b3e814198b4080beb878d5329": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "FloatProgressModel",
- "model_module_version": "1.5.0",
+ "model_module_version": "2.0.0",
  "state": {
  "_dom_classes": [],
  "_model_module": "@jupyter-widgets/controls",
- "_model_module_version": "1.5.0",
+ "_model_module_version": "2.0.0",
  "_model_name": "FloatProgressModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/controls",
- "_view_module_version": "1.5.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "ProgressView",
  "bar_style": "success",
  "description": "",
- "description_tooltip": null,
- "layout": "IPY_MODEL_6926db7e0035455f99e1dd4508c4b19c",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_52b546a356e54174a95049b30cb52c81",
  "max": 818322941,
  "min": 0,
  "orientation": "horizontal",
- "style": "IPY_MODEL_a6a52c9f828b458e97ddf7a11ae9275f",
+ "style": "IPY_MODEL_0889e134327e4aa0a8719d03a0d6941b",
+ "tabbable": null,
+ "tooltip": null,
  "value": 818322941
  }
  },
- "a64775946e13477f83d8bba6086385b9": {
+ "b2d1d998e5db4ca1a36280902e1647c7": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "HTMLModel",
- "model_module_version": "1.5.0",
+ "model_module_version": "2.0.0",
  "state": {
  "_dom_classes": [],
  "_model_module": "@jupyter-widgets/controls",
- "_model_module_version": "1.5.0",
+ "_model_module_version": "2.0.0",
  "_model_name": "HTMLModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/controls",
- "_view_module_version": "1.5.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "HTMLView",
  "description": "",
- "description_tooltip": null,
- "layout": "IPY_MODEL_c4c7dc45a1c24dc4b2c709e21271a37e",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_30f22a3e42d24f10ad9851f40a6703f3",
  "placeholder": "",
- "style": "IPY_MODEL_09c43ffe2c7e4bdc9489e83f9d82ab73",
- "value": " 780M/780M [01:12<00:00, 23.8MB/s]"
+ "style": "IPY_MODEL_648b3512bb7d4ccca5d75af36c133e92",
+ "tabbable": null,
+ "tooltip": null,
+ "value": " 780M/780M [01:31<00:00, 12.3MB/s]"
  }
  },
- "1413611b7f4f4ef99e4f541f5ca35ed6": {
+ "e7d7f56c77884717ba122f1d603c0852": {
  "model_module": "@jupyter-widgets/base",
  "model_name": "LayoutModel",
- "model_module_version": "1.2.0",
+ "model_module_version": "2.0.0",
  "state": {
  "_model_module": "@jupyter-widgets/base",
- "_model_module_version": "1.2.0",
+ "_model_module_version": "2.0.0",
  "_model_name": "LayoutModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/base",
- "_view_module_version": "1.2.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "LayoutView",
  "align_content": null,
  "align_items": null,
  "align_self": null,
- "border": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
  "bottom": null,
  "display": null,
  "flex": null,

@@ -148,8 +159,6 @@
  "object_position": null,
  "order": null,
  "overflow": null,
- "overflow_x": null,
- "overflow_y": null,
  "padding": null,
  "right": null,
  "top": null,

@@ -157,22 +166,25 @@
  "width": null
  }
  },
- "00737f5558eb4fbd968172acb978e54a": {
+ "abf60d6b8ea847f9bb358ae2b045458b": {
  "model_module": "@jupyter-widgets/base",
  "model_name": "LayoutModel",
- "model_module_version": "1.2.0",
+ "model_module_version": "2.0.0",
  "state": {
  "_model_module": "@jupyter-widgets/base",
- "_model_module_version": "1.2.0",
+ "_model_module_version": "2.0.0",
  "_model_name": "LayoutModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/base",
- "_view_module_version": "1.2.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "LayoutView",
  "align_content": null,
  "align_items": null,
  "align_self": null,
- "border": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
  "bottom": null,
  "display": null,
  "flex": null,

@@ -200,8 +212,6 @@
  "object_position": null,
  "order": null,
  "overflow": null,
- "overflow_x": null,
- "overflow_y": null,
  "padding": null,
  "right": null,
  "top": null,

@@ -209,37 +219,43 @@
  "width": null
  }
  },
- "f03e5ddfd1c04bedaf68ab02c3f6f0ea": {
+ "379196a2761b4a29aca8ef088dc60c10": {
  "model_module": "@jupyter-widgets/controls",
- "model_name": "DescriptionStyleModel",
- "model_module_version": "1.5.0",
+ "model_name": "HTMLStyleModel",
+ "model_module_version": "2.0.0",
  "state": {
  "_model_module": "@jupyter-widgets/controls",
- "_model_module_version": "1.5.0",
- "_model_name": "DescriptionStyleModel",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLStyleModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/base",
- "_view_module_version": "1.2.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "StyleView",
- "description_width": ""
+ "background": null,
+ "description_width": "",
+ "font_size": null,
+ "text_color": null
  }
  },
- "6926db7e0035455f99e1dd4508c4b19c": {
+ "52b546a356e54174a95049b30cb52c81": {
  "model_module": "@jupyter-widgets/base",
  "model_name": "LayoutModel",
- "model_module_version": "1.2.0",
+ "model_module_version": "2.0.0",
  "state": {
  "_model_module": "@jupyter-widgets/base",
- "_model_module_version": "1.2.0",
+ "_model_module_version": "2.0.0",
  "_model_name": "LayoutModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/base",
- "_view_module_version": "1.2.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "LayoutView",
  "align_content": null,
  "align_items": null,
  "align_self": null,
- "border": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
  "bottom": null,
  "display": null,
  "flex": null,

@@ -267,8 +283,6 @@
  "object_position": null,
  "order": null,
  "overflow": null,
- "overflow_x": null,
- "overflow_y": null,
  "padding": null,
  "right": null,
  "top": null,

@@ -276,38 +290,41 @@
  "width": null
  }
  },
- "a6a52c9f828b458e97ddf7a11ae9275f": {
+ "0889e134327e4aa0a8719d03a0d6941b": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "ProgressStyleModel",
- "model_module_version": "1.5.0",
+ "model_module_version": "2.0.0",
  "state": {
  "_model_module": "@jupyter-widgets/controls",
- "_model_module_version": "1.5.0",
+ "_model_module_version": "2.0.0",
  "_model_name": "ProgressStyleModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/base",
- "_view_module_version": "1.2.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "StyleView",
  "bar_color": null,
  "description_width": ""
  }
  },
- "c4c7dc45a1c24dc4b2c709e21271a37e": {
+ "30f22a3e42d24f10ad9851f40a6703f3": {
  "model_module": "@jupyter-widgets/base",
  "model_name": "LayoutModel",
- "model_module_version": "1.2.0",
+ "model_module_version": "2.0.0",
  "state": {
  "_model_module": "@jupyter-widgets/base",
- "_model_module_version": "1.2.0",
+ "_model_module_version": "2.0.0",
  "_model_name": "LayoutModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/base",
- "_view_module_version": "1.2.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "LayoutView",
  "align_content": null,
  "align_items": null,
  "align_self": null,
- "border": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
  "bottom": null,
  "display": null,
  "flex": null,

@@ -335,8 +352,6 @@
  "object_position": null,
  "order": null,
  "overflow": null,
- "overflow_x": null,
- "overflow_y": null,
  "padding": null,
  "right": null,
  "top": null,

@@ -344,19 +359,22 @@
  "width": null
  }
  },
- "09c43ffe2c7e4bdc9489e83f9d82ab73": {
+ "648b3512bb7d4ccca5d75af36c133e92": {
  "model_module": "@jupyter-widgets/controls",
- "model_name": "DescriptionStyleModel",
- "model_module_version": "1.5.0",
+ "model_name": "HTMLStyleModel",
+ "model_module_version": "2.0.0",
  "state": {
  "_model_module": "@jupyter-widgets/controls",
- "_model_module_version": "1.5.0",
- "_model_name": "DescriptionStyleModel",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLStyleModel",
  "_view_count": null,
  "_view_module": "@jupyter-widgets/base",
- "_view_module_version": "1.2.0",
+ "_view_module_version": "2.0.0",
  "_view_name": "StyleView",
- "description_width": ""
+ "background": null,
+ "description_width": "",
+ "font_size": null,
+ "text_color": null
  }
  }
  }
@@ -404,7 +422,7 @@
  "colab": {
  "base_uri": "https://localhost:8080/"
  },
- "outputId": "508de90c-846e-495d-c7d6-50681af62a98"
+ "outputId": "4200fd6f-c6f5-4505-a4f9-a918f3ed1f86"
  },
  "source": [
  "!git clone https://github.com/ultralytics/yolov5 # clone\n",

@@ -415,13 +433,13 @@
  "import utils\n",
  "display = utils.notebook_init() # checks"
  ],
- "execution_count": null,
+ "execution_count": 1,
  "outputs": [
  {
  "output_type": "stream",
  "name": "stderr",
  "text": [
- "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n"
+ "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n"
  ]
  },
  {
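Note: the hunks above cover the notebook's setup cell, which clones the repository, installs its requirements, and calls utils.notebook_init() to print the environment banner seen in the stderr output. A minimal sketch of the same steps outside Colab (plain Python, assuming git, pip and network access; not part of this commit):

import subprocess, sys

subprocess.run(["git", "clone", "https://github.com/ultralytics/yolov5"], check=True)  # clone
subprocess.run([sys.executable, "-m", "pip", "install", "-qr", "yolov5/requirements.txt"],
               check=True)  # install dependencies

import torch
print(f"torch {torch.__version__}, CUDA available: {torch.cuda.is_available()}")  # environment check
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # e.g. the Tesla V100-SXM2-16GB reported in the log above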
@@ -461,29 +479,29 @@
  "colab": {
  "base_uri": "https://localhost:8080/"
  },
- "outputId": "93881540-331e-4890-cd38-4c2776933238"
+ "outputId": "1af15107-bcd1-4e8f-b5bd-0ee1a737e051"
  },
  "source": [
  "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n",
  "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)"
  ],
- "execution_count": null,
+ "execution_count": 2,
  "outputs": [
  {
  "output_type": "stream",
  "name": "stdout",
  "text": [
  "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n",
- "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
+ "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
  "\n",
  "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n",
- "100% 14.1M/14.1M [00:00<00:00, 39.3MB/s]\n",
+ "100% 14.1M/14.1M [00:00<00:00, 41.7MB/s]\n",
  "\n",
  "Fusing layers... \n",
  "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n",
- "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.9ms\n",
- "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 22.0ms\n",
- "Speed: 0.6ms pre-process, 18.4ms inference, 24.1ms NMS per image at shape (1, 3, 640, 640)\n",
+ "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.5ms\n",
+ "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.9ms\n",
+ "Speed: 0.5ms pre-process, 16.7ms inference, 21.4ms NMS per image at shape (1, 3, 640, 640)\n",
  "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n"
  ]
  }
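Note: the cell above runs inference through the detect.py CLI. The same yolov5s weights can also be exercised through PyTorch Hub, the other documented YOLOv5 inference path; a short sketch, with thresholds mirroring --conf 0.25 and --img 640 (not part of this commit):

import torch

model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # downloads yolov5s.pt on first use
model.conf = 0.25                                         # confidence threshold, as --conf 0.25
results = model("https://ultralytics.com/images/zidane.jpg", size=640)
results.print()   # e.g. "2 persons, 2 ties", as in the logged output
results.save()    # writes annotated images under runs/detect/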
@@ -515,29 +533,29 @@
  "id": "WQPtK1QYVaD_",
  "colab": {
  "base_uri": "https://localhost:8080/",
- "height": 49,
+ "height": 17,
  "referenced_widgets": [
- "6d6b90ead2db49b3bdf624b6ba9b44e9",
- "cb77443edb9e43328a56aaa4413a0df3",
- "954c8b8699e143bf92be6bfc02fc52f6",
- "a64775946e13477f83d8bba6086385b9",
- "1413611b7f4f4ef99e4f541f5ca35ed6",
- "00737f5558eb4fbd968172acb978e54a",
- "f03e5ddfd1c04bedaf68ab02c3f6f0ea",
- "6926db7e0035455f99e1dd4508c4b19c",
- "a6a52c9f828b458e97ddf7a11ae9275f",
- "c4c7dc45a1c24dc4b2c709e21271a37e",
- "09c43ffe2c7e4bdc9489e83f9d82ab73"
+ "da0946bcefd9414fa282977f7f609e36",
+ "7838c0af44244ccc906c413cea0989d7",
+ "309ea78b3e814198b4080beb878d5329",
+ "b2d1d998e5db4ca1a36280902e1647c7",
+ "e7d7f56c77884717ba122f1d603c0852",
+ "abf60d6b8ea847f9bb358ae2b045458b",
+ "379196a2761b4a29aca8ef088dc60c10",
+ "52b546a356e54174a95049b30cb52c81",
+ "0889e134327e4aa0a8719d03a0d6941b",
+ "30f22a3e42d24f10ad9851f40a6703f3",
+ "648b3512bb7d4ccca5d75af36c133e92"
  ]
  },
- "outputId": "ed2ca46e-a1a9-4a16-c449-859278d8aa18"
+ "outputId": "5f129105-eca5-4f33-fb1d-981255f814ad"
  },
  "source": [
  "# Download COCO val\n",
  "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n",
  "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip"
  ],
- "execution_count": null,
+ "execution_count": 3,
  "outputs": [
  {
  "output_type": "display_data",

@@ -548,7 +566,24 @@
  "application/vnd.jupyter.widget-view+json": {
  "version_major": 2,
  "version_minor": 0,
- "model_id": "6d6b90ead2db49b3bdf624b6ba9b44e9"
+ "model_id": "da0946bcefd9414fa282977f7f609e36"
  },
+ "application/json": {
+ "n": 0,
+ "total": 818322941,
+ "elapsed": 0.020366430282592773,
+ "ncols": null,
+ "nrows": null,
+ "prefix": "",
+ "ascii": false,
+ "unit": "B",
+ "unit_scale": true,
+ "rate": null,
+ "bar_format": null,
+ "postfix": null,
+ "unit_divisor": 1024,
+ "initial": 0,
+ "colour": null
+ }
  },
  "metadata": {}
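Note: the cell above fetches the COCO 2017 val images (about 780 MB, 5000 images) with torch.hub.download_url_to_file and unzips them with a shell command. A shell-free sketch of the same step, using only the standard library for extraction (not part of this commit):

from pathlib import Path
import zipfile
import torch

url = "https://ultralytics.com/assets/coco2017val.zip"  # ~780 MB, 5000 val images
zip_path = Path("tmp.zip")
torch.hub.download_url_to_file(url, str(zip_path))      # same call the notebook cell uses

with zipfile.ZipFile(zip_path) as zf:
    zf.extractall("../datasets")                        # mirrors `unzip -q tmp.zip -d ../datasets`
zip_path.unlink()                                       # mirrors `rm tmp.zip`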
@@ -562,48 +597,48 @@
  "colab": {
  "base_uri": "https://localhost:8080/"
  },
- "outputId": "19a590ef-363e-424c-d9ce-78bbe0593cd5"
+ "outputId": "40d5d000-abee-46a0-c07d-1066e1662e01"
  },
  "source": [
  "# Validate YOLOv5x on COCO val\n",
  "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half"
  ],
- "execution_count": null,
+ "execution_count": 4,
  "outputs": [
  {
  "output_type": "stream",
  "name": "stdout",
  "text": [
  "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n",
- "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
+ "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
  "\n",
  "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt to yolov5x.pt...\n",
- "100% 166M/166M [00:06<00:00, 28.1MB/s]\n",
+ "100% 166M/166M [00:10<00:00, 16.6MB/s]\n",
  "\n",
  "Fusing layers... \n",
  "YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n",
  "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n",
- "100% 755k/755k [00:00<00:00, 47.3MB/s]\n",
- "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10756.32it/s]\n",
+ "100% 755k/755k [00:00<00:00, 1.39MB/s]\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10506.48it/s]\n",
  "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n",
- " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:07<00:00, 2.33it/s]\n",
- " all 5000 36335 0.743 0.625 0.683 0.504\n",
- "Speed: 0.1ms pre-process, 4.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n",
+ " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:06<00:00, 2.36it/s]\n",
+ " all 5000 36335 0.743 0.625 0.683 0.504\n",
+ "Speed: 0.1ms pre-process, 4.6ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
  "\n",
  "Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json...\n",
  "loading annotations into memory...\n",
- "Done (t=0.41s)\n",
+ "Done (t=0.38s)\n",
  "creating index...\n",
  "index created!\n",
  "Loading and preparing results...\n",
- "DONE (t=5.64s)\n",
+ "DONE (t=5.49s)\n",
  "creating index...\n",
  "index created!\n",
  "Running per image evaluation...\n",
  "Evaluate annotation type *bbox*\n",
- "DONE (t=76.80s).\n",
+ "DONE (t=72.10s).\n",
  "Accumulating evaluation results...\n",
- "DONE (t=14.61s).\n",
+ "DONE (t=13.94s).\n",
  " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n",
  " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n",
  " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n",
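Note: the cell above validates yolov5x.pt on COCO val at 640 px with --iou 0.65 and FP16, producing the mAP@0.5:0.95 = 0.504 / mAP@0.5 = 0.683 summary line in the log. A sketch of the same run driven from a Python script rather than a notebook shell cell, assuming the cloned yolov5/ directory is the working directory (not part of this commit):

import subprocess, sys

subprocess.run(
    [sys.executable, "val.py",
     "--weights", "yolov5x.pt",
     "--data", "coco.yaml",
     "--img", "640",
     "--iou", "0.65",
     "--half"],   # same flags as the notebook cell
    check=True,
)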
@@ -682,13 +717,13 @@
  "colab": {
  "base_uri": "https://localhost:8080/"
  },
- "outputId": "47759d5e-34f0-4a6a-c714-ff533391cfff"
+ "outputId": "f0ce0354-7f50-4546-f3f9-672b4b522d59"
  },
  "source": [
  "# Train YOLOv5s on COCO128 for 3 epochs\n",
  "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
  ],
- "execution_count": null,
+ "execution_count": 5,
  "outputs": [
  {
  "output_type": "stream",

@@ -696,7 +731,7 @@
  "text": [
  "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n",
  "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
- "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
+ "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
  "\n",
  "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
  "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n",

@@ -705,8 +740,8 @@
  "\n",
  "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n",
  "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n",
- "100% 6.66M/6.66M [00:00<00:00, 75.3MB/s]\n",
- "Dataset download success ✅ (0.7s), saved to \u001b[1m/content/datasets\u001b[0m\n",
+ "100% 6.66M/6.66M [00:00<00:00, 76.7MB/s]\n",
+ "Dataset download success ✅ (0.5s), saved to \u001b[1m/content/datasets\u001b[0m\n",
  "\n",
  " from n params module arguments \n",
  " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n",

@@ -740,33 +775,33 @@
  "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
  "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
  "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
- "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7246.20it/s]\n",
+ "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7984.87it/s]\n",
  "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n",
- "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 986.21it/s]\n",
+ "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 1018.19it/s]\n",
  "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
- "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 269.10it/s]\n",
- "Plotting labels to runs/train/exp/labels.jpg... \n",
+ "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 246.87it/s]\n",
+ "\n",
+ "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
+ "Plotting labels to runs/train/exp/labels.jpg... \n",
  "Image sizes 640 train, 640 val\n",
  "Using 8 dataloader workers\n",
  "Logging results to \u001b[1mruns/train/exp\u001b[0m\n",
  "Starting training for 3 epochs...\n",
  "\n",
- " Epoch gpu_mem box obj cls labels img_size\n",
- " 0/2 3.76G 0.04529 0.06712 0.01835 323 640: 100% 8/8 [00:04<00:00, 1.65it/s]\n",
- " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.07it/s]\n",
- " all 128 929 0.666 0.611 0.684 0.452\n",
+ " Epoch GPU_mem box_loss obj_loss cls_loss Instances Size\n",
+ " 0/2 3.77G 0.04529 0.06712 0.01835 323 640: 100% 8/8 [00:04<00:00, 1.96it/s]\n",
+ " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.12it/s]\n",
+ " all 128 929 0.647 0.611 0.68 0.449\n",
  "\n",
- " Epoch gpu_mem box obj cls labels img_size\n",
- " 1/2 4.79G 0.04244 0.06423 0.01611 236 640: 100% 8/8 [00:01<00:00, 7.60it/s]\n",
- " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.90it/s]\n",
- " all 128 929 0.746 0.626 0.722 0.481\n",
+ " Epoch GPU_mem box_loss obj_loss cls_loss Instances Size\n",
+ " 1/2 4.79G 0.04244 0.06423 0.01611 236 640: 100% 8/8 [00:00<00:00, 8.08it/s]\n",
+ " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.43it/s]\n",
+ " all 128 929 0.737 0.623 0.72 0.482\n",
  "\n",
- " Epoch gpu_mem box obj cls labels img_size\n",
- " 2/2 4.79G 0.04695 0.06875 0.0173 189 640: 100% 8/8 [00:00<00:00, 8.49it/s]\n",
- " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.24it/s]\n",
- " all 128 929 0.774 0.647 0.746 0.499\n",
+ " Epoch GPU_mem box_loss obj_loss cls_loss Instances Size\n",
+ " 2/2 4.79G 0.04695 0.06875 0.0173 189 640: 100% 8/8 [00:00<00:00, 8.87it/s]\n",
+ " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.57it/s]\n",
+ " all 128 929 0.76 0.631 0.733 0.497\n",
  "\n",
  "3 epochs completed in 0.003 hours.\n",
  "Optimizer stripped from runs/train/exp/weights/last.pt, 14.9MB\n",
@@ -775,79 +810,79 @@
  "Validating runs/train/exp/weights/best.pt...\n",
  "Fusing layers... \n",
  "Model summary: 213 layers, 7225885 parameters, 0 gradients, 16.4 GFLOPs\n",
- " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.11it/s]\n",
- " all 128 929 0.774 0.647 0.746 0.499\n",
- " person 128 254 0.87 0.697 0.806 0.534\n",
- " bicycle 128 6 0.759 0.528 0.725 0.444\n",
- " car 128 46 0.774 0.413 0.554 0.239\n",
- " motorcycle 128 5 0.791 1 0.962 0.595\n",
- " airplane 128 6 0.981 1 0.995 0.689\n",
- " bus 128 7 0.65 0.714 0.755 0.691\n",
- " train 128 3 1 0.573 0.995 0.602\n",
- " truck 128 12 0.613 0.333 0.489 0.263\n",
- " boat 128 6 0.933 0.333 0.507 0.209\n",
- " traffic light 128 14 0.76 0.228 0.367 0.209\n",
- " stop sign 128 2 0.821 1 0.995 0.821\n",
- " bench 128 9 0.824 0.526 0.676 0.31\n",
- " bird 128 16 0.974 1 0.995 0.611\n",
- " cat 128 4 0.859 1 0.995 0.772\n",
- " dog 128 9 1 0.666 0.883 0.647\n",
- " horse 128 2 0.84 1 0.995 0.622\n",
- " elephant 128 17 0.926 0.882 0.93 0.716\n",
- " bear 128 1 0.709 1 0.995 0.995\n",
- " zebra 128 4 0.866 1 0.995 0.922\n",
- " giraffe 128 9 0.777 0.778 0.891 0.705\n",
- " backpack 128 6 0.894 0.5 0.753 0.294\n",
- " umbrella 128 18 0.876 0.783 0.899 0.54\n",
- " handbag 128 19 0.799 0.209 0.335 0.179\n",
- " tie 128 7 0.782 0.714 0.787 0.478\n",
- " suitcase 128 4 0.658 1 0.945 0.581\n",
- " frisbee 128 5 0.726 0.8 0.76 0.701\n",
- " skis 128 1 0.8 1 0.995 0.103\n",
- " snowboard 128 7 0.815 0.714 0.852 0.574\n",
- " sports ball 128 6 0.649 0.667 0.602 0.307\n",
- " kite 128 10 0.7 0.47 0.546 0.206\n",
- " baseball bat 128 4 1 0.497 0.544 0.182\n",
- " baseball glove 128 7 0.598 0.429 0.47 0.31\n",
- " skateboard 128 5 0.851 0.6 0.685 0.495\n",
- " tennis racket 128 7 0.754 0.429 0.544 0.34\n",
- " bottle 128 18 0.564 0.333 0.53 0.264\n",
- " wine glass 128 16 0.715 0.875 0.907 0.528\n",
- " cup 128 36 0.825 0.639 0.803 0.535\n",
- " fork 128 6 1 0.329 0.5 0.384\n",
- " knife 128 16 0.706 0.625 0.666 0.405\n",
- " spoon 128 22 0.836 0.464 0.619 0.379\n",
- " bowl 128 28 0.763 0.607 0.717 0.516\n",
- " banana 128 1 0.886 1 0.995 0.399\n",
- " sandwich 128 2 1 0 0.62 0.546\n",
- " orange 128 4 1 0.75 0.995 0.622\n",
- " broccoli 128 11 0.548 0.443 0.467 0.35\n",
- " carrot 128 24 0.7 0.585 0.699 0.458\n",
- " hot dog 128 2 0.502 1 0.995 0.995\n",
- " pizza 128 5 0.813 1 0.962 0.747\n",
- " donut 128 14 0.662 1 0.96 0.838\n",
- " cake 128 4 0.868 1 0.995 0.822\n",
- " chair 128 35 0.538 0.571 0.594 0.322\n",
- " couch 128 6 0.924 0.667 0.828 0.538\n",
- " potted plant 128 14 0.731 0.786 0.824 0.495\n",
- " bed 128 3 0.736 0.333 0.83 0.425\n",
- " dining table 128 13 0.624 0.259 0.494 0.336\n",
- " toilet 128 2 0.79 1 0.995 0.846\n",
- " tv 128 2 0.574 1 0.995 0.796\n",
- " laptop 128 3 1 0 0.695 0.367\n",
- " mouse 128 2 1 0 0.173 0.0864\n",
- " remote 128 8 1 0.62 0.634 0.557\n",
- " cell phone 128 8 0.612 0.397 0.437 0.221\n",
- " microwave 128 3 0.741 1 0.995 0.766\n",
- " oven 128 5 0.33 0.4 0.449 0.3\n",
- " sink 128 6 0.444 0.333 0.331 0.231\n",
- " refrigerator 128 5 0.561 0.8 0.798 0.546\n",
- " book 128 29 0.635 0.276 0.355 0.164\n",
- " clock 128 9 0.766 0.889 0.888 0.73\n",
- " vase 128 2 0.303 1 0.995 0.895\n",
- " scissors 128 1 1 0 0.332 0.0397\n",
- " teddy bear 128 21 0.842 0.508 0.739 0.499\n",
- " toothbrush 128 5 0.787 1 0.928 0.59\n",
+ " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.25it/s]\n",
+ " all 128 929 0.76 0.631 0.733 0.497\n",
+ " person 128 254 0.872 0.699 0.807 0.533\n",
+ " bicycle 128 6 0.761 0.536 0.725 0.444\n",
+ " car 128 46 0.771 0.413 0.553 0.242\n",
+ " motorcycle 128 5 0.795 1 0.928 0.592\n",
+ " airplane 128 6 0.983 1 0.995 0.689\n",
+ " bus 128 7 0.648 0.714 0.755 0.691\n",
+ " train 128 3 1 0.586 0.995 0.603\n",
+ " truck 128 12 0.616 0.333 0.482 0.259\n",
+ " boat 128 6 0.921 0.333 0.524 0.211\n",
+ " traffic light 128 14 0.76 0.229 0.374 0.21\n",
+ " stop sign 128 2 0.824 1 0.995 0.821\n",
+ " bench 128 9 0.822 0.519 0.674 0.316\n",
+ " bird 128 16 0.973 1 0.995 0.6\n",
+ " cat 128 4 0.861 1 0.995 0.772\n",
+ " dog 128 9 1 0.666 0.88 0.645\n",
+ " horse 128 2 0.845 1 0.995 0.622\n",
+ " elephant 128 17 0.923 0.882 0.93 0.716\n",
+ " bear 128 1 0.71 1 0.995 0.995\n",
+ " zebra 128 4 0.866 1 0.995 0.922\n",
+ " giraffe 128 9 0.771 0.752 0.891 0.705\n",
+ " backpack 128 6 0.888 0.5 0.753 0.294\n",
+ " umbrella 128 18 0.876 0.784 0.899 0.539\n",
+ " handbag 128 19 0.8 0.21 0.335 0.181\n",
+ " tie 128 7 0.798 0.714 0.787 0.478\n",
+ " suitcase 128 4 0.662 1 0.945 0.581\n",
+ " frisbee 128 5 0.727 0.8 0.759 0.701\n",
+ " skis 128 1 0 0 0.0585 0.0139\n",
+ " snowboard 128 7 0.807 0.714 0.853 0.591\n",
+ " sports ball 128 6 0.649 0.667 0.602 0.307\n",
+ " kite 128 10 0.7 0.47 0.543 0.212\n",
+ " baseball bat 128 4 1 0.496 0.544 0.208\n",
+ " baseball glove 128 7 0.619 0.429 0.47 0.313\n",
+ " skateboard 128 5 0.847 0.6 0.712 0.496\n",
+ " tennis racket 128 7 0.757 0.429 0.544 0.34\n",
+ " bottle 128 18 0.546 0.334 0.53 0.259\n",
+ " wine glass 128 16 0.716 0.875 0.907 0.528\n",
+ " cup 128 36 0.826 0.639 0.802 0.538\n",
+ " fork 128 6 1 0.329 0.496 0.364\n",
+ " knife 128 16 0.706 0.625 0.604 0.382\n",
+ " spoon 128 22 0.837 0.467 0.618 0.38\n",
+ " bowl 128 28 0.757 0.607 0.714 0.519\n",
+ " banana 128 1 0.889 1 0.995 0.399\n",
+ " sandwich 128 2 1 0 0.638 0.56\n",
+ " orange 128 4 1 0.663 0.945 0.592\n",
+ " broccoli 128 11 0.545 0.437 0.471 0.351\n",
+ " carrot 128 24 0.701 0.585 0.697 0.454\n",
+ " hot dog 128 2 0.501 1 0.995 0.995\n",
+ " pizza 128 5 0.809 1 0.962 0.747\n",
+ " donut 128 14 0.66 1 0.96 0.837\n",
+ " cake 128 4 0.871 1 0.995 0.822\n",
+ " chair 128 35 0.536 0.561 0.595 0.325\n",
+ " couch 128 6 0.931 0.667 0.828 0.539\n",
+ " potted plant 128 14 0.733 0.786 0.823 0.495\n",
+ " bed 128 3 0.691 0.333 0.83 0.422\n",
+ " dining table 128 13 0.621 0.255 0.513 0.34\n",
+ " toilet 128 2 0.797 1 0.995 0.846\n",
+ " tv 128 2 0.57 1 0.995 0.796\n",
+ " laptop 128 3 1 0 0.694 0.316\n",
+ " mouse 128 2 1 0 0.172 0.0862\n",
+ " remote 128 8 1 0.62 0.634 0.551\n",
+ " cell phone 128 8 0.591 0.375 0.425 0.216\n",
+ " microwave 128 3 0.736 1 0.995 0.766\n",
+ " oven 128 5 0.333 0.4 0.438 0.299\n",
+ " sink 128 6 0.427 0.333 0.329 0.23\n",
+ " refrigerator 128 5 0.559 0.8 0.798 0.565\n",
+ " book 128 29 0.558 0.241 0.307 0.155\n",
+ " clock 128 9 0.761 0.889 0.888 0.711\n",
+ " vase 128 2 0.287 1 0.995 0.895\n",
+ " scissors 128 1 1 0 0.497 0.0574\n",
+ " teddy bear 128 21 0.838 0.493 0.745 0.509\n",
+ " toothbrush 128 5 0.789 1 0.928 0.59\n",
  "Results saved to \u001b[1mruns/train/exp\u001b[0m\n"
  ]
  }
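Note: the final cell trains yolov5s on COCO128 for 3 epochs and then validates best.pt, which is where the per-class table above comes from. A sketch of the same run from plain Python, followed by a look at the per-epoch metrics; the results.csv location is an assumption about the run directory layout, not something shown in this diff:

import csv, subprocess, sys
from pathlib import Path

subprocess.run(
    [sys.executable, "train.py",
     "--img", "640", "--batch", "16", "--epochs", "3",
     "--data", "coco128.yaml", "--weights", "yolov5s.pt", "--cache"],  # same flags as the notebook cell
    check=True,
)

results = Path("runs/train/exp/results.csv")  # assumed location of the per-epoch metrics log
if results.exists():
    with results.open() as f:
        for row in csv.DictReader(f):
            print({k.strip(): v.strip() for k, v in row.items()})  # per-epoch losses and mAP columns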