Administrator / yolov5 / Commits

Commit 78cf4885 authored Jun 08, 2021 by Glenn Jocher

Created using Colaboratory

Parent: c6b51f41

Showing 1 changed file with 165 additions and 95 deletions

tutorial.ipynb  +165  -95

tutorial.ipynb @ 78cf4885
...
...
@@ -16,7 +16,7 @@
"accelerator": "GPU",
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"
8815626359d84416a2f44a95500580a4
": {
"
cef5e9351ca743bcba5febac0b096a30
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
...
...
@@ -28,15 +28,15 @@
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_
3b85609c4ce94a74823f2cfe141ce68e
",
"layout": "IPY_MODEL_
ec326c52378f4410920c328f221e0514
",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_8
76609753c2946248890344722963d44
",
"IPY_MODEL_
8abfdd8778e44b7ca0d29881cb1ada05
"
"IPY_MODEL_8
3000c64a11c4ae8abd6f0ef2f108cef
",
"IPY_MODEL_
0f7899eb719f4a9c9852426551f97be9
"
]
}
},
"
3b85609c4ce94a74823f2cfe141ce68e
": {
"
ec326c52378f4410920c328f221e0514
": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
...
...
@@ -87,12 +87,12 @@
"left": null
}
},
"8
76609753c2946248890344722963d44
": {
"8
3000c64a11c4ae8abd6f0ef2f108cef
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_
78c6c3d97c484916b8ee167c63556800
",
"style": "IPY_MODEL_
886ac5b18b3c4c82bf15ad5055f1e17e
",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
...
...
@@ -107,30 +107,30 @@
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_
9dd0f182db5d45378ceafb855e486eb8
"
"layout": "IPY_MODEL_
4e67b3c3a49849c7a7ba28b7eec96e7a
"
}
},
"
8abfdd8778e44b7ca0d29881cb1ada05
": {
"
0f7899eb719f4a9c9852426551f97be9
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_
a3dab28b45c247089a3d1b8b09f327de
",
"style": "IPY_MODEL_
62c3682ff1804571a483d46664533969
",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 781M/781M [0
8:43<00:00, 1.56
MB/s]",
"value": " 781M/781M [0
0:12<00:00, 67.1
MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_
32451332b7a94ba9aacddeaa6ac94d50
"
"layout": "IPY_MODEL_
599dda3b608b432393760b2ca4ae7c7d
"
}
},
"
78c6c3d97c484916b8ee167c63556800
": {
"
886ac5b18b3c4c82bf15ad5055f1e17e
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
...
...
@@ -145,7 +145,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
"
9dd0f182db5d45378ceafb855e486eb8
": {
"
4e67b3c3a49849c7a7ba28b7eec96e7a
": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
...
...
@@ -196,7 +196,7 @@
"left": null
}
},
"
a3dab28b45c247089a3d1b8b09f327de
": {
"
62c3682ff1804571a483d46664533969
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
...
...
@@ -210,7 +210,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
"
32451332b7a94ba9aacddeaa6ac94d50
": {
"
599dda3b608b432393760b2ca4ae7c7d
": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
...
...
@@ -261,7 +261,7 @@
"left": null
}
},
"
0fffa335322b41658508e06aed0acbf0
": {
"
217ca488c82a4b7a80318b70887a556e
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
...
...
@@ -273,15 +273,15 @@
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_
a354c6f80ce347e5a3ef64af87c0eccb
",
"layout": "IPY_MODEL_
4e63af16f1084ca98a6fa5a282f2a81e
",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_
85823e71fea54c39bd11e2e97234883
6",
"IPY_MODEL_
fb11acd663fa4e71b041d67310d045fd
"
"IPY_MODEL_
49f4b3c7f6ff42b4b9132a8550e1218
6",
"IPY_MODEL_
8ec9e1a4883245daaf029458ee09721f
"
]
}
},
"
a354c6f80ce347e5a3ef64af87c0eccb
": {
"
4e63af16f1084ca98a6fa5a282f2a81e
": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
...
...
@@ -332,12 +332,12 @@
"left": null
}
},
"
85823e71fea54c39bd11e2e97234883
6": {
"
49f4b3c7f6ff42b4b9132a8550e1218
6": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_
8a919053b780449aae5523658ad611fa
",
"style": "IPY_MODEL_
9d3e775ee11e4cf4b587b64fbc3cc6f7
",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
...
...
@@ -352,30 +352,30 @@
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_
5bae9393a58b44f7b69fb04816f94f6f
"
"layout": "IPY_MODEL_
70f68a9a51ac46e6ab7e51fb4fc6bda3
"
}
},
"
fb11acd663fa4e71b041d67310d045fd
": {
"
8ec9e1a4883245daaf029458ee09721f
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_
d26c6d16c7f24030ab2da5285bf198ee
",
"style": "IPY_MODEL_
fdb8ab377c114bc3b862ba76eb93cef7
",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 21.1M/21.1M [00:
02<00:00, 9.36M
B/s]",
"value": " 21.1M/21.1M [00:
36<00:00, 605k
B/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_
f7767886b2364c8d9efdc79e175ad8eb
"
"layout": "IPY_MODEL_
cd267c153c244621a1f50706d2ddc897
"
}
},
"
8a919053b780449aae5523658ad611fa
": {
"
9d3e775ee11e4cf4b587b64fbc3cc6f7
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
...
...
@@ -390,7 +390,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
"
5bae9393a58b44f7b69fb04816f94f6f
": {
"
70f68a9a51ac46e6ab7e51fb4fc6bda3
": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
...
...
@@ -441,7 +441,7 @@
"left": null
}
},
"
d26c6d16c7f24030ab2da5285bf198ee
": {
"
fdb8ab377c114bc3b862ba76eb93cef7
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
...
...
@@ -455,7 +455,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
"
f7767886b2364c8d9efdc79e175ad8eb
": {
"
cd267c153c244621a1f50706d2ddc897
": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
...
...
@@ -517,8 +517,7 @@
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"<a href=\"https://kaggle.com/kernels/welcome?src=https://github.com/ultralytics/yolov5/blob/master/tutorial.ipynb\" target=\"_parent\"><img alt=\"Kaggle\" title=\"Open in Kaggle\" src=\"https://kaggle.com/static/images/open-in-kaggle.svg\"></a>"
"<a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
...
...
@@ -551,7 +550,7 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "
9b022435-4197-41fc-abea-81f86ce857d0
"
"outputId": "
0cabe440-e06c-48b9-9180-4b4ea1790ff5
"
},
"source": [
"!git clone https://github.com/ultralytics/yolov5 # clone repo\n",
...
...
@@ -564,7 +563,7 @@
"clear_output()\n",
"print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")"
],
"execution_count":
null
,
"execution_count":
1
,
"outputs": [
{
"output_type": "stream",
...
...
@@ -663,32 +662,32 @@
"id": "WQPtK1QYVaD_",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 6
5
,
"height": 6
6
,
"referenced_widgets": [
"
8815626359d84416a2f44a95500580a4
",
"
3b85609c4ce94a74823f2cfe141ce68e
",
"8
76609753c2946248890344722963d44
",
"
8abfdd8778e44b7ca0d29881cb1ada05
",
"
78c6c3d97c484916b8ee167c63556800
",
"
9dd0f182db5d45378ceafb855e486eb8
",
"
a3dab28b45c247089a3d1b8b09f327de
",
"
32451332b7a94ba9aacddeaa6ac94d50
"
"
cef5e9351ca743bcba5febac0b096a30
",
"
ec326c52378f4410920c328f221e0514
",
"8
3000c64a11c4ae8abd6f0ef2f108cef
",
"
0f7899eb719f4a9c9852426551f97be9
",
"
886ac5b18b3c4c82bf15ad5055f1e17e
",
"
4e67b3c3a49849c7a7ba28b7eec96e7a
",
"
62c3682ff1804571a483d46664533969
",
"
599dda3b608b432393760b2ca4ae7c7d
"
]
},
"outputId": "
81521192-cf67-4a47-a4cc-434cb0ebc363
"
"outputId": "
56b6402a-81d5-41d0-a3c8-8889db1fca6c
"
},
"source": [
"# Download COCO val2017\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../ && rm tmp.zip"
],
"execution_count":
null
,
"execution_count":
2
,
"outputs": [
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "
8815626359d84416a2f44a95500580a4
",
"model_id": "
cef5e9351ca743bcba5febac0b096a30
",
"version_minor": 0,
"version_major": 2
},
...
...
@@ -716,45 +715,45 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "
2340b131-9943-4cd6-fd3a-8272aeb0774f
"
"outputId": "
a5d41761-f1a0-41fe-d0bb-4cceebd7c4a6
"
},
"source": [
"# Run YOLOv5x on COCO val2017\n",
"!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65"
"!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65
--half
"
],
"execution_count":
null
,
"execution_count":
3
,
"outputs": [
{
"output_type": "stream",
"text": [
"Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
"YOLOv5 🚀 v5.0-1
-g0f395b3
torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False,
half=True,
img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
"YOLOv5 🚀 v5.0-1
57-gc6b51f4
torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n",
"100% 168M/168M [00:0
5<00:00, 32.3
MB/s]\n",
"100% 168M/168M [00:0
1<00:00, 156
MB/s]\n",
"\n",
"Fusing layers... \n",
"Model Summary: 476 layers, 87730285 parameters, 0 gradients
, 218.8 GFLOPs
\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels...
4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29
it/s]\n",
"Model Summary: 476 layers, 87730285 parameters, 0 gradients\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels...
4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3008.87
it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n",
" Class
Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87
it/s]\n",
" all
5000 36335 0.745 0.627 0.68
0.49\n",
"Speed: 5.3/1.
6/6.9
ms inference/NMS/total per 640x640 image at batch-size 32\n",
" Class
Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:17<00:00, 2.02
it/s]\n",
" all
5000 36335 0.746 0.626 0.68
0.49\n",
"Speed: 5.3/1.
5/6.8
ms inference/NMS/total per 640x640 image at batch-size 32\n",
"\n",
"Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n",
"loading annotations into memory...\n",
"Done (t=0.4
8
s)\n",
"Done (t=0.4
4
s)\n",
"creating index...\n",
"index created!\n",
"Loading and preparing results...\n",
"DONE (t=
5.0
8s)\n",
"DONE (t=
4.8
8s)\n",
"creating index...\n",
"index created!\n",
"Running per image evaluation...\n",
"Evaluate annotation type *bbox*\n",
"DONE (t=
90.51
s).\n",
"DONE (t=
83.47
s).\n",
"Accumulating evaluation results...\n",
"DONE (t=1
5.1
6s).\n",
"DONE (t=1
2.9
6s).\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n",
" Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n",
" Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n",
...
...
@@ -827,32 +826,32 @@
"id": "Knxi2ncxWffW",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 6
5
,
"height": 6
6
,
"referenced_widgets": [
"
0fffa335322b41658508e06aed0acbf0
",
"
a354c6f80ce347e5a3ef64af87c0eccb
",
"
85823e71fea54c39bd11e2e97234883
6",
"
fb11acd663fa4e71b041d67310d045fd
",
"
8a919053b780449aae5523658ad611fa
",
"
5bae9393a58b44f7b69fb04816f94f6f
",
"
d26c6d16c7f24030ab2da5285bf198ee
",
"
f7767886b2364c8d9efdc79e175ad8eb
"
"
217ca488c82a4b7a80318b70887a556e
",
"
4e63af16f1084ca98a6fa5a282f2a81e
",
"
49f4b3c7f6ff42b4b9132a8550e1218
6",
"
8ec9e1a4883245daaf029458ee09721f
",
"
9d3e775ee11e4cf4b587b64fbc3cc6f7
",
"
70f68a9a51ac46e6ab7e51fb4fc6bda3
",
"
fdb8ab377c114bc3b862ba76eb93cef7
",
"
cd267c153c244621a1f50706d2ddc897
"
]
},
"outputId": "
b41ac253-9e1b-4c26-d78b-700ea0154f43
"
"outputId": "
9e4788c2-e1d4-4a13-c3d2-984f5df7ffab
"
},
"source": [
"# Download COCO128\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../ && rm tmp.zip"
],
"execution_count":
null
,
"execution_count":
2
,
"outputs": [
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "
0fffa335322b41658508e06aed0acbf0
",
"model_id": "
217ca488c82a4b7a80318b70887a556e
",
"version_minor": 0,
"version_major": 2
},
...
...
@@ -918,23 +917,93 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "
e715d09c-5d93-4912-a0df-9da0893f2014
"
"outputId": "
70004839-0c90-4bc0-c0e5-9a92f3e65b01
"
},
"source": [
"# Train YOLOv5s on COCO128 for 3 epochs\n",
"!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --
nosave --
cache"
"!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
],
"execution_count":
null
,
"execution_count":
4
,
"outputs": [
{
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
"YOLOv5 🚀 v5.0-2-g54d6516 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=1, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n",
"\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
"2021-06-08 16:52:25.719745: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n",
"\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n",
"\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt to yolov5s.pt...\n",
"100% 14.1M/14.1M [00:00<00:00, 18.7MB/s]\n",
"\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 3520 models.common.Focus [3, 32, 3] \n",
" 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
" 2 -1 1 18816 models.common.C3 [64, 64, 1] \n",
" 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
" 4 -1 1 156928 models.common.C3 [128, 128, 3] \n",
" 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
" 6 -1 1 625152 models.common.C3 [256, 256, 3] \n",
" 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
" 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n",
" 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
" 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 12 [-1, 6] 1 0 models.common.Concat [1] \n",
" 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n",
" 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n",
" 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 16 [-1, 4] 1 0 models.common.Concat [1] \n",
" 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n",
" 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n",
" 19 [-1, 14] 1 0 models.common.Concat [1] \n",
" 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n",
" 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n",
" 22 [-1, 10] 1 0 models.common.Concat [1] \n",
" 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
"Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n",
"\n",
"Transferred 362/362 items from yolov5s.pt\n",
"\n",
"WARNING: Dataset not found, nonexistent paths: ['/content/coco128/images/train2017']\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip ...\n",
"100% 21.1M/21.1M [00:00<00:00, 68.2MB/s]\n",
"Dataset autodownload success\n",
"\n",
"Scaled weight_decay = 0.0005\n",
"Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2036.51it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 189.76it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 687414.74it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 93.37it/s]\n",
"Plotting labels... \n",
"\n",
"\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
"Image sizes 640 train, 640 test\n",
"Using 2 dataloader workers\n",
"Logging results to runs/train/exp\n",
"Starting training for 1 epochs...\n",
"\n",
" Epoch gpu_mem box obj cls total labels img_size\n",
" 0/0 10.8G 0.04226 0.06068 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.35it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:06<00:00, 1.53s/it]\n",
" all 128 929 0.633 0.641 0.668 0.439\n",
"1 epochs completed in 0.005 hours.\n",
"\n",
"Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
"Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n",
"\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
"YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=
Tru
e, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n",
"Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=
Fals
e, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n",
"\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
"2021-0
4-12 10:29:58.539457: I tensorflow/stream_executor/platform/default/dso_loader.cc:49
] Successfully opened dynamic library libcudart.so.11.0\n",
"2021-0
6-08 16:53:03.275914: I tensorflow/stream_executor/platform/default/dso_loader.cc:53
] Successfully opened dynamic library libcudart.so.11.0\n",
"\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n",
"\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n",
"\n",
...
...
@@ -969,10 +1038,10 @@
"Transferred 362/362 items from yolov5s.pt\n",
"Scaled weight_decay = 0.0005\n",
"Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00,
796544.38
it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00,
176.73
it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00,
500812.4
2it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:0
0<00:00, 134.10
it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00,
824686.50
it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00,
201.90
it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00,
23766.9
2it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:0
1<00:00, 98.35
it/s]\n",
"Plotting labels... \n",
"\n",
"\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
...
...
@@ -982,19 +1051,19 @@
"Starting training for 3 epochs...\n",
"\n",
" Epoch gpu_mem box obj cls total labels img_size\n",
" 0/2
3.29G 0.04368 0.065 0.02127 0.1299 183 640: 100% 8/8 [00:03<00:00, 2.2
1it/s]\n",
" Class
Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.09
s/it]\n",
" all
128 929 0.605 0.657 0.666 0.434
\n",
" 0/2
10.8G 0.04226 0.06067 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.4
1it/s]\n",
" Class
Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.21
s/it]\n",
" all
128 929 0.633 0.641 0.668 0.439
\n",
"\n",
" Epoch gpu_mem box obj cls total labels img_size\n",
" 1/2
6.65G 0.04556 0.0651 0.01987 0.1305 166 640: 100% 8/8 [00:01<00:00, 5.18
it/s]\n",
" Class
Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.72
it/s]\n",
" all
128 929 0.61 0.66 0.669
0.438\n",
" 1/2
8.29G 0.04571 0.06616 0.01952 0.1314 164 640: 100% 8/8 [00:01<00:00, 5.65
it/s]\n",
" Class
Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.21
it/s]\n",
" all
128 929 0.613 0.659 0.669
0.438\n",
"\n",
" Epoch gpu_mem box obj cls total labels img_size\n",
" 2/2
6.65G 0.04624 0.06923 0.0196 0.1351 182 640: 100% 8/8 [00:01<00:00, 5.1
9it/s]\n",
" Class
Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.27
it/s]\n",
" all
128 929 0.618 0.659 0.671 0.438
\n",
" 2/2
8.29G 0.04542 0.0718 0.01861 0.1358 191 640: 100% 8/8 [00:01<00:00, 4.8
9it/s]\n",
" Class
Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.48
it/s]\n",
" all
128 929 0.636 0.652 0.67 0.44
\n",
"3 epochs completed in 0.007 hours.\n",
"\n",
"Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
...
...
@@ -1261,4 +1330,4 @@
"outputs": []
}
]
}
}
\ No newline at end of file