Commit 6b718e91
Authored April 12, 2021 by Glenn Jocher

Created using Colaboratory

Parent: 54d65160

Showing 1 changed file with 79 additions and 83 deletions:

tutorial.ipynb  +79  -83
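If you have a local clone of the repository, the same change can be inspected from the command line. This is a minimal sketch using standard git commands and the commit hash shown above; it assumes the commit is reachable from your fetched remote.

# Summary of the commit (files touched, +/- counts)
git show --stat 6b718e91

# Full diff of the notebook in this commit
git show 6b718e91 -- tutorial.ipynb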
@@ -16,7 +16,7 @@
"accelerator": "GPU",
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"
b54ab52f1d4f4903897ab6cd49a3b9b2
": {
"
8815626359d84416a2f44a95500580a4
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
@@ -28,15 +28,15 @@
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_
1852f93fc2714d40adccb8aa161c42ff
",
"layout": "IPY_MODEL_
3b85609c4ce94a74823f2cfe141ce68e
",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_
3293cfe869bd4a1bbbe18b49b6815de1
",
"IPY_MODEL_8
d5ee8b8ab6d46b98818bd2c562ddd1c
"
"IPY_MODEL_
876609753c2946248890344722963d44
",
"IPY_MODEL_8
abfdd8778e44b7ca0d29881cb1ada05
"
]
}
},
"
1852f93fc2714d40adccb8aa161c42ff
": {
"
3b85609c4ce94a74823f2cfe141ce68e
": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -87,12 +87,12 @@
"left": null
}
},
"
3293cfe869bd4a1bbbe18b49b6815de1
": {
"
876609753c2946248890344722963d44
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_
49fcb2adb0354430b76f491af98abfe9
",
"style": "IPY_MODEL_
78c6c3d97c484916b8ee167c63556800
",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
@@ -107,30 +107,30 @@
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_
c7d76e0c53064363add56b8d05e561f5
"
"layout": "IPY_MODEL_
9dd0f182db5d45378ceafb855e486eb8
"
}
},
"8
d5ee8b8ab6d46b98818bd2c562ddd1c
": {
"8
abfdd8778e44b7ca0d29881cb1ada05
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_
48f321f789634aa584f8a29a3b925dd5
",
"style": "IPY_MODEL_
a3dab28b45c247089a3d1b8b09f327de
",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 781M/781M [0
0:13<00:00, 62.
6MB/s]",
"value": " 781M/781M [0
8:43<00:00, 1.5
6MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_
6610d6275f3e49d9937d50ed0a105947
"
"layout": "IPY_MODEL_
32451332b7a94ba9aacddeaa6ac94d50
"
}
},
"
49fcb2adb0354430b76f491af98abfe9
": {
"
78c6c3d97c484916b8ee167c63556800
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
@@ -145,7 +145,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
"
c7d76e0c53064363add56b8d05e561f5
": {
"
9dd0f182db5d45378ceafb855e486eb8
": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -196,7 +196,7 @@
"left": null
}
},
"
48f321f789634aa584f8a29a3b925dd5
": {
"
a3dab28b45c247089a3d1b8b09f327de
": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
@@ -210,7 +210,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
"
6610d6275f3e49d9937d50ed0a105947
": {
"
32451332b7a94ba9aacddeaa6ac94d50
": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -550,7 +550,7 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "
20027455-bf84-41fd-c902-b7282d53c91d
"
"outputId": "
4576b05f-d6d1-404a-fc99-5663c71e3dc4
"
},
"source": [
"!git clone https://github.com/ultralytics/yolov5 # clone repo\n",
@@ -563,12 +563,12 @@
"clear_output()\n",
"print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))"
],
"execution_count":
null
,
"execution_count":
1
,
"outputs": [
{
"output_type": "stream",
"text": [
"Setup complete. Using torch 1.8.
0
+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n"
"Setup complete. Using torch 1.8.
1
+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n"
],
"name": "stdout"
}
@@ -607,7 +607,7 @@
"output_type": "stream",
"text": [
"Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
"YOLOv5 🚀 v
5.0-1-g0f395b3 torch 1.8.1
+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"YOLOv5 🚀 v
4.0-137-g9b11f0c torch 1.8.0
+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Fusing layers... \n",
"Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n",
@@ -664,30 +664,30 @@
"base_uri": "https://localhost:8080/",
"height": 65,
"referenced_widgets": [
"
b54ab52f1d4f4903897ab6cd49a3b9b2
",
"
1852f93fc2714d40adccb8aa161c42ff
",
"
3293cfe869bd4a1bbbe18b49b6815de1
",
"8
d5ee8b8ab6d46b98818bd2c562ddd1c
",
"
49fcb2adb0354430b76f491af98abfe9
",
"
c7d76e0c53064363add56b8d05e561f5
",
"
48f321f789634aa584f8a29a3b925dd5
",
"
6610d6275f3e49d9937d50ed0a105947
"
"
8815626359d84416a2f44a95500580a4
",
"
3b85609c4ce94a74823f2cfe141ce68e
",
"
876609753c2946248890344722963d44
",
"8
abfdd8778e44b7ca0d29881cb1ada05
",
"
78c6c3d97c484916b8ee167c63556800
",
"
9dd0f182db5d45378ceafb855e486eb8
",
"
a3dab28b45c247089a3d1b8b09f327de
",
"
32451332b7a94ba9aacddeaa6ac94d50
"
]
},
"outputId": "
f0884441-78d9-443c-afa6-d00ec387908d
"
"outputId": "
81521192-cf67-4a47-a4cc-434cb0ebc363
"
},
"source": [
"# Download COCO val2017\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../ && rm tmp.zip"
],
"execution_count":
null
,
"execution_count":
2
,
"outputs": [
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "
b54ab52f1d4f4903897ab6cd49a3b9b2
",
"model_id": "
8815626359d84416a2f44a95500580a4
",
"version_minor": 0,
"version_major": 2
},
@@ -715,57 +715,57 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "
5b54c11e-9f4b-4d9a-8e6e-6a2a4f0cc60d
"
"outputId": "
2340b131-9943-4cd6-fd3a-8272aeb0774f
"
},
"source": [
"# Run YOLOv5x on COCO val2017\n",
"!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65"
],
"execution_count":
null
,
"execution_count":
6
,
"outputs": [
{
"output_type": "stream",
"text": [
"Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
"YOLOv5 🚀 v
4.0-137-g9b11f0c torch 1.8.0
+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"YOLOv5 🚀 v
5.0-1-g0f395b3 torch 1.8.1
+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v
4
.0/yolov5x.pt to yolov5x.pt...\n",
"100% 168M/168M [00:0
2<00:00, 59.1
MB/s]\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v
5
.0/yolov5x.pt to yolov5x.pt...\n",
"100% 168M/168M [00:0
5<00:00, 32.3
MB/s]\n",
"\n",
"Fusing layers... \n",
"Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017'
for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3236.68
it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017'
images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29
it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:2
0<00:00, 1.95
it/s]\n",
" all 5000 36335 0.74
9 0.619 0.68 0.486
\n",
"Speed: 5.3/1.
7
/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:2
3<00:00, 1.87
it/s]\n",
" all 5000 36335 0.74
5 0.627 0.68 0.49
\n",
"Speed: 5.3/1.
6
/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n",
"\n",
"Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n",
"loading annotations into memory...\n",
"Done (t=0.4
3
s)\n",
"Done (t=0.4
8
s)\n",
"creating index...\n",
"index created!\n",
"Loading and preparing results...\n",
"DONE (t=5.
10
s)\n",
"DONE (t=5.
08
s)\n",
"creating index...\n",
"index created!\n",
"Running per image evaluation...\n",
"Evaluate annotation type *bbox*\n",
"DONE (t=
88.52
s).\n",
"DONE (t=
90.51
s).\n",
"Accumulating evaluation results...\n",
"DONE (t=1
7.17
s).\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.50
1
\n",
" Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.68
7
\n",
" Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.54
4
\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.3
38
\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.5
48
\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.6
37
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.3
78
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.62
8
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.68
0
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.52
0
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.7
29
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.82
6
\n",
"DONE (t=1
5.16
s).\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.50
4
\n",
" Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.68
8
\n",
" Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.54
6
\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.3
51
\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.5
51
\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.6
44
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.3
82
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.62
9
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.68
1
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.52
4
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.7
35
\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.82
7
\n",
"Results saved to runs/test/exp\n"
],
"name": "stdout"
@@ -916,28 +916,25 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "
cf494627-09b9-4399-ff0c-fdb62b32340a
"
"outputId": "
e715d09c-5d93-4912-a0df-9da0893f2014
"
},
"source": [
"# Train YOLOv5s on COCO128 for 3 epochs\n",
"!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache"
],
"execution_count":
null
,
"execution_count":
12
,
"outputs": [
{
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
"YOLOv5 🚀 v
4.0-137-g9b11f0c torch 1.8.0
+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"YOLOv5 🚀 v
5.0-2-g54d6516 torch 1.8.1
+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n",
"\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n",
"Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n",
"2021-03-14 04:18:58.124672: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n",
"\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
"2021-04-12 10:29:58.539457: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
"\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n",
"100% 14.1M/14.1M [00:00<00:00, 63.1MB/s]\n",
"\n",
"\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 3520 models.common.Focus [3, 32, 3] \n",
@@ -970,11 +967,10 @@
"Transferred 362/362 items from yolov5s.pt\n",
"Scaled weight_decay = 0.0005\n",
"Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2956.76it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 205.30it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 604584.36it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 144.17it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 796544.38it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.73it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 500812.42it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 134.10it/s]\n",
"Plotting labels... \n",
"\n",
"\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
@@ -984,23 +980,23 @@
"Starting training for 3 epochs...\n",
"\n",
" Epoch gpu_mem box obj cls total labels img_size\n",
" 0/2 3.29G 0.04
237 0.06417 0.02121 0.1277 183 640: 100% 8/8 [00:03<00:00, 2.4
1it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.0
4
s/it]\n",
" all 128 929 0.6
42 0.637 0.661 0.432
\n",
" 0/2 3.29G 0.04
368 0.065 0.02127 0.1299 183 640: 100% 8/8 [00:03<00:00, 2.2
1it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.0
9
s/it]\n",
" all 128 929 0.6
05 0.657 0.666 0.434
\n",
"\n",
" Epoch gpu_mem box obj cls total labels img_size\n",
" 1/2 6.65G 0.04
431 0.06403 0.019 0.1273 166 640: 100% 8/8 [00:01<00:00, 5.73
it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00,
3.21
it/s]\n",
" all 128 929
0.662 0.626 0.658 0.433
\n",
" 1/2 6.65G 0.04
556 0.0651 0.01987 0.1305 166 640: 100% 8/8 [00:01<00:00, 5.18
it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00,
2.72
it/s]\n",
" all 128 929
0.61 0.66 0.669 0.438
\n",
"\n",
" Epoch gpu_mem box obj cls total labels img_size\n",
" 2/2 6.65G 0.04506 0.06836 0.01913 0.1325 182 640: 100% 8/8 [00:01<00:00, 5.51it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.35it/s]\n",
" all 128 929 0.658 0.625 0.661 0.433\n",
"Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
"Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n",
" 2/2 6.65G 0.04624 0.06923 0.0196 0.1351 182 640: 100% 8/8 [00:01<00:00, 5.19it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.27it/s]\n",
" all 128 929 0.618 0.659 0.671 0.438\n",
"3 epochs completed in 0.007 hours.\n",
"\n"
"\n",
"Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
"Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n"
],
"name": "stdout"
}
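For convenience, the commands exercised by this revision of tutorial.ipynb (visible in the diff context above) can be reproduced outside Colab. This is a minimal shell sketch assuming a CUDA-capable machine; the dependency-install step is an assumption, since the setup cell's install line is elided ("...") in the hunks above, while the remaining commands are copied from the notebook cells as shown.

# Clone the repository and check out this revision of the tutorial
git clone https://github.com/ultralytics/yolov5
cd yolov5
git checkout 6b718e91
pip install -r requirements.txt  # assumed setup step (not shown in the diff above)

# Download COCO val2017 (as in the notebook's download cell)
python -c "import torch; torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')"
unzip -q tmp.zip -d ../ && rm tmp.zip

# Run YOLOv5x on COCO val2017 (as in the notebook's test cell)
python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65

# Train YOLOv5s on COCO128 for 3 epochs (as in the notebook's train cell)
python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache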