Commit d631a690 authored by Glenn Jocher

Created using Colaboratory

Parent 20f1b7ea
@@ -17,7 +17,7 @@
"accelerator": "GPU",
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"c79427d84662495db06b89a791d61f31": {
"c31d2039ccf74c22b67841f4877d1186": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"model_module_version": "1.5.0",
@@ -32,14 +32,14 @@
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_469c8e5ae4d64adea773341ec22d5851",
"IPY_MODEL_2435573a321341878622d79e1f48f3db",
"IPY_MODEL_a4dcb697b08b4b70ab3ef3ffa54c28e4"
"IPY_MODEL_d4bba1727c714d94ad58a72bffa07c4c",
"IPY_MODEL_9aeff9f1780b45f892422fdc96e56913",
"IPY_MODEL_bf55a7c71d074d3fa88b10b997820825"
],
"layout": "IPY_MODEL_87495c10d22c4b82bd724a4d7c300df3"
"layout": "IPY_MODEL_d8b66044e2fb4f5b916696834d880c81"
}
},
"469c8e5ae4d64adea773341ec22d5851": {
"d4bba1727c714d94ad58a72bffa07c4c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
@@ -54,13 +54,13 @@
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_098c321358c24cdbb50f6c0e6623bf6c",
"layout": "IPY_MODEL_102e1deda239436fa72751c58202fa0f",
"placeholder": "​",
"style": "IPY_MODEL_20184030ca9d4aef9dac0a149b89e4d3",
"style": "IPY_MODEL_4fd4431ced6c42368e18424912b877e4",
"value": "100%"
}
},
"2435573a321341878622d79e1f48f3db": {
"9aeff9f1780b45f892422fdc96e56913": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"model_module_version": "1.5.0",
@@ -76,15 +76,15 @@
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_790808c9b4fb448aa136cc1ade0f95b5",
"layout": "IPY_MODEL_cdd709c4f40941bea1b2053523c9fac8",
"max": 818322941,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_99b822fd56b749318b38d8ccbc4ac469",
"style": "IPY_MODEL_a1ef2d8de2b741c78ca5d938e2ddbcdf",
"value": 818322941
}
},
"a4dcb697b08b4b70ab3ef3ffa54c28e4": {
"bf55a7c71d074d3fa88b10b997820825": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
@@ -99,13 +99,13 @@
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_d542739146774953954e92db1666b951",
"layout": "IPY_MODEL_0dbce99bb6184238842cbec0587d564a",
"placeholder": "​",
"style": "IPY_MODEL_e11f3a2c51204778832631a5f150b21d",
"value": " 780M/780M [02:31<00:00, 4.89MB/s]"
"style": "IPY_MODEL_91ff5f93f2a24c5790ab29e347965946",
"value": " 780M/780M [01:10<00:00, 10.5MB/s]"
}
},
"87495c10d22c4b82bd724a4d7c300df3": {
"d8b66044e2fb4f5b916696834d880c81": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
@@ -157,7 +157,7 @@
"width": null
}
},
"098c321358c24cdbb50f6c0e6623bf6c": {
"102e1deda239436fa72751c58202fa0f": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
@@ -209,7 +209,7 @@
"width": null
}
},
"20184030ca9d4aef9dac0a149b89e4d3": {
"4fd4431ced6c42368e18424912b877e4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
@@ -224,7 +224,7 @@
"description_width": ""
}
},
"790808c9b4fb448aa136cc1ade0f95b5": {
"cdd709c4f40941bea1b2053523c9fac8": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
@@ -276,7 +276,7 @@
"width": null
}
},
"99b822fd56b749318b38d8ccbc4ac469": {
"a1ef2d8de2b741c78ca5d938e2ddbcdf": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"model_module_version": "1.5.0",
@@ -292,7 +292,7 @@
"description_width": ""
}
},
"d542739146774953954e92db1666b951": {
"0dbce99bb6184238842cbec0587d564a": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
@@ -344,7 +344,7 @@
"width": null
}
},
"e11f3a2c51204778832631a5f150b21d": {
"91ff5f93f2a24c5790ab29e347965946": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
@@ -404,7 +404,7 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "7728cbd8-6240-4814-e8fe-a223b9e57ed9"
"outputId": "185d0979-edcd-4860-e6fb-b8a27dbf5096"
},
"source": [
"!git clone https://github.com/ultralytics/yolov5 # clone\n",
@@ -415,20 +415,20 @@
"import utils\n",
"display = utils.notebook_init() # checks"
],
"execution_count": null,
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"YOLOv5 🚀 v6.1-343-g685332e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n"
"YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n"
]
},
{
"output_type": "stream",
"name": "stdout",
"text": [
"Setup complete ✅ (8 CPUs, 51.0 GB RAM, 38.6/166.8 GB disk)\n"
"Setup complete ✅ (8 CPUs, 51.0 GB RAM, 37.4/166.8 GB disk)\n"
]
}
]
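The setup cell in this hunk clones the repo and calls utils.notebook_init(), which prints the version/hardware banner seen in the output above. A minimal sketch of the same steps outside Colab (assuming a clean Python 3 environment; the install command follows the repo README and is an assumption here, since the cell's middle lines are truncated out of this hunk):

    # sketch: reproduce the notebook setup in a local shell + Python session
    #   git clone https://github.com/ultralytics/yolov5
    #   cd yolov5 && pip install -r requirements.txt   # per the repo README (assumption)
    import utils                     # yolov5's own utils package, imported from the repo root
    display = utils.notebook_init()  # runs environment checks and prints the summary banner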
@@ -461,29 +461,29 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "2d81665e-a0c4-489a-c92e-fe815223adfb"
"outputId": "4b13989f-32a4-4ef0-b403-06ff3aac255c"
},
"source": [
"!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n",
"#display.Image(filename='runs/detect/exp/zidane.jpg', width=600)"
],
"execution_count": null,
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n",
"YOLOv5 🚀 v6.1-343-g685332e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
"YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
"\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt to yolov5s.pt...\n",
"100% 14.1M/14.1M [00:02<00:00, 6.87MB/s]\n",
"100% 14.1M/14.1M [00:00<00:00, 53.9MB/s]\n",
"\n",
"Fusing layers... \n",
"YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n",
"image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.014s)\n",
"image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.019s)\n",
"Speed: 0.5ms pre-process, 16.3ms inference, 22.1ms NMS per image at shape (1, 3, 640, 640)\n",
"image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.016s)\n",
"image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.021s)\n",
"Speed: 0.6ms pre-process, 18.6ms inference, 25.0ms NMS per image at shape (1, 3, 640, 640)\n",
"Results saved to \u001b[1mruns/detect/exp\u001b[0m\n"
]
}
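The detect.py run in this hunk downloads yolov5s.pt automatically and writes annotated images to runs/detect/exp. An equivalent way to use the same pretrained model is the PyTorch Hub API that YOLOv5 documents; a minimal sketch (the sample image URL is the repo's standard example and an assumption here):

    import torch

    # downloads and caches yolov5s.pt on first call
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
    results = model('https://ultralytics.com/images/zidane.jpg')  # sample image (assumption)
    results.print()  # e.g. "2 persons, 2 ties", matching the CLI output above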
@@ -527,27 +527,27 @@
"base_uri": "https://localhost:8080/",
"height": 49,
"referenced_widgets": [
"c79427d84662495db06b89a791d61f31",
"469c8e5ae4d64adea773341ec22d5851",
"2435573a321341878622d79e1f48f3db",
"a4dcb697b08b4b70ab3ef3ffa54c28e4",
"87495c10d22c4b82bd724a4d7c300df3",
"098c321358c24cdbb50f6c0e6623bf6c",
"20184030ca9d4aef9dac0a149b89e4d3",
"790808c9b4fb448aa136cc1ade0f95b5",
"99b822fd56b749318b38d8ccbc4ac469",
"d542739146774953954e92db1666b951",
"e11f3a2c51204778832631a5f150b21d"
"c31d2039ccf74c22b67841f4877d1186",
"d4bba1727c714d94ad58a72bffa07c4c",
"9aeff9f1780b45f892422fdc96e56913",
"bf55a7c71d074d3fa88b10b997820825",
"d8b66044e2fb4f5b916696834d880c81",
"102e1deda239436fa72751c58202fa0f",
"4fd4431ced6c42368e18424912b877e4",
"cdd709c4f40941bea1b2053523c9fac8",
"a1ef2d8de2b741c78ca5d938e2ddbcdf",
"0dbce99bb6184238842cbec0587d564a",
"91ff5f93f2a24c5790ab29e347965946"
]
},
"outputId": "d880071b-84ce-4567-9e42-a3c3a78bff73"
"outputId": "a9004b06-37a6-41ed-a1f2-ac956f3963b3"
},
"source": [
"# Download COCO val\n",
"torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../datasets && rm tmp.zip"
],
"execution_count": null,
"execution_count": 3,
"outputs": [
{
"output_type": "display_data",
@@ -558,7 +558,7 @@
"application/vnd.jupyter.widget-view+json": {
"version_major": 2,
"version_minor": 0,
"model_id": "c79427d84662495db06b89a791d61f31"
"model_id": "c31d2039ccf74c22b67841f4877d1186"
}
},
"metadata": {}
@@ -572,48 +572,48 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "da9456fa-6663-44a8-975b-c99e89d0eb06"
"outputId": "c0f29758-4ec8-4def-893d-0efd6ed5b7f4"
},
"source": [
"# Run YOLOv5x on COCO val\n",
"!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half"
],
"execution_count": null,
"execution_count": 4,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n",
"YOLOv5 🚀 v6.1-343-g685332e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
"YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
"\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x.pt to yolov5x.pt...\n",
"100% 166M/166M [00:16<00:00, 10.3MB/s]\n",
"100% 166M/166M [00:35<00:00, 4.97MB/s]\n",
"\n",
"Fusing layers... \n",
"YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n",
"Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n",
"100% 755k/755k [00:00<00:00, 14.8MB/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 11214.34it/s]\n",
"100% 755k/755k [00:00<00:00, 49.4MB/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10716.86it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:05<00:00, 2.39it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:08<00:00, 2.28it/s]\n",
" all 5000 36335 0.743 0.625 0.683 0.504\n",
"Speed: 0.1ms pre-process, 4.7ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n",
"Speed: 0.1ms pre-process, 4.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n",
"\n",
"Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json...\n",
"loading annotations into memory...\n",
"Done (t=0.38s)\n",
"Done (t=0.41s)\n",
"creating index...\n",
"index created!\n",
"Loading and preparing results...\n",
"DONE (t=5.39s)\n",
"DONE (t=5.64s)\n",
"creating index...\n",
"index created!\n",
"Running per image evaluation...\n",
"Evaluate annotation type *bbox*\n",
"DONE (t=71.33s).\n",
"DONE (t=72.86s).\n",
"Accumulating evaluation results...\n",
"DONE (t=12.45s).\n",
"DONE (t=14.20s).\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n",
" Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n",
" Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n",
@@ -747,30 +747,31 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "9fe5caba-6b0f-4b6e-93a8-4075dae0ee35"
"outputId": "bce1b4bd-1a14-4c07-aebd-6c11e91ad24b"
},
"source": [
"# Train YOLOv5s on COCO128 for 3 epochs\n",
"!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
],
"execution_count": null,
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n",
"\u001b[34m\u001b[1mgithub: \u001b[0mskipping check (Docker image), for updates see https://github.com/ultralytics/yolov5\n",
"YOLOv5 🚀 v6.1-343-g685332e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
"\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
"YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
"\n",
"\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
"\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\n",
"\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n",
"\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 runs in ClearML\n",
"\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
"\n",
"Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n",
"Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n",
"100% 6.66M/6.66M [00:00<00:00, 31.8MB/s]\n",
"Dataset download success ✅ (1.5s), saved to \u001b[1m/content/datasets\u001b[0m\n",
"100% 6.66M/6.66M [00:00<00:00, 75.2MB/s]\n",
"Dataset download success ✅ (0.7s), saved to \u001b[1m/content/datasets\u001b[0m\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n",
@@ -802,14 +803,13 @@
"\n",
"Transferred 349/349 items from yolov5s.pt\n",
"\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
"Scaled weight_decay = 0.0005\n",
"\u001b[34m\u001b[1moptimizer:\u001b[0m SGD with parameter groups 57 weight (no decay), 60 weight, 60 bias\n",
"\u001b[34m\u001b[1malbumentations: \u001b[0mversion 1.0.3 required by YOLOv5, but version 0.1.12 is currently installed\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 13378.96it/s]\n",
"\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
"\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(always_apply=False, p=0.01, blur_limit=(3, 7)), MedianBlur(always_apply=False, p=0.01, blur_limit=(3, 7)), ToGray(always_apply=False, p=0.01), CLAHE(always_apply=False, p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7926.40it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 1053.85it/s]\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 975.81it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 296.75it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 258.62it/s]\n",
"Plotting labels to runs/train/exp/labels.jpg... \n",
"\n",
"\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
@@ -819,19 +819,19 @@
"Starting training for 3 epochs...\n",
"\n",
" Epoch gpu_mem box obj cls labels img_size\n",
" 0/2 3.76G 0.04445 0.06016 0.01651 247 640: 100% 8/8 [00:04<00:00, 1.74it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.38it/s]\n",
" all 128 929 0.763 0.611 0.716 0.469\n",
" 0/2 3.76G 0.04529 0.06712 0.01835 323 640: 100% 8/8 [00:05<00:00, 1.59it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.05it/s]\n",
" all 128 929 0.806 0.593 0.718 0.472\n",
"\n",
" Epoch gpu_mem box obj cls labels img_size\n",
" 1/2 4.79G 0.0443 0.06624 0.01655 188 640: 100% 8/8 [00:00<00:00, 8.46it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.44it/s]\n",
" all 128 929 0.787 0.631 0.734 0.488\n",
" 1/2 4.79G 0.04244 0.06423 0.01611 236 640: 100% 8/8 [00:00<00:00, 8.11it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.20it/s]\n",
" all 128 929 0.811 0.615 0.74 0.493\n",
"\n",
" Epoch gpu_mem box obj cls labels img_size\n",
" 2/2 4.79G 0.04283 0.0579 0.01571 217 640: 100% 8/8 [00:00<00:00, 9.69it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.70it/s]\n",
" all 128 929 0.807 0.632 0.741 0.491\n",
" 2/2 4.79G 0.04695 0.06875 0.0173 189 640: 100% 8/8 [00:00<00:00, 9.12it/s]\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.24it/s]\n",
" all 128 929 0.784 0.634 0.747 0.502\n",
"\n",
"3 epochs completed in 0.003 hours.\n",
"Optimizer stripped from runs/train/exp/weights/last.pt, 14.9MB\n",
@@ -840,79 +840,79 @@
"Validating runs/train/exp/weights/best.pt...\n",
"Fusing layers... \n",
"Model summary: 213 layers, 7225885 parameters, 0 gradients, 16.4 GFLOPs\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.36it/s]\n",
" all 128 929 0.808 0.631 0.741 0.491\n",
" person 128 254 0.886 0.689 0.812 0.53\n",
" bicycle 128 6 1 0.476 0.837 0.458\n",
" car 128 46 0.758 0.413 0.575 0.259\n",
" motorcycle 128 5 1 0.93 0.995 0.702\n",
" airplane 128 6 0.955 1 0.995 0.745\n",
" bus 128 7 0.755 0.714 0.832 0.691\n",
" train 128 3 1 0.553 0.789 0.493\n",
" truck 128 12 0.629 0.417 0.491 0.269\n",
" boat 128 6 1 0.332 0.507 0.201\n",
" traffic light 128 14 0.859 0.214 0.385 0.228\n",
" stop sign 128 2 0.809 1 0.995 0.796\n",
" bench 128 9 0.818 0.504 0.64 0.259\n",
" bird 128 16 0.921 1 0.995 0.64\n",
" cat 128 4 0.915 1 0.995 0.822\n",
" dog 128 9 0.869 0.556 0.902 0.6\n",
" horse 128 2 0.816 1 0.995 0.672\n",
" elephant 128 17 0.973 0.882 0.934 0.731\n",
" bear 128 1 0.699 1 0.995 0.895\n",
" zebra 128 4 0.877 1 0.995 0.947\n",
" giraffe 128 9 0.868 0.889 0.975 0.742\n",
" backpack 128 6 1 0.543 0.76 0.346\n",
" umbrella 128 18 0.864 0.611 0.898 0.522\n",
" handbag 128 19 0.701 0.127 0.335 0.174\n",
" tie 128 7 0.929 0.714 0.739 0.47\n",
" suitcase 128 4 0.658 0.75 0.795 0.536\n",
" frisbee 128 5 0.722 0.8 0.8 0.69\n",
" skis 128 1 0.776 1 0.995 0.3\n",
" snowboard 128 7 1 0.707 0.848 0.554\n",
" sports ball 128 6 0.662 0.667 0.602 0.316\n",
" kite 128 10 0.727 0.536 0.647 0.249\n",
" baseball bat 128 4 0.985 0.5 0.559 0.181\n",
" baseball glove 128 7 0.581 0.429 0.459 0.282\n",
" skateboard 128 5 0.739 0.6 0.705 0.501\n",
" tennis racket 128 7 0.759 0.429 0.566 0.31\n",
" bottle 128 18 0.593 0.405 0.574 0.294\n",
" wine glass 128 16 0.742 0.875 0.91 0.497\n",
" cup 128 36 0.833 0.694 0.817 0.514\n",
" fork 128 6 1 0.32 0.463 0.31\n",
" knife 128 16 0.757 0.585 0.73 0.385\n",
" spoon 128 22 0.812 0.393 0.632 0.374\n",
" bowl 128 28 0.869 0.571 0.718 0.496\n",
" banana 128 1 0.894 1 0.995 0.205\n",
" sandwich 128 2 1 0 0.308 0.263\n",
" orange 128 4 0.876 1 0.995 0.703\n",
" broccoli 128 11 0.821 0.364 0.442 0.323\n",
" carrot 128 24 0.709 0.625 0.72 0.464\n",
" hot dog 128 2 0.546 1 0.828 0.745\n",
" pizza 128 5 0.812 0.871 0.962 0.699\n",
" donut 128 14 0.686 1 0.96 0.825\n",
" cake 128 4 0.856 1 0.995 0.822\n",
" chair 128 35 0.591 0.577 0.616 0.311\n",
" couch 128 6 0.973 0.667 0.857 0.532\n",
" potted plant 128 14 0.839 0.786 0.827 0.45\n",
" bed 128 3 1 0 0.641 0.403\n",
" dining table 128 13 0.631 0.266 0.462 0.375\n",
" toilet 128 2 0.878 1 0.995 0.846\n",
" tv 128 2 0.707 1 0.995 0.821\n",
" laptop 128 3 1 0 0.863 0.498\n",
" mouse 128 2 1 0 0.0907 0.0544\n",
" remote 128 8 1 0.598 0.63 0.537\n",
" cell phone 128 8 0.661 0.5 0.465 0.249\n",
" microwave 128 3 0.823 1 0.995 0.767\n",
" oven 128 5 0.428 0.4 0.432 0.285\n",
" sink 128 6 0.354 0.167 0.268 0.178\n",
" refrigerator 128 5 0.649 0.8 0.806 0.551\n",
" book 128 29 0.618 0.207 0.333 0.161\n",
" clock 128 9 0.792 0.889 0.943 0.735\n",
" vase 128 2 0.502 1 0.995 0.895\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.20it/s]\n",
" all 128 929 0.781 0.637 0.747 0.502\n",
" person 128 254 0.872 0.693 0.81 0.534\n",
" bicycle 128 6 1 0.407 0.68 0.425\n",
" car 128 46 0.743 0.413 0.581 0.247\n",
" motorcycle 128 5 1 0.988 0.995 0.692\n",
" airplane 128 6 0.965 1 0.995 0.717\n",
" bus 128 7 0.706 0.714 0.814 0.697\n",
" train 128 3 1 0.582 0.806 0.477\n",
" truck 128 12 0.602 0.417 0.495 0.271\n",
" boat 128 6 0.961 0.333 0.464 0.224\n",
" traffic light 128 14 0.517 0.155 0.364 0.216\n",
" stop sign 128 2 0.782 1 0.995 0.821\n",
" bench 128 9 0.829 0.539 0.701 0.288\n",
" bird 128 16 0.924 1 0.995 0.655\n",
" cat 128 4 0.891 1 0.995 0.809\n",
" dog 128 9 1 0.659 0.883 0.604\n",
" horse 128 2 0.808 1 0.995 0.672\n",
" elephant 128 17 0.973 0.882 0.936 0.733\n",
" bear 128 1 0.692 1 0.995 0.995\n",
" zebra 128 4 0.872 1 0.995 0.922\n",
" giraffe 128 9 0.865 0.889 0.975 0.736\n",
" backpack 128 6 1 0.547 0.787 0.372\n",
" umbrella 128 18 0.823 0.667 0.889 0.504\n",
" handbag 128 19 0.516 0.105 0.304 0.153\n",
" tie 128 7 0.696 0.714 0.741 0.482\n",
" suitcase 128 4 0.716 1 0.995 0.553\n",
" frisbee 128 5 0.715 0.8 0.8 0.71\n",
" skis 128 1 0.694 1 0.995 0.398\n",
" snowboard 128 7 0.893 0.714 0.855 0.569\n",
" sports ball 128 6 0.659 0.667 0.602 0.307\n",
" kite 128 10 0.683 0.434 0.611 0.242\n",
" baseball bat 128 4 0.838 0.5 0.55 0.146\n",
" baseball glove 128 7 0.572 0.429 0.463 0.294\n",
" skateboard 128 5 0.697 0.6 0.702 0.476\n",
" tennis racket 128 7 0.62 0.429 0.544 0.29\n",
" bottle 128 18 0.591 0.402 0.572 0.295\n",
" wine glass 128 16 0.747 0.921 0.913 0.529\n",
" cup 128 36 0.824 0.639 0.826 0.535\n",
" fork 128 6 1 0.319 0.518 0.353\n",
" knife 128 16 0.768 0.62 0.654 0.374\n",
" spoon 128 22 0.824 0.427 0.65 0.382\n",
" bowl 128 28 0.8 0.643 0.726 0.525\n",
" banana 128 1 0.878 1 0.995 0.208\n",
" sandwich 128 2 1 0 0.62 0.546\n",
" orange 128 4 1 0.896 0.995 0.691\n",
" broccoli 128 11 0.586 0.364 0.481 0.349\n",
" carrot 128 24 0.702 0.589 0.722 0.475\n",
" hot dog 128 2 0.524 1 0.828 0.795\n",
" pizza 128 5 0.811 0.865 0.962 0.695\n",
" donut 128 14 0.653 1 0.964 0.853\n",
" cake 128 4 0.852 1 0.995 0.822\n",
" chair 128 35 0.536 0.571 0.593 0.31\n",
" couch 128 6 1 0.63 0.75 0.518\n",
" potted plant 128 14 0.775 0.738 0.839 0.478\n",
" bed 128 3 1 0 0.72 0.423\n",
" dining table 128 13 0.817 0.348 0.592 0.381\n",
" toilet 128 2 0.782 1 0.995 0.895\n",
" tv 128 2 0.711 1 0.995 0.821\n",
" laptop 128 3 1 0 0.789 0.42\n",
" mouse 128 2 1 0 0.0798 0.0399\n",
" remote 128 8 1 0.611 0.63 0.549\n",
" cell phone 128 8 0.685 0.375 0.428 0.245\n",
" microwave 128 3 0.803 1 0.995 0.767\n",
" oven 128 5 0.42 0.4 0.444 0.306\n",
" sink 128 6 0.288 0.167 0.34 0.247\n",
" refrigerator 128 5 0.632 0.8 0.805 0.572\n",
" book 128 29 0.494 0.207 0.332 0.161\n",
" clock 128 9 0.791 0.889 0.93 0.75\n",
" vase 128 2 0.355 1 0.995 0.895\n",
" scissors 128 1 1 0 0.332 0.0663\n",
" teddy bear 128 21 0.84 0.619 0.769 0.521\n",
" toothbrush 128 5 0.763 0.654 0.898 0.603\n",
" teddy bear 128 21 0.839 0.571 0.767 0.487\n",
" toothbrush 128 5 0.829 0.974 0.962 0.644\n",
"Results saved to \u001b[1mruns/train/exp\u001b[0m\n"
]
}
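The 3-epoch COCO128 run in this hunk finishes with best.pt and last.pt under runs/train/exp/weights (see the "Optimizer stripped" lines above). Loading such a checkpoint for inference uses the 'custom' PyTorch Hub entry point that YOLOv5 provides; a minimal sketch, assuming the training run's default output path:

    import torch

    # load a locally trained checkpoint via the repo's hub entry point
    model = torch.hub.load('ultralytics/yolov5', 'custom', path='runs/train/exp/weights/best.pt')
    results = model('data/images/bus.jpg')  # sample image shipped with the repo
    results.print()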