Commit ebe563e7 authored by Glenn Jocher

Created using Colaboratory

Parent 9ac9ec34
@@ -564,7 +564,7 @@
 "clear_output()\n",
 "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))"
 ],
-"execution_count": 1,
+"execution_count": null,
 "outputs": [
 {
 "output_type": "stream",
@@ -600,7 +600,7 @@
 "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source inference/images/\n",
 "Image(filename='inference/output/zidane.jpg', width=600)"
 ],
-"execution_count": 38,
+"execution_count": null,
 "outputs": [
 {
 "output_type": "stream",
@@ -641,7 +641,7 @@
 "id": "4qbaa3iEcrcE"
 },
 "source": [
-"Available inference sources:\n",
+"Results are saved to `inference/output`. A full list of available inference sources:\n",
 "<img src=\"https://user-images.githubusercontent.com/26833433/98274798-2b7a7a80-1f94-11eb-91a4-70c73593e26b.jpg\" width=\"900\"> "
 ]
 },
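The hunk above points readers to an image listing the supported inference sources. For readers viewing this diff without the image, here is a hedged sketch of the kinds of `--source` arguments `detect.py` accepts around this point in the repository's history; the file names and stream URLs below are illustrative only:

```python
# Illustrative --source values for detect.py (paths/URLs are placeholders);
# the authoritative list is the image in the cell above and detect.py itself.
!python detect.py --weights yolov5s.pt --source 0                         # webcam
!python detect.py --weights yolov5s.pt --source inference/images/bus.jpg  # single image
!python detect.py --weights yolov5s.pt --source movie.mp4                 # video file
!python detect.py --weights yolov5s.pt --source inference/images/         # directory
!python detect.py --weights yolov5s.pt --source rtsp://example.com/stream # RTSP stream
!python detect.py --weights yolov5s.pt --source http://example.com/v.mp4  # HTTP stream
```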
@@ -690,7 +690,7 @@
 "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
 "!unzip -q tmp.zip -d ../ && rm tmp.zip"
 ],
-"execution_count": 16,
+"execution_count": null,
 "outputs": [
 {
 "output_type": "display_data",
@@ -730,7 +730,7 @@
 "# Run YOLOv5x on COCO val2017\n",
 "!python test.py --weights yolov5x.pt --data coco.yaml --img 640"
 ],
-"execution_count": 17,
+"execution_count": null,
 "outputs": [
 {
 "output_type": "stream",
@@ -797,9 +797,10 @@
 },
 "source": [
 "# Download COCO test-dev2017\n",
-"gdrive_download('1cXZR_ckHki6nddOmcysCuuJFM--T-Q6L','coco2017labels.zip') # annotations\n",
+"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n",
+"!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
 "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n",
-"!mv ./test2017 ./coco/images && mv ./coco ../ # move images into /coco and move /coco alongside /yolov5"
+"%mv ./test2017 ./coco/images && mv ./coco ../ # move images to /coco and move /coco next to /yolov5"
 ],
 "execution_count": null,
 "outputs": []
@@ -852,7 +853,7 @@
 "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n",
 "!unzip -q tmp.zip -d ../ && rm tmp.zip"
 ],
-"execution_count": 22,
+"execution_count": null,
 "outputs": [
 {
 "output_type": "display_data",
@@ -916,7 +917,7 @@
 "# Train YOLOv5s on COCO128 for 3 epochs\n",
 "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache"
 ],
-"execution_count": 23,
+"execution_count": null,
 "outputs": [
 {
 "output_type": "stream",
@@ -1023,7 +1024,7 @@
 "source": [
 "## 4.2 Local Logging\n",
 "\n",
-"All results are logged by default to the `runs/exp0` directory, with a new directory created for each new training as `runs/exp1`, `runs/exp2`, etc. View `train_batch*.jpg` to see training images, labels and augmentation effects. A **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)."
+"All results are logged by default to the `runs/exp0` directory, with a new directory created for each new training as `runs/exp1`, `runs/exp2`, etc. View train and test jpgs to see mosaics, labels/predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)."
 ]
 },
 {
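Since the updated text asks the reader to view the train and test jpgs, here is a small hedged sketch of displaying those logged images from inside the notebook; the directory and file names follow the `runs/exp0` and `train_batch0.jpg` / `test_batch0_gt.jpg` conventions mentioned in this diff, and may differ in other versions:

```python
from IPython.display import Image, display

# Mosaic training batches and test-batch ground-truth images are written as
# jpgs inside the experiment directory (runs/exp0 for the first run).
display(Image(filename='runs/exp0/train_batch0.jpg', width=900))    # train mosaics + labels
display(Image(filename='runs/exp0/test_batch0_gt.jpg', width=900))  # test batch ground truth
```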
@@ -1046,7 +1047,7 @@
 },
 "source": [
 "> <img src=\"https://user-images.githubusercontent.com/26833433/83667642-90fcb200-a583-11ea-8fa3-338bbf7da194.jpeg\" width=\"750\"> \n",
-"`test_batch0_gt.jpg` train batch 0 mosaics and labels\n",
+"`train_batch0.jpg` train batch 0 mosaics and labels\n",
 "\n",
 "> <img src=\"https://user-images.githubusercontent.com/26833433/83667626-8c37fe00-a583-11ea-997b-0923fe59b29b.jpeg\" width=\"750\"> \n",
 "`test_batch0_gt.jpg` shows test batch 0 ground truth\n",
@@ -1061,7 +1062,7 @@
 "id": "7KN5ghjE6ZWh"
 },
 "source": [
-"Training losses and performance metrics are also logged to Tensorboard and a custom `runs/exp0/results.txt` logfile. `results.txt` is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `yolov5s.pt` (orange)."
+"Training losses and performance metrics are also logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and a custom `results.txt` logfile which is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `--weights yolov5s.pt` (orange)."
 ]
 },
 {
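The updated text links to TensorBoard; inside Colab the logs can be viewed with the notebook TensorBoard extension. A hedged sketch, assuming the log directory is `runs` as described in the Local Logging section above:

```python
# Start TensorBoard inside the notebook and point it at the YOLOv5 log directory.
%load_ext tensorboard
%tensorboard --logdir runs
```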