{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "YOLOv5 Tutorial",
      "provenance": [],
      "collapsed_sections": [],
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU",
    "widgets": {
      "application/vnd.jupyter.widget-state+json": {
        "484511f272e64eab8b42e68dac5f7a66": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HBoxModel",
          "model_module_version": "1.5.0",
          "state": {
            "_view_name": "HBoxView",
            "_dom_classes": [],
            "_model_name": "HBoxModel",
            "_view_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_view_count": null,
            "_view_module_version": "1.5.0",
            "box_style": "",
            "layout": "IPY_MODEL_78cceec059784f2bb36988d3336e4d56",
            "_model_module": "@jupyter-widgets/controls",
            "children": [
              "IPY_MODEL_ab93d8b65c134605934ff9ec5efb1bb6",
              "IPY_MODEL_30df865ded4c434191bce772c9a82f3a",
              "IPY_MODEL_20cdc61eb3404f42a12b37901b0d85fb"
            ]
          }
        },
        "78cceec059784f2bb36988d3336e4d56": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_view_name": "LayoutView",
            "grid_template_rows": null,
            "right": null,
            "justify_content": null,
            "_view_module": "@jupyter-widgets/base",
            "overflow": null,
            "_model_module_version": "1.2.0",
            "_view_count": null,
            "flex_flow": null,
            "width": null,
            "min_width": null,
            "border": null,
            "align_items": null,
            "bottom": null,
            "_model_module": "@jupyter-widgets/base",
            "top": null,
            "grid_column": null,
            "overflow_y": null,
            "overflow_x": null,
            "grid_auto_flow": null,
            "grid_area": null,
            "grid_template_columns": null,
            "flex": null,
            "_model_name": "LayoutModel",
            "justify_items": null,
            "grid_row": null,
            "max_height": null,
            "align_content": null,
            "visibility": null,
            "align_self": null,
            "height": null,
            "min_height": null,
            "padding": null,
            "grid_auto_rows": null,
            "grid_gap": null,
            "max_width": null,
            "order": null,
            "_view_module_version": "1.2.0",
            "grid_template_areas": null,
            "object_position": null,
            "object_fit": null,
            "grid_auto_columns": null,
            "margin": null,
            "display": null,
            "left": null
          }
        },
        "ab93d8b65c134605934ff9ec5efb1bb6": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_view_name": "HTMLView",
            "style": "IPY_MODEL_2d7239993a9645b09b221405ac682743",
            "_dom_classes": [],
            "description": "",
            "_model_name": "HTMLModel",
            "placeholder": "​",
            "_view_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "value": "100%",
            "_view_count": null,
            "_view_module_version": "1.5.0",
            "description_tooltip": null,
            "_model_module": "@jupyter-widgets/controls",
            "layout": "IPY_MODEL_17b5a87f92104ec7ab96bf507637d0d2"
          }
        },
        "30df865ded4c434191bce772c9a82f3a": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "FloatProgressModel",
          "model_module_version": "1.5.0",
          "state": {
            "_view_name": "ProgressView",
            "style": "IPY_MODEL_2358bfb2270247359e94b066b3cc3d1f",
            "_dom_classes": [],
            "description": "",
            "_model_name": "FloatProgressModel",
            "bar_style": "success",
            "max": 818322941,
            "_view_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "value": 818322941,
            "_view_count": null,
            "_view_module_version": "1.5.0",
            "orientation": "horizontal",
            "min": 0,
            "description_tooltip": null,
            "_model_module": "@jupyter-widgets/controls",
            "layout": "IPY_MODEL_3e984405db654b0b83b88b2db08baffd"
          }
        },
        "20cdc61eb3404f42a12b37901b0d85fb": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "HTMLModel",
          "model_module_version": "1.5.0",
          "state": {
            "_view_name": "HTMLView",
            "style": "IPY_MODEL_654d8a19b9f949c6bbdaf8b0875c931e",
            "_dom_classes": [],
            "description": "",
            "_model_name": "HTMLModel",
            "placeholder": "​",
            "_view_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "value": " 780M/780M [00:33<00:00, 24.4MB/s]",
            "_view_count": null,
            "_view_module_version": "1.5.0",
            "description_tooltip": null,
            "_model_module": "@jupyter-widgets/controls",
            "layout": "IPY_MODEL_896030c5d13b415aaa05032818d81a6e"
          }
        },
        "2d7239993a9645b09b221405ac682743": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_view_name": "StyleView",
            "_model_name": "DescriptionStyleModel",
            "description_width": "",
            "_view_module": "@jupyter-widgets/base",
            "_model_module_version": "1.5.0",
            "_view_count": null,
            "_view_module_version": "1.2.0",
            "_model_module": "@jupyter-widgets/controls"
          }
        },
        "17b5a87f92104ec7ab96bf507637d0d2": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_view_name": "LayoutView",
            "grid_template_rows": null,
            "right": null,
            "justify_content": null,
            "_view_module": "@jupyter-widgets/base",
            "overflow": null,
            "_model_module_version": "1.2.0",
            "_view_count": null,
            "flex_flow": null,
            "width": null,
            "min_width": null,
            "border": null,
            "align_items": null,
            "bottom": null,
            "_model_module": "@jupyter-widgets/base",
            "top": null,
            "grid_column": null,
            "overflow_y": null,
            "overflow_x": null,
            "grid_auto_flow": null,
            "grid_area": null,
            "grid_template_columns": null,
            "flex": null,
            "_model_name": "LayoutModel",
            "justify_items": null,
            "grid_row": null,
            "max_height": null,
            "align_content": null,
            "visibility": null,
            "align_self": null,
            "height": null,
            "min_height": null,
            "padding": null,
            "grid_auto_rows": null,
            "grid_gap": null,
            "max_width": null,
            "order": null,
            "_view_module_version": "1.2.0",
            "grid_template_areas": null,
            "object_position": null,
            "object_fit": null,
            "grid_auto_columns": null,
            "margin": null,
            "display": null,
            "left": null
          }
        },
        "2358bfb2270247359e94b066b3cc3d1f": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "ProgressStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_view_name": "StyleView",
            "_model_name": "ProgressStyleModel",
            "description_width": "",
            "_view_module": "@jupyter-widgets/base",
            "_model_module_version": "1.5.0",
            "_view_count": null,
            "_view_module_version": "1.2.0",
            "bar_color": null,
            "_model_module": "@jupyter-widgets/controls"
          }
        },
        "3e984405db654b0b83b88b2db08baffd": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_view_name": "LayoutView",
            "grid_template_rows": null,
            "right": null,
            "justify_content": null,
            "_view_module": "@jupyter-widgets/base",
            "overflow": null,
            "_model_module_version": "1.2.0",
            "_view_count": null,
            "flex_flow": null,
            "width": null,
            "min_width": null,
            "border": null,
            "align_items": null,
            "bottom": null,
            "_model_module": "@jupyter-widgets/base",
            "top": null,
            "grid_column": null,
            "overflow_y": null,
            "overflow_x": null,
            "grid_auto_flow": null,
            "grid_area": null,
            "grid_template_columns": null,
            "flex": null,
            "_model_name": "LayoutModel",
            "justify_items": null,
            "grid_row": null,
            "max_height": null,
            "align_content": null,
            "visibility": null,
            "align_self": null,
            "height": null,
            "min_height": null,
            "padding": null,
            "grid_auto_rows": null,
            "grid_gap": null,
            "max_width": null,
            "order": null,
            "_view_module_version": "1.2.0",
            "grid_template_areas": null,
            "object_position": null,
            "object_fit": null,
            "grid_auto_columns": null,
            "margin": null,
            "display": null,
            "left": null
          }
        },
        "654d8a19b9f949c6bbdaf8b0875c931e": {
          "model_module": "@jupyter-widgets/controls",
          "model_name": "DescriptionStyleModel",
          "model_module_version": "1.5.0",
          "state": {
            "_view_name": "StyleView",
            "_model_name": "DescriptionStyleModel",
            "description_width": "",
            "_view_module": "@jupyter-widgets/base",
            "_model_module_version": "1.5.0",
            "_view_count": null,
            "_view_module_version": "1.2.0",
            "_model_module": "@jupyter-widgets/controls"
          }
        },
        "896030c5d13b415aaa05032818d81a6e": {
          "model_module": "@jupyter-widgets/base",
          "model_name": "LayoutModel",
          "model_module_version": "1.2.0",
          "state": {
            "_view_name": "LayoutView",
            "grid_template_rows": null,
            "right": null,
            "justify_content": null,
            "_view_module": "@jupyter-widgets/base",
            "overflow": null,
            "_model_module_version": "1.2.0",
            "_view_count": null,
            "flex_flow": null,
            "width": null,
            "min_width": null,
            "border": null,
            "align_items": null,
            "bottom": null,
            "_model_module": "@jupyter-widgets/base",
            "top": null,
            "grid_column": null,
            "overflow_y": null,
            "overflow_x": null,
            "grid_auto_flow": null,
            "grid_area": null,
            "grid_template_columns": null,
            "flex": null,
            "_model_name": "LayoutModel",
            "justify_items": null,
            "grid_row": null,
            "max_height": null,
            "align_content": null,
            "visibility": null,
            "align_self": null,
            "height": null,
            "min_height": null,
            "padding": null,
            "grid_auto_rows": null,
            "grid_gap": null,
            "max_width": null,
            "order": null,
            "_view_module_version": "1.2.0",
            "grid_template_areas": null,
            "object_position": null,
            "object_fit": null,
            "grid_auto_columns": null,
            "margin": null,
            "display": null,
            "left": null
          }
        }
      }
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "t6MPjfT5NrKQ"
      },
      "source": [
        "<a align=\"left\" href=\"https://ultralytics.com/yolov5\" target=\"_blank\">\n",
        "<img src=\"https://user-images.githubusercontent.com/26833433/125273437-35b3fc00-e30d-11eb-9079-46f313325424.png\"></a>\n",
        "\n",
        "This is the **official YOLOv5 🚀 notebook** by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n",
        "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "7mGmQbAO5pQb"
      },
      "source": [
        "# Setup\n",
        "\n",
        "Clone repo, install dependencies and check PyTorch and GPU."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "wbvMlHd_QwMG",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "4d67116a-43e9-4d84-d19e-1edd83f23a04"
      },
      "source": [
        "!git clone https://github.com/ultralytics/yolov5  # clone repo\n",
        "%cd yolov5\n",
        "%pip install -qr requirements.txt  # install dependencies\n",
        "\n",
        "import torch\n",
        "from IPython.display import Image, clear_output  # to display images\n",
        "\n",
        "clear_output()\n",
        "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")"
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Setup complete. Using torch 1.9.0+cu102 (Tesla V100-SXM2-16GB)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "4JnkELT0cIJg"
      },
      "source": [
        "# 1. Inference\n",
        "\n",
        "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n",
        "\n",
        "```shell\n",
        "python detect.py --source 0  # webcam\n",
        "                          file.jpg  # image \n",
        "                          file.mp4  # video\n",
        "                          path/  # directory\n",
        "                          path/*.jpg  # glob\n",
        "                          'https://youtu.be/NUsoVlDFqZg'  # YouTube\n",
        "                          'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n",
        "```"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "zR9ZbuQCH7FX",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "8b728908-81ab-4861-edb0-4d0c46c439fb"
      },
      "source": [
        "%rm -rf runs\n",
        "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n",
        "#Image(filename='runs/detect/exp/zidane.jpg', width=600)"
      ],
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images/, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False\n",
            "YOLOv5 🚀 v5.0-367-g01cdb76 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
            "\n",
            "Fusing layers... \n",
            "Model Summary: 224 layers, 7266973 parameters, 0 gradients\n",
            "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 1 fire hydrant, Done. (0.007s)\n",
            "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.007s)\n",
            "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n",
            "Done. (0.091s)\n"
          ],
          "name": "stdout"
        }
      ]
    },
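    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Inference can also be run directly from Python via [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). A minimal sketch (the model name and image URL below are illustrative):\n",
        "\n",
        "```python\n",
        "import torch\n",
        "\n",
        "# load a pretrained YOLOv5s model from PyTorch Hub\n",
        "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n",
        "\n",
        "# run inference on an example image and report results\n",
        "results = model('https://ultralytics.com/images/zidane.jpg')\n",
        "results.print()  # or results.show() / results.save()\n",
        "```"
      ]
    },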
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "hkAzDWJ7cWTr"
      },
      "source": [
        "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\n",
        "<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/127574988-6a558aa1-d268-44b9-bf6b-62d4c605cc72.jpg\" width=\"600\">"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "0eq1SMWl6Sfn"
      },
      "source": [
        "# 2. Validate\n",
        "Validate a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "eyTZYGgRjnMc"
      },
      "source": [
        "## COCO val2017\n",
        "Download [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1GB - 5000 images), and test model accuracy."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WQPtK1QYVaD_",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 48,
          "referenced_widgets": [
            "484511f272e64eab8b42e68dac5f7a66",
            "78cceec059784f2bb36988d3336e4d56",
            "ab93d8b65c134605934ff9ec5efb1bb6",
            "30df865ded4c434191bce772c9a82f3a",
            "20cdc61eb3404f42a12b37901b0d85fb",
            "2d7239993a9645b09b221405ac682743",
            "17b5a87f92104ec7ab96bf507637d0d2",
            "2358bfb2270247359e94b066b3cc3d1f",
            "3e984405db654b0b83b88b2db08baffd",
            "654d8a19b9f949c6bbdaf8b0875c931e",
            "896030c5d13b415aaa05032818d81a6e"
          ]
        },
        "outputId": "7e6f5c96-c819-43e1-cd03-d3b9878cf8de"
      },
      "source": [
        "# Download COCO val2017\n",
        "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
        "!unzip -q tmp.zip -d ../datasets && rm tmp.zip"
      ],
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "application/vnd.jupyter.widget-view+json": {
              "model_id": "484511f272e64eab8b42e68dac5f7a66",
              "version_minor": 0,
              "version_major": 2
            },
            "text/plain": [
              "  0%|          | 0.00/780M [00:00<?, ?B/s]"
            ]
          },
          "metadata": {
            "tags": []
          }
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "X58w8JLpMnjH",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "3dd0e2fc-aecf-4108-91b1-6392da1863cb"
      },
      "source": [
        "# Run YOLOv5x on COCO val2017\n",
        "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half"
      ],
      "execution_count": 6,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "\u001b[34m\u001b[1mval: \u001b[0mdata=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True\n",
            "YOLOv5 🚀 v5.0-367-g01cdb76 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
            "\n",
            "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n",
            "100% 168M/168M [00:08<00:00, 20.6MB/s]\n",
            "\n",
            "Fusing layers... \n",
            "Model Summary: 476 layers, 87730285 parameters, 0 gradients\n",
            "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2749.96it/s]\n",
            "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../datasets/coco/val2017.cache\n",
            "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 157/157 [01:08<00:00,  2.28it/s]\n",
            "                 all       5000      36335      0.746      0.626       0.68       0.49\n",
            "Speed: 0.1ms pre-process, 5.1ms inference, 1.6ms NMS per image at shape (32, 3, 640, 640)\n",
            "\n",
            "Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json...\n",
            "loading annotations into memory...\n",
            "Done (t=0.46s)\n",
            "creating index...\n",
            "index created!\n",
            "Loading and preparing results...\n",
            "DONE (t=4.94s)\n",
            "creating index...\n",
            "index created!\n",
            "Running per image evaluation...\n",
            "Evaluate annotation type *bbox*\n",
            "DONE (t=83.60s).\n",
            "Accumulating evaluation results...\n",
            "DONE (t=13.22s).\n",
            " Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.504\n",
            " Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.688\n",
            " Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.546\n",
            " Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351\n",
            " Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551\n",
            " Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644\n",
            " Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.382\n",
            " Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.629\n",
            " Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.681\n",
            " Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n",
            " Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n",
            " Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n",
            "Results saved to \u001b[1mruns/val/exp\u001b[0m\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "rc_KbFk0juX2"
      },
      "source": [
        "## COCO test-dev2017\n",
        "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (**20,000 images, no labels**). Results are saved to a `*.json` file which should be **zipped** and submitted to the evaluation server at https://competitions.codalab.org/competitions/20794."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "V0AJnSeCIHyJ"
      },
      "source": [
        "# Download COCO test-dev2017\n",
        "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n",
        "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
        "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f  # 7GB,  41k images\n",
        "%mv ./test2017 ../coco/images  # move to /coco"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "29GJXAP_lPrt"
      },
      "source": [
        "# Run YOLOv5s on COCO test-dev2017 using --task test\n",
        "!python val.py --weights yolov5s.pt --data coco.yaml --task test"
      ],
      "execution_count": null,
      "outputs": []
    },
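    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "After `val.py --task test` finishes, the predictions `*.json` file can be zipped for upload to the evaluation server. A minimal sketch (the exact `runs/val/exp*` directory and JSON filename depend on your run):\n",
        "\n",
        "```shell\n",
        "zip -j detections.zip runs/val/exp/yolov5s_predictions.json  # zip predictions for submission\n",
        "```"
      ]
    },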
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "VUOiNLtMP5aG"
      },
      "source": [
        "# 3. Train\n",
        "\n",
        "Download [COCO128](https://www.kaggle.com/ultralytics/coco128), a small 128-image tutorial dataset, start tensorboard and train YOLOv5s from a pretrained checkpoint for 3 epochs (note actual training is typically much longer, around **300-1000 epochs**, depending on your dataset)."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Knxi2ncxWffW"
      },
      "source": [
        "# Download COCO128\n",
        "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n",
        "!unzip -q tmp.zip -d ../datasets && rm tmp.zip"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_pOkGLv1dMqh"
      },
      "source": [
        "Train a YOLOv5s model on [COCO128](https://www.kaggle.com/ultralytics/coco128) with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and **COCO, COCO128, and VOC datasets are downloaded automatically** on first use.\n",
        "\n",
        "All training results are saved to `runs/train/` with incrementing run directories, e.g. `runs/train/exp2`, `runs/train/exp3`, etc.\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "bOy5KI2ncnWd"
      },
      "source": [
        "# Tensorboard  (optional)\n",
        "%load_ext tensorboard\n",
        "%tensorboard --logdir runs/train"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "2fLAV42oNb7M"
      },
      "source": [
        "# Weights & Biases  (optional)\n",
        "%pip install -q wandb\n",
        "import wandb\n",
        "wandb.login()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1NcFxRcFdJ_O",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "00ea4b14-a75c-44a2-a913-03b431b69de5"
      },
      "source": [
        "# Train YOLOv5s on COCO128 for 3 epochs\n",
        "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
      ],
      "execution_count": 8,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, adam=False, sync_bn=False, workers=8, project=runs/train, entity=None, name=exp, exist_ok=False, quad=False, linear_lr=False, label_smoothing=0.0, upload_dataset=False, bbox_interval=-1, save_period=-1, artifact_alias=latest, local_rank=-1, freeze=0\n",
            "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
            "YOLOv5 🚀 v5.0-367-g01cdb76 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
            "\n",
            "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
            "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\n",
            "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
            "2021-08-15 14:40:43.449642: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n",
            "\n",
            "                 from  n    params  module                                  arguments                     \n",
            "  0                -1  1      3520  models.common.Focus                     [3, 32, 3]                    \n",
            "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
            "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
            "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
            "  4                -1  3    156928  models.common.C3                        [128, 128, 3]                 \n",
            "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
            "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
            "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
            "  8                -1  1    656896  models.common.SPP                       [512, 512, [5, 9, 13]]        \n",
            "  9                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
            " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
            " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
            " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
            " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
            " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
            " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
            " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
            " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
            " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
            " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
            " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
            " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
            " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
            " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
            " 24      [17, 20, 23]  1    229245  models.yolo.Detect                      [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
            "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n",
            "\n",
            "Transferred 362/362 items from yolov5s.pt\n",
            "Scaled weight_decay = 0.0005\n",
            "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD with parameter groups 59 weight, 62 weight (no decay), 62 bias\n",
            "\u001b[34m\u001b[1malbumentations: \u001b[0mversion 1.0.3 required by YOLOv5, but version 0.1.12 is currently installed\n",
            "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2440.28it/s]\n",
            "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../datasets/coco128/labels/train2017.cache\n",
            "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 302.61it/s]\n",
            "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<?, ?it/s]\n",
            "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 142.55it/s]\n",
            "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n",
            "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n",
            "Plotting labels... \n",
            "\n",
            "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.27, Best Possible Recall (BPR) = 0.9935\n",
            "Image sizes 640 train, 640 val\n",
            "Using 2 dataloader workers\n",
            "Logging results to runs/train/exp\n",
            "Starting training for 3 epochs...\n",
            "\n",
            "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
            "       0/2     3.64G   0.04492    0.0674   0.02213       298       640: 100% 8/8 [00:03<00:00,  2.05it/s]\n",
            "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00,  4.70it/s]\n",
            "                 all        128        929      0.686      0.565      0.642      0.421\n",
            "\n",
            "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
            "       1/2     5.04G   0.04403    0.0611   0.01986       232       640: 100% 8/8 [00:01<00:00,  5.59it/s]\n",
            "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00,  4.46it/s]\n",
            "                 all        128        929      0.694      0.563      0.654      0.425\n",
            "\n",
            "     Epoch   gpu_mem       box       obj       cls    labels  img_size\n",
            "       2/2     5.04G   0.04616   0.07056   0.02071       214       640: 100% 8/8 [00:01<00:00,  5.94it/s]\n",
            "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00,  1.52it/s]\n",
            "                 all        128        929      0.711      0.562       0.66      0.431\n",
            "\n",
            "3 epochs completed in 0.005 hours.\n",
            "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
            "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n",
            "Results saved to \u001b[1mruns/train/exp\u001b[0m\n"
          ],
          "name": "stdout"
        }
      ]
    },
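    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "For comparison, training from randomly initialized weights uses `--weights '' --cfg yolov5s.yaml` as noted above. A minimal sketch (the epoch count is illustrative; full trainings typically run 300+ epochs):\n",
        "\n",
        "```shell\n",
        "python train.py --img 640 --batch 16 --epochs 300 --data coco128.yaml --weights '' --cfg yolov5s.yaml\n",
        "```"
      ]
    },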
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "15glLzbQx5u0"
      },
      "source": [
        "# 4. Visualize"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "DLI1JmHU7B0l"
      },
      "source": [
        "## Weights & Biases Logging 🌟 NEW\n",
        "\n",
        "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well as improved visibility and collaboration for teams. To enable W&B, run `pip install wandb` and then train normally (you will be guided through setup on first use).\n",
        "\n",
        "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n",
        "\n",
        "<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/125274843-a27bc600-e30e-11eb-9a44-62af0b7a50a2.png\" width=\"800\">"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-WPvRbS5Swl6"
      },
      "source": [
        "## Local Logging\n",
        "\n",
        "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note that an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combines 4 images into 1 mosaic.\n",
        "\n",
        "> <img src=\"https://user-images.githubusercontent.com/26833433/124931219-48bf8700-e002-11eb-84f0-e05d95b118dd.jpg\" width=\"700\">  \n",
        "`train_batch0.jpg` shows train batch 0 mosaics and labels\n",
        "\n",
        "> <img src=\"https://user-images.githubusercontent.com/26833433/124931217-4826f080-e002-11eb-87b9-ae0925a8c94b.jpg\" width=\"700\">  \n",
        "`test_batch0_labels.jpg` shows val batch 0 labels\n",
        "\n",
        "> <img src=\"https://user-images.githubusercontent.com/26833433/124931209-46f5c380-e002-11eb-9bd5-7a3de2be9851.jpg\" width=\"700\">  \n",
        "`test_batch0_pred.jpg` shows val batch 0 _predictions_\n",
        "\n",
        "Training results are automatically logged to [TensorBoard](https://www.tensorflow.org/tensorboard) and to [CSV](https://github.com/ultralytics/yolov5/pull/4148) as `results.csv`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.csv` file manually:\n",
        "\n",
        "```python\n",
        "from utils.plots import plot_results \n",
        "plot_results('path/to/results.csv')  # plot 'results.csv' as 'results.png'\n",
        "```\n",
        "\n",
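        "You can also open the TensorBoard logs directly in Colab with the notebook extension (a minimal sketch; point `--logdir` elsewhere if you changed `--project` or `--name`):\n",
        "\n",
        "```python\n",
        "# load the TensorBoard notebook extension and point it at the default YOLOv5 log directory\n",
        "%load_ext tensorboard\n",
        "%tensorboard --logdir runs/train\n",
        "```\n",
        "\n",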
        "<img align=\"left\" width=\"800\" alt=\"COCO128 Training Results\" src=\"https://user-images.githubusercontent.com/26833433/126906780-8c5e2990-6116-4de6-b78a-367244a33ccf.png\">"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Zelyeqbyt3GD"
      },
      "source": [
        "# Environments\n",
        "\n",
        "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[cuDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
        "\n",
        "- **Google Colab and Kaggle** notebooks with free GPU: <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
        "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n",
        "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n",
        "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "6Qu7Iesl0p54"
      },
      "source": [
        "# Status\n",
        "\n",
        "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n",
        "\n",
        "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "IEijrePND_2I"
      },
      "source": [
        "# Appendix\n",
        "\n",
        "Optional extras below. Unit tests validate repo functionality and should be run on any PRs submitted.\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "mcKoSIK2WSzj"
      },
      "source": [
        "# Reproduce\n",
        "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n",
        "  !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45  # speed\n",
        "  !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65  # mAP"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "GMusP4OAxFu6"
      },
      "source": [
        "# PyTorch Hub\n",
        "import torch\n",
        "\n",
        "# Model\n",
        "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n",
        "\n",
        "# Images\n",
        "dir = 'https://ultralytics.com/images/'\n",
        "imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')]  # batch of images\n",
        "\n",
        "# Inference\n",
        "results = model(imgs)\n",
        "results.print()  # or .show(), .save()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "FGH0ZjkGjejy"
      },
      "source": [
        "# Unit tests\n",
        "%%shell\n",
        "export PYTHONPATH=\"$PWD\"  # to run *.py files in subdirectories\n",
        "\n",
        "rm -rf runs  # remove runs/\n",
        "for m in yolov5s; do  # models\n",
        "  python train.py --weights $m.pt --epochs 3 --img 320 --device 0  # train pretrained\n",
        "  python train.py --weights '' --cfg $m.yaml --epochs 3 --img 320 --device 0  # train scratch\n",
        "  for d in 0 cpu; do  # devices\n",
        "    python detect.py --weights $m.pt --device $d  # detect official\n",
        "    python detect.py --weights runs/train/exp/weights/best.pt --device $d  # detect custom\n",
        "    python val.py --weights $m.pt --device $d  # val official\n",
        "    python val.py --weights runs/train/exp/weights/best.pt --device $d  # val custom\n",
        "  done\n",
        "  python hubconf.py  # hub\n",
        "  python models/yolo.py --cfg $m.yaml  # inspect\n",
        "  python export.py --weights $m.pt --img 640 --batch 1  # export\n",
        "done"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "gogI-kwi3Tye"
      },
      "source": [
        "# Profile\n",
        "import torch\n",
        "from utils.torch_utils import profile\n",
        "\n",
        "m1 = lambda x: x * torch.sigmoid(x)\n",
        "m2 = torch.nn.SiLU()\n",
        "results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "RVRSOhEvUdb5"
      },