diff --git a/tutorial.ipynb b/tutorial.ipynb index 881632daa375cc9d2cd5174f8d73767708b80cb4..e4344d3ddcecdfb68a3fb9dc27586709a5d8ab3f 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "b54ab52f1d4f4903897ab6cd49a3b9b2": { + "8815626359d84416a2f44a95500580a4": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_1852f93fc2714d40adccb8aa161c42ff", + "layout": "IPY_MODEL_3b85609c4ce94a74823f2cfe141ce68e", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_3293cfe869bd4a1bbbe18b49b6815de1", - "IPY_MODEL_8d5ee8b8ab6d46b98818bd2c562ddd1c" + "IPY_MODEL_876609753c2946248890344722963d44", + "IPY_MODEL_8abfdd8778e44b7ca0d29881cb1ada05" ] } }, - "1852f93fc2714d40adccb8aa161c42ff": { + "3b85609c4ce94a74823f2cfe141ce68e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,12 +87,12 @@ "left": null } }, - "3293cfe869bd4a1bbbe18b49b6815de1": { + "876609753c2946248890344722963d44": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_49fcb2adb0354430b76f491af98abfe9", + "style": "IPY_MODEL_78c6c3d97c484916b8ee167c63556800", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -107,30 +107,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_c7d76e0c53064363add56b8d05e561f5" + "layout": "IPY_MODEL_9dd0f182db5d45378ceafb855e486eb8" } }, - "8d5ee8b8ab6d46b98818bd2c562ddd1c": { + "8abfdd8778e44b7ca0d29881cb1ada05": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_48f321f789634aa584f8a29a3b925dd5", + "style": "IPY_MODEL_a3dab28b45c247089a3d1b8b09f327de", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [00:13<00:00, 62.6MB/s]", + "value": " 781M/781M [08:43<00:00, 1.56MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_6610d6275f3e49d9937d50ed0a105947" + "layout": "IPY_MODEL_32451332b7a94ba9aacddeaa6ac94d50" } }, - "49fcb2adb0354430b76f491af98abfe9": { + "78c6c3d97c484916b8ee167c63556800": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "c7d76e0c53064363add56b8d05e561f5": { + "9dd0f182db5d45378ceafb855e486eb8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ "left": null } }, - "48f321f789634aa584f8a29a3b925dd5": { + "a3dab28b45c247089a3d1b8b09f327de": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "6610d6275f3e49d9937d50ed0a105947": { + "32451332b7a94ba9aacddeaa6ac94d50": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -550,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "20027455-bf84-41fd-c902-b7282d53c91d" + "outputId": 
"4576b05f-d6d1-404a-fc99-5663c71e3dc4" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -563,12 +563,12 @@ "clear_output()\n", "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.8.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" + "Setup complete. Using torch 1.8.1+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" ], "name": "stdout" } @@ -607,7 +607,7 @@ "output_type": "stream", "text": [ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", @@ -664,30 +664,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "b54ab52f1d4f4903897ab6cd49a3b9b2", - "1852f93fc2714d40adccb8aa161c42ff", - "3293cfe869bd4a1bbbe18b49b6815de1", - "8d5ee8b8ab6d46b98818bd2c562ddd1c", - "49fcb2adb0354430b76f491af98abfe9", - "c7d76e0c53064363add56b8d05e561f5", - "48f321f789634aa584f8a29a3b925dd5", - "6610d6275f3e49d9937d50ed0a105947" + "8815626359d84416a2f44a95500580a4", + "3b85609c4ce94a74823f2cfe141ce68e", + "876609753c2946248890344722963d44", + "8abfdd8778e44b7ca0d29881cb1ada05", + "78c6c3d97c484916b8ee167c63556800", + "9dd0f182db5d45378ceafb855e486eb8", + "a3dab28b45c247089a3d1b8b09f327de", + "32451332b7a94ba9aacddeaa6ac94d50" ] }, - "outputId": "f0884441-78d9-443c-afa6-d00ec387908d" + "outputId": "81521192-cf67-4a47-a4cc-434cb0ebc363" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "b54ab52f1d4f4903897ab6cd49a3b9b2", + "model_id": "8815626359d84416a2f44a95500580a4", "version_minor": 0, "version_major": 2 }, @@ -715,57 +715,57 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "5b54c11e-9f4b-4d9a-8e6e-6a2a4f0cc60d" + "outputId": "2340b131-9943-4cd6-fd3a-8272aeb0774f" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": null, + "execution_count": 6, "outputs": [ { "output_type": "stream", "text": [ "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 
v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:02<00:00, 59.1MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n", + "100% 168M/168M [00:05<00:00, 32.3MB/s]\n", "\n", "Fusing layers... \n", "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3236.68it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:20<00:00, 1.95it/s]\n", - " all 5000 36335 0.749 0.619 0.68 0.486\n", - "Speed: 5.3/1.7/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n", + " all 5000 36335 0.745 0.627 0.68 0.49\n", + "Speed: 5.3/1.6/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.43s)\n", + "Done (t=0.48s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.10s)\n", + "DONE (t=5.08s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=88.52s).\n", + "DONE (t=90.51s).\n", "Accumulating evaluation results...\n", - "DONE (t=17.17s).\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n", - " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n", - " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.338\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.548\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.637\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.378\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.628\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.680\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.520\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.729\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.826\n", + "DONE (t=15.16s).\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | 
maxDets= 10 ] = 0.629\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n", "Results saved to runs/test/exp\n" ], "name": "stdout" @@ -916,28 +916,25 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "cf494627-09b9-4399-ff0c-fdb62b32340a" + "outputId": "e715d09c-5d93-4912-a0df-9da0893f2014" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": null, + "execution_count": 12, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-2-g54d6516 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", - "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", - "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n", - "2021-03-14 04:18:58.124672: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", + "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", + "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", + "2021-04-12 10:29:58.539457: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to 
yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 63.1MB/s]\n", - "\n", + "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", @@ -970,11 +967,10 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2956.76it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 205.30it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 604584.36it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 144.17it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 796544.38it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.73it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 500812.42it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 134.10it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... 
anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", @@ -984,23 +980,23 @@ "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/2 3.29G 0.04237 0.06417 0.02121 0.1277 183 640: 100% 8/8 [00:03<00:00, 2.41it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.04s/it]\n", - " all 128 929 0.642 0.637 0.661 0.432\n", + " 0/2 3.29G 0.04368 0.065 0.02127 0.1299 183 640: 100% 8/8 [00:03<00:00, 2.21it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.09s/it]\n", + " all 128 929 0.605 0.657 0.666 0.434\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 1/2 6.65G 0.04431 0.06403 0.019 0.1273 166 640: 100% 8/8 [00:01<00:00, 5.73it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.21it/s]\n", - " all 128 929 0.662 0.626 0.658 0.433\n", + " 1/2 6.65G 0.04556 0.0651 0.01987 0.1305 166 640: 100% 8/8 [00:01<00:00, 5.18it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.72it/s]\n", + " all 128 929 0.61 0.66 0.669 0.438\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 2/2 6.65G 0.04506 0.06836 0.01913 0.1325 182 640: 100% 8/8 [00:01<00:00, 5.51it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.35it/s]\n", - " all 128 929 0.658 0.625 0.661 0.433\n", - "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", - "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n", + " 2/2 6.65G 0.04624 0.06923 0.0196 0.1351 182 640: 100% 8/8 [00:01<00:00, 5.19it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.27it/s]\n", + " all 128 929 0.618 0.659 0.671 0.438\n", "3 epochs completed in 0.007 hours.\n", - "\n" + "\n", + "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", + "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n" ], "name": "stdout" } @@ -1263,4 +1259,4 @@ "outputs": [] } ] -} +} \ No newline at end of file