{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "YOLOv5 Tutorial",
"provenance": [],
"collapsed_sections": [],
"toc_visible": true,
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU",
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"02ac0588602847eea00a0205f87bcce2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_c472ea49806447a68b5a9221a4ddae85",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_091fdf499bd44a80af7281d16da4aa93",
"IPY_MODEL_c79f69c959de4427ba102a87a9f46d80"
]
}
},
"c472ea49806447a68b5a9221a4ddae85": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"091fdf499bd44a80af7281d16da4aa93": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_c42ae5af74a0491187827d0a1fc259bb",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 819257867,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 819257867,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_5a90f72d3a2d46cb9ad915daa3ead8b4"
}
},
"c79f69c959de4427ba102a87a9f46d80": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_2a7ed6611da34662b10e37fd4f4e4438",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 781M/781M [00:23<00:00, 35.1MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_fead0160658445bf9e966daa4481cad0"
}
},
"c42ae5af74a0491187827d0a1fc259bb": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"5a90f72d3a2d46cb9ad915daa3ead8b4": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"2a7ed6611da34662b10e37fd4f4e4438": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"fead0160658445bf9e966daa4481cad0": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"cf1ab9fde7444d3e874fcd407ba8f0f8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_9ee03f9c85f34155b2645e89c9211547",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_933ebc451c09490aadf71afbbb3dff2a",
"IPY_MODEL_8e7c55cbca624432a84fa7ad8f3a4016"
]
}
},
"9ee03f9c85f34155b2645e89c9211547": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"933ebc451c09490aadf71afbbb3dff2a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_dd62d83b35d04a178840772e82bd2f2e",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 22090455,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 22090455,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_d5c4f3d1c8b046e3a163faaa6b3a51ab"
}
},
"8e7c55cbca624432a84fa7ad8f3a4016": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_78d1da8efb504b03878ca9ce5b404006",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 21.1M/21.1M [00:01<00:00, 16.9MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_d28208ba1213436a93926a01d99d97ae"
}
},
"dd62d83b35d04a178840772e82bd2f2e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"d5c4f3d1c8b046e3a163faaa6b3a51ab": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"78d1da8efb504b03878ca9ce5b404006": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"d28208ba1213436a93926a01d99d97ae": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
}
}
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "HvhYZrIZCEyo"
},
"source": [
"<img src=\"https://user-images.githubusercontent.com/26833433/98702494-b71c4e80-237a-11eb-87ed-17fcd6b3f066.jpg\">\n",
"\n",
"This notebook was written by Ultralytics LLC, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n",
"For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "7mGmQbAO5pQb"
},
"source": [
"# Setup\n",
"\n",
"Clone the repo, install dependencies, and check PyTorch and GPU."
]
},
{
"cell_type": "code",
"metadata": {
"id": "wbvMlHd_QwMG",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "888d5c41-00e9-47d8-d230-dded99325bea"
},
"source": [
"!git clone https://github.com/ultralytics/yolov5 # clone repo\n",
"%cd yolov5\n",
"%pip install -qr requirements.txt # install dependencies\n",
"\n",
"import torch\n",
"from IPython.display import Image, clear_output # to display images\n",
"\n",
"clear_output()\n",
"print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16130MB, multi_processor_count=80)\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "4JnkELT0cIJg"
},
"source": [
"# 1. Inference\n",
"\n",
"`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)."
]
},
{
"cell_type": "code",
"metadata": {
"id": "zR9ZbuQCH7FX",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 534
},
"outputId": "c9a308f7-2216-4805-8003-eca8dd0dc30d"
},
"source": [
"!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n",
"Image(filename='runs/detect/exp/zidane.jpg', width=600)"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, save_conf=False, save_dir='runs/detect', save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
"Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
"\n",
"Fusing layers... \n",
"Model Summary: 232 layers, 7459581 parameters, 0 gradients\n",
"image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.012s)\n",
"image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.012s)\n",
"Results saved to runs/detect/exp\n",
"Done. (0.113s)\n"
],
"name": "stdout"
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
|
|||
|
"<IPython.core.display.Image object>"
|
|||
|
]
|
|||
|
},
|
|||
|
"metadata": {
|
|||
|
"tags": [],
|
|||
|
"image/jpeg": {
|
|||
|
"width": 600
|
|||
|
}
|
|||
|
},
|
|||
|
"execution_count": 38
|
|||
|
}
|
|||
|
]
|
|||
|
},
|
|||
|
{
|
|||
|
"cell_type": "markdown",
|
|||
|
"metadata": {
|
|||
|
"id": "4qbaa3iEcrcE"
|
|||
|
},
|
|||
|
"source": [
|
|||
|
"Results are saved to `runs/detect`. A full list of available inference sources:\n",
|
|||
|
"<img src=\"https://user-images.githubusercontent.com/26833433/98274798-2b7a7a80-1f94-11eb-91a4-70c73593e26b.jpg\" width=\"900\"> "
|
|||
|
]
|
|||
|
},
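{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an aside, YOLOv5 models can also be loaded for inference via [PyTorch Hub](https://pytorch.org/hub). The cell below is a minimal sketch, assuming a recent YOLOv5 release whose `hubconf.py` exposes the pretrained models and the autoShape wrapper; method names such as `results.print()` may differ between releases."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# PyTorch Hub inference (a sketch; API details depend on the installed YOLOv5 release)\n",
"import torch\n",
"from PIL import Image\n",
"\n",
"model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)  # pretrained YOLOv5s\n",
"img = Image.open('data/images/zidane.jpg')  # sample image shipped with the repo\n",
"results = model(img)  # run inference through the autoShape wrapper\n",
"results.print()  # summarize detections (method name assumed from recent releases)"
],
"execution_count": null,
"outputs": []
},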
{
"cell_type": "markdown",
"metadata": {
"id": "0eq1SMWl6Sfn"
},
"source": [
"# 2. Test\n",
"Test a model on the [COCO](https://cocodataset.org/#home) val or test-dev datasets to evaluate trained accuracy. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag, as sketched in the cell below. Note that `pycocotools` metrics may be 1-2% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation."
]
},
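{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, a per-class breakdown can be requested with `--verbose`. The command below is a sketch and is left commented out because it needs the COCO val2017 data downloaded in the next section; run the download cell first, then uncomment."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Per-class P/R/mAP with --verbose (sketch; requires COCO val2017 from the cell below)\n",
"# !python test.py --weights yolov5s.pt --data coco.yaml --img 640 --verbose"
],
"execution_count": null,
"outputs": []
},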
{
"cell_type": "markdown",
"metadata": {
"id": "eyTZYGgRjnMc"
},
"source": [
"## COCO val2017\n",
"Download the [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1 GB, 5000 images) and test model accuracy."
]
},
{
"cell_type": "code",
"metadata": {
"id": "WQPtK1QYVaD_",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 66,
"referenced_widgets": [
"02ac0588602847eea00a0205f87bcce2",
"c472ea49806447a68b5a9221a4ddae85",
"091fdf499bd44a80af7281d16da4aa93",
"c79f69c959de4427ba102a87a9f46d80",
"c42ae5af74a0491187827d0a1fc259bb",
"5a90f72d3a2d46cb9ad915daa3ead8b4",
"2a7ed6611da34662b10e37fd4f4e4438",
"fead0160658445bf9e966daa4481cad0"
]
},
"outputId": "780d8f5f-766e-4b99-e370-11f9b884c27a"
},
"source": [
"# Download COCO val2017\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../ && rm tmp.zip"
],
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "02ac0588602847eea00a0205f87bcce2",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, max=819257867.0), HTML(value='')))"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "X58w8JLpMnjH",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "013935a5-ba81-4810-b723-0cb01cf7bc79"
},
"source": [
"# Run YOLOv5x on COCO val2017\n",
"!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
"Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
"\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5x.pt to yolov5x.pt...\n",
"100% 170M/170M [00:05<00:00, 32.6MB/s]\n",
"\n",
"Fusing layers... \n",
"Model Summary: 484 layers, 88922205 parameters, 0 gradients\n",
"Scanning labels ../coco/labels/val2017.cache (4952 found, 0 missing, 48 empty, 0 duplicate, for 5000 images): 5000it [00:00, 14785.71it/s]\n",
" Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:30<00:00, 1.74it/s]\n",
" all 5e+03 3.63e+04 0.409 0.754 0.672 0.484\n",
"Speed: 5.9/2.1/7.9 ms inference/NMS/total per 640x640 image at batch-size 32\n",
"\n",
"Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n",
"loading annotations into memory...\n",
"Done (t=0.43s)\n",
"creating index...\n",
"index created!\n",
"Loading and preparing results...\n",
"DONE (t=4.67s)\n",
"creating index...\n",
"index created!\n",
"Running per image evaluation...\n",
"Evaluate annotation type *bbox*\n",
"DONE (t=92.11s).\n",
"Accumulating evaluation results...\n",
"DONE (t=13.24s).\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.492\n",
" Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.676\n",
" Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.534\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.318\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.541\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.633\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.376\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.617\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.670\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.493\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.723\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.812\n",
"Results saved to runs/test/exp\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "rc_KbFk0juX2"
},
"source": [
"## COCO test-dev2017\n",
"Download the [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7 GB, 40,000 images) to test model accuracy on the test-dev set (20,000 images). Results are saved to a `*.json` file which can be submitted to the evaluation server at https://competitions.codalab.org/competitions/20794."
]
},
{
"cell_type": "code",
"metadata": {
"id": "V0AJnSeCIHyJ"
},
"source": [
"# Download COCO test-dev2017\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
"!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n",
"%mv ./test2017 ./coco/images && mv ./coco ../ # move images to /coco and move /coco next to /yolov5"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "29GJXAP_lPrt"
},
"source": [
"# Run YOLOv5s on COCO test-dev2017 using --task test\n",
"!python test.py --weights yolov5s.pt --data coco.yaml --task test"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "VUOiNLtMP5aG"
},
"source": [
"# 3. Train\n",
"\n",
"Download [COCO128](https://www.kaggle.com/ultralytics/coco128), a small 128-image tutorial dataset, start TensorBoard, and train YOLOv5s from a pretrained checkpoint for 3 epochs (note actual training is typically much longer, around **300-1000 epochs**, depending on your dataset)."
]
},
{
"cell_type": "code",
"metadata": {
"id": "Knxi2ncxWffW",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 66,
"referenced_widgets": [
"cf1ab9fde7444d3e874fcd407ba8f0f8",
"9ee03f9c85f34155b2645e89c9211547",
"933ebc451c09490aadf71afbbb3dff2a",
"8e7c55cbca624432a84fa7ad8f3a4016",
"dd62d83b35d04a178840772e82bd2f2e",
"d5c4f3d1c8b046e3a163faaa6b3a51ab",
"78d1da8efb504b03878ca9ce5b404006",
"d28208ba1213436a93926a01d99d97ae"
]
},
"outputId": "59f9a94b-21e1-4626-f36a-a8e1b1e5c8f6"
},
"source": [
"# Download COCO128\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../ && rm tmp.zip"
],
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "cf1ab9fde7444d3e874fcd407ba8f0f8",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, max=22090455.0), HTML(value='')))"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "_pOkGLv1dMqh"
},
"source": [
"Train a YOLOv5s model on [COCO128](https://www.kaggle.com/ultralytics/coco128) with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml` (a from-scratch sketch follows this cell). Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and **COCO, COCO128, and VOC datasets are downloaded automatically** on first use.\n",
"\n",
"All training results are saved to `runs/train/` with incrementing run directories, e.g. `runs/train/exp2`, `runs/train/exp3`, etc.\n"
]
},
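{
"cell_type": "markdown",
"metadata": {},
"source": [
"A hedged sketch of the from-scratch variant described above: identical to the pretrained command further below, but with empty `--weights` and an explicit `--cfg`. It is left commented out to keep the recorded run order intact, and from-scratch training would need far more epochs than a 3-epoch demo."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Train from random initialization (sketch of the --weights '' --cfg option above)\n",
"# !python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights '' --cfg yolov5s.yaml"
],
"execution_count": null,
"outputs": []
},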
{
"cell_type": "code",
"metadata": {
"id": "bOy5KI2ncnWd"
},
"source": [
"# TensorBoard (optional)\n",
"%load_ext tensorboard\n",
"%tensorboard --logdir runs/train"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "2fLAV42oNb7M"
},
"source": [
"# Weights & Biases (optional)\n",
"%pip install -q wandb \n",
"!wandb login # use 'wandb disabled' or 'wandb enabled' to disable or enable"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "1NcFxRcFdJ_O",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "138f2d1d-364c-405a-cf13-ea91a2aff915"
},
"source": [
"# Train YOLOv5s on COCO128 for 3 epochs\n",
"!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache"
],
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"text": [
"Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
"\n",
"Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], local_rank=-1, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n",
"Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n",
"2020-11-20 11:45:17.042357: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1\n",
"Hyperparameters {'lr0': 0.01, 'lrf': 0.2, 'momentum': 0.937, 'weight_decay': 0.0005, 'warmup_epochs': 3.0, 'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1, 'box': 0.05, 'cls': 0.5, 'cls_pw': 1.0, 'obj': 1.0, 'obj_pw': 1.0, 'iou_t': 0.2, 'anchor_t': 4.0, 'fl_gamma': 0.0, 'hsv_h': 0.015, 'hsv_s': 0.7, 'hsv_v': 0.4, 'degrees': 0.0, 'translate': 0.1, 'scale': 0.5, 'shear': 0.0, 'perspective': 0.0, 'flipud': 0.0, 'fliplr': 0.5, 'mosaic': 1.0, 'mixup': 0.0}\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5s.pt to yolov5s.pt...\n",
"100% 14.5M/14.5M [00:01<00:00, 14.8MB/s]\n",
"\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 3520 models.common.Focus [3, 32, 3] \n",
" 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
" 2 -1 1 19904 models.common.BottleneckCSP [64, 64, 1] \n",
" 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
" 4 -1 1 161152 models.common.BottleneckCSP [128, 128, 3] \n",
" 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
" 6 -1 1 641792 models.common.BottleneckCSP [256, 256, 3] \n",
" 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
" 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n",
" 9 -1 1 1248768 models.common.BottleneckCSP [512, 512, 1, False] \n",
" 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
" 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 12 [-1, 6] 1 0 models.common.Concat [1] \n",
" 13 -1 1 378624 models.common.BottleneckCSP [512, 256, 1, False] \n",
" 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n",
" 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 16 [-1, 4] 1 0 models.common.Concat [1] \n",
" 17 -1 1 95104 models.common.BottleneckCSP [256, 128, 1, False] \n",
" 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n",
" 19 [-1, 14] 1 0 models.common.Concat [1] \n",
" 20 -1 1 313088 models.common.BottleneckCSP [256, 256, 1, False] \n",
" 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n",
" 22 [-1, 10] 1 0 models.common.Concat [1] \n",
" 23 -1 1 1248768 models.common.BottleneckCSP [512, 512, 1, False] \n",
" 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
"Model Summary: 283 layers, 7468157 parameters, 7468157 gradients\n",
"\n",
"Transferred 370/370 items from yolov5s.pt\n",
"Optimizer groups: 62 .bias, 70 conv.weight, 59 other\n",
"Scanning images: 100% 128/128 [00:00<00:00, 5395.63it/s]\n",
"Scanning labels ../coco128/labels/train2017.cache (126 found, 0 missing, 2 empty, 0 duplicate, for 128 images): 128it [00:00, 13972.28it/s]\n",
"Caching images (0.1GB): 100% 128/128 [00:00<00:00, 173.55it/s]\n",
"Scanning labels ../coco128/labels/train2017.cache (126 found, 0 missing, 2 empty, 0 duplicate, for 128 images): 128it [00:00, 8693.98it/s]\n",
"Caching images (0.1GB): 100% 128/128 [00:00<00:00, 133.30it/s]\n",
"NumExpr defaulting to 2 threads.\n",
"\n",
"Analyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
"Image sizes 640 train, 640 test\n",
"Using 2 dataloader workers\n",
"Logging results to runs/train/exp\n",
"Starting training for 3 epochs...\n",
"\n",
" Epoch gpu_mem box obj cls total targets img_size\n",
" 0/2 5.24G 0.04202 0.06745 0.01503 0.1245 194 640: 100% 8/8 [00:03<00:00, 2.01it/s]\n",
" Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:03<00:00, 2.40it/s]\n",
" all 128 929 0.404 0.758 0.701 0.45\n",
"\n",
" Epoch gpu_mem box obj cls total targets img_size\n",
" 1/2 5.12G 0.04461 0.05874 0.0169 0.1202 142 640: 100% 8/8 [00:01<00:00, 4.14it/s]\n",
" Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:01<00:00, 5.75it/s]\n",
" all 128 929 0.403 0.772 0.703 0.453\n",
"\n",
" Epoch gpu_mem box obj cls total targets img_size\n",
" 2/2 5.12G 0.04445 0.06545 0.01667 0.1266 149 640: 100% 8/8 [00:01<00:00, 4.15it/s]\n",
" Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:06<00:00, 1.18it/s]\n",
" all 128 929 0.395 0.767 0.702 0.452\n",
"Optimizer stripped from runs/train/exp/weights/last.pt, 15.2MB\n",
"3 epochs completed in 0.006 hours.\n",
"\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "15glLzbQx5u0"
},
"source": [
"# 4. Visualize"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "DLI1JmHU7B0l"
},
"source": [
"## Weights & Biases Logging 🌟 NEW\n",
"\n",
"[Weights & Biases](https://www.wandb.com/) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well as improved visibility and collaboration for teams. To enable W&B, run `pip install wandb`, then train normally (you will be guided through setup on first use). \n",
"\n",
"During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n",
"\n",
"<img src=\"https://user-images.githubusercontent.com/26833433/98184457-bd3da580-1f0a-11eb-8461-95d908a71893.jpg\" width=\"800\">"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "-WPvRbS5Swl6"
},
"source": [
"## Local Logging\n",
"\n",
"All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and test JPGs to see mosaics, labels, predictions, and augmentation effects. Note that a **Mosaic Dataloader** is used for training (shown below), a new dataloading concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)."
]
},
{
"cell_type": "code",
"metadata": {
"id": "riPdhraOTCO0"
},
"source": [
"from IPython.display import display # display() so all three images render, not just the last expression\n",
"display(Image(filename='runs/train/exp/train_batch0.jpg', width=800)) # train batch 0 mosaics and labels\n",
"display(Image(filename='runs/train/exp/test_batch0_labels.jpg', width=800)) # test batch 0 labels\n",
"display(Image(filename='runs/train/exp/test_batch0_pred.jpg', width=800)) # test batch 0 predictions"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "OYG4WFEnTVrI"
},
"source": [
"> <img src=\"https://user-images.githubusercontent.com/26833433/83667642-90fcb200-a583-11ea-8fa3-338bbf7da194.jpeg\" width=\"750\"> \n",
"`train_batch0.jpg` shows train batch 0 mosaics and labels\n",
"\n",
"> <img src=\"https://user-images.githubusercontent.com/26833433/83667626-8c37fe00-a583-11ea-997b-0923fe59b29b.jpeg\" width=\"750\"> \n",
"`test_batch0_labels.jpg` shows test batch 0 labels\n",
"\n",
"> <img src=\"https://user-images.githubusercontent.com/26833433/83667635-90641b80-a583-11ea-8075-606316cebb9c.jpeg\" width=\"750\"> \n",
"`test_batch0_pred.jpg` shows test batch 0 _predictions_\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "7KN5ghjE6ZWh"
},
"source": [
"Training losses and performance metrics are also logged to [TensorBoard](https://www.tensorflow.org/tensorboard) and a custom `results.txt` logfile which is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `--weights yolov5s.pt` (orange)."
]
},
{
"cell_type": "code",
"metadata": {
"id": "MDznIqPF7nk3"
},
"source": [
"from utils.plots import plot_results \n",
"plot_results(save_dir='runs/train/exp') # plot all results*.txt as results.png\n",
"Image(filename='runs/train/exp/results.png', width=800)"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "lfrEegCSW3fK"
},
"source": [
"<img src=\"https://user-images.githubusercontent.com/26833433/97808309-8182b180-1c66-11eb-8461-bffe1a79511d.png\" width=\"800\">\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Zelyeqbyt3GD"
},
"source": [
"# Environments\n",
"\n",
"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[cuDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
"\n",
"- **Google Colab Notebook** with free GPU: <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
"- **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5)\n",
"- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) \n",
"- **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker) (a hedged pull/run sketch follows this cell)\n"
]
},
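{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of the Docker route, assuming the image name above and standard Docker/NVIDIA runtime flags; it is commented out because it targets a local machine with Docker installed, not this Colab runtime. See the Docker Quickstart Guide linked above for the authoritative commands."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Docker sketch (run locally, not in Colab; flags are assumptions, see the Quickstart Guide)\n",
"# !docker pull ultralytics/yolov5:latest\n",
"# !docker run --ipc=host --gpus all -it ultralytics/yolov5:latest"
],
"execution_count": null,
"outputs": []
},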
{
"cell_type": "markdown",
"metadata": {
"id": "IEijrePND_2I"
},
"source": [
"# Appendix\n",
"\n",
"Optional extras below. Unit tests validate repo functionality and should be run on any PRs submitted.\n"
]
},
{
"cell_type": "code",
"metadata": {
"id": "gI6NoBev8Ib1"
},
"source": [
"# Re-clone repo\n",
"%cd ..\n",
"%rm -rf yolov5 && git clone https://github.com/ultralytics/yolov5\n",
"%cd yolov5"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "mcKoSIK2WSzj"
},
"source": [
"%%shell\n",
"# Reproduce (cell magic moved to the first line, where IPython requires it)\n",
"for x in yolov5s yolov5m yolov5l yolov5x; do\n",
" python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n",
" python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP\n",
"done"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "FGH0ZjkGjejy"
},
"source": [
"%%shell\n",
"# Unit tests (cell magic moved to the first line, where IPython requires it)\n",
"export PYTHONPATH=\"$PWD\" # to run *.py files in subdirectories\n",
"\n",
"rm -rf runs # remove runs/\n",
"for m in yolov5s; do # models\n",
" python train.py --weights $m.pt --epochs 3 --img 320 --device 0 # train pretrained\n",
" python train.py --weights '' --cfg $m.yaml --epochs 3 --img 320 --device 0 # train scratch\n",
" for d in 0 cpu; do # devices\n",
" python detect.py --weights $m.pt --device $d # detect official\n",
" python detect.py --weights runs/train/exp/weights/best.pt --device $d # detect custom\n",
" python test.py --weights $m.pt --device $d # test official\n",
" python test.py --weights runs/train/exp/weights/best.pt --device $d # test custom\n",
" done\n",
" python hubconf.py # hub\n",
" python models/yolo.py --cfg $m.yaml # inspect\n",
" python models/export.py --weights $m.pt --img 640 --batch 1 # export\n",
"done"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "gogI-kwi3Tye"
},
"source": [
"# Profile\n",
"from utils.torch_utils import profile \n",
"\n",
"m1 = lambda x: x * torch.sigmoid(x)\n",
"m2 = torch.nn.SiLU()\n",
"profile(x=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "BSgFCAcMbk1R"
},
"source": [
"# VOC\n",
"for b, m in zip([64, 48, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n",
" !python train.py --batch {b} --weights {m}.pt --data voc.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}"
],
"execution_count": null,
"outputs": []
}
]
}